/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2019 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, to call so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

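/* Depth counter used by the debug versions of the RTX cost hooks declared
   below (rs6000_debug_rtx_costs and rs6000_debug_address_cost) to control
   their debug output.  */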
static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV       = 0x001,   /* Use divide estimate.  */
  RECIP_DF_DIV       = 0x002,
  RECIP_V4SF_DIV     = 0x004,
  RECIP_V2DF_DIV     = 0x008,

  RECIP_SF_RSQRT     = 0x010,   /* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT     = 0x020,
  RECIP_V4SF_RSQRT   = 0x040,
  RECIP_V2DF_RSQRT   = 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE         = 0,
  RECIP_ALL          = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			| RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			| RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;   /* option name */
  unsigned int mask;    /* mask bits to set */
} recip_options[] = {
  { "all",    RECIP_ALL },
  { "none",   RECIP_NONE },
  { "div",    (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
	       | RECIP_V2DF_DIV) },
  { "divf",   (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",   (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",  (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
	       | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
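
/* For example, -mrecip=divd,rsqrtd sets RECIP_DF_DIV | RECIP_V2DF_DIV
   | RECIP_DF_RSQRT | RECIP_V2DF_RSQRT, enabling the double precision divide
   and reciprocal square root estimates for both scalar DFmode and vector
   V2DFmode.  */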

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",      PPC_PLATFORM_POWER9 },
  { "power8",      PPC_PLATFORM_POWER8 },
  { "power7",      PPC_PLATFORM_POWER7 },
  { "power6x",     PPC_PLATFORM_POWER6X },
  { "power6",      PPC_PLATFORM_POWER6 },
  { "power5+",     PPC_PLATFORM_POWER5_PLUS },
  { "power5",      PPC_PLATFORM_POWER5 },
  { "ppc970",      PPC_PLATFORM_PPC970 },
  { "power4",      PPC_PLATFORM_POWER4 },
  { "ppca2",       PPC_PLATFORM_PPCA2 },
  { "ppc476",      PPC_PLATFORM_PPC476 },
  { "ppc464",      PPC_PLATFORM_PPC464 },
  { "ppc440",      PPC_PLATFORM_PPC440 },
  { "ppc405",      PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
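
/* For example, __builtin_cpu_is ("power9") becomes a comparison of the
   AT_PLATFORM value that the C library caches in the TCB against
   PPC_PLATFORM_POWER9 above.  */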

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
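
/* For example, __builtin_cpu_supports ("vsx") tests the PPC_FEATURE_HAS_VSX
   bit of the AT_HWCAP value cached in the TCB, while entries with id 1 such
   as "arch_3_00" test AT_HWCAP2 instead.  */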

/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
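
/* A sketch of how the clone map is used: a function declared with, e.g.,
   __attribute__ ((target_clones ("cpu=power9", "default"))) gets a
   CLONE_DEFAULT body and a CLONE_ISA_3_00 body, and the generated resolver
   picks the latter when __builtin_cpu_supports ("arch_3_00") is true at load
   time.  */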


/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which modes can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* Quad offset is limited.  */
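
/* For example, a mode that can be loaded into a GPR with a reg+offset
   address would have RELOAD_REG_VALID | RELOAD_REG_OFFSET set in its
   RELOAD_REG_GPR mask.  */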

/* Masks of the valid addressing modes, based on register type, along with the
   reload insns to use for each mode.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;	/* INSN to reload for loading.  */
  enum insn_code reload_store;	/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;		/* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  return store_data_bypass_p (out_insn, in_insn);
}

\f
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),		/* mulsi_const */
  COSTS_N_INSNS (6/2),		/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),		/* divsi */
  COSTS_N_INSNS (70/2),		/* divdi */
  COSTS_N_INSNS (10/2),		/* fp */
  COSTS_N_INSNS (10/2),		/* dmul */
  COSTS_N_INSNS (74/2),		/* sdiv */
  COSTS_N_INSNS (74/2),		/* ddiv */
  128,				/* cache line size */
  32,				/* l1 cache */
  512,				/* l2 cache */
  6,				/* streams */
  0,				/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
static tree get_prev_label (tree);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",   1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
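
/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000, and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001.  */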
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1674
1675 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1676 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1677
1678 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1679 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1680 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1681 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1682 rs6000_builtin_support_vector_misalignment
1683 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1684 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1685 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1686 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1687 rs6000_builtin_vectorization_cost
1688 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1689 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1690 rs6000_preferred_simd_mode
1691 #undef TARGET_VECTORIZE_INIT_COST
1692 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1693 #undef TARGET_VECTORIZE_ADD_STMT_COST
1694 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1695 #undef TARGET_VECTORIZE_FINISH_COST
1696 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1697 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1698 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1699
1700 #undef TARGET_INIT_BUILTINS
1701 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1702 #undef TARGET_BUILTIN_DECL
1703 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1704
1705 #undef TARGET_FOLD_BUILTIN
1706 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1707 #undef TARGET_GIMPLE_FOLD_BUILTIN
1708 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1709
1710 #undef TARGET_EXPAND_BUILTIN
1711 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1712
1713 #undef TARGET_MANGLE_TYPE
1714 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1715
1716 #undef TARGET_INIT_LIBFUNCS
1717 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1718
1719 #if TARGET_MACHO
1720 #undef TARGET_BINDS_LOCAL_P
1721 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1722 #endif
1723
1724 #undef TARGET_MS_BITFIELD_LAYOUT_P
1725 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1726
1727 #undef TARGET_ASM_OUTPUT_MI_THUNK
1728 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1729
1730 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1731 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1732
1733 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1734 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1735
1736 #undef TARGET_REGISTER_MOVE_COST
1737 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1738 #undef TARGET_MEMORY_MOVE_COST
1739 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1740 #undef TARGET_CANNOT_COPY_INSN_P
1741 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1742 #undef TARGET_RTX_COSTS
1743 #define TARGET_RTX_COSTS rs6000_rtx_costs
1744 #undef TARGET_ADDRESS_COST
1745 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1746 #undef TARGET_INSN_COST
1747 #define TARGET_INSN_COST rs6000_insn_cost
1748
1749 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1750 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1751
1752 #undef TARGET_PROMOTE_FUNCTION_MODE
1753 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1754
1755 #undef TARGET_RETURN_IN_MEMORY
1756 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1757
1758 #undef TARGET_RETURN_IN_MSB
1759 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1760
1761 #undef TARGET_SETUP_INCOMING_VARARGS
1762 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1763
1764 /* Always use strict argument naming on rs6000. */
1765 #undef TARGET_STRICT_ARGUMENT_NAMING
1766 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1767 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1768 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1769 #undef TARGET_SPLIT_COMPLEX_ARG
1770 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1771 #undef TARGET_MUST_PASS_IN_STACK
1772 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1773 #undef TARGET_PASS_BY_REFERENCE
1774 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1775 #undef TARGET_ARG_PARTIAL_BYTES
1776 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1777 #undef TARGET_FUNCTION_ARG_ADVANCE
1778 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1779 #undef TARGET_FUNCTION_ARG
1780 #define TARGET_FUNCTION_ARG rs6000_function_arg
1781 #undef TARGET_FUNCTION_ARG_PADDING
1782 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1783 #undef TARGET_FUNCTION_ARG_BOUNDARY
1784 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1785
1786 #undef TARGET_BUILD_BUILTIN_VA_LIST
1787 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1788
1789 #undef TARGET_EXPAND_BUILTIN_VA_START
1790 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1791
1792 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1793 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1794
1795 #undef TARGET_EH_RETURN_FILTER_MODE
1796 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1797
1798 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1799 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1800
1801 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1802 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1803
1804 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1805 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1806
1807 #undef TARGET_FLOATN_MODE
1808 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1809
1810 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1811 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1812
1813 #undef TARGET_MD_ASM_ADJUST
1814 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1815
1816 #undef TARGET_OPTION_OVERRIDE
1817 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1818
1819 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1820 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1821 rs6000_builtin_vectorized_function
1822
1823 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1824 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1825 rs6000_builtin_md_vectorized_function
1826
1827 #undef TARGET_STACK_PROTECT_GUARD
1828 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1829
1830 #if !TARGET_MACHO
1831 #undef TARGET_STACK_PROTECT_FAIL
1832 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1833 #endif
1834
1835 #ifdef HAVE_AS_TLS
1836 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1837 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1838 #endif
1839
1840 /* Use a 32-bit anchor range. This leads to sequences like:
1841
1842 addis tmp,anchor,high
1843 add dest,tmp,low
1844
1845 where tmp itself acts as an anchor, and can be shared between
1846 accesses to the same 64k page. */
1847 #undef TARGET_MIN_ANCHOR_OFFSET
1848 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1849 #undef TARGET_MAX_ANCHOR_OFFSET
1850 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
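/* Worked example of the split used in the sequence above (an illustration
   of the usual high/low decomposition, not a quote of the implementation):
   for an anchor offset of 0x12345678,
   high = (0x12345678 + 0x8000) >> 16 == 0x1234 and
   low = 0x12345678 - (0x1234 << 16) == 0x5678, so the low part fits in a
   signed 16-bit immediate.  */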
1851 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1852 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1853 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1854 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1855
1856 #undef TARGET_BUILTIN_RECIPROCAL
1857 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1858
1859 #undef TARGET_SECONDARY_RELOAD
1860 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1861 #undef TARGET_SECONDARY_MEMORY_NEEDED
1862 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1863 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1864 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1865
1866 #undef TARGET_LEGITIMATE_ADDRESS_P
1867 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1868
1869 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1870 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1871
1872 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1873 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1874
1875 #undef TARGET_CAN_ELIMINATE
1876 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1877
1878 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1879 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1880
1881 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1882 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1883
1884 #undef TARGET_TRAMPOLINE_INIT
1885 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1886
1887 #undef TARGET_FUNCTION_VALUE
1888 #define TARGET_FUNCTION_VALUE rs6000_function_value
1889
1890 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1891 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1892
1893 #undef TARGET_OPTION_SAVE
1894 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1895
1896 #undef TARGET_OPTION_RESTORE
1897 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1898
1899 #undef TARGET_OPTION_PRINT
1900 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1901
1902 #undef TARGET_CAN_INLINE_P
1903 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1904
1905 #undef TARGET_SET_CURRENT_FUNCTION
1906 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1907
1908 #undef TARGET_LEGITIMATE_CONSTANT_P
1909 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1910
1911 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1912 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1913
1914 #undef TARGET_CAN_USE_DOLOOP_P
1915 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1916
1917 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1918 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1919
1920 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1921 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1922 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1923 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1924 #undef TARGET_UNWIND_WORD_MODE
1925 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1926
1927 #undef TARGET_OFFLOAD_OPTIONS
1928 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1929
1930 #undef TARGET_C_MODE_FOR_SUFFIX
1931 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1932
1933 #undef TARGET_INVALID_BINARY_OP
1934 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1935
1936 #undef TARGET_OPTAB_SUPPORTED_P
1937 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1938
1939 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1940 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1941
1942 #undef TARGET_COMPARE_VERSION_PRIORITY
1943 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1944
1945 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1946 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1947 rs6000_generate_version_dispatcher_body
1948
1949 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1950 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1951 rs6000_get_function_versions_dispatcher
1952
1953 #undef TARGET_OPTION_FUNCTION_VERSIONS
1954 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1955
1956 #undef TARGET_HARD_REGNO_NREGS
1957 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1958 #undef TARGET_HARD_REGNO_MODE_OK
1959 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1960
1961 #undef TARGET_MODES_TIEABLE_P
1962 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1963
1964 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1965 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1966 rs6000_hard_regno_call_part_clobbered
1967
1968 #undef TARGET_SLOW_UNALIGNED_ACCESS
1969 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1970
1971 #undef TARGET_CAN_CHANGE_MODE_CLASS
1972 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1973
1974 #undef TARGET_CONSTANT_ALIGNMENT
1975 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1976
1977 #undef TARGET_STARTING_FRAME_OFFSET
1978 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1979
1980 #if TARGET_ELF && RS6000_WEAK
1981 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1982 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1983 #endif
1984
1985 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1986 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1987
1988 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1989 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1990 \f
1991
1992 /* Processor table. */
1993 struct rs6000_ptt
1994 {
1995 const char *const name; /* Canonical processor name. */
1996 const enum processor_type processor; /* Processor type enum value. */
1997 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1998 };
1999
2000 static struct rs6000_ptt const processor_target_table[] =
2001 {
2002 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
2003 #include "rs6000-cpus.def"
2004 #undef RS6000_CPU
2005 };
2006
2007 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2008 name is invalid. */
2009
2010 static int
2011 rs6000_cpu_name_lookup (const char *name)
2012 {
2013 size_t i;
2014
2015 if (name != NULL)
2016 {
2017 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2018 if (! strcmp (name, processor_target_table[i].name))
2019 return (int)i;
2020 }
2021
2022 return -1;
2023 }
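/* Usage sketch (illustrative; "power9" is assumed to be one of the names
   defined in rs6000-cpus.def):
     rs6000_cpu_name_lookup ("power9")  -> table index of the power9 entry
     rs6000_cpu_name_lookup ("bogus")   -> -1
     rs6000_cpu_name_lookup (NULL)      -> -1  */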
2024
2025 \f
2026 /* Return number of consecutive hard regs needed starting at reg REGNO
2027 to hold something of mode MODE.
2028 This is ordinarily the length in words of a value of mode MODE
2029 but can be less for certain modes in special long registers.
2030
2031 POWER and PowerPC GPRs hold 32 bits worth;
2032 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2033
2034 static int
2035 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2036 {
2037 unsigned HOST_WIDE_INT reg_size;
2038
2039 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2040 128-bit floating point that can go in vector registers, which has VSX
2041 memory addressing. */
2042 if (FP_REGNO_P (regno))
2043 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2044 ? UNITS_PER_VSX_WORD
2045 : UNITS_PER_FP_WORD);
2046
2047 else if (ALTIVEC_REGNO_P (regno))
2048 reg_size = UNITS_PER_ALTIVEC_WORD;
2049
2050 else
2051 reg_size = UNITS_PER_WORD;
2052
2053 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2054 }
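/* The return expression is a ceiling division, illustrated with a couple
   of worked values (assuming the usual register widths): on a 32-bit
   target, reg_size == 4, so DFmode takes (8 + 4 - 1) / 4 == 2 GPRs, while
   a V4SImode vector in an Altivec register takes (16 + 16 - 1) / 16 == 1
   register.  */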
2055
2056 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2057 MODE. */
2058 static int
2059 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2060 {
2061 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2062
2063 if (COMPLEX_MODE_P (mode))
2064 mode = GET_MODE_INNER (mode);
2065
2066 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2067 register combinations, and we use PTImode where we need to deal with quad
2068 word memory operations. Don't allow quad words in the argument or frame
2069 pointer registers, just registers 0..31. */
2070 if (mode == PTImode)
2071 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2072 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2073 && ((regno & 1) == 0));
2074
2075 /* VSX registers that overlap the FPR registers are larger than on non-VSX
2076 implementations. Don't allow an item to be split between a FP register
2077 and an Altivec register. Allow TImode in all VSX registers if the user
2078 asked for it. */
2079 if (TARGET_VSX && VSX_REGNO_P (regno)
2080 && (VECTOR_MEM_VSX_P (mode)
2081 || FLOAT128_VECTOR_P (mode)
2082 || reg_addr[mode].scalar_in_vmx_p
2083 || mode == TImode
2084 || (TARGET_VADDUQM && mode == V1TImode)))
2085 {
2086 if (FP_REGNO_P (regno))
2087 return FP_REGNO_P (last_regno);
2088
2089 if (ALTIVEC_REGNO_P (regno))
2090 {
2091 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2092 return 0;
2093
2094 return ALTIVEC_REGNO_P (last_regno);
2095 }
2096 }
2097
2098 /* The GPRs can hold any mode, but values bigger than one register
2099 cannot go past R31. */
2100 if (INT_REGNO_P (regno))
2101 return INT_REGNO_P (last_regno);
2102
2103 /* The float registers (except for VSX vector modes) can only hold floating
2104 modes and DImode. */
2105 if (FP_REGNO_P (regno))
2106 {
2107 if (FLOAT128_VECTOR_P (mode))
2108 return false;
2109
2110 if (SCALAR_FLOAT_MODE_P (mode)
2111 && (mode != TDmode || (regno % 2) == 0)
2112 && FP_REGNO_P (last_regno))
2113 return 1;
2114
2115 if (GET_MODE_CLASS (mode) == MODE_INT)
2116 {
2117 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2118 return 1;
2119
2120 if (TARGET_P8_VECTOR && (mode == SImode))
2121 return 1;
2122
2123 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2124 return 1;
2125 }
2126
2127 return 0;
2128 }
2129
2130 /* The CR register can only hold CC modes. */
2131 if (CR_REGNO_P (regno))
2132 return GET_MODE_CLASS (mode) == MODE_CC;
2133
2134 if (CA_REGNO_P (regno))
2135 return mode == Pmode || mode == SImode;
2136
2137 /* AltiVec modes can only go in AltiVec registers. */
2138 if (ALTIVEC_REGNO_P (regno))
2139 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2140 || mode == V1TImode);
2141
2142 /* We cannot put non-VSX TImode or PTImode anywhere except general registers,
2143 and it must be able to fit within the register set. */
2144
2145 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2146 }
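/* Some illustrative consequences of the checks above: PTImode is valid
   only in an even/odd GPR pair; TDmode must start on an even-numbered
   FPR; SImode is allowed in an FPR only with power8 vector support
   (QImode/HImode need power9); CC modes are valid only in the condition
   registers.  */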
2147
2148 /* Implement TARGET_HARD_REGNO_NREGS. */
2149
2150 static unsigned int
2151 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2152 {
2153 return rs6000_hard_regno_nregs[mode][regno];
2154 }
2155
2156 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2157
2158 static bool
2159 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2160 {
2161 return rs6000_hard_regno_mode_ok_p[mode][regno];
2162 }
2163
2164 /* Implement TARGET_MODES_TIEABLE_P.
2165
2166 PTImode cannot tie with other modes because PTImode is restricted to even
2167 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2168 57744).
2169
2170 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2171 128-bit floating point on VSX systems ties with other vectors. */
2172
2173 static bool
2174 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2175 {
2176 if (mode1 == PTImode)
2177 return mode2 == PTImode;
2178 if (mode2 == PTImode)
2179 return false;
2180
2181 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2182 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2183 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2184 return false;
2185
2186 if (SCALAR_FLOAT_MODE_P (mode1))
2187 return SCALAR_FLOAT_MODE_P (mode2);
2188 if (SCALAR_FLOAT_MODE_P (mode2))
2189 return false;
2190
2191 if (GET_MODE_CLASS (mode1) == MODE_CC)
2192 return GET_MODE_CLASS (mode2) == MODE_CC;
2193 if (GET_MODE_CLASS (mode2) == MODE_CC)
2194 return false;
2195
2196 return true;
2197 }
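/* A few illustrative results of these rules: PTImode ties only with
   itself; V2DFmode ties with V4SImode (both Altivec/VSX vector modes) but
   not with DFmode; DFmode ties with SFmode (both scalar float); CCmode
   ties with CCUNSmode but not with SImode.  */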
2198
2199 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2200
2201 static bool
2202 rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
2203 unsigned int regno, machine_mode mode)
2204 {
2205 if (TARGET_32BIT
2206 && TARGET_POWERPC64
2207 && GET_MODE_SIZE (mode) > 4
2208 && INT_REGNO_P (regno))
2209 return true;
2210
2211 if (TARGET_VSX
2212 && FP_REGNO_P (regno)
2213 && GET_MODE_SIZE (mode) > 8
2214 && !FLOAT128_2REG_P (mode))
2215 return true;
2216
2217 return false;
2218 }
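/* Two illustrative cases: on a 32-bit ABI with 64-bit GPRs (-mpowerpc64),
   a DImode value in a GPR is part-clobbered because the ABI only
   preserves the low 32 bits across calls; with VSX, a 16-byte value in an
   FPR is part-clobbered because only the low 64 bits of the register are
   preserved.  */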
2219
2220 /* Print interesting facts about registers. */
2221 static void
2222 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2223 {
2224 int r, m;
2225
2226 for (r = first_regno; r <= last_regno; ++r)
2227 {
2228 const char *comma = "";
2229 int len;
2230
2231 if (first_regno == last_regno)
2232 fprintf (stderr, "%s:\t", reg_name);
2233 else
2234 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2235
2236 len = 8;
2237 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2238 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2239 {
2240 if (len > 70)
2241 {
2242 fprintf (stderr, ",\n\t");
2243 len = 8;
2244 comma = "";
2245 }
2246
2247 if (rs6000_hard_regno_nregs[m][r] > 1)
2248 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2249 rs6000_hard_regno_nregs[m][r]);
2250 else
2251 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2252
2253 comma = ", ";
2254 }
2255
2256 if (call_used_regs[r])
2257 {
2258 if (len > 70)
2259 {
2260 fprintf (stderr, ",\n\t");
2261 len = 8;
2262 comma = "";
2263 }
2264
2265 len += fprintf (stderr, "%s%s", comma, "call-used");
2266 comma = ", ";
2267 }
2268
2269 if (fixed_regs[r])
2270 {
2271 if (len > 70)
2272 {
2273 fprintf (stderr, ",\n\t");
2274 len = 8;
2275 comma = "";
2276 }
2277
2278 len += fprintf (stderr, "%s%s", comma, "fixed");
2279 comma = ", ";
2280 }
2281
2282 if (len > 70)
2283 {
2284 fprintf (stderr, ",\n\t");
2285 comma = "";
2286 }
2287
2288 len += fprintf (stderr, "%sreg-class = %s", comma,
2289 reg_class_names[(int)rs6000_regno_regclass[r]]);
2290 comma = ", ";
2291
2292 if (len > 70)
2293 {
2294 fprintf (stderr, ",\n\t");
2295 comma = "";
2296 }
2297
2298 fprintf (stderr, "%sregno = %d\n", comma, r);
2299 }
2300 }
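/* A line of the resulting dump looks roughly like this (illustrative;
   the exact mode list depends on the configuration):
     gr0:    SI, DI/2, SF, DF/2, call-used, reg-class = GENERAL_REGS, regno = 0  */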
2301
2302 static const char *
2303 rs6000_debug_vector_unit (enum rs6000_vector v)
2304 {
2305 const char *ret;
2306
2307 switch (v)
2308 {
2309 case VECTOR_NONE: ret = "none"; break;
2310 case VECTOR_ALTIVEC: ret = "altivec"; break;
2311 case VECTOR_VSX: ret = "vsx"; break;
2312 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2313 default: ret = "unknown"; break;
2314 }
2315
2316 return ret;
2317 }
2318
2319 /* Inner function printing just the address mask for a particular reload
2320 register class. */
2321 DEBUG_FUNCTION char *
2322 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2323 {
2324 static char ret[8];
2325 char *p = ret;
2326
2327 if ((mask & RELOAD_REG_VALID) != 0)
2328 *p++ = 'v';
2329 else if (keep_spaces)
2330 *p++ = ' ';
2331
2332 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2333 *p++ = 'm';
2334 else if (keep_spaces)
2335 *p++ = ' ';
2336
2337 if ((mask & RELOAD_REG_INDEXED) != 0)
2338 *p++ = 'i';
2339 else if (keep_spaces)
2340 *p++ = ' ';
2341
2342 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2343 *p++ = 'O';
2344 else if ((mask & RELOAD_REG_OFFSET) != 0)
2345 *p++ = 'o';
2346 else if (keep_spaces)
2347 *p++ = ' ';
2348
2349 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2350 *p++ = '+';
2351 else if (keep_spaces)
2352 *p++ = ' ';
2353
2354 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2355 *p++ = '+';
2356 else if (keep_spaces)
2357 *p++ = ' ';
2358
2359 if ((mask & RELOAD_REG_AND_M16) != 0)
2360 *p++ = '&';
2361 else if (keep_spaces)
2362 *p++ = ' ';
2363
2364 *p = '\0';
2365
2366 return ret;
2367 }
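/* For example, a mask with RELOAD_REG_VALID, RELOAD_REG_INDEXED and
   RELOAD_REG_OFFSET set renders as "v io   " (each feature in a fixed
   column) when KEEP_SPACES is true, and as "vio" when it is false.  */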
2368
2369 /* Print the address masks in a human readable fashion. */
2370 DEBUG_FUNCTION void
2371 rs6000_debug_print_mode (ssize_t m)
2372 {
2373 ssize_t rc;
2374 int spaces = 0;
2375
2376 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2377 for (rc = 0; rc < N_RELOAD_REG; rc++)
2378 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2379 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2380
2381 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2382 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2383 {
2384 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2385 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2386 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2387 spaces = 0;
2388 }
2389 else
2390 spaces += sizeof (" Reload=sl") - 1;
2391
2392 if (reg_addr[m].scalar_in_vmx_p)
2393 {
2394 fprintf (stderr, "%*s Upper=y", spaces, "");
2395 spaces = 0;
2396 }
2397 else
2398 spaces += sizeof (" Upper=y") - 1;
2399
2400 if (rs6000_vector_unit[m] != VECTOR_NONE
2401 || rs6000_vector_mem[m] != VECTOR_NONE)
2402 {
2403 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2404 spaces, "",
2405 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2406 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2407 }
2408
2409 fputs ("\n", stderr);
2410 }
2411
2412 #define DEBUG_FMT_ID "%-32s= "
2413 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2414 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2415 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
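/* For example, fprintf (stderr, DEBUG_FMT_D, "tls_size", 16) uses the
   format "%-32s= %d\n": the name is printed left-justified in a 32-column
   field, followed by "= 16".  */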
2416
2417 /* Print various interesting information with -mdebug=reg. */
2418 static void
2419 rs6000_debug_reg_global (void)
2420 {
2421 static const char *const tf[2] = { "false", "true" };
2422 const char *nl = (const char *)0;
2423 int m;
2424 size_t m1, m2, v;
2425 char costly_num[20];
2426 char nop_num[20];
2427 char flags_buffer[40];
2428 const char *costly_str;
2429 const char *nop_str;
2430 const char *trace_str;
2431 const char *abi_str;
2432 const char *cmodel_str;
2433 struct cl_target_option cl_opts;
2434
2435 /* Modes we want tieable information on. */
2436 static const machine_mode print_tieable_modes[] = {
2437 QImode,
2438 HImode,
2439 SImode,
2440 DImode,
2441 TImode,
2442 PTImode,
2443 SFmode,
2444 DFmode,
2445 TFmode,
2446 IFmode,
2447 KFmode,
2448 SDmode,
2449 DDmode,
2450 TDmode,
2451 V16QImode,
2452 V8HImode,
2453 V4SImode,
2454 V2DImode,
2455 V1TImode,
2456 V32QImode,
2457 V16HImode,
2458 V8SImode,
2459 V4DImode,
2460 V2TImode,
2461 V4SFmode,
2462 V2DFmode,
2463 V8SFmode,
2464 V4DFmode,
2465 CCmode,
2466 CCUNSmode,
2467 CCEQmode,
2468 };
2469
2470 /* Virtual regs we are interested in. */
2471 static const struct {
2472 int regno; /* register number. */
2473 const char *name; /* register name. */
2474 } virtual_regs[] = {
2475 { STACK_POINTER_REGNUM, "stack pointer:" },
2476 { TOC_REGNUM, "toc: " },
2477 { STATIC_CHAIN_REGNUM, "static chain: " },
2478 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2479 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2480 { ARG_POINTER_REGNUM, "arg pointer: " },
2481 { FRAME_POINTER_REGNUM, "frame pointer:" },
2482 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2483 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2484 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2485 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2486 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2487 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2488 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2489 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2490 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2491 };
2492
2493 fputs ("\nHard register information:\n", stderr);
2494 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2495 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2496 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2497 LAST_ALTIVEC_REGNO,
2498 "vs");
2499 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2500 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2501 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2502 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2503 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2504 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2505
2506 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2507 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2508 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2509
2510 fprintf (stderr,
2511 "\n"
2512 "d reg_class = %s\n"
2513 "f reg_class = %s\n"
2514 "v reg_class = %s\n"
2515 "wa reg_class = %s\n"
2516 "wb reg_class = %s\n"
2517 "wd reg_class = %s\n"
2518 "we reg_class = %s\n"
2519 "wf reg_class = %s\n"
2520 "wg reg_class = %s\n"
2521 "wh reg_class = %s\n"
2522 "wi reg_class = %s\n"
2523 "wj reg_class = %s\n"
2524 "wk reg_class = %s\n"
2525 "wl reg_class = %s\n"
2526 "wm reg_class = %s\n"
2527 "wo reg_class = %s\n"
2528 "wp reg_class = %s\n"
2529 "wq reg_class = %s\n"
2530 "wr reg_class = %s\n"
2531 "ws reg_class = %s\n"
2532 "wt reg_class = %s\n"
2533 "wu reg_class = %s\n"
2534 "wv reg_class = %s\n"
2535 "ww reg_class = %s\n"
2536 "wx reg_class = %s\n"
2537 "wy reg_class = %s\n"
2538 "wz reg_class = %s\n"
2539 "wA reg_class = %s\n"
2540 "wH reg_class = %s\n"
2541 "wI reg_class = %s\n"
2542 "wJ reg_class = %s\n"
2543 "wK reg_class = %s\n"
2544 "\n",
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2573 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2574 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2575 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2576 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2577
2578 nl = "\n";
2579 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2580 rs6000_debug_print_mode (m);
2581
2582 fputs ("\n", stderr);
2583
2584 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2585 {
2586 machine_mode mode1 = print_tieable_modes[m1];
2587 bool first_time = true;
2588
2589 nl = (const char *)0;
2590 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2591 {
2592 machine_mode mode2 = print_tieable_modes[m2];
2593 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2594 {
2595 if (first_time)
2596 {
2597 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2598 nl = "\n";
2599 first_time = false;
2600 }
2601
2602 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2603 }
2604 }
2605
2606 if (!first_time)
2607 fputs ("\n", stderr);
2608 }
2609
2610 if (nl)
2611 fputs (nl, stderr);
2612
2613 if (rs6000_recip_control)
2614 {
2615 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2616
2617 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2618 if (rs6000_recip_bits[m])
2619 {
2620 fprintf (stderr,
2621 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2622 GET_MODE_NAME (m),
2623 (RS6000_RECIP_AUTO_RE_P (m)
2624 ? "auto"
2625 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2626 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2627 ? "auto"
2628 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2629 }
2630
2631 fputs ("\n", stderr);
2632 }
2633
2634 if (rs6000_cpu_index >= 0)
2635 {
2636 const char *name = processor_target_table[rs6000_cpu_index].name;
2637 HOST_WIDE_INT flags
2638 = processor_target_table[rs6000_cpu_index].target_enable;
2639
2640 sprintf (flags_buffer, "-mcpu=%s flags", name);
2641 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2642 }
2643 else
2644 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2645
2646 if (rs6000_tune_index >= 0)
2647 {
2648 const char *name = processor_target_table[rs6000_tune_index].name;
2649 HOST_WIDE_INT flags
2650 = processor_target_table[rs6000_tune_index].target_enable;
2651
2652 sprintf (flags_buffer, "-mtune=%s flags", name);
2653 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2654 }
2655 else
2656 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2657
2658 cl_target_option_save (&cl_opts, &global_options);
2659 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2660 rs6000_isa_flags);
2661
2662 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2663 rs6000_isa_flags_explicit);
2664
2665 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2666 rs6000_builtin_mask);
2667
2668 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2669
2670 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2671 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2672
2673 switch (rs6000_sched_costly_dep)
2674 {
2675 case max_dep_latency:
2676 costly_str = "max_dep_latency";
2677 break;
2678
2679 case no_dep_costly:
2680 costly_str = "no_dep_costly";
2681 break;
2682
2683 case all_deps_costly:
2684 costly_str = "all_deps_costly";
2685 break;
2686
2687 case true_store_to_load_dep_costly:
2688 costly_str = "true_store_to_load_dep_costly";
2689 break;
2690
2691 case store_to_load_dep_costly:
2692 costly_str = "store_to_load_dep_costly";
2693 break;
2694
2695 default:
2696 costly_str = costly_num;
2697 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2698 break;
2699 }
2700
2701 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2702
2703 switch (rs6000_sched_insert_nops)
2704 {
2705 case sched_finish_regroup_exact:
2706 nop_str = "sched_finish_regroup_exact";
2707 break;
2708
2709 case sched_finish_pad_groups:
2710 nop_str = "sched_finish_pad_groups";
2711 break;
2712
2713 case sched_finish_none:
2714 nop_str = "sched_finish_none";
2715 break;
2716
2717 default:
2718 nop_str = nop_num;
2719 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2720 break;
2721 }
2722
2723 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2724
2725 switch (rs6000_sdata)
2726 {
2727 default:
2728 case SDATA_NONE:
2729 break;
2730
2731 case SDATA_DATA:
2732 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2733 break;
2734
2735 case SDATA_SYSV:
2736 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2737 break;
2738
2739 case SDATA_EABI:
2740 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2741 break;
2742
2743 }
2744
2745 switch (rs6000_traceback)
2746 {
2747 case traceback_default: trace_str = "default"; break;
2748 case traceback_none: trace_str = "none"; break;
2749 case traceback_part: trace_str = "part"; break;
2750 case traceback_full: trace_str = "full"; break;
2751 default: trace_str = "unknown"; break;
2752 }
2753
2754 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2755
2756 switch (rs6000_current_cmodel)
2757 {
2758 case CMODEL_SMALL: cmodel_str = "small"; break;
2759 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2760 case CMODEL_LARGE: cmodel_str = "large"; break;
2761 default: cmodel_str = "unknown"; break;
2762 }
2763
2764 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2765
2766 switch (rs6000_current_abi)
2767 {
2768 case ABI_NONE: abi_str = "none"; break;
2769 case ABI_AIX: abi_str = "aix"; break;
2770 case ABI_ELFv2: abi_str = "ELFv2"; break;
2771 case ABI_V4: abi_str = "V4"; break;
2772 case ABI_DARWIN: abi_str = "darwin"; break;
2773 default: abi_str = "unknown"; break;
2774 }
2775
2776 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2777
2778 if (rs6000_altivec_abi)
2779 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2780
2781 if (rs6000_darwin64_abi)
2782 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2783
2784 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2785 (TARGET_SOFT_FLOAT ? "true" : "false"));
2786
2787 if (TARGET_LINK_STACK)
2788 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2789
2790 if (TARGET_P8_FUSION)
2791 {
2792 char options[80];
2793
2794 strcpy (options, "power8");
2795 if (TARGET_P8_FUSION_SIGN)
2796 strcat (options, ", sign");
2797
2798 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2799 }
2800
2801 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2802 TARGET_SECURE_PLT ? "secure" : "bss");
2803 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2804 aix_struct_return ? "aix" : "sysv");
2805 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2806 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2807 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2808 tf[!!rs6000_align_branch_targets]);
2809 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2810 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2811 rs6000_long_double_type_size);
2812 if (rs6000_long_double_type_size > 64)
2813 {
2814 fprintf (stderr, DEBUG_FMT_S, "long double type",
2815 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2816 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2817 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2818 }
2819 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2820 (int)rs6000_sched_restricted_insns_priority);
2821 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2822 (int)END_BUILTINS);
2823 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2824 (int)RS6000_BUILTIN_COUNT);
2825
2826 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2827 (int)TARGET_FLOAT128_ENABLE_TYPE);
2828
2829 if (TARGET_VSX)
2830 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2831 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2832
2833 if (TARGET_DIRECT_MOVE_128)
2834 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2835 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2836 }
2837
2838 \f
2839 /* Update the addr mask bits in reg_addr to help secondary reload and the
2840 legitimate address support figure out the appropriate addressing to
2841 use. */
2842
2843 static void
2844 rs6000_setup_reg_addr_masks (void)
2845 {
2846 ssize_t rc, reg, m, nregs;
2847 addr_mask_type any_addr_mask, addr_mask;
2848
2849 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2850 {
2851 machine_mode m2 = (machine_mode) m;
2852 bool complex_p = false;
2853 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2854 size_t msize;
2855
2856 if (COMPLEX_MODE_P (m2))
2857 {
2858 complex_p = true;
2859 m2 = GET_MODE_INNER (m2);
2860 }
2861
2862 msize = GET_MODE_SIZE (m2);
2863
2864 /* SDmode is special in that we want to access it only via REG+REG
2865 addressing on power7 and above, so that we can use the LFIWZX and
2866 STFIWZX instructions to load it. */
2867 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2868
2869 any_addr_mask = 0;
2870 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2871 {
2872 addr_mask = 0;
2873 reg = reload_reg_map[rc].reg;
2874
2875 /* Can mode values go in the GPR/FPR/Altivec registers? */
2876 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2877 {
2878 bool small_int_vsx_p = (small_int_p
2879 && (rc == RELOAD_REG_FPR
2880 || rc == RELOAD_REG_VMX));
2881
2882 nregs = rs6000_hard_regno_nregs[m][reg];
2883 addr_mask |= RELOAD_REG_VALID;
2884
2885 /* Indicate if the mode takes more than 1 physical register. If
2886 it takes a single register, indicate it can do REG+REG
2887 addressing. Small integers in VSX registers can only do
2888 REG+REG addressing. */
2889 if (small_int_vsx_p)
2890 addr_mask |= RELOAD_REG_INDEXED;
2891 else if (nregs > 1 || m == BLKmode || complex_p)
2892 addr_mask |= RELOAD_REG_MULTIPLE;
2893 else
2894 addr_mask |= RELOAD_REG_INDEXED;
2895
2896 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2897 addressing. If we allow scalars into Altivec registers,
2898 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2899
2900 For VSX systems, we don't allow update addressing for
2901 DFmode/SFmode if those registers can go in both the
2902 traditional floating point registers and Altivec registers.
2903 The load/store instructions for the Altivec registers do not
2904 have update forms. If we allowed update addressing, it seems
2905 to break IV-OPT code using floating point if the index type is
2906 int instead of long (PR target/81550 and target/84042). */
2907
2908 if (TARGET_UPDATE
2909 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2910 && msize <= 8
2911 && !VECTOR_MODE_P (m2)
2912 && !FLOAT128_VECTOR_P (m2)
2913 && !complex_p
2914 && (m != E_DFmode || !TARGET_VSX)
2915 && (m != E_SFmode || !TARGET_P8_VECTOR)
2916 && !small_int_vsx_p)
2917 {
2918 addr_mask |= RELOAD_REG_PRE_INCDEC;
2919
2920 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2921 we don't allow PRE_MODIFY for some multi-register
2922 operations. */
2923 switch (m)
2924 {
2925 default:
2926 addr_mask |= RELOAD_REG_PRE_MODIFY;
2927 break;
2928
2929 case E_DImode:
2930 if (TARGET_POWERPC64)
2931 addr_mask |= RELOAD_REG_PRE_MODIFY;
2932 break;
2933
2934 case E_DFmode:
2935 case E_DDmode:
2936 if (TARGET_HARD_FLOAT)
2937 addr_mask |= RELOAD_REG_PRE_MODIFY;
2938 break;
2939 }
2940 }
2941 }
2942
2943 /* GPR and FPR registers can do REG+OFFSET addressing, except
2944 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2945 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2946 if ((addr_mask != 0) && !indexed_only_p
2947 && msize <= 8
2948 && (rc == RELOAD_REG_GPR
2949 || ((msize == 8 || m2 == SFmode)
2950 && (rc == RELOAD_REG_FPR
2951 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2952 addr_mask |= RELOAD_REG_OFFSET;
2953
2954 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2955 instructions are enabled. The offset for 128-bit VSX registers is
2956 only 12 bits. While GPRs can handle the full offset range, VSX
2957 registers can only handle the restricted range. */
2958 else if ((addr_mask != 0) && !indexed_only_p
2959 && msize == 16 && TARGET_P9_VECTOR
2960 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2961 || (m2 == TImode && TARGET_VSX)))
2962 {
2963 addr_mask |= RELOAD_REG_OFFSET;
2964 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2965 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2966 }
2967
2968 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2969 addressing on 128-bit types. */
2970 if (rc == RELOAD_REG_VMX && msize == 16
2971 && (addr_mask & RELOAD_REG_VALID) != 0)
2972 addr_mask |= RELOAD_REG_AND_M16;
2973
2974 reg_addr[m].addr_mask[rc] = addr_mask;
2975 any_addr_mask |= addr_mask;
2976 }
2977
2978 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2979 }
2980 }
2981
2982 \f
2983 /* Initialize the various global tables that are based on register size. */
2984 static void
2985 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2986 {
2987 ssize_t r, m, c;
2988 int align64;
2989 int align32;
2990
2991 /* Precalculate REGNO_REG_CLASS. */
2992 rs6000_regno_regclass[0] = GENERAL_REGS;
2993 for (r = 1; r < 32; ++r)
2994 rs6000_regno_regclass[r] = BASE_REGS;
2995
2996 for (r = 32; r < 64; ++r)
2997 rs6000_regno_regclass[r] = FLOAT_REGS;
2998
2999 for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
3000 rs6000_regno_regclass[r] = NO_REGS;
3001
3002 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3003 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3004
3005 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3006 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3007 rs6000_regno_regclass[r] = CR_REGS;
3008
3009 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3010 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3011 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3012 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3013 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3014 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3015 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3016 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3017 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3018 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3019
3020 /* Precalculate the mapping from register class to the simpler reload
3021 register class. We don't need all the register classes that are
3022 combinations of different classes, just the simple ones that have constraint letters. */
3023 for (c = 0; c < N_REG_CLASSES; c++)
3024 reg_class_to_reg_type[c] = NO_REG_TYPE;
3025
3026 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3027 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3028 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3029 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3030 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3031 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3032 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3033 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3034 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3035 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3036
3037 if (TARGET_VSX)
3038 {
3039 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3040 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3041 }
3042 else
3043 {
3044 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3045 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3046 }
3047
3048 /* Precalculate the valid memory formats as well as the vector information;
3049 this must be set up before the rs6000_hard_regno_nregs_internal calls
3050 below. */
3051 gcc_assert ((int)VECTOR_NONE == 0);
3052 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3053 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3054
3055 gcc_assert ((int)CODE_FOR_nothing == 0);
3056 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3057
3058 gcc_assert ((int)NO_REGS == 0);
3059 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3060
3061 /* The VSX hardware allows native alignment for vectors, but this controls
3062 whether the compiler believes it can use native alignment or must still use 128-bit alignment. */
3063 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3064 {
3065 align64 = 64;
3066 align32 = 32;
3067 }
3068 else
3069 {
3070 align64 = 128;
3071 align32 = 128;
3072 }
3073
3074 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3075 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3076 if (TARGET_FLOAT128_TYPE)
3077 {
3078 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3079 rs6000_vector_align[KFmode] = 128;
3080
3081 if (FLOAT128_IEEE_P (TFmode))
3082 {
3083 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3084 rs6000_vector_align[TFmode] = 128;
3085 }
3086 }
3087
3088 /* V2DF mode, VSX only. */
3089 if (TARGET_VSX)
3090 {
3091 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3092 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3093 rs6000_vector_align[V2DFmode] = align64;
3094 }
3095
3096 /* V4SF mode, either VSX or Altivec. */
3097 if (TARGET_VSX)
3098 {
3099 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3100 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3101 rs6000_vector_align[V4SFmode] = align32;
3102 }
3103 else if (TARGET_ALTIVEC)
3104 {
3105 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3106 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3107 rs6000_vector_align[V4SFmode] = align32;
3108 }
3109
3110 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3111 and stores. */
3112 if (TARGET_ALTIVEC)
3113 {
3114 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3115 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3116 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3117 rs6000_vector_align[V4SImode] = align32;
3118 rs6000_vector_align[V8HImode] = align32;
3119 rs6000_vector_align[V16QImode] = align32;
3120
3121 if (TARGET_VSX)
3122 {
3123 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3124 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3125 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3126 }
3127 else
3128 {
3129 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3130 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3131 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3132 }
3133 }
3134
3135 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3136 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3137 if (TARGET_VSX)
3138 {
3139 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3140 rs6000_vector_unit[V2DImode]
3141 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3142 rs6000_vector_align[V2DImode] = align64;
3143
3144 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3145 rs6000_vector_unit[V1TImode]
3146 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3147 rs6000_vector_align[V1TImode] = 128;
3148 }
3149
3150 /* DFmode, see if we want to use the VSX unit. Memory is handled
3151 differently, so don't set rs6000_vector_mem. */
3152 if (TARGET_VSX)
3153 {
3154 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3155 rs6000_vector_align[DFmode] = 64;
3156 }
3157
3158 /* SFmode, see if we want to use the VSX unit. */
3159 if (TARGET_P8_VECTOR)
3160 {
3161 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3162 rs6000_vector_align[SFmode] = 32;
3163 }
3164
3165 /* Allow TImode in VSX registers and set the VSX memory macros. */
3166 if (TARGET_VSX)
3167 {
3168 rs6000_vector_mem[TImode] = VECTOR_VSX;
3169 rs6000_vector_align[TImode] = align64;
3170 }
3171
3172 /* Register class constraints for the constraints that depend on compile
3173 switches. When the VSX code was added, different constraints were added
3174 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3175 of the VSX registers are used. The register classes for scalar floating
3176 point types are set based on whether we allow that type into the upper
3177 (Altivec) registers. GCC has register classes to target the Altivec
3178 registers for load/store operations, to select using a VSX memory
3179 operation instead of the traditional floating point operation. The
3180 constraints are:
3181
3182 d - Register class to use with traditional DFmode instructions.
3183 f - Register class to use with traditional SFmode instructions.
3184 v - Altivec register.
3185 wa - Any VSX register.
3186 wc - Reserved to represent individual CR bits (used in LLVM).
3187 wd - Preferred register class for V2DFmode.
3188 wf - Preferred register class for V4SFmode.
3189 wg - Float register for power6x move insns.
3190 wh - FP register for direct move instructions.
3191 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3192 wj - FP or VSX register to hold 64-bit integers for direct moves.
3193 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3194 wl - Float register if we can do 32-bit signed int loads.
3195 wm - VSX register for ISA 2.07 direct move operations.
3196 wn - always NO_REGS.
3197 wr - GPR if 64-bit mode is permitted.
3198 ws - Register class to do ISA 2.06 DF operations.
3199 wt - VSX register for TImode in VSX registers.
3200 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3201 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3202 ww - Register class to do SF conversions in with VSX operations.
3203 wx - Float register if we can do 32-bit int stores.
3204 wy - Register class to do ISA 2.07 SF operations.
3205 wz - Float register if we can do 32-bit unsigned int loads.
3206 wH - Altivec register if SImode is allowed in VSX registers.
3207 wI - Float register if SImode is allowed in VSX registers.
3208 wJ - Float register if QImode/HImode are allowed in VSX registers.
3209 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3210
3211 if (TARGET_HARD_FLOAT)
3212 {
3213 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3214 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3215 }
3216
3217 if (TARGET_VSX)
3218 {
3219 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3220 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3221 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3222 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3223 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3224 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3225 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3226 }
3227
3228 /* Add conditional constraints based on various options, to allow us to
3229 collapse multiple insn patterns. */
3230 if (TARGET_ALTIVEC)
3231 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3232
3233 if (TARGET_MFPGPR) /* DFmode */
3234 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3235
3236 if (TARGET_LFIWAX)
3237 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3238
3239 if (TARGET_DIRECT_MOVE)
3240 {
3241 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3242 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3243 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3244 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3245 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3246 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3247 }
3248
3249 if (TARGET_POWERPC64)
3250 {
3251 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3252 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3253 }
3254
3255 if (TARGET_P8_VECTOR) /* SFmode */
3256 {
3257 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3258 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3259 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3260 }
3261 else if (TARGET_VSX)
3262 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3263
3264 if (TARGET_STFIWX)
3265 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3266
3267 if (TARGET_LFIWZX)
3268 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3269
3270 if (TARGET_FLOAT128_TYPE)
3271 {
3272 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3273 if (FLOAT128_IEEE_P (TFmode))
3274 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3275 }
3276
3277 if (TARGET_P9_VECTOR)
3278 {
3279 /* Support for new D-form instructions. */
3280 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3281
3282 /* Support for ISA 3.0 (power9) vectors. */
3283 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3284 }
3285
3286 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3287 if (TARGET_DIRECT_MOVE_128)
3288 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3289
3290 /* Support small integers in VSX registers. */
3291 if (TARGET_P8_VECTOR)
3292 {
3293 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3294 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3295 if (TARGET_P9_VECTOR)
3296 {
3297 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3298 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3299 }
3300 }
3301
3302 /* Set up the reload helper and direct move functions. */
3303 if (TARGET_VSX || TARGET_ALTIVEC)
3304 {
3305 if (TARGET_64BIT)
3306 {
3307 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3308 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3309 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3310 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3311 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3312 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3313 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3314 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3315 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3316 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3317 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3318 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3319 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3320 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3321 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3322 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3323 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3324 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3325 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3326 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3327
3328 if (FLOAT128_VECTOR_P (KFmode))
3329 {
3330 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3331 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3332 }
3333
3334 if (FLOAT128_VECTOR_P (TFmode))
3335 {
3336 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3337 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3338 }
3339
3340 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3341 available. */
3342 if (TARGET_NO_SDMODE_STACK)
3343 {
3344 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3345 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3346 }
3347
3348 if (TARGET_VSX)
3349 {
3350 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3351 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3352 }
3353
3354 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3355 {
3356 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3357 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3358 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3359 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3360 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3361 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3362 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3363 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3364 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3365
3366 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3367 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3368 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3369 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3370 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3371 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3372 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3373 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3374 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3375
3376 if (FLOAT128_VECTOR_P (KFmode))
3377 {
3378 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3379 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3380 }
3381
3382 if (FLOAT128_VECTOR_P (TFmode))
3383 {
3384 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3385 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3386 }
3387 }
3388 }
3389 else
3390 {
3391 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3392 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3393 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3394 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3395 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3396 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3397 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3398 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3399 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3400 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3401 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3402 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3403 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3404 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3405 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3406 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3407 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3408 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3409 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3410 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3411
3412 if (FLOAT128_VECTOR_P (KFmode))
3413 {
3414 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3415 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3416 }
3417
3418 if (FLOAT128_IEEE_P (TFmode))
3419 {
3420 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3421 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3422 }
3423
3424 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3425 available. */
3426 if (TARGET_NO_SDMODE_STACK)
3427 {
3428 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3429 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3430 }
3431
3432 if (TARGET_VSX)
3433 {
3434 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3435 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3436 }
3437
3438 if (TARGET_DIRECT_MOVE)
3439 {
3440 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3441 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3442 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3443 }
3444 }
3445
3446 reg_addr[DFmode].scalar_in_vmx_p = true;
3447 reg_addr[DImode].scalar_in_vmx_p = true;
3448
3449 if (TARGET_P8_VECTOR)
3450 {
3451 reg_addr[SFmode].scalar_in_vmx_p = true;
3452 reg_addr[SImode].scalar_in_vmx_p = true;
3453
3454 if (TARGET_P9_VECTOR)
3455 {
3456 reg_addr[HImode].scalar_in_vmx_p = true;
3457 reg_addr[QImode].scalar_in_vmx_p = true;
3458 }
3459 }
3460 }
3461
3462 /* Precalculate HARD_REGNO_NREGS. */
3463 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3464 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3465 rs6000_hard_regno_nregs[m][r]
3466 = rs6000_hard_regno_nregs_internal (r, (machine_mode) m);
3467
3468 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3469 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3470 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3471 rs6000_hard_regno_mode_ok_p[m][r]
3472 = rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m);
3473
3474 /* Precalculate CLASS_MAX_NREGS sizes. */
3475 for (c = 0; c < LIM_REG_CLASSES; ++c)
3476 {
3477 int reg_size;
3478
3479 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3480 reg_size = UNITS_PER_VSX_WORD;
3481
3482 else if (c == ALTIVEC_REGS)
3483 reg_size = UNITS_PER_ALTIVEC_WORD;
3484
3485 else if (c == FLOAT_REGS)
3486 reg_size = UNITS_PER_FP_WORD;
3487
3488 else
3489 reg_size = UNITS_PER_WORD;
3490
3491 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3492 {
3493 machine_mode m2 = (machine_mode)m;
3494 int reg_size2 = reg_size;
3495
3496 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3497 in VSX. */
3498 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3499 reg_size2 = UNITS_PER_FP_WORD;
3500
3501 rs6000_class_max_nregs[m][c]
3502 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3503 }
3504 }
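/* A worked example of the computation above, assuming the usual widths
   (UNITS_PER_VSX_WORD == 16, UNITS_PER_FP_WORD == 8): V2DFmode in VSX_REGS
   needs (16 + 16 - 1) / 16 == 1 register, while TDmode in VSX_REGS keeps
   reg_size2 at 8 because of FLOAT128_2REG_P and so needs
   (16 + 8 - 1) / 8 == 2 registers. */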
3505
3506 /* Calculate the modes for which to automatically generate code using the
3507 reciprocal divide and square root instructions. In the future, possibly
3508 automatically generate the instructions even if the user did not specify
3509 -mrecip. The older machines' double precision reciprocal sqrt estimate is
3510 not accurate enough. */
3511 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3512 if (TARGET_FRES)
3513 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3514 if (TARGET_FRE)
3515 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3516 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3517 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3518 if (VECTOR_UNIT_VSX_P (V2DFmode))
3519 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3520
3521 if (TARGET_FRSQRTES)
3522 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3523 if (TARGET_FRSQRTE)
3524 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3525 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3526 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3527 if (VECTOR_UNIT_VSX_P (V2DFmode))
3528 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3529
3530 if (rs6000_recip_control)
3531 {
3532 if (!flag_finite_math_only)
3533 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3534 "-ffast-math");
3535 if (flag_trapping_math)
3536 warning (0, "%qs requires %qs or %qs", "-mrecip",
3537 "-fno-trapping-math", "-ffast-math");
3538 if (!flag_reciprocal_math)
3539 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3540 "-ffast-math");
3541 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3542 {
3543 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3544 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3545 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3546
3547 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3548 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3549 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3550
3551 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3552 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3553 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3554
3555 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3556 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3557 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3558
3559 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3560 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3561 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3562
3563 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3564 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3565 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3566
3567 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3568 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3569 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3570
3571 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3572 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3573 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3574 }
3575 }
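/* As a sketch of what the AUTO_RE bits enable (the actual expansion is done
   by rs6000_emit_swdiv later in this file): for SFmode, x / y becomes
   roughly

     e = fres (y);                  // hardware reciprocal estimate
     e = e + e * (1.0f - y * e);    // one Newton-Raphson refinement step
     x * e

   which is why the fast-math style guards above are required. */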
3576
3577 /* Update the addr mask bits in reg_addr to help secondary reload and the
3578 GO_IF_LEGITIMATE_ADDRESS support figure out the appropriate addressing to
3579 use. */
3580 rs6000_setup_reg_addr_masks ();
3581
3582 if (global_init_p || TARGET_DEBUG_TARGET)
3583 {
3584 if (TARGET_DEBUG_REG)
3585 rs6000_debug_reg_global ();
3586
3587 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3588 fprintf (stderr,
3589 "SImode variable mult cost = %d\n"
3590 "SImode constant mult cost = %d\n"
3591 "SImode short constant mult cost = %d\n"
3592 "DImode multipliciation cost = %d\n"
3593 "SImode division cost = %d\n"
3594 "DImode division cost = %d\n"
3595 "Simple fp operation cost = %d\n"
3596 "DFmode multiplication cost = %d\n"
3597 "SFmode division cost = %d\n"
3598 "DFmode division cost = %d\n"
3599 "cache line size = %d\n"
3600 "l1 cache size = %d\n"
3601 "l2 cache size = %d\n"
3602 "simultaneous prefetches = %d\n"
3603 "\n",
3604 rs6000_cost->mulsi,
3605 rs6000_cost->mulsi_const,
3606 rs6000_cost->mulsi_const9,
3607 rs6000_cost->muldi,
3608 rs6000_cost->divsi,
3609 rs6000_cost->divdi,
3610 rs6000_cost->fp,
3611 rs6000_cost->dmul,
3612 rs6000_cost->sdiv,
3613 rs6000_cost->ddiv,
3614 rs6000_cost->cache_line_size,
3615 rs6000_cost->l1_cache_size,
3616 rs6000_cost->l2_cache_size,
3617 rs6000_cost->simultaneous_prefetches);
3618 }
3619 }
3620
3621 #if TARGET_MACHO
3622 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3623
3624 static void
3625 darwin_rs6000_override_options (void)
3626 {
3627 /* The Darwin ABI always includes AltiVec, so it can't be (validly)
3628 turned off. */
3629 rs6000_altivec_abi = 1;
3630 TARGET_ALTIVEC_VRSAVE = 1;
3631 rs6000_current_abi = ABI_DARWIN;
3632
3633 if (DEFAULT_ABI == ABI_DARWIN
3634 && TARGET_64BIT)
3635 darwin_one_byte_bool = 1;
3636
3637 if (TARGET_64BIT && ! TARGET_POWERPC64)
3638 {
3639 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3640 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3641 }
3642 if (flag_mkernel)
3643 {
3644 rs6000_default_long_calls = 1;
3645 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3646 }
3647
3648 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3649 Altivec. */
3650 if (!flag_mkernel && !flag_apple_kext
3651 && TARGET_64BIT
3652 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3653 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3654
3655 /* Unless the user (not the configurer) has explicitly overridden
3656 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to the
3657 G4 unless targeting the kernel. */
3658 if (!flag_mkernel
3659 && !flag_apple_kext
3660 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3661 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3662 && ! global_options_set.x_rs6000_cpu_index)
3663 {
3664 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3665 }
3666 }
3667 #endif
3668
3669 /* If not otherwise specified by a target, make 'long double' equivalent to
3670 'double'. */
3671
3672 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3673 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3674 #endif
3675
3676 /* Return the builtin mask of the various options in use that could affect
3677 which builtins are available. In the past we used target_flags, but we've
3678 run out of bits, and some options are no longer in target_flags. */
3679
3680 HOST_WIDE_INT
3681 rs6000_builtin_mask_calculate (void)
3682 {
3683 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3684 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3685 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3686 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3687 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3688 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3689 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3690 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3691 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3692 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3693 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3694 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3695 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3696 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3697 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3698 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3699 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3700 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3701 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3702 | ((TARGET_LONG_DOUBLE_128
3703 && TARGET_HARD_FLOAT
3704 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3705 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3706 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3707 }
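/* Callers typically test this mask against the mask a builtin requires; a
   hedged sketch of the idiom (bif_mask is an illustrative name, not a real
   one):

     HOST_WIDE_INT builtin_mask = rs6000_builtin_mask_calculate ();
     if ((bif_mask & builtin_mask) != bif_mask)
       ... reject or diagnose the builtin ...  */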
3708
3709 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3710 to clobber the XER[CA] bit because clobbering that bit without telling
3711 the compiler worked just fine with versions of GCC before GCC 5, and
3712 breaking a lot of older code in ways that are hard to track down is
3713 not such a great idea. */
3714
3715 static rtx_insn *
3716 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3717 vec<const char *> &/*constraints*/,
3718 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3719 {
3720 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3721 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3722 return NULL;
3723 }
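/* Consequently, user code such as

     asm ("addic %0,%1,-1\n\taddze %0,%0" : "=r" (x) : "r" (x));

   remains correct even though it clobbers XER[CA] without declaring it. */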
3724
3725 /* Override command line options.
3726
3727 Combine build-specific configuration information with options
3728 specified on the command line to set various state variables which
3729 influence code generation, optimization, and expansion of built-in
3730 functions. Assure that command-line configuration preferences are
3731 compatible with each other and with the build configuration; issue
3732 warnings while adjusting configuration or error messages while
3733 rejecting configuration.
3734
3735 Upon entry to this function:
3736
3737 This function is called once at the beginning of
3738 compilation, and then again at the start and end of compiling
3739 each section of code that has a different configuration, as
3740 indicated, for example, by adding the
3741
3742 __attribute__((__target__("cpu=power9")))
3743
3744 qualifier to a function definition or, for example, by bracketing
3745 code between
3746
3747 #pragma GCC target("altivec")
3748
3749 and
3750
3751 #pragma GCC reset_options
3752
3753 directives. Parameter global_init_p is true for the initial
3754 invocation, which initializes global variables, and false for all
3755 subsequent invocations.
3756
3757
3758 Various global state information is assumed to be valid. This
3759 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3760 default CPU specified at build configure time, TARGET_DEFAULT,
3761 representing the default set of option flags for the default
3762 target, and global_options_set.x_rs6000_isa_flags, representing
3763 which options were requested on the command line.
3764
3765 Upon return from this function:
3766
3767 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3768 was set by name on the command line. Additionally, if certain
3769 attributes are automatically enabled or disabled by this function
3770 in order to assure compatibility between options and
3771 configuration, the flags associated with those attributes are
3772 also set. By setting these "explicit bits", we avoid the risk
3773 that other code might accidentally overwrite these particular
3774 attributes with "default values".
3775
3776 The various bits of rs6000_isa_flags are set to indicate the
3777 target options that have been selected for the most current
3778 compilation efforts. This has the effect of also turning on the
3779 associated TARGET_XXX values since these are macros which are
3780 generally defined to test the corresponding bit of the
3781 rs6000_isa_flags variable.
3782
3783 The variable rs6000_builtin_mask is set to represent the target
3784 options for the most current compilation efforts, consistent with
3785 the current contents of rs6000_isa_flags. This variable controls
3786 expansion of built-in functions.
3787
3788 Various other global variables and fields of global structures
3789 (over 50 in all) are initialized to reflect the desired options
3790 for the most current compilation efforts. */
3791
3792 static bool
3793 rs6000_option_override_internal (bool global_init_p)
3794 {
3795 bool ret = true;
3796
3797 HOST_WIDE_INT set_masks;
3798 HOST_WIDE_INT ignore_masks;
3799 int cpu_index = -1;
3800 int tune_index;
3801 struct cl_target_option *main_target_opt
3802 = ((global_init_p || target_option_default_node == NULL)
3803 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3804
3805 /* Print defaults. */
3806 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3807 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3808
3809 /* Remember the explicit arguments. */
3810 if (global_init_p)
3811 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3812
3813 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3814 library functions, so warn about it. The flag may be useful for
3815 performance studies from time to time though, so don't disable it
3816 entirely. */
3817 if (global_options_set.x_rs6000_alignment_flags
3818 && rs6000_alignment_flags == MASK_ALIGN_POWER
3819 && DEFAULT_ABI == ABI_DARWIN
3820 && TARGET_64BIT)
3821 warning (0, "%qs is not supported for 64-bit Darwin;"
3822 " it is incompatible with the installed C and C++ libraries",
3823 "-malign-power");
3824
3825 /* Numerous experiments show that IRA-based loop pressure
3826 calculation works better for RTL loop invariant motion on targets
3827 with enough (>= 32) registers. It is an expensive optimization,
3828 so it is enabled only at -O3 and above, for peak performance. */
3829 if (optimize >= 3 && global_init_p
3830 && !global_options_set.x_flag_ira_loop_pressure)
3831 flag_ira_loop_pressure = 1;
3832
3833 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3834 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3835 options were already specified. */
3836 if (flag_sanitize & SANITIZE_USER_ADDRESS
3837 && !global_options_set.x_flag_asynchronous_unwind_tables)
3838 flag_asynchronous_unwind_tables = 1;
3839
3840 /* Set the pointer size. */
3841 if (TARGET_64BIT)
3842 {
3843 rs6000_pmode = DImode;
3844 rs6000_pointer_size = 64;
3845 }
3846 else
3847 {
3848 rs6000_pmode = SImode;
3849 rs6000_pointer_size = 32;
3850 }
3851
3852 /* Some OSs don't support saving the high part of 64-bit registers on context
3853 switch. Other OSs don't support saving Altivec registers. On those OSs,
3854 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3855 if the user wants either, the user must explicitly specify them and we
3856 won't interfere with the user's specification. */
3857
3858 set_masks = POWERPC_MASKS;
3859 #ifdef OS_MISSING_POWERPC64
3860 if (OS_MISSING_POWERPC64)
3861 set_masks &= ~OPTION_MASK_POWERPC64;
3862 #endif
3863 #ifdef OS_MISSING_ALTIVEC
3864 if (OS_MISSING_ALTIVEC)
3865 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3866 | OTHER_VSX_VECTOR_MASKS);
3867 #endif
3868
3869 /* Don't override by the processor default if given explicitly. */
3870 set_masks &= ~rs6000_isa_flags_explicit;
3871
3872 if (global_init_p && rs6000_dejagnu_cpu_index >= 0)
3873 rs6000_cpu_index = rs6000_dejagnu_cpu_index;
3874
3875 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3876 the cpu in a target attribute or pragma, but did not specify a tuning
3877 option, use the cpu for the tuning option rather than the option specified
3878 with -mtune on the command line. Process a '--with-cpu' configuration
3879 request as an implicit --cpu. */
3880 if (rs6000_cpu_index >= 0)
3881 cpu_index = rs6000_cpu_index;
3882 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3883 cpu_index = main_target_opt->x_rs6000_cpu_index;
3884 else if (OPTION_TARGET_CPU_DEFAULT)
3885 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3886
3887 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3888 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3889 with those from the cpu, except for options that were explicitly set. If
3890 we don't have a cpu, do not override the target bits set in
3891 TARGET_DEFAULT. */
3892 if (cpu_index >= 0)
3893 {
3894 rs6000_cpu_index = cpu_index;
3895 rs6000_isa_flags &= ~set_masks;
3896 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3897 & set_masks);
3898 }
3899 else
3900 {
3901 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3902 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3903 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3904 to using rs6000_isa_flags, the initialization moved here.
3905
3906 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3907 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3908 HOST_WIDE_INT flags;
3909 if (TARGET_DEFAULT)
3910 flags = TARGET_DEFAULT;
3911 else
3912 {
3913 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3914 const char *default_cpu = (!TARGET_POWERPC64
3915 ? "powerpc"
3916 : (BYTES_BIG_ENDIAN
3917 ? "powerpc64"
3918 : "powerpc64le"));
3919 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3920 flags = processor_target_table[default_cpu_index].target_enable;
3921 }
3922 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3923 }
3924
3925 if (rs6000_tune_index >= 0)
3926 tune_index = rs6000_tune_index;
3927 else if (cpu_index >= 0)
3928 rs6000_tune_index = tune_index = cpu_index;
3929 else
3930 {
3931 size_t i;
3932 enum processor_type tune_proc
3933 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3934
3935 tune_index = -1;
3936 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3937 if (processor_target_table[i].processor == tune_proc)
3938 {
3939 tune_index = i;
3940 break;
3941 }
3942 }
3943
3944 if (cpu_index >= 0)
3945 rs6000_cpu = processor_target_table[cpu_index].processor;
3946 else
3947 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3948
3949 gcc_assert (tune_index >= 0);
3950 rs6000_tune = processor_target_table[tune_index].processor;
3951
3952 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3953 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3954 || rs6000_cpu == PROCESSOR_PPCE5500)
3955 {
3956 if (TARGET_ALTIVEC)
3957 error ("AltiVec not supported in this target");
3958 }
3959
3960 /* If we are optimizing big endian systems for space, use the load/store
3961 multiple instructions. */
3962 if (BYTES_BIG_ENDIAN && optimize_size)
3963 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3964
3965 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3966 because the hardware doesn't support the instructions used in little
3967 endian mode, and they cause an alignment trap. The 750 does not cause an
3968 alignment trap (except when the target address is unaligned). */
3969
3970 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3971 {
3972 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3973 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3974 warning (0, "%qs is not supported on little endian systems",
3975 "-mmultiple");
3976 }
3977
3978 /* If little-endian, default to -mstrict-align on older processors.
3979 Testing for htm matches power8 and later. */
3980 if (!BYTES_BIG_ENDIAN
3981 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3982 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3983
3984 if (!rs6000_fold_gimple)
3985 fprintf (stderr,
3986 "gimple folding of rs6000 builtins has been disabled.\n");
3987
3988 /* Add some warnings for VSX. */
3989 if (TARGET_VSX)
3990 {
3991 const char *msg = NULL;
3992 if (!TARGET_HARD_FLOAT)
3993 {
3994 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3995 msg = N_("%<-mvsx%> requires hardware floating point");
3996 else
3997 {
3998 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3999 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4000 }
4001 }
4002 else if (TARGET_AVOID_XFORM > 0)
4003 msg = N_("%<-mvsx%> needs indexed addressing");
4004 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4005 & OPTION_MASK_ALTIVEC))
4006 {
4007 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4008 msg = N_("%<-mvsx%> and %<-mno-altivec%> are incompatible");
4009 else
4010 msg = N_("%<-mno-altivec%> disables vsx");
4011 }
4012
4013 if (msg)
4014 {
4015 warning (0, msg);
4016 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4017 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4018 }
4019 }
4020
4021 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4022 the -mcpu setting to enable options that conflict. */
4023 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4024 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4025 | OPTION_MASK_ALTIVEC
4026 | OPTION_MASK_VSX)) != 0)
4027 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4028 | OPTION_MASK_DIRECT_MOVE)
4029 & ~rs6000_isa_flags_explicit);
4030
4031 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4032 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4033
4034 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4035 off all of the options that depend on those flags. */
4036 ignore_masks = rs6000_disable_incompatible_switches ();
4037
4038 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4039 unless the user explicitly used the -mno-<option> to disable the code. */
4040 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4041 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4042 else if (TARGET_P9_MINMAX)
4043 {
4044 if (cpu_index >= 0)
4045 {
4046 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4047 {
4048 /* Legacy behavior: allow -mcpu=power9 with certain
4049 capabilities explicitly disabled. */
4050 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4051 }
4052 else
4053 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4054 "for <xxx> less than power9", "-mcpu");
4055 }
4056 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4057 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4058 & rs6000_isa_flags_explicit))
4059 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4060 were explicitly cleared. */
4061 error ("%qs incompatible with explicitly disabled options",
4062 "-mpower9-minmax");
4063 else
4064 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4065 }
4066 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4067 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4068 else if (TARGET_VSX)
4069 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4070 else if (TARGET_POPCNTD)
4071 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4072 else if (TARGET_DFP)
4073 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4074 else if (TARGET_CMPB)
4075 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4076 else if (TARGET_FPRND)
4077 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4078 else if (TARGET_POPCNTB)
4079 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4080 else if (TARGET_ALTIVEC)
4081 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4082
4083 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4084 {
4085 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4086 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4087 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4088 }
4089
4090 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4091 {
4092 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4093 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4094 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4095 }
4096
4097 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4098 {
4099 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4100 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4101 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4102 }
4103
4104 if (TARGET_P8_VECTOR && !TARGET_VSX)
4105 {
4106 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4107 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4108 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4109 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4110 {
4111 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4112 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4113 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4114 }
4115 else
4116 {
4117 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4118 not explicit. */
4119 rs6000_isa_flags |= OPTION_MASK_VSX;
4120 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4121 }
4122 }
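/* Net effect, for example: a bare -mpower8-vector on the command line
   silently enables -mvsx as well, while -mpower8-vector -mno-vsx is
   rejected with the error above. */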
4123
4124 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4125 {
4126 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4127 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4128 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4129 }
4130
4131 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4132 silently turn off quad memory mode. */
4133 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4134 {
4135 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4136 warning (0, N_("%<-mquad-memory%> requires 64-bit mode"));
4137
4138 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4139 warning (0, N_("%<-mquad-memory-atomic%> requires 64-bit mode"));
4140
4141 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4142 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4143 }
4144
4145 /* Non-atomic quad memory load/store are disabled for little endian, since
4146 the words are reversed, but atomic operations can still be done by
4147 swapping the words. */
4148 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4149 {
4150 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4151 warning (0, N_("%<-mquad-memory%> is not available in little endian "
4152 "mode"));
4153
4154 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4155 }
4156
4157 /* Assume that if the user asked for normal quad memory instructions, they
4158 want the atomic versions as well, unless they explicitly told us not to
4159 use quad word atomic instructions. */
4160 if (TARGET_QUAD_MEMORY
4161 && !TARGET_QUAD_MEMORY_ATOMIC
4162 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4163 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4164
4165 /* If we can shrink-wrap the TOC register save separately, then use
4166 -msave-toc-indirect unless explicitly disabled. */
4167 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4168 && flag_shrink_wrap_separate
4169 && optimize_function_for_speed_p (cfun))
4170 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4171
4172 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4173 generating power8 instructions. Power9 does not optimize power8 fusion
4174 cases. */
4175 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4176 {
4177 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4178 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4179 else
4180 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4181 }
4182
4183 /* Setting additional fusion flags turns on base fusion. */
4184 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4185 {
4186 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4187 {
4188 if (TARGET_P8_FUSION_SIGN)
4189 error ("%qs requires %qs", "-mpower8-fusion-sign",
4190 "-mpower8-fusion");
4191
4192 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4193 }
4194 else
4195 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4196 }
4197
4198 /* Power8 does not fuse sign extended loads with the addis. If we are
4199 optimizing at high levels for speed, convert a sign extended load into a
4200 zero extending load, and an explicit sign extension. */
4201 if (TARGET_P8_FUSION
4202 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4203 && optimize_function_for_speed_p (cfun)
4204 && optimize >= 3)
4205 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
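/* E.g., a sketch of the resulting code: an addis/lwa pair does not fuse,
   so the sign-extending load is emitted instead as

     addis rT,rB,hi    # fusible with the following lwz
     lwz   rD,lo(rT)
     extsw rD,rD       # explicit sign extension

   trading one extra instruction for a fusible addis/lwz pair. */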
4206
4207 /* ISA 3.0 vector instructions include ISA 2.07. */
4208 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4209 {
4210 /* We prefer to not mention undocumented options in
4211 error messages. However, if users have managed to select
4212 power9-vector without selecting power8-vector, they
4213 already know about undocumented flags. */
4214 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4215 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4216 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4217 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4218 {
4219 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4220 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4221 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4222 }
4223 else
4224 {
4225 /* OPTION_MASK_P9_VECTOR is explicit and
4226 OPTION_MASK_P8_VECTOR is not explicit. */
4227 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4228 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4229 }
4230 }
4231
4232 /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
4233 support. If we only have ISA 2.06 support, and the user did not specify
4234 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4235 but we don't enable the full vectorization support. */
4236 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4237 TARGET_ALLOW_MOVMISALIGN = 1;
4238
4239 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4240 {
4241 if (TARGET_ALLOW_MOVMISALIGN > 0
4242 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4243 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4244
4245 TARGET_ALLOW_MOVMISALIGN = 0;
4246 }
4247
4248 /* Determine when unaligned vector accesses are permitted, and when
4249 they are preferred over masked Altivec loads. Note that if
4250 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4251 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4252 not true. */
4253 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4254 {
4255 if (!TARGET_VSX)
4256 {
4257 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4258 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4259
4260 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4261 }
4262
4263 else if (!TARGET_ALLOW_MOVMISALIGN)
4264 {
4265 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4266 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4267 "-mallow-movmisalign");
4268
4269 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4270 }
4271 }
4272
4273 /* Use the long double size to select the appropriate long double type. We
4274 use TYPE_PRECISION to differentiate the 3 different long double types,
4275 mapping 128 into the precision used for TFmode. */
4276 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4277 ? 64
4278 : FLOAT_PRECISION_TFmode);
4279
4280 /* Set long double size before the IEEE 128-bit tests. */
4281 if (!global_options_set.x_rs6000_long_double_type_size)
4282 {
4283 if (main_target_opt != NULL
4284 && (main_target_opt->x_rs6000_long_double_type_size
4285 != default_long_double_size))
4286 error ("target attribute or pragma changes long double size");
4287 else
4288 rs6000_long_double_type_size = default_long_double_size;
4289 }
4290 else if (rs6000_long_double_type_size == 128)
4291 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4292 else if (global_options_set.x_rs6000_ieeequad)
4293 {
4294 if (global_options.x_rs6000_ieeequad)
4295 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4296 else
4297 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4298 }
4299
4300 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4301 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4302 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4303 those systems will not pick up this default. Warn if the user changes the
4304 default unless -Wno-psabi. */
4305 if (!global_options_set.x_rs6000_ieeequad)
4306 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4307
4308 else
4309 {
4310 if (global_options.x_rs6000_ieeequad
4311 && (!TARGET_POPCNTD || !TARGET_VSX))
4312 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4313
4314 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4315 {
4316 static bool warned_change_long_double;
4317 if (!warned_change_long_double)
4318 {
4319 warned_change_long_double = true;
4320 if (TARGET_IEEEQUAD)
4321 warning (OPT_Wpsabi, "using IEEE extended precision long double");
4322 else
4323 warning (OPT_Wpsabi, "using IBM extended precision long double");
4324 }
4325 }
4326 }
4327
4328 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4329 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4330 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4331 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4332 the keyword and the type. */
4333 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4334
4335 /* IEEE 128-bit floating point requires VSX support. */
4336 if (TARGET_FLOAT128_KEYWORD)
4337 {
4338 if (!TARGET_VSX)
4339 {
4340 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4341 error ("%qs requires VSX support", "%<-mfloat128%>");
4342
4343 TARGET_FLOAT128_TYPE = 0;
4344 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4345 | OPTION_MASK_FLOAT128_HW);
4346 }
4347 else if (!TARGET_FLOAT128_TYPE)
4348 {
4349 TARGET_FLOAT128_TYPE = 1;
4350 warning (0, "The %<-mfloat128%> option may not be fully supported");
4351 }
4352 }
4353
4354 /* Enable the __float128 keyword under Linux by default. */
4355 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4356 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4357 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4358
4359 /* If we support the float128 type and have full ISA 3.0 support, enable
4360 -mfloat128-hardware by default. However, don't enable the __float128
4361 keyword if it was explicitly turned off. 64-bit mode is needed because
4362 sometimes the compiler wants to put things in an integer container, and
4363 if we don't have __int128 support, that is impossible. */
4364 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4365 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4366 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4367 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4368
4369 if (TARGET_FLOAT128_HW
4370 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4371 {
4372 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4373 error ("%qs requires full ISA 3.0 support", "%<-mfloat128-hardware%>");
4374
4375 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4376 }
4377
4378 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4379 {
4380 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4381 error ("%qs requires %qs", "%<-mfloat128-hardware%>", "-m64");
4382
4383 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4384 }
4385
4386 /* Print the options after updating the defaults. */
4387 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4388 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4389
4390 /* E500mc does "better" if we inline more aggressively. Respect the
4391 user's opinion, though. */
4392 if (rs6000_block_move_inline_limit == 0
4393 && (rs6000_tune == PROCESSOR_PPCE500MC
4394 || rs6000_tune == PROCESSOR_PPCE500MC64
4395 || rs6000_tune == PROCESSOR_PPCE5500
4396 || rs6000_tune == PROCESSOR_PPCE6500))
4397 rs6000_block_move_inline_limit = 128;
4398
4399 /* store_one_arg depends on expand_block_move to handle at least the
4400 size of reg_parm_stack_space. */
4401 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4402 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4403
4404 if (global_init_p)
4405 {
4406 /* If the appropriate debug option is enabled, replace the target hooks
4407 with debug versions that call the real version and then print
4408 debugging information. */
4409 if (TARGET_DEBUG_COST)
4410 {
4411 targetm.rtx_costs = rs6000_debug_rtx_costs;
4412 targetm.address_cost = rs6000_debug_address_cost;
4413 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4414 }
4415
4416 if (TARGET_DEBUG_ADDR)
4417 {
4418 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4419 targetm.legitimize_address = rs6000_debug_legitimize_address;
4420 rs6000_secondary_reload_class_ptr
4421 = rs6000_debug_secondary_reload_class;
4422 targetm.secondary_memory_needed
4423 = rs6000_debug_secondary_memory_needed;
4424 targetm.can_change_mode_class
4425 = rs6000_debug_can_change_mode_class;
4426 rs6000_preferred_reload_class_ptr
4427 = rs6000_debug_preferred_reload_class;
4428 rs6000_legitimize_reload_address_ptr
4429 = rs6000_debug_legitimize_reload_address;
4430 rs6000_mode_dependent_address_ptr
4431 = rs6000_debug_mode_dependent_address;
4432 }
4433
4434 if (rs6000_veclibabi_name)
4435 {
4436 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4437 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4438 else
4439 {
4440 error ("unknown vectorization library ABI type (%qs) for "
4441 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4442 ret = false;
4443 }
4444 }
4445 }
4446
4447 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4448 target attribute or pragma that automatically enables both options,
4449 unless the AltiVec ABI was set. That ABI is set by default for 64-bit,
4450 but not for 32-bit. */
4451 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4452 {
4453 TARGET_FLOAT128_TYPE = 0;
4454 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4455 | OPTION_MASK_FLOAT128_KEYWORD)
4456 & ~rs6000_isa_flags_explicit);
4457 }
4458
4459 /* Enable Altivec ABI for AIX -maltivec. */
4460 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4461 {
4462 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4463 error ("target attribute or pragma changes AltiVec ABI");
4464 else
4465 rs6000_altivec_abi = 1;
4466 }
4467
4468 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4469 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4470 be explicitly overridden in either case. */
4471 if (TARGET_ELF)
4472 {
4473 if (!global_options_set.x_rs6000_altivec_abi
4474 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4475 {
4476 if (main_target_opt != NULL
4477 && !main_target_opt->x_rs6000_altivec_abi)
4478 error ("target attribute or pragma changes AltiVec ABI");
4479 else
4480 rs6000_altivec_abi = 1;
4481 }
4482 }
4483
4484 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4485 So far, the only darwin64 targets are also Mach-O. */
4486 if (TARGET_MACHO
4487 && DEFAULT_ABI == ABI_DARWIN
4488 && TARGET_64BIT)
4489 {
4490 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4491 error ("target attribute or pragma changes darwin64 ABI");
4492 else
4493 {
4494 rs6000_darwin64_abi = 1;
4495 /* Default to natural alignment, for better performance. */
4496 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4497 }
4498 }
4499
4500 /* Place FP constants in the constant pool instead of the TOC
4501 if section anchors are enabled. */
4502 if (flag_section_anchors
4503 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4504 TARGET_NO_FP_IN_TOC = 1;
4505
4506 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4507 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4508
4509 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4510 SUBTARGET_OVERRIDE_OPTIONS;
4511 #endif
4512 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4513 SUBSUBTARGET_OVERRIDE_OPTIONS;
4514 #endif
4515 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4516 SUB3TARGET_OVERRIDE_OPTIONS;
4517 #endif
4518
4519 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4520 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4521
4522 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4523 && rs6000_tune != PROCESSOR_POWER5
4524 && rs6000_tune != PROCESSOR_POWER6
4525 && rs6000_tune != PROCESSOR_POWER7
4526 && rs6000_tune != PROCESSOR_POWER8
4527 && rs6000_tune != PROCESSOR_POWER9
4528 && rs6000_tune != PROCESSOR_PPCA2
4529 && rs6000_tune != PROCESSOR_CELL
4530 && rs6000_tune != PROCESSOR_PPC476);
4531 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4532 || rs6000_tune == PROCESSOR_POWER5
4533 || rs6000_tune == PROCESSOR_POWER7
4534 || rs6000_tune == PROCESSOR_POWER8);
4535 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4536 || rs6000_tune == PROCESSOR_POWER5
4537 || rs6000_tune == PROCESSOR_POWER6
4538 || rs6000_tune == PROCESSOR_POWER7
4539 || rs6000_tune == PROCESSOR_POWER8
4540 || rs6000_tune == PROCESSOR_POWER9
4541 || rs6000_tune == PROCESSOR_PPCE500MC
4542 || rs6000_tune == PROCESSOR_PPCE500MC64
4543 || rs6000_tune == PROCESSOR_PPCE5500
4544 || rs6000_tune == PROCESSOR_PPCE6500);
4545
4546 /* Allow debug switches to override the above settings. These are set to -1
4547 in rs6000.opt to indicate the user hasn't directly set the switch. */
4548 if (TARGET_ALWAYS_HINT >= 0)
4549 rs6000_always_hint = TARGET_ALWAYS_HINT;
4550
4551 if (TARGET_SCHED_GROUPS >= 0)
4552 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4553
4554 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4555 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4556
4557 rs6000_sched_restricted_insns_priority
4558 = (rs6000_sched_groups ? 1 : 0);
4559
4560 /* Handle -msched-costly-dep option. */
4561 rs6000_sched_costly_dep
4562 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4563
4564 if (rs6000_sched_costly_dep_str)
4565 {
4566 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4567 rs6000_sched_costly_dep = no_dep_costly;
4568 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4569 rs6000_sched_costly_dep = all_deps_costly;
4570 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4571 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4572 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4573 rs6000_sched_costly_dep = store_to_load_dep_costly;
4574 else
4575 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4576 atoi (rs6000_sched_costly_dep_str));
4577 }
4578
4579 /* Handle -minsert-sched-nops option. */
4580 rs6000_sched_insert_nops
4581 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4582
4583 if (rs6000_sched_insert_nops_str)
4584 {
4585 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4586 rs6000_sched_insert_nops = sched_finish_none;
4587 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4588 rs6000_sched_insert_nops = sched_finish_pad_groups;
4589 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4590 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4591 else
4592 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4593 atoi (rs6000_sched_insert_nops_str));
4594 }
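/* Accepted spellings, for illustration: -minsert-sched-nops=no, =pad,
   =regroup_exact, or a bare number such as -minsert-sched-nops=3, which
   falls through to the atoi call above (as does -msched-costly-dep=<n>
   in the preceding block). */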
4595
4596 /* Handle the stack protector options. */
4597 if (!global_options_set.x_rs6000_stack_protector_guard)
4598 #ifdef TARGET_THREAD_SSP_OFFSET
4599 rs6000_stack_protector_guard = SSP_TLS;
4600 #else
4601 rs6000_stack_protector_guard = SSP_GLOBAL;
4602 #endif
4603
4604 #ifdef TARGET_THREAD_SSP_OFFSET
4605 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4606 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4607 #endif
4608
4609 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4610 {
4611 char *endp;
4612 const char *str = rs6000_stack_protector_guard_offset_str;
4613
4614 errno = 0;
4615 long offset = strtol (str, &endp, 0);
4616 if (!*str || *endp || errno)
4617 error ("%qs is not a valid number in %qs", str,
4618 "-mstack-protector-guard-offset=");
4619
4620 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4621 || (TARGET_64BIT && (offset & 3)))
4622 error ("%qs is not a valid offset in %qs", str,
4623 "-mstack-protector-guard-offset=");
4624
4625 rs6000_stack_protector_guard_offset = offset;
4626 }
4627
4628 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4629 {
4630 const char *str = rs6000_stack_protector_guard_reg_str;
4631 int reg = decode_reg_name (str);
4632
4633 if (!IN_RANGE (reg, 1, 31))
4634 error ("%qs is not a valid base register in %qs", str,
4635 "-mstack-protector-guard-reg=");
4636
4637 rs6000_stack_protector_guard_reg = reg;
4638 }
4639
4640 if (rs6000_stack_protector_guard == SSP_TLS
4641 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4642 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
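/* For example, a hypothetical invocation

     gcc -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 \
         -mstack-protector-guard-offset=-28688

   loads the canary from -28688(r13); the IN_RANGE checks above limit the
   offset to a signed 16-bit displacement, word-aligned in 64-bit mode. */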
4643
4644 if (global_init_p)
4645 {
4646 #ifdef TARGET_REGNAMES
4647 /* If the user desires alternate register names, copy in the
4648 alternate names now. */
4649 if (TARGET_REGNAMES)
4650 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4651 #endif
4652
4653 /* Set aix_struct_return last, after the ABI is determined.
4654 If -maix-struct-return or -msvr4-struct-return was explicitly
4655 used, don't override with the ABI default. */
4656 if (!global_options_set.x_aix_struct_return)
4657 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4658
4659 #if 0
4660 /* IBM XL compiler defaults to unsigned bitfields. */
4661 if (TARGET_XL_COMPAT)
4662 flag_signed_bitfields = 0;
4663 #endif
4664
4665 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4666 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4667
4668 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4669
4670 /* We can only guarantee the availability of DI pseudo-ops when
4671 assembling for 64-bit targets. */
4672 if (!TARGET_64BIT)
4673 {
4674 targetm.asm_out.aligned_op.di = NULL;
4675 targetm.asm_out.unaligned_op.di = NULL;
4676 }
4677
4678
4679 /* Set branch target alignment, if not optimizing for size. */
4680 if (!optimize_size)
4681 {
4682 /* Cell wants branch targets aligned to 8 bytes for dual issue. Titan
4683 wants 8-byte alignment to avoid misprediction by the branch predictor. */
4684 if (rs6000_tune == PROCESSOR_TITAN
4685 || rs6000_tune == PROCESSOR_CELL)
4686 {
4687 if (flag_align_functions && !str_align_functions)
4688 str_align_functions = "8";
4689 if (flag_align_jumps && !str_align_jumps)
4690 str_align_jumps = "8";
4691 if (flag_align_loops && !str_align_loops)
4692 str_align_loops = "8";
4693 }
4694 if (rs6000_align_branch_targets)
4695 {
4696 if (flag_align_functions && !str_align_functions)
4697 str_align_functions = "16";
4698 if (flag_align_jumps && !str_align_jumps)
4699 str_align_jumps = "16";
4700 if (flag_align_loops && !str_align_loops)
4701 {
4702 can_override_loop_align = 1;
4703 str_align_loops = "16";
4704 }
4705 }
4706
4707 if (flag_align_jumps && !str_align_jumps)
4708 str_align_jumps = "16";
4709 if (flag_align_loops && !str_align_loops)
4710 str_align_loops = "16";
4711 }
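/* The net effect when rs6000_align_branch_targets is set (power4 through
   power9 and the e500 family, per the list earlier in this function) is as
   if the user had passed -falign-functions=16 -falign-jumps=16
   -falign-loops=16, unless those were given explicitly. */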
4712
4713 /* Arrange to save and restore machine status around nested functions. */
4714 init_machine_status = rs6000_init_machine_status;
4715
4716 /* We should always be splitting complex arguments, but we can't break
4717 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4718 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4719 targetm.calls.split_complex_arg = NULL;
4720
4721 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4722 if (DEFAULT_ABI == ABI_AIX)
4723 targetm.calls.custom_function_descriptors = 0;
4724 }
4725
4726 /* Initialize rs6000_cost with the appropriate target costs. */
4727 if (optimize_size)
4728 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4729 else
4730 switch (rs6000_tune)
4731 {
4732 case PROCESSOR_RS64A:
4733 rs6000_cost = &rs64a_cost;
4734 break;
4735
4736 case PROCESSOR_MPCCORE:
4737 rs6000_cost = &mpccore_cost;
4738 break;
4739
4740 case PROCESSOR_PPC403:
4741 rs6000_cost = &ppc403_cost;
4742 break;
4743
4744 case PROCESSOR_PPC405:
4745 rs6000_cost = &ppc405_cost;
4746 break;
4747
4748 case PROCESSOR_PPC440:
4749 rs6000_cost = &ppc440_cost;
4750 break;
4751
4752 case PROCESSOR_PPC476:
4753 rs6000_cost = &ppc476_cost;
4754 break;
4755
4756 case PROCESSOR_PPC601:
4757 rs6000_cost = &ppc601_cost;
4758 break;
4759
4760 case PROCESSOR_PPC603:
4761 rs6000_cost = &ppc603_cost;
4762 break;
4763
4764 case PROCESSOR_PPC604:
4765 rs6000_cost = &ppc604_cost;
4766 break;
4767
4768 case PROCESSOR_PPC604e:
4769 rs6000_cost = &ppc604e_cost;
4770 break;
4771
4772 case PROCESSOR_PPC620:
4773 rs6000_cost = &ppc620_cost;
4774 break;
4775
4776 case PROCESSOR_PPC630:
4777 rs6000_cost = &ppc630_cost;
4778 break;
4779
4780 case PROCESSOR_CELL:
4781 rs6000_cost = &ppccell_cost;
4782 break;
4783
4784 case PROCESSOR_PPC750:
4785 case PROCESSOR_PPC7400:
4786 rs6000_cost = &ppc750_cost;
4787 break;
4788
4789 case PROCESSOR_PPC7450:
4790 rs6000_cost = &ppc7450_cost;
4791 break;
4792
4793 case PROCESSOR_PPC8540:
4794 case PROCESSOR_PPC8548:
4795 rs6000_cost = &ppc8540_cost;
4796 break;
4797
4798 case PROCESSOR_PPCE300C2:
4799 case PROCESSOR_PPCE300C3:
4800 rs6000_cost = &ppce300c2c3_cost;
4801 break;
4802
4803 case PROCESSOR_PPCE500MC:
4804 rs6000_cost = &ppce500mc_cost;
4805 break;
4806
4807 case PROCESSOR_PPCE500MC64:
4808 rs6000_cost = &ppce500mc64_cost;
4809 break;
4810
4811 case PROCESSOR_PPCE5500:
4812 rs6000_cost = &ppce5500_cost;
4813 break;
4814
4815 case PROCESSOR_PPCE6500:
4816 rs6000_cost = &ppce6500_cost;
4817 break;
4818
4819 case PROCESSOR_TITAN:
4820 rs6000_cost = &titan_cost;
4821 break;
4822
4823 case PROCESSOR_POWER4:
4824 case PROCESSOR_POWER5:
4825 rs6000_cost = &power4_cost;
4826 break;
4827
4828 case PROCESSOR_POWER6:
4829 rs6000_cost = &power6_cost;
4830 break;
4831
4832 case PROCESSOR_POWER7:
4833 rs6000_cost = &power7_cost;
4834 break;
4835
4836 case PROCESSOR_POWER8:
4837 rs6000_cost = &power8_cost;
4838 break;
4839
4840 case PROCESSOR_POWER9:
4841 rs6000_cost = &power9_cost;
4842 break;
4843
4844 case PROCESSOR_PPCA2:
4845 rs6000_cost = &ppca2_cost;
4846 break;
4847
4848 default:
4849 gcc_unreachable ();
4850 }
4851
4852 if (global_init_p)
4853 {
4854 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4855 rs6000_cost->simultaneous_prefetches,
4856 global_options.x_param_values,
4857 global_options_set.x_param_values);
4858 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4859 global_options.x_param_values,
4860 global_options_set.x_param_values);
4861 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4862 rs6000_cost->cache_line_size,
4863 global_options.x_param_values,
4864 global_options_set.x_param_values);
4865 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4866 global_options.x_param_values,
4867 global_options_set.x_param_values);
4868
4869 /* Increase loop peeling limits based on performance analysis. */
4870 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4871 global_options.x_param_values,
4872 global_options_set.x_param_values);
4873 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4874 global_options.x_param_values,
4875 global_options_set.x_param_values);
4876
4877 /* Use the 'model' -fsched-pressure algorithm by default. */
4878 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4879 SCHED_PRESSURE_MODEL,
4880 global_options.x_param_values,
4881 global_options_set.x_param_values);
4882
4883 /* If using typedef char *va_list, signal that
4884 __builtin_va_start (&ap, 0) can be optimized to
4885 ap = __builtin_next_arg (0). */
4886 if (DEFAULT_ABI != ABI_V4)
4887 targetm.expand_builtin_va_start = NULL;
4888 }
4889
4890 /* If not explicitly specified via option, decide whether to generate indexed
4891 load/store instructions. A value of -1 indicates that the
4892 initial value of this variable has not been overwritten. During
4893 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4894 if (TARGET_AVOID_XFORM == -1)
4895 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4896 DERAT mispredict penalty. However, the LVE and STVE altivec instructions
4897 need indexed accesses, and the type used is the scalar type of the element
4898 being loaded or stored. */
4899 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4900 && !TARGET_ALTIVEC);
4901
4902 /* Set the -mrecip options. */
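/* The argument is a comma-separated list of keywords, each optionally
   prefixed with '!' to clear instead of set the corresponding mask bit;
   e.g. -mrecip=all,!rsqrtd enables every estimate except double-precision
   rsqrt (keywords as documented for the -mrecip= option). */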
4903 if (rs6000_recip_name)
4904 {
4905 char *p = ASTRDUP (rs6000_recip_name);
4906 char *q;
4907 unsigned int mask, i;
4908 bool invert;
4909
4910 while ((q = strtok (p, ",")) != NULL)
4911 {
4912 p = NULL;
4913 if (*q == '!')
4914 {
4915 invert = true;
4916 q++;
4917 }
4918 else
4919 invert = false;
4920
4921 if (!strcmp (q, "default"))
4922 mask = ((TARGET_RECIP_PRECISION)
4923 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4924 else
4925 {
4926 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4927 if (!strcmp (q, recip_options[i].string))
4928 {
4929 mask = recip_options[i].mask;
4930 break;
4931 }
4932
4933 if (i == ARRAY_SIZE (recip_options))
4934 {
4935 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4936 invert = false;
4937 mask = 0;
4938 ret = false;
4939 }
4940 }
4941
4942 if (invert)
4943 rs6000_recip_control &= ~mask;
4944 else
4945 rs6000_recip_control |= mask;
4946 }
4947 }
4948
4949 /* Set the builtin mask of the various options used that could affect which
4950 builtins are enabled. In the past we used target_flags, but we've run out
4951 of bits, and some options are no longer in target_flags. */
4952 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4953 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4954 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4955 rs6000_builtin_mask);
4956
4957 /* Initialize all of the registers. */
4958 rs6000_init_hard_regno_mode_ok (global_init_p);
4959
4960 /* Save the initial options in case the user uses function-specific options. */
4961 if (global_init_p)
4962 target_option_default_node = target_option_current_node
4963 = build_target_option_node (&global_options);
4964
4965 /* If not explicitly specified via option, decide whether to generate the
4966 extra blr's required to preserve the link stack on some cpus (e.g., the 476). */
4967 if (TARGET_LINK_STACK == -1)
4968 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4969
4970 /* Deprecate use of -mno-speculate-indirect-jumps. */
4971 if (!rs6000_speculate_indirect_jumps)
4972 warning (0, "%qs is deprecated and not recommended in any circumstances",
4973 "-mno-speculate-indirect-jumps");
4974
4975 return ret;
4976 }
4977
4978 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4979 define the target cpu type. */
4980
4981 static void
4982 rs6000_option_override (void)
4983 {
4984 (void) rs6000_option_override_internal (true);
4985 }
4986
4987 \f
4988 /* Implement targetm.vectorize.builtin_mask_for_load. */
4989 static tree
4990 rs6000_builtin_mask_for_load (void)
4991 {
4992 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4993 if ((TARGET_ALTIVEC && !TARGET_VSX)
4994 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4995 return altivec_builtin_mask_for_load;
4996 else
4997 return 0;
4998 }
4999
5000 /* Implement LOOP_ALIGN. */
5001 align_flags
5002 rs6000_loop_align (rtx label)
5003 {
5004 basic_block bb;
5005 int ninsns;
5006
5007 /* Don't override loop alignment if -falign-loops was specified. */
5008 if (!can_override_loop_align)
5009 return align_loops;
5010
5011 bb = BLOCK_FOR_INSN (label);
5012 ninsns = num_loop_insns (bb->loop_father);
5013
5014 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
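/* align_flags (5) below requests an alignment of 2**5 == 32 bytes. */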
5015 if (ninsns > 4 && ninsns <= 8
5016 && (rs6000_tune == PROCESSOR_POWER4
5017 || rs6000_tune == PROCESSOR_POWER5
5018 || rs6000_tune == PROCESSOR_POWER6
5019 || rs6000_tune == PROCESSOR_POWER7
5020 || rs6000_tune == PROCESSOR_POWER8))
5021 return align_flags (5);
5022 else
5023 return align_loops;
5024 }
5025
5026 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5027 after applying N iterations. This routine does not determine how many
5028 iterations are required to reach the desired alignment. */
5029
5030 static bool
5031 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5032 {
5033 if (is_packed)
5034 return false;
5035
5036 if (TARGET_32BIT)
5037 {
5038 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5039 return true;
5040
5041 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5042 return true;
5043
5044 return false;
5045 }
5046 else
5047 {
5048 if (TARGET_MACHO)
5049 return false;
5050
5051 /* Assume that all other types are naturally aligned. CHECKME! */
5052 return true;
5053 }
5054 }
5055
5056 /* Return true if the vector misalignment factor is supported by the
5057 target. */
5058 static bool
5059 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5060 const_tree type,
5061 int misalignment,
5062 bool is_packed)
5063 {
5064 if (TARGET_VSX)
5065 {
5066 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5067 return true;
5068
5069 /* Return false if the movmisalign pattern is not supported for this mode. */
5070 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5071 return false;
5072
5073 if (misalignment == -1)
5074 {
5075 /* Misalignment factor is unknown at compile time but we know
5076 it's word aligned. */
5077 if (rs6000_vector_alignment_reachable (type, is_packed))
5078 {
5079 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5080
5081 if (element_size == 64 || element_size == 32)
5082 return true;
5083 }
5084
5085 return false;
5086 }
5087
5088 /* VSX supports word-aligned vectors. */
5089 if (misalignment % 4 == 0)
5090 return true;
5091 }
5092 return false;
5093 }
5094
5095 /* Implement targetm.vectorize.builtin_vectorization_cost. */
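/* The costs returned below are in relative units; MISALIGN is the known
   byte misalignment of an unaligned access, or -1 if it is unknown at
   compile time. */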
5096 static int
5097 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5098 tree vectype, int misalign)
5099 {
5100 unsigned elements;
5101 tree elem_type;
5102
5103 switch (type_of_cost)
5104 {
5105 case scalar_stmt:
5106 case scalar_load:
5107 case scalar_store:
5108 case vector_stmt:
5109 case vector_load:
5110 case vector_store:
5111 case vec_to_scalar:
5112 case scalar_to_vec:
5113 case cond_branch_not_taken:
5114 return 1;
5115
5116 case vec_perm:
5117 if (TARGET_VSX)
5118 return 3;
5119 else
5120 return 1;
5121
5122 case vec_promote_demote:
5123 if (TARGET_VSX)
5124 return 4;
5125 else
5126 return 1;
5127
5128 case cond_branch_taken:
5129 return 3;
5130
5131 case unaligned_load:
5132 case vector_gather_load:
5133 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5134 return 1;
5135
5136 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5137 {
5138 elements = TYPE_VECTOR_SUBPARTS (vectype);
5139 if (elements == 2)
5140 /* Double word aligned. */
5141 return 2;
5142
5143 if (elements == 4)
5144 {
5145 switch (misalign)
5146 {
5147 case 8:
5148 /* Double word aligned. */
5149 return 2;
5150
5151 case -1:
5152 /* Unknown misalignment. */
5153 case 4:
5154 case 12:
5155 /* Word aligned. */
5156 return 22;
5157
5158 default:
5159 gcc_unreachable ();
5160 }
5161 }
5162 }
5163
5164 if (TARGET_ALTIVEC)
5165 /* Misaligned loads are not supported. */
5166 gcc_unreachable ();
5167
5168 return 2;
5169
5170 case unaligned_store:
5171 case vector_scatter_store:
5172 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5173 return 1;
5174
5175 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5176 {
5177 elements = TYPE_VECTOR_SUBPARTS (vectype);
5178 if (elements == 2)
5179 /* Double word aligned. */
5180 return 2;
5181
5182 if (elements == 4)
5183 {
5184 switch (misalign)
5185 {
5186 case 8:
5187 /* Double word aligned. */
5188 return 2;
5189
5190 case -1:
5191 /* Unknown misalignment. */
5192 case 4:
5193 case 12:
5194 /* Word aligned. */
5195 return 23;
5196
5197 default:
5198 gcc_unreachable ();
5199 }
5200 }
5201 }
5202
5203 if (TARGET_ALTIVEC)
5204 /* Misaligned stores are not supported. */
5205 gcc_unreachable ();
5206
5207 return 2;
5208
5209 case vec_construct:
5210 /* This is a rough approximation assuming non-constant elements
5211 constructed into a vector via element insertion. FIXME:
5212 vec_construct is not granular enough for uniformly good
5213 decisions. If the initialization is a splat, this is
5214 cheaper than we estimate. Improve this someday. */
5215 elem_type = TREE_TYPE (vectype);
5216 /* 32-bit vectors loaded into registers are stored as double
5217 precision, so we need 2 permutes, 2 converts, and 1 merge
5218 to construct a vector of short floats from them. */
5219 if (SCALAR_FLOAT_TYPE_P (elem_type)
5220 && TYPE_PRECISION (elem_type) == 32)
5221 return 5;
5222 /* On POWER9, integer vector types are built up in GPRs and then
5223 use a direct move (2 cycles). For POWER8 this is even worse,
5224 as we need two direct moves and a merge, and the direct moves
5225 are five cycles. */
5226 else if (INTEGRAL_TYPE_P (elem_type))
5227 {
5228 if (TARGET_P9_VECTOR)
5229 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5230 else
5231 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5232 }
5233 else
5234 /* V2DFmode doesn't need a direct move. */
5235 return 2;
5236
5237 default:
5238 gcc_unreachable ();
5239 }
5240 }
5241
5242 /* Implement targetm.vectorize.preferred_simd_mode. */
5243
5244 static machine_mode
5245 rs6000_preferred_simd_mode (scalar_mode mode)
5246 {
5247 if (TARGET_VSX)
5248 switch (mode)
5249 {
5250 case E_DFmode:
5251 return V2DFmode;
5252 default:;
5253 }
5254 if (TARGET_ALTIVEC || TARGET_VSX)
5255 switch (mode)
5256 {
5257 case E_SFmode:
5258 return V4SFmode;
5259 case E_TImode:
5260 return V1TImode;
5261 case E_DImode:
5262 return V2DImode;
5263 case E_SImode:
5264 return V4SImode;
5265 case E_HImode:
5266 return V8HImode;
5267 case E_QImode:
5268 return V16QImode;
5269 default:;
5270 }
5271 return word_mode;
5272 }
5273
5274 typedef struct _rs6000_cost_data
5275 {
5276 struct loop *loop_info;
5277 unsigned cost[3];
5278 } rs6000_cost_data;
5279
5280 /* Test for likely overcommitment of vector hardware resources. If a
5281 loop iteration is relatively large, and too large a percentage of
5282 instructions in the loop are vectorized, the cost model may not
5283 adequately reflect delays from unavailable vector resources.
5284 Penalize the loop body cost for this case. */
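/* For example, with the thresholds below, a loop body with vectorized
   cost 90 and unvectorized cost 10 has a density of 90% over a size of
   100, so its body cost is penalized to 90 * 110 / 100 == 99. */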
5285
5286 static void
5287 rs6000_density_test (rs6000_cost_data *data)
5288 {
5289 const int DENSITY_PCT_THRESHOLD = 85;
5290 const int DENSITY_SIZE_THRESHOLD = 70;
5291 const int DENSITY_PENALTY = 10;
5292 struct loop *loop = data->loop_info;
5293 basic_block *bbs = get_loop_body (loop);
5294 int nbbs = loop->num_nodes;
5295 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5296 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5297 int i, density_pct;
5298
5299 for (i = 0; i < nbbs; i++)
5300 {
5301 basic_block bb = bbs[i];
5302 gimple_stmt_iterator gsi;
5303
5304 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5305 {
5306 gimple *stmt = gsi_stmt (gsi);
5307 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5308
5309 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5310 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5311 not_vec_cost++;
5312 }
5313 }
5314
5315 free (bbs);
5316 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5317
5318 if (density_pct > DENSITY_PCT_THRESHOLD
5319 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5320 {
5321 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5322 if (dump_enabled_p ())
5323 dump_printf_loc (MSG_NOTE, vect_location,
5324 "density %d%%, cost %d exceeds threshold, penalizing "
5325 "loop body cost by %d%%", density_pct,
5326 vec_cost + not_vec_cost, DENSITY_PENALTY);
5327 }
5328 }
5329
5330 /* Implement targetm.vectorize.init_cost. */
5331
5332 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5333 instruction is needed by the vectorization. */
5334 static bool rs6000_vect_nonmem;
5335
5336 static void *
5337 rs6000_init_cost (struct loop *loop_info)
5338 {
5339 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5340 data->loop_info = loop_info;
5341 data->cost[vect_prologue] = 0;
5342 data->cost[vect_body] = 0;
5343 data->cost[vect_epilogue] = 0;
5344 rs6000_vect_nonmem = false;
5345 return data;
5346 }
5347
5348 /* Implement targetm.vectorize.add_stmt_cost. */
5349
5350 static unsigned
5351 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5352 struct _stmt_vec_info *stmt_info, int misalign,
5353 enum vect_cost_model_location where)
5354 {
5355 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5356 unsigned retval = 0;
5357
5358 if (flag_vect_cost_model)
5359 {
5360 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5361 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5362 misalign);
5363 /* Statements in an inner loop relative to the loop being
5364 vectorized are weighted more heavily. The value here is
5365 arbitrary and could potentially be improved with analysis. */
5366 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5367 count *= 50; /* FIXME. */
5368
5369 retval = (unsigned) (count * stmt_cost);
5370 cost_data->cost[where] += retval;
5371
5372 /* Check whether we're doing something other than just a copy loop.
5373 Not all such loops may be profitably vectorized; see
5374 rs6000_finish_cost. */
5375 if ((kind == vec_to_scalar || kind == vec_perm
5376 || kind == vec_promote_demote || kind == vec_construct
5377 || kind == scalar_to_vec)
5378 || (where == vect_body && kind == vector_stmt))
5379 rs6000_vect_nonmem = true;
5380 }
5381
5382 return retval;
5383 }
5384
5385 /* Implement targetm.vectorize.finish_cost. */
5386
5387 static void
5388 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5389 unsigned *body_cost, unsigned *epilogue_cost)
5390 {
5391 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5392
5393 if (cost_data->loop_info)
5394 rs6000_density_test (cost_data);
5395
5396 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5397 that require versioning for any reason. The vectorization is at
5398 best a wash inside the loop, and the versioning checks make
5399 profitability highly unlikely and potentially quite harmful. */
5400 if (cost_data->loop_info)
5401 {
5402 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5403 if (!rs6000_vect_nonmem
5404 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5405 && LOOP_REQUIRES_VERSIONING (vec_info))
5406 cost_data->cost[vect_body] += 10000;
5407 }
5408
5409 *prologue_cost = cost_data->cost[vect_prologue];
5410 *body_cost = cost_data->cost[vect_body];
5411 *epilogue_cost = cost_data->cost[vect_epilogue];
5412 }
5413
5414 /* Implement targetm.vectorize.destroy_cost_data. */
5415
5416 static void
5417 rs6000_destroy_cost_data (void *data)
5418 {
5419 free (data);
5420 }
5421
5422 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5423 library with vectorized intrinsics. */
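/* For example, a two-lane double-precision pow maps to the MASS routine
   powd2, and a four-lane single-precision powf maps to powf4. */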
5424
5425 static tree
5426 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5427 tree type_in)
5428 {
5429 char name[32];
5430 const char *suffix = NULL;
5431 tree fntype, new_fndecl, bdecl = NULL_TREE;
5432 int n_args = 1;
5433 const char *bname;
5434 machine_mode el_mode, in_mode;
5435 int n, in_n;
5436
5437 /* Libmass is suitable for unsafe math only, as it does not correctly support
5438 parts of IEEE (such as denormals) with the required precision. Only support
5439 it if we have VSX to use the simd d2 or f4 functions.
5440 XXX: Add variable length support. */
5441 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5442 return NULL_TREE;
5443
5444 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5445 n = TYPE_VECTOR_SUBPARTS (type_out);
5446 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5447 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5448 if (el_mode != in_mode
5449 || n != in_n)
5450 return NULL_TREE;
5451
5452 switch (fn)
5453 {
5454 CASE_CFN_ATAN2:
5455 CASE_CFN_HYPOT:
5456 CASE_CFN_POW:
5457 n_args = 2;
5458 gcc_fallthrough ();
5459
5460 CASE_CFN_ACOS:
5461 CASE_CFN_ACOSH:
5462 CASE_CFN_ASIN:
5463 CASE_CFN_ASINH:
5464 CASE_CFN_ATAN:
5465 CASE_CFN_ATANH:
5466 CASE_CFN_CBRT:
5467 CASE_CFN_COS:
5468 CASE_CFN_COSH:
5469 CASE_CFN_ERF:
5470 CASE_CFN_ERFC:
5471 CASE_CFN_EXP2:
5472 CASE_CFN_EXP:
5473 CASE_CFN_EXPM1:
5474 CASE_CFN_LGAMMA:
5475 CASE_CFN_LOG10:
5476 CASE_CFN_LOG1P:
5477 CASE_CFN_LOG2:
5478 CASE_CFN_LOG:
5479 CASE_CFN_SIN:
5480 CASE_CFN_SINH:
5481 CASE_CFN_SQRT:
5482 CASE_CFN_TAN:
5483 CASE_CFN_TANH:
5484 if (el_mode == DFmode && n == 2)
5485 {
5486 bdecl = mathfn_built_in (double_type_node, fn);
5487 suffix = "d2"; /* pow -> powd2 */
5488 }
5489 else if (el_mode == SFmode && n == 4)
5490 {
5491 bdecl = mathfn_built_in (float_type_node, fn);
5492 suffix = "4"; /* powf -> powf4 */
5493 }
5494 else
5495 return NULL_TREE;
5496 if (!bdecl)
5497 return NULL_TREE;
5498 break;
5499
5500 default:
5501 return NULL_TREE;
5502 }
5503
5504 gcc_assert (suffix != NULL);
5505 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5506 if (!bname)
5507 return NULL_TREE;
5508
5509 strcpy (name, bname + sizeof ("__builtin_") - 1);
5510 strcat (name, suffix);
5511
5512 if (n_args == 1)
5513 fntype = build_function_type_list (type_out, type_in, NULL);
5514 else if (n_args == 2)
5515 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5516 else
5517 gcc_unreachable ();
5518
5519 /* Build a function declaration for the vectorized function. */
5520 new_fndecl = build_decl (BUILTINS_LOCATION,
5521 FUNCTION_DECL, get_identifier (name), fntype);
5522 TREE_PUBLIC (new_fndecl) = 1;
5523 DECL_EXTERNAL (new_fndecl) = 1;
5524 DECL_IS_NOVOPS (new_fndecl) = 1;
5525 TREE_READONLY (new_fndecl) = 1;
5526
5527 return new_fndecl;
5528 }
5529
5530 /* Returns a function decl for a vectorized version of the builtin function
5531 with builtin function code FN and the result vector type TYPE_OUT, or
5532 NULL_TREE if it is not available. */
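/* For example, a V2DFmode ceil is mapped to the VSX xvrdpip builtin
   handled below. */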
5533
5534 static tree
5535 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5536 tree type_in)
5537 {
5538 machine_mode in_mode, out_mode;
5539 int in_n, out_n;
5540
5541 if (TARGET_DEBUG_BUILTIN)
5542 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5543 combined_fn_name (combined_fn (fn)),
5544 GET_MODE_NAME (TYPE_MODE (type_out)),
5545 GET_MODE_NAME (TYPE_MODE (type_in)));
5546
5547 if (TREE_CODE (type_out) != VECTOR_TYPE
5548 || TREE_CODE (type_in) != VECTOR_TYPE)
5549 return NULL_TREE;
5550
5551 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5552 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5553 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5554 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5555
5556 switch (fn)
5557 {
5558 CASE_CFN_COPYSIGN:
5559 if (VECTOR_UNIT_VSX_P (V2DFmode)
5560 && out_mode == DFmode && out_n == 2
5561 && in_mode == DFmode && in_n == 2)
5562 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5563 if (VECTOR_UNIT_VSX_P (V4SFmode)
5564 && out_mode == SFmode && out_n == 4
5565 && in_mode == SFmode && in_n == 4)
5566 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5567 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5568 && out_mode == SFmode && out_n == 4
5569 && in_mode == SFmode && in_n == 4)
5570 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5571 break;
5572 CASE_CFN_CEIL:
5573 if (VECTOR_UNIT_VSX_P (V2DFmode)
5574 && out_mode == DFmode && out_n == 2
5575 && in_mode == DFmode && in_n == 2)
5576 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5577 if (VECTOR_UNIT_VSX_P (V4SFmode)
5578 && out_mode == SFmode && out_n == 4
5579 && in_mode == SFmode && in_n == 4)
5580 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5581 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5582 && out_mode == SFmode && out_n == 4
5583 && in_mode == SFmode && in_n == 4)
5584 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5585 break;
5586 CASE_CFN_FLOOR:
5587 if (VECTOR_UNIT_VSX_P (V2DFmode)
5588 && out_mode == DFmode && out_n == 2
5589 && in_mode == DFmode && in_n == 2)
5590 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5591 if (VECTOR_UNIT_VSX_P (V4SFmode)
5592 && out_mode == SFmode && out_n == 4
5593 && in_mode == SFmode && in_n == 4)
5594 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5595 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5596 && out_mode == SFmode && out_n == 4
5597 && in_mode == SFmode && in_n == 4)
5598 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5599 break;
5600 CASE_CFN_FMA:
5601 if (VECTOR_UNIT_VSX_P (V2DFmode)
5602 && out_mode == DFmode && out_n == 2
5603 && in_mode == DFmode && in_n == 2)
5604 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5605 if (VECTOR_UNIT_VSX_P (V4SFmode)
5606 && out_mode == SFmode && out_n == 4
5607 && in_mode == SFmode && in_n == 4)
5608 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5609 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5610 && out_mode == SFmode && out_n == 4
5611 && in_mode == SFmode && in_n == 4)
5612 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5613 break;
5614 CASE_CFN_TRUNC:
5615 if (VECTOR_UNIT_VSX_P (V2DFmode)
5616 && out_mode == DFmode && out_n == 2
5617 && in_mode == DFmode && in_n == 2)
5618 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5619 if (VECTOR_UNIT_VSX_P (V4SFmode)
5620 && out_mode == SFmode && out_n == 4
5621 && in_mode == SFmode && in_n == 4)
5622 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5623 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5624 && out_mode == SFmode && out_n == 4
5625 && in_mode == SFmode && in_n == 4)
5626 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5627 break;
5628 CASE_CFN_NEARBYINT:
5629 if (VECTOR_UNIT_VSX_P (V2DFmode)
5630 && flag_unsafe_math_optimizations
5631 && out_mode == DFmode && out_n == 2
5632 && in_mode == DFmode && in_n == 2)
5633 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5634 if (VECTOR_UNIT_VSX_P (V4SFmode)
5635 && flag_unsafe_math_optimizations
5636 && out_mode == SFmode && out_n == 4
5637 && in_mode == SFmode && in_n == 4)
5638 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5639 break;
5640 CASE_CFN_RINT:
5641 if (VECTOR_UNIT_VSX_P (V2DFmode)
5642 && !flag_trapping_math
5643 && out_mode == DFmode && out_n == 2
5644 && in_mode == DFmode && in_n == 2)
5645 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5646 if (VECTOR_UNIT_VSX_P (V4SFmode)
5647 && !flag_trapping_math
5648 && out_mode == SFmode && out_n == 4
5649 && in_mode == SFmode && in_n == 4)
5650 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5651 break;
5652 default:
5653 break;
5654 }
5655
5656 /* Generate calls to libmass if appropriate. */
5657 if (rs6000_veclib_handler)
5658 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5659
5660 return NULL_TREE;
5661 }
5662
5663 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5664
5665 static tree
5666 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5667 tree type_in)
5668 {
5669 machine_mode in_mode, out_mode;
5670 int in_n, out_n;
5671
5672 if (TARGET_DEBUG_BUILTIN)
5673 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5674 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5675 GET_MODE_NAME (TYPE_MODE (type_out)),
5676 GET_MODE_NAME (TYPE_MODE (type_in)));
5677
5678 if (TREE_CODE (type_out) != VECTOR_TYPE
5679 || TREE_CODE (type_in) != VECTOR_TYPE)
5680 return NULL_TREE;
5681
5682 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5683 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5684 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5685 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5686
5687 enum rs6000_builtins fn
5688 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5689 switch (fn)
5690 {
5691 case RS6000_BUILTIN_RSQRTF:
5692 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5693 && out_mode == SFmode && out_n == 4
5694 && in_mode == SFmode && in_n == 4)
5695 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5696 break;
5697 case RS6000_BUILTIN_RSQRT:
5698 if (VECTOR_UNIT_VSX_P (V2DFmode)
5699 && out_mode == DFmode && out_n == 2
5700 && in_mode == DFmode && in_n == 2)
5701 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5702 break;
5703 case RS6000_BUILTIN_RECIPF:
5704 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5705 && out_mode == SFmode && out_n == 4
5706 && in_mode == SFmode && in_n == 4)
5707 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5708 break;
5709 case RS6000_BUILTIN_RECIP:
5710 if (VECTOR_UNIT_VSX_P (V2DFmode)
5711 && out_mode == DFmode && out_n == 2
5712 && in_mode == DFmode && in_n == 2)
5713 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5714 break;
5715 default:
5716 break;
5717 }
5718 return NULL_TREE;
5719 }
5720 \f
5721 /* Default CPU string for rs6000*_file_start functions. */
5722 static const char *rs6000_default_cpu;
5723
5724 /* Do anything needed at the start of the asm file. */
5725
5726 static void
5727 rs6000_file_start (void)
5728 {
5729 char buffer[80];
5730 const char *start = buffer;
5731 FILE *file = asm_out_file;
5732
5733 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5734
5735 default_file_start ();
5736
5737 if (flag_verbose_asm)
5738 {
5739 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5740
5741 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5742 {
5743 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5744 start = "";
5745 }
5746
5747 if (global_options_set.x_rs6000_cpu_index)
5748 {
5749 fprintf (file, "%s -mcpu=%s", start,
5750 processor_target_table[rs6000_cpu_index].name);
5751 start = "";
5752 }
5753
5754 if (global_options_set.x_rs6000_tune_index)
5755 {
5756 fprintf (file, "%s -mtune=%s", start,
5757 processor_target_table[rs6000_tune_index].name);
5758 start = "";
5759 }
5760
5761 if (PPC405_ERRATUM77)
5762 {
5763 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5764 start = "";
5765 }
5766
5767 #ifdef USING_ELFOS_H
5768 switch (rs6000_sdata)
5769 {
5770 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5771 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5772 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5773 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5774 }
5775
5776 if (rs6000_sdata && g_switch_value)
5777 {
5778 fprintf (file, "%s -G %d", start,
5779 g_switch_value);
5780 start = "";
5781 }
5782 #endif
5783
5784 if (*start == '\0')
5785 putc ('\n', file);
5786 }
5787
5788 #ifdef USING_ELFOS_H
5789 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5790 && !global_options_set.x_rs6000_cpu_index)
5791 {
5792 fputs ("\t.machine ", asm_out_file);
5793 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5794 fputs ("power9\n", asm_out_file);
5795 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5796 fputs ("power8\n", asm_out_file);
5797 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5798 fputs ("power7\n", asm_out_file);
5799 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5800 fputs ("power6\n", asm_out_file);
5801 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5802 fputs ("power5\n", asm_out_file);
5803 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5804 fputs ("power4\n", asm_out_file);
5805 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5806 fputs ("ppc64\n", asm_out_file);
5807 else
5808 fputs ("ppc\n", asm_out_file);
5809 }
5810 #endif
5811
5812 if (DEFAULT_ABI == ABI_ELFv2)
5813 fprintf (file, "\t.abiversion 2\n");
5814 }
5815
5816 \f
5817 /* Return nonzero if this function is known to have a null epilogue. */
5818
5819 int
5820 direct_return (void)
5821 {
5822 if (reload_completed)
5823 {
5824 rs6000_stack_t *info = rs6000_stack_info ();
5825
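/* first_gp_reg_save == 32 and first_fp_reg_save == 64 are one past the last
   GPR and FPR register numbers, i.e. no GPR or FPR needs saving; together
   with the remaining checks, the epilogue has nothing to do. */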
5826 if (info->first_gp_reg_save == 32
5827 && info->first_fp_reg_save == 64
5828 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5829 && ! info->lr_save_p
5830 && ! info->cr_save_p
5831 && info->vrsave_size == 0
5832 && ! info->push_p)
5833 return 1;
5834 }
5835
5836 return 0;
5837 }
5838
5839 /* Helper for num_insns_constant. Calculate number of instructions to
5840 load VALUE to a single gpr using combinations of addi, addis, ori,
5841 oris and sldi instructions. */
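/* For example, the 64-bit constant 0x123456789abcdef0 takes five
   instructions: lis and ori to form the high 32 bits, sldi by 32, then
   oris and ori to fill in the low 32 bits. */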
5842
5843 static int
5844 num_insns_constant_gpr (HOST_WIDE_INT value)
5845 {
5846 /* signed constant loadable with addi */
5847 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5848 return 1;
5849
5850 /* constant loadable with addis */
5851 else if ((value & 0xffff) == 0
5852 && (value >> 31 == -1 || value >> 31 == 0))
5853 return 1;
5854
5855 else if (TARGET_POWERPC64)
5856 {
5857 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5858 HOST_WIDE_INT high = value >> 31;
5859
5860 if (high == 0 || high == -1)
5861 return 2;
5862
5863 high >>= 1;
5864
5865 if (low == 0)
5866 return num_insns_constant_gpr (high) + 1;
5867 else if (high == 0)
5868 return num_insns_constant_gpr (low) + 1;
5869 else
5870 return (num_insns_constant_gpr (high)
5871 + num_insns_constant_gpr (low) + 1);
5872 }
5873
5874 else
5875 return 2;
5876 }
5877
5878 /* Helper for num_insns_constant. Allow constants formed by the
5879 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5880 and handle modes that require multiple gprs. */
5881
5882 static int
5883 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5884 {
5885 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5886 int total = 0;
5887 while (nregs-- > 0)
5888 {
5889 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5890 int insns = num_insns_constant_gpr (low);
5891 if (insns > 2
5892 /* We won't get more than 2 from num_insns_constant_gpr
5893 except when TARGET_POWERPC64 and mode is DImode or
5894 wider, so the register mode must be DImode. */
5895 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5896 insns = 2;
5897 total += insns;
5898 value >>= BITS_PER_WORD;
5899 }
5900 return total;
5901 }
5902
5903 /* Return the number of instructions it takes to form a constant in as
5904 many gprs as are needed for MODE. */
5905
5906 int
5907 num_insns_constant (rtx op, machine_mode mode)
5908 {
5909 HOST_WIDE_INT val;
5910
5911 switch (GET_CODE (op))
5912 {
5913 case CONST_INT:
5914 val = INTVAL (op);
5915 break;
5916
5917 case CONST_WIDE_INT:
5918 {
5919 int insns = 0;
5920 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5921 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5922 DImode);
5923 return insns;
5924 }
5925
5926 case CONST_DOUBLE:
5927 {
5928 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5929
5930 if (mode == SFmode || mode == SDmode)
5931 {
5932 long l;
5933
5934 if (mode == SDmode)
5935 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5936 else
5937 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5938 /* See the first define_split in rs6000.md handling a
5939 const_double_operand. */
5940 val = l;
5941 mode = SImode;
5942 }
5943 else if (mode == DFmode || mode == DDmode)
5944 {
5945 long l[2];
5946
5947 if (mode == DDmode)
5948 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5949 else
5950 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5951
5952 /* See the second (32-bit) and third (64-bit) define_split
5953 in rs6000.md handling a const_double_operand. */
5954 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5955 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5956 mode = DImode;
5957 }
5958 else if (mode == TFmode || mode == TDmode
5959 || mode == KFmode || mode == IFmode)
5960 {
5961 long l[4];
5962 int insns;
5963
5964 if (mode == TDmode)
5965 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5966 else
5967 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5968
5969 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5970 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5971 insns = num_insns_constant_multi (val, DImode);
5972 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5973 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5974 insns += num_insns_constant_multi (val, DImode);
5975 return insns;
5976 }
5977 else
5978 gcc_unreachable ();
5979 }
5980 break;
5981
5982 default:
5983 gcc_unreachable ();
5984 }
5985
5986 return num_insns_constant_multi (val, mode);
5987 }
5988
5989 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5990 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5991 corresponding element of the vector, but for V4SFmode, the
5992 corresponding "float" is interpreted as an SImode integer. */
5993
5994 HOST_WIDE_INT
5995 const_vector_elt_as_int (rtx op, unsigned int elt)
5996 {
5997 rtx tmp;
5998
5999 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6000 gcc_assert (GET_MODE (op) != V2DImode
6001 && GET_MODE (op) != V2DFmode);
6002
6003 tmp = CONST_VECTOR_ELT (op, elt);
6004 if (GET_MODE (op) == V4SFmode)
6005 tmp = gen_lowpart (SImode, tmp);
6006 return INTVAL (tmp);
6007 }
6008
6009 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6010 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6011 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6012 all items are set to the same value and contain COPIES replicas of the
6013 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6014 operand and the others are set to the value of the operand's msb. */
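/* For example, the V8HImode constant { 0x0505, ..., 0x0505 } is recognized
   with COPIES == 2, since it is a vspltisb of 5 viewed as halfwords, while
   { 5, 5, 5, 5, 5, 5, 5, 5 } is recognized with STEP == 1 and COPIES == 1
   as a plain vspltish of 5. */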
6015
6016 static bool
6017 vspltis_constant (rtx op, unsigned step, unsigned copies)
6018 {
6019 machine_mode mode = GET_MODE (op);
6020 machine_mode inner = GET_MODE_INNER (mode);
6021
6022 unsigned i;
6023 unsigned nunits;
6024 unsigned bitsize;
6025 unsigned mask;
6026
6027 HOST_WIDE_INT val;
6028 HOST_WIDE_INT splat_val;
6029 HOST_WIDE_INT msb_val;
6030
6031 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6032 return false;
6033
6034 nunits = GET_MODE_NUNITS (mode);
6035 bitsize = GET_MODE_BITSIZE (inner);
6036 mask = GET_MODE_MASK (inner);
6037
6038 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6039 splat_val = val;
6040 msb_val = val >= 0 ? 0 : -1;
6041
6042 /* Construct the value to be splatted, if possible. If not, return false. */
6043 for (i = 2; i <= copies; i *= 2)
6044 {
6045 HOST_WIDE_INT small_val;
6046 bitsize /= 2;
6047 small_val = splat_val >> bitsize;
6048 mask >>= bitsize;
6049 if (splat_val != ((HOST_WIDE_INT)
6050 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6051 | (small_val & mask)))
6052 return false;
6053 splat_val = small_val;
6054 }
6055
6056 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6057 if (EASY_VECTOR_15 (splat_val))
6058 ;
6059
6060 /* Also check if we can splat, and then add the result to itself. Do so if
6061 the value is positive, or if the splat instruction is using OP's mode;
6062 for splat_val < 0, the splat and the add should use the same mode. */
6063 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6064 && (splat_val >= 0 || (step == 1 && copies == 1)))
6065 ;
6066
6067 /* Also check if we are loading up the most significant bit, which can be
6068 done by loading up -1 and shifting the value left by -1. */
6069 else if (EASY_VECTOR_MSB (splat_val, inner))
6070 ;
6071
6072 else
6073 return false;
6074
6075 /* Check if VAL is present in every STEP-th element, and the
6076 other elements are filled with its most significant bit. */
6077 for (i = 1; i < nunits; ++i)
6078 {
6079 HOST_WIDE_INT desired_val;
6080 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6081 if ((i & (step - 1)) == 0)
6082 desired_val = val;
6083 else
6084 desired_val = msb_val;
6085
6086 if (desired_val != const_vector_elt_as_int (op, elt))
6087 return false;
6088 }
6089
6090 return true;
6091 }
6092
6093 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6094 instruction, filling in the bottom elements with 0 or -1.
6095
6096 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6097 for the number of zeroes to shift in, or negative for the number of 0xff
6098 bytes to shift in.
6099
6100 OP is a CONST_VECTOR. */
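/* For example, the V4SImode constant { 5, 5, 5, 0 } (big-endian element
   order) is a vspltisw of 5 followed by a VSLDOI shifting in four zero
   bytes, so we return 4 for it. */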
6101
6102 int
6103 vspltis_shifted (rtx op)
6104 {
6105 machine_mode mode = GET_MODE (op);
6106 machine_mode inner = GET_MODE_INNER (mode);
6107
6108 unsigned i, j;
6109 unsigned nunits;
6110 unsigned mask;
6111
6112 HOST_WIDE_INT val;
6113
6114 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6115 return 0;
6116
6117 /* We need to create pseudo registers to do the shift, so don't recognize
6118 shift vector constants after reload. */
6119 if (!can_create_pseudo_p ())
6120 return 0;
6121
6122 nunits = GET_MODE_NUNITS (mode);
6123 mask = GET_MODE_MASK (inner);
6124
6125 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6126
6127 /* Check if the value can really be the operand of a vspltis[bhw]. */
6128 if (EASY_VECTOR_15 (val))
6129 ;
6130
6131 /* Also check if we are loading up the most significant bit, which can be
6132 done by loading up -1 and shifting the value left by -1. */
6133 else if (EASY_VECTOR_MSB (val, inner))
6134 ;
6135
6136 else
6137 return 0;
6138
6139 /* Check if VAL is present in every STEP-th element until we find elements
6140 that are 0 or all 1 bits. */
6141 for (i = 1; i < nunits; ++i)
6142 {
6143 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6144 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6145
6146 /* If the value isn't the splat value, check for the remaining elements
6147 being 0/-1. */
6148 if (val != elt_val)
6149 {
6150 if (elt_val == 0)
6151 {
6152 for (j = i+1; j < nunits; ++j)
6153 {
6154 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6155 if (const_vector_elt_as_int (op, elt2) != 0)
6156 return 0;
6157 }
6158
6159 return (nunits - i) * GET_MODE_SIZE (inner);
6160 }
6161
6162 else if ((elt_val & mask) == mask)
6163 {
6164 for (j = i+1; j < nunits; ++j)
6165 {
6166 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6167 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6168 return 0;
6169 }
6170
6171 return -((nunits - i) * GET_MODE_SIZE (inner));
6172 }
6173
6174 else
6175 return 0;
6176 }
6177 }
6178
6179 /* If all elements are equal, we don't need to do VSLDOI. */
6180 return 0;
6181 }
6182
6183
6184 /* Return true if OP is of the given MODE and can be synthesized
6185 with a vspltisb, vspltish or vspltisw. */
6186
6187 bool
6188 easy_altivec_constant (rtx op, machine_mode mode)
6189 {
6190 unsigned step, copies;
6191
6192 if (mode == VOIDmode)
6193 mode = GET_MODE (op);
6194 else if (mode != GET_MODE (op))
6195 return false;
6196
6197 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6198 constants. */
6199 if (mode == V2DFmode)
6200 return zero_constant (op, mode);
6201
6202 else if (mode == V2DImode)
6203 {
6204 if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
6205 || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
6206 return false;
6207
6208 if (zero_constant (op, mode))
6209 return true;
6210
6211 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6212 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6213 return true;
6214
6215 return false;
6216 }
6217
6218 /* V1TImode is a special container for TImode. Ignore for now. */
6219 else if (mode == V1TImode)
6220 return false;
6221
6222 /* Start with a vspltisw. */
6223 step = GET_MODE_NUNITS (mode) / 4;
6224 copies = 1;
6225
6226 if (vspltis_constant (op, step, copies))
6227 return true;
6228
6229 /* Then try with a vspltish. */
6230 if (step == 1)
6231 copies <<= 1;
6232 else
6233 step >>= 1;
6234
6235 if (vspltis_constant (op, step, copies))
6236 return true;
6237
6238 /* And finally a vspltisb. */
6239 if (step == 1)
6240 copies <<= 1;
6241 else
6242 step >>= 1;
6243
6244 if (vspltis_constant (op, step, copies))
6245 return true;
6246
6247 if (vspltis_shifted (op) != 0)
6248 return true;
6249
6250 return false;
6251 }
6252
6253 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6254 result is OP. Abort if it is not possible. */
6255
6256 rtx
6257 gen_easy_altivec_constant (rtx op)
6258 {
6259 machine_mode mode = GET_MODE (op);
6260 int nunits = GET_MODE_NUNITS (mode);
6261 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6262 unsigned step = nunits / 4;
6263 unsigned copies = 1;
6264
6265 /* Start with a vspltisw. */
6266 if (vspltis_constant (op, step, copies))
6267 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6268
6269 /* Then try with a vspltish. */
6270 if (step == 1)
6271 copies <<= 1;
6272 else
6273 step >>= 1;
6274
6275 if (vspltis_constant (op, step, copies))
6276 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6277
6278 /* And finally a vspltisb. */
6279 if (step == 1)
6280 copies <<= 1;
6281 else
6282 step >>= 1;
6283
6284 if (vspltis_constant (op, step, copies))
6285 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6286
6287 gcc_unreachable ();
6288 }
6289
6290 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6291 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6292
6293 Return the number of instructions needed (1 or 2) in the location
6294 pointed to by NUM_INSNS_PTR.
6295
6296 Return the constant that is being split via CONSTANT_PTR. */
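/* For example, a V4SImode splat of 23 needs an xxspltib plus a sign-extend
   (*NUM_INSNS_PTR is 2), while a V16QImode splat of 23 needs only the
   xxspltib itself (*NUM_INSNS_PTR is 1). */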
6297
6298 bool
6299 xxspltib_constant_p (rtx op,
6300 machine_mode mode,
6301 int *num_insns_ptr,
6302 int *constant_ptr)
6303 {
6304 size_t nunits = GET_MODE_NUNITS (mode);
6305 size_t i;
6306 HOST_WIDE_INT value;
6307 rtx element;
6308
6309 /* Set the returned values to out-of-bounds values. */
6310 *num_insns_ptr = -1;
6311 *constant_ptr = 256;
6312
6313 if (!TARGET_P9_VECTOR)
6314 return false;
6315
6316 if (mode == VOIDmode)
6317 mode = GET_MODE (op);
6318
6319 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6320 return false;
6321
6322 /* Handle (vec_duplicate <constant>). */
6323 if (GET_CODE (op) == VEC_DUPLICATE)
6324 {
6325 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6326 && mode != V2DImode)
6327 return false;
6328
6329 element = XEXP (op, 0);
6330 if (!CONST_INT_P (element))
6331 return false;
6332
6333 value = INTVAL (element);
6334 if (!IN_RANGE (value, -128, 127))
6335 return false;
6336 }
6337
6338 /* Handle (const_vector [...]). */
6339 else if (GET_CODE (op) == CONST_VECTOR)
6340 {
6341 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6342 && mode != V2DImode)
6343 return false;
6344
6345 element = CONST_VECTOR_ELT (op, 0);
6346 if (!CONST_INT_P (element))
6347 return false;
6348
6349 value = INTVAL (element);
6350 if (!IN_RANGE (value, -128, 127))
6351 return false;
6352
6353 for (i = 1; i < nunits; i++)
6354 {
6355 element = CONST_VECTOR_ELT (op, i);
6356 if (!CONST_INT_P (element))
6357 return false;
6358
6359 if (value != INTVAL (element))
6360 return false;
6361 }
6362 }
6363
6364 /* Handle integer constants being loaded into the upper part of the VSX
6365 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6366 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6367 else if (CONST_INT_P (op))
6368 {
6369 if (!SCALAR_INT_MODE_P (mode))
6370 return false;
6371
6372 value = INTVAL (op);
6373 if (!IN_RANGE (value, -128, 127))
6374 return false;
6375
6376 if (!IN_RANGE (value, -1, 0))
6377 {
6378 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6379 return false;
6380
6381 if (EASY_VECTOR_15 (value))
6382 return false;
6383 }
6384 }
6385
6386 else
6387 return false;
6388
6389 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6390 sign extend. Special case 0/-1 to allow getting any VSX register instead
6391 of an Altivec register. */
6392 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6393 && EASY_VECTOR_15 (value))
6394 return false;
6395
6396 /* Return # of instructions and the constant byte for XXSPLTIB. */
6397 if (mode == V16QImode)
6398 *num_insns_ptr = 1;
6399
6400 else if (IN_RANGE (value, -1, 0))
6401 *num_insns_ptr = 1;
6402
6403 else
6404 *num_insns_ptr = 2;
6405
6406 *constant_ptr = (int) value;
6407 return true;
6408 }
6409
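/* Return the assembler template for moving the easy vector constant
   OPERANDS[1] into OPERANDS[0], or "#" if the move needs to be split
   into multiple instructions. */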
6410 const char *
6411 output_vec_const_move (rtx *operands)
6412 {
6413 int shift;
6414 machine_mode mode;
6415 rtx dest, vec;
6416
6417 dest = operands[0];
6418 vec = operands[1];
6419 mode = GET_MODE (dest);
6420
6421 if (TARGET_VSX)
6422 {
6423 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6424 int xxspltib_value = 256;
6425 int num_insns = -1;
6426
6427 if (zero_constant (vec, mode))
6428 {
6429 if (TARGET_P9_VECTOR)
6430 return "xxspltib %x0,0";
6431
6432 else if (dest_vmx_p)
6433 return "vspltisw %0,0";
6434
6435 else
6436 return "xxlxor %x0,%x0,%x0";
6437 }
6438
6439 if (all_ones_constant (vec, mode))
6440 {
6441 if (TARGET_P9_VECTOR)
6442 return "xxspltib %x0,255";
6443
6444 else if (dest_vmx_p)
6445 return "vspltisw %0,-1";
6446
6447 else if (TARGET_P8_VECTOR)
6448 return "xxlorc %x0,%x0,%x0";
6449
6450 else
6451 gcc_unreachable ();
6452 }
6453
6454 if (TARGET_P9_VECTOR
6455 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6456 {
6457 if (num_insns == 1)
6458 {
6459 operands[2] = GEN_INT (xxspltib_value & 0xff);
6460 return "xxspltib %x0,%2";
6461 }
6462
6463 return "#";
6464 }
6465 }
6466
6467 if (TARGET_ALTIVEC)
6468 {
6469 rtx splat_vec;
6470
6471 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6472 if (zero_constant (vec, mode))
6473 return "vspltisw %0,0";
6474
6475 if (all_ones_constant (vec, mode))
6476 return "vspltisw %0,-1";
6477
6478 /* Do we need to construct a value using VSLDOI? */
6479 shift = vspltis_shifted (vec);
6480 if (shift != 0)
6481 return "#";
6482
6483 splat_vec = gen_easy_altivec_constant (vec);
6484 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6485 operands[1] = XEXP (splat_vec, 0);
6486 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6487 return "#";
6488
6489 switch (GET_MODE (splat_vec))
6490 {
6491 case E_V4SImode:
6492 return "vspltisw %0,%1";
6493
6494 case E_V8HImode:
6495 return "vspltish %0,%1";
6496
6497 case E_V16QImode:
6498 return "vspltisb %0,%1";
6499
6500 default:
6501 gcc_unreachable ();
6502 }
6503 }
6504
6505 gcc_unreachable ();
6506 }
6507
6508 /* Initialize vector TARGET to VALS. */
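/* VALS is a PARALLEL containing the element initializers, which may be
   any mix of constants, registers and memory references. */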
6509
6510 void
6511 rs6000_expand_vector_init (rtx target, rtx vals)
6512 {
6513 machine_mode mode = GET_MODE (target);
6514 machine_mode inner_mode = GET_MODE_INNER (mode);
6515 int n_elts = GET_MODE_NUNITS (mode);
6516 int n_var = 0, one_var = -1;
6517 bool all_same = true, all_const_zero = true;
6518 rtx x, mem;
6519 int i;
6520
6521 for (i = 0; i < n_elts; ++i)
6522 {
6523 x = XVECEXP (vals, 0, i);
6524 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6525 ++n_var, one_var = i;
6526 else if (x != CONST0_RTX (inner_mode))
6527 all_const_zero = false;
6528
6529 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6530 all_same = false;
6531 }
6532
6533 if (n_var == 0)
6534 {
6535 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6536 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6537 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6538 {
6539 /* Zero register. */
6540 emit_move_insn (target, CONST0_RTX (mode));
6541 return;
6542 }
6543 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6544 {
6545 /* Splat immediate. */
6546 emit_insn (gen_rtx_SET (target, const_vec));
6547 return;
6548 }
6549 else
6550 {
6551 /* Load from constant pool. */
6552 emit_move_insn (target, const_vec);
6553 return;
6554 }
6555 }
6556
6557 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6558 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6559 {
6560 rtx op[2];
6561 size_t i;
6562 size_t num_elements = all_same ? 1 : 2;
6563 for (i = 0; i < num_elements; i++)
6564 {
6565 op[i] = XVECEXP (vals, 0, i);
6566 /* Just in case there is a SUBREG with a smaller mode, do a
6567 conversion. */
6568 if (GET_MODE (op[i]) != inner_mode)
6569 {
6570 rtx tmp = gen_reg_rtx (inner_mode);
6571 convert_move (tmp, op[i], 0);
6572 op[i] = tmp;
6573 }
6574 /* Allow load with splat double word. */
6575 else if (MEM_P (op[i]))
6576 {
6577 if (!all_same)
6578 op[i] = force_reg (inner_mode, op[i]);
6579 }
6580 else if (!REG_P (op[i]))
6581 op[i] = force_reg (inner_mode, op[i]);
6582 }
6583
6584 if (all_same)
6585 {
6586 if (mode == V2DFmode)
6587 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6588 else
6589 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6590 }
6591 else
6592 {
6593 if (mode == V2DFmode)
6594 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6595 else
6596 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6597 }
6598 return;
6599 }
6600
6601 /* Special case initializing vector int if we are on 64-bit systems with
6602 direct move or we have the ISA 3.0 instructions. */
6603 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6604 && TARGET_DIRECT_MOVE_64BIT)
6605 {
6606 if (all_same)
6607 {
6608 rtx element0 = XVECEXP (vals, 0, 0);
6609 if (MEM_P (element0))
6610 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6611 else
6612 element0 = force_reg (SImode, element0);
6613
6614 if (TARGET_P9_VECTOR)
6615 emit_insn (gen_vsx_splat_v4si (target, element0));
6616 else
6617 {
6618 rtx tmp = gen_reg_rtx (DImode);
6619 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6620 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6621 }
6622 return;
6623 }
6624 else
6625 {
6626 rtx elements[4];
6627 size_t i;
6628
6629 for (i = 0; i < 4; i++)
6630 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6631
6632 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6633 elements[2], elements[3]));
6634 return;
6635 }
6636 }
6637
6638 /* With single-precision floating point on VSX, we know that internally
6639 single precision is actually represented as a double, so either make 2 V2DF
6640 vectors and convert those vectors to single precision, or do one
6641 conversion and splat the result to the other elements. */
6642 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6643 {
6644 if (all_same)
6645 {
6646 rtx element0 = XVECEXP (vals, 0, 0);
6647
6648 if (TARGET_P9_VECTOR)
6649 {
6650 if (MEM_P (element0))
6651 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6652
6653 emit_insn (gen_vsx_splat_v4sf (target, element0));
6654 }
6655
6656 else
6657 {
6658 rtx freg = gen_reg_rtx (V4SFmode);
6659 rtx sreg = force_reg (SFmode, element0);
6660 rtx cvt = (TARGET_XSCVDPSPN
6661 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6662 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6663
6664 emit_insn (cvt);
6665 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6666 const0_rtx));
6667 }
6668 }
6669 else
6670 {
6671 rtx dbl_even = gen_reg_rtx (V2DFmode);
6672 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6673 rtx flt_even = gen_reg_rtx (V4SFmode);
6674 rtx flt_odd = gen_reg_rtx (V4SFmode);
6675 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6676 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6677 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6678 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6679
6680 /* Use VMRGEW if we can instead of doing a permute. */
6681 if (TARGET_P8_VECTOR)
6682 {
6683 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6684 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6685 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6686 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6687 if (BYTES_BIG_ENDIAN)
6688 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6689 else
6690 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6691 }
6692 else
6693 {
6694 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6695 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6696 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6697 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6698 rs6000_expand_extract_even (target, flt_even, flt_odd);
6699 }
6700 }
6701 return;
6702 }
6703
6704 /* Special case initializing vector short/char that are splats if we are on
6705 64-bit systems with direct move. */
6706 if (all_same && TARGET_DIRECT_MOVE_64BIT
6707 && (mode == V16QImode || mode == V8HImode))
6708 {
6709 rtx op0 = XVECEXP (vals, 0, 0);
6710 rtx di_tmp = gen_reg_rtx (DImode);
6711
6712 if (!REG_P (op0))
6713 op0 = force_reg (GET_MODE_INNER (mode), op0);
6714
6715 if (mode == V16QImode)
6716 {
6717 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6718 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6719 return;
6720 }
6721
6722 if (mode == V8HImode)
6723 {
6724 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6725 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6726 return;
6727 }
6728 }
6729
6730 /* Store value to stack temp. Load vector element. Splat. However, splat
6731 of 64-bit items is not supported on Altivec. */
6732 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6733 {
6734 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6735 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6736 XVECEXP (vals, 0, 0));
6737 x = gen_rtx_UNSPEC (VOIDmode,
6738 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6739 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6740 gen_rtvec (2,
6741 gen_rtx_SET (target, mem),
6742 x)));
6743 x = gen_rtx_VEC_SELECT (inner_mode, target,
6744 gen_rtx_PARALLEL (VOIDmode,
6745 gen_rtvec (1, const0_rtx)));
6746 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6747 return;
6748 }
6749
6750 /* One field is non-constant. Load constant then overwrite
6751 varying field. */
6752 if (n_var == 1)
6753 {
6754 rtx copy = copy_rtx (vals);
6755
6756 /* Load constant part of vector, substitute neighboring value for
6757 varying element. */
6758 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6759 rs6000_expand_vector_init (target, copy);
6760
6761 /* Insert variable. */
6762 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6763 return;
6764 }
6765
6766 /* Construct the vector in memory one field at a time
6767 and load the whole vector. */
6768 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6769 for (i = 0; i < n_elts; i++)
6770 emit_move_insn (adjust_address_nv (mem, inner_mode,
6771 i * GET_MODE_SIZE (inner_mode)),
6772 XVECEXP (vals, 0, i));
6773 emit_move_insn (target, mem);
6774 }
6775
6776 /* Set field ELT of TARGET to VAL. */
6777
6778 void
6779 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6780 {
6781 machine_mode mode = GET_MODE (target);
6782 machine_mode inner_mode = GET_MODE_INNER (mode);
6783 rtx reg = gen_reg_rtx (mode);
6784 rtx mask, mem, x;
6785 int width = GET_MODE_SIZE (inner_mode);
6786 int i;
6787
6788 val = force_reg (GET_MODE (val), val);
6789
6790 if (VECTOR_MEM_VSX_P (mode))
6791 {
6792 rtx insn = NULL_RTX;
6793 rtx elt_rtx = GEN_INT (elt);
6794
6795 if (mode == V2DFmode)
6796 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6797
6798 else if (mode == V2DImode)
6799 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6800
6801 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6802 {
6803 if (mode == V4SImode)
6804 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6805 else if (mode == V8HImode)
6806 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6807 else if (mode == V16QImode)
6808 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6809 else if (mode == V4SFmode)
6810 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6811 }
6812
6813 if (insn)
6814 {
6815 emit_insn (insn);
6816 return;
6817 }
6818 }
6819
6820 /* Simplify setting single-element vectors like V1TImode. */
6821 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6822 {
6823 emit_move_insn (target, gen_lowpart (mode, val));
6824 return;
6825 }
6826
6827 /* Load single variable value. */
6828 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6829 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6830 x = gen_rtx_UNSPEC (VOIDmode,
6831 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6832 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6833 gen_rtvec (2,
6834 gen_rtx_SET (reg, mem),
6835 x)));
6836
6837 /* Linear sequence. */
6838 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6839 for (i = 0; i < 16; ++i)
6840 XVECEXP (mask, 0, i) = GEN_INT (i);
6841
6842 /* Set permute mask to insert element into target. */
6843 for (i = 0; i < width; ++i)
6844 XVECEXP (mask, 0, elt*width + i)
6845 = GEN_INT (i + 0x10);
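/* Worked example (added for illustration): inserting element 1 of a V4SI
   (width == 4) rewrites selector bytes 4..7 of the identity mask to
   0x10..0x13. Since VPERM selector values 0x00..0x0f pick bytes of the
   first input and 0x10..0x1f pick bytes of the second, the permute below
   takes the inserted value from REG and every other byte from TARGET. */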
6846 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6847
6848 if (BYTES_BIG_ENDIAN)
6849 x = gen_rtx_UNSPEC (mode,
6850 gen_rtvec (3, target, reg,
6851 force_reg (V16QImode, x)),
6852 UNSPEC_VPERM);
6853 else
6854 {
6855 if (TARGET_P9_VECTOR)
6856 x = gen_rtx_UNSPEC (mode,
6857 gen_rtvec (3, reg, target,
6858 force_reg (V16QImode, x)),
6859 UNSPEC_VPERMR);
6860 else
6861 {
6862 /* Invert selector. We prefer to generate VNAND on P8 so
6863 that future fusion opportunities can kick in, but must
6864 generate VNOR elsewhere. */
6865 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6866 rtx iorx = (TARGET_P8_VECTOR
6867 ? gen_rtx_IOR (V16QImode, notx, notx)
6868 : gen_rtx_AND (V16QImode, notx, notx));
6869 rtx tmp = gen_reg_rtx (V16QImode);
6870 emit_insn (gen_rtx_SET (tmp, iorx));
6871
6872 /* Permute with operands reversed and adjusted selector. */
6873 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6874 UNSPEC_VPERM);
6875 }
6876 }
6877
6878 emit_insn (gen_rtx_SET (target, x));
6879 }
6880
6881 /* Extract field ELT from VEC into TARGET. */
6882
6883 void
6884 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6885 {
6886 machine_mode mode = GET_MODE (vec);
6887 machine_mode inner_mode = GET_MODE_INNER (mode);
6888 rtx mem;
6889
6890 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6891 {
6892 switch (mode)
6893 {
6894 default:
6895 break;
6896 case E_V1TImode:
6897 emit_move_insn (target, gen_lowpart (TImode, vec));
6898 return;
6899 case E_V2DFmode:
6900 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6901 return;
6902 case E_V2DImode:
6903 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6904 return;
6905 case E_V4SFmode:
6906 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6907 return;
6908 case E_V16QImode:
6909 if (TARGET_DIRECT_MOVE_64BIT)
6910 {
6911 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6912 return;
6913 }
6914 else
6915 break;
6916 case E_V8HImode:
6917 if (TARGET_DIRECT_MOVE_64BIT)
6918 {
6919 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6920 return;
6921 }
6922 else
6923 break;
6924 case E_V4SImode:
6925 if (TARGET_DIRECT_MOVE_64BIT)
6926 {
6927 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6928 return;
6929 }
6930 break;
6931 }
6932 }
6933 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6934 && TARGET_DIRECT_MOVE_64BIT)
6935 {
6936 if (GET_MODE (elt) != DImode)
6937 {
6938 rtx tmp = gen_reg_rtx (DImode);
6939 convert_move (tmp, elt, 0);
6940 elt = tmp;
6941 }
6942 else if (!REG_P (elt))
6943 elt = force_reg (DImode, elt);
6944
6945 switch (mode)
6946 {
6947 case E_V1TImode:
6948 emit_move_insn (target, gen_lowpart (TImode, vec));
6949 return;
6950
6951 case E_V2DFmode:
6952 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6953 return;
6954
6955 case E_V2DImode:
6956 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6957 return;
6958
6959 case E_V4SFmode:
6960 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6961 return;
6962
6963 case E_V4SImode:
6964 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6965 return;
6966
6967 case E_V8HImode:
6968 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6969 return;
6970
6971 case E_V16QImode:
6972 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6973 return;
6974
6975 default:
6976 gcc_unreachable ();
6977 }
6978 }
6979
6980 /* Allocate mode-sized buffer. */
6981 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6982
6983 emit_move_insn (mem, vec);
6984 if (CONST_INT_P (elt))
6985 {
6986 int modulo_elt = INTVAL (elt) % GET_MODE_NUNITS (mode);
6987
6988 /* Add offset to field within buffer matching vector element. */
6989 mem = adjust_address_nv (mem, inner_mode,
6990 modulo_elt * GET_MODE_SIZE (inner_mode));
6991 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6992 }
6993 else
6994 {
6995 unsigned int ele_size = GET_MODE_SIZE (inner_mode);
6996 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
6997 rtx new_addr = gen_reg_rtx (Pmode);
6998
6999 elt = gen_rtx_AND (Pmode, elt, num_ele_m1);
7000 if (ele_size > 1)
7001 elt = gen_rtx_MULT (Pmode, elt, GEN_INT (ele_size));
7002 new_addr = gen_rtx_PLUS (Pmode, XEXP (mem, 0), elt);
7003 new_addr = change_address (mem, inner_mode, new_addr);
7004 emit_move_insn (target, new_addr);
7005 }
7006 }
7007
7008 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7009 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7010 temporary (BASE_TMP) to fix up the address. Return the new memory address
7011 that is valid for reads or writes to a given register (SCALAR_REG). */
7012
7013 rtx
7014 rs6000_adjust_vec_address (rtx scalar_reg,
7015 rtx mem,
7016 rtx element,
7017 rtx base_tmp,
7018 machine_mode scalar_mode)
7019 {
7020 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7021 rtx addr = XEXP (mem, 0);
7022 rtx element_offset;
7023 rtx new_addr;
7024 bool valid_addr_p;
7025
7026 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7027 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7028
7029 /* Calculate what we need to add to the address to get the element
7030 address. */
7031 if (CONST_INT_P (element))
7032 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7033 else
7034 {
7035 int byte_shift = exact_log2 (scalar_size);
7036 gcc_assert (byte_shift >= 0);
7037
7038 if (byte_shift == 0)
7039 element_offset = element;
7040
7041 else
7042 {
7043 if (TARGET_POWERPC64)
7044 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7045 else
7046 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7047
7048 element_offset = base_tmp;
7049 }
7050 }
7051
7052 /* Create the new address pointing to the element within the vector. If we
7053 are adding 0, we don't have to change the address. */
7054 if (element_offset == const0_rtx)
7055 new_addr = addr;
7056
7057 /* A simple indirect address can be converted into a reg + offset
7058 address. */
7059 else if (REG_P (addr) || SUBREG_P (addr))
7060 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7061
7062 /* Optimize D-FORM addresses with a constant offset and a constant element
7063 number, folding the element offset into the address directly. */
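/* Worked example (added): for ADDR == (plus r9 16) with a constant
   element offset of 8, this folds to (plus r9 24), provided the combined
   offset fits in a signed 16-bit displacement and, for 8-byte or wider
   scalars, is a multiple of 4 as DS-form requires. */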
7064 else if (GET_CODE (addr) == PLUS)
7065 {
7066 rtx op0 = XEXP (addr, 0);
7067 rtx op1 = XEXP (addr, 1);
7068 rtx insn;
7069
7070 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7071 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7072 {
7073 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7074 rtx offset_rtx = GEN_INT (offset);
7075
7076 if (IN_RANGE (offset, -32768, 32767)
7077 && (scalar_size < 8 || (offset & 0x3) == 0))
7078 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7079 else
7080 {
7081 emit_move_insn (base_tmp, offset_rtx);
7082 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7083 }
7084 }
7085 else
7086 {
7087 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7088 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7089
7090 /* Note, ADDI requires the register being added to be a base
7091 register. If the register was R0, load it up into the temporary
7092 and do the add. */
7093 if (op1_reg_p
7094 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7095 {
7096 insn = gen_add3_insn (base_tmp, op1, element_offset);
7097 gcc_assert (insn != NULL_RTX);
7098 emit_insn (insn);
7099 }
7100
7101 else if (ele_reg_p
7102 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7103 {
7104 insn = gen_add3_insn (base_tmp, element_offset, op1);
7105 gcc_assert (insn != NULL_RTX);
7106 emit_insn (insn);
7107 }
7108
7109 else
7110 {
7111 emit_move_insn (base_tmp, op1);
7112 emit_insn (gen_add2_insn (base_tmp, element_offset));
7113 }
7114
7115 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7116 }
7117 }
7118
7119 else
7120 {
7121 emit_move_insn (base_tmp, addr);
7122 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7123 }
7124
7125 /* If we have a PLUS, we need to see whether the particular register class
7126 allows for D-FORM or X-FORM addressing. */
7127 if (GET_CODE (new_addr) == PLUS)
7128 {
7129 rtx op1 = XEXP (new_addr, 1);
7130 addr_mask_type addr_mask;
7131 unsigned int scalar_regno = reg_or_subregno (scalar_reg);
7132
7133 gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
7134 if (INT_REGNO_P (scalar_regno))
7135 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7136
7137 else if (FP_REGNO_P (scalar_regno))
7138 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7139
7140 else if (ALTIVEC_REGNO_P (scalar_regno))
7141 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7142
7143 else
7144 gcc_unreachable ();
7145
7146 if (REG_P (op1) || SUBREG_P (op1))
7147 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7148 else
7149 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7150 }
7151
7152 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7153 valid_addr_p = true;
7154
7155 else
7156 valid_addr_p = false;
7157
7158 if (!valid_addr_p)
7159 {
7160 emit_move_insn (base_tmp, new_addr);
7161 new_addr = base_tmp;
7162 }
7163
7164 return change_address (mem, scalar_mode, new_addr);
7165 }
7166
7167 /* Split a variable vec_extract operation into the component instructions. */
7168
7169 void
7170 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7171 rtx tmp_altivec)
7172 {
7173 machine_mode mode = GET_MODE (src);
7174 machine_mode scalar_mode = GET_MODE_INNER (GET_MODE (src));
7175 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7176 int byte_shift = exact_log2 (scalar_size);
7177
7178 gcc_assert (byte_shift >= 0);
7179
7180 /* If we are given a memory address, optimize to load just the element. We
7181 don't have to adjust the vector element number on little endian
7182 systems. */
7183 if (MEM_P (src))
7184 {
7185 int num_elements = GET_MODE_NUNITS (mode);
7186 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7187
7188 emit_insn (gen_anddi3 (element, element, num_ele_m1));
7189 gcc_assert (REG_P (tmp_gpr));
7190 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7191 tmp_gpr, scalar_mode));
7192 return;
7193 }
7194
7195 else if (REG_P (src) || SUBREG_P (src))
7196 {
7197 int num_elements = GET_MODE_NUNITS (mode);
7198 int bits_in_element = mode_to_bits (GET_MODE_INNER (mode));
7199 int bit_shift = 7 - exact_log2 (num_elements);
7200 rtx element2;
7201 unsigned int dest_regno = reg_or_subregno (dest);
7202 unsigned int src_regno = reg_or_subregno (src);
7203 unsigned int element_regno = reg_or_subregno (element);
7204
7205 gcc_assert (REG_P (tmp_gpr));
7206
7207 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7208 a general purpose register. */
7209 if (TARGET_P9_VECTOR
7210 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7211 && INT_REGNO_P (dest_regno)
7212 && ALTIVEC_REGNO_P (src_regno)
7213 && INT_REGNO_P (element_regno))
7214 {
7215 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7216 rtx element_si = gen_rtx_REG (SImode, element_regno);
7217
7218 if (mode == V16QImode)
7219 emit_insn (BYTES_BIG_ENDIAN
7220 ? gen_vextublx (dest_si, element_si, src)
7221 : gen_vextubrx (dest_si, element_si, src));
7222
7223 else if (mode == V8HImode)
7224 {
7225 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7226 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7227 emit_insn (BYTES_BIG_ENDIAN
7228 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7229 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7230 }
7231
7232
7233 else
7234 {
7235 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7236 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7237 emit_insn (BYTES_BIG_ENDIAN
7238 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7239 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7240 }
7241
7242 return;
7243 }
7244
7245
7246 gcc_assert (REG_P (tmp_altivec));
7247
7248 /* For little endian, adjust the element ordering. For V2DI/V2DF, we can
7249 use an XOR; otherwise we need to subtract. The shift amount is chosen so
7250 that VSLO will shift the element into the upper position (adding 3
7251 converts a byte shift into a bit shift). */
7252 if (scalar_size == 8)
7253 {
7254 if (!BYTES_BIG_ENDIAN)
7255 {
7256 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7257 element2 = tmp_gpr;
7258 }
7259 else
7260 element2 = element;
7261
7262 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7263 bit. */
7264 emit_insn (gen_rtx_SET (tmp_gpr,
7265 gen_rtx_AND (DImode,
7266 gen_rtx_ASHIFT (DImode,
7267 element2,
7268 GEN_INT (6)),
7269 GEN_INT (64))));
7270 }
7271 else
7272 {
7273 if (!BYTES_BIG_ENDIAN)
7274 {
7275 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7276
7277 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7278 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7279 element2 = tmp_gpr;
7280 }
7281 else
7282 element2 = element;
7283
7284 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7285 }
7286
7287 /* Get the value into the lower byte of the Altivec register where VSLO
7288 expects it. */
7289 if (TARGET_P9_VECTOR)
7290 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7291 else if (can_create_pseudo_p ())
7292 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7293 else
7294 {
7295 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7296 emit_move_insn (tmp_di, tmp_gpr);
7297 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7298 }
7299
7300 /* Do the VSLO to get the value into the final location. */
7301 switch (mode)
7302 {
7303 case E_V2DFmode:
7304 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7305 return;
7306
7307 case E_V2DImode:
7308 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7309 return;
7310
7311 case E_V4SFmode:
7312 {
7313 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7314 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7315 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7316 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7317 tmp_altivec));
7318
7319 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7320 return;
7321 }
7322
7323 case E_V4SImode:
7324 case E_V8HImode:
7325 case E_V16QImode:
7326 {
7327 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7328 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7329 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7330 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7331 tmp_altivec));
7332 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7333 emit_insn (gen_lshrdi3 (tmp_gpr_di, tmp_gpr_di,
7334 GEN_INT (64 - bits_in_element)));
7335 return;
7336 }
7337
7338 default:
7339 gcc_unreachable ();
7340 }
7341
7342 return;
7343 }
7344 else
7345 gcc_unreachable ();
7346 }
7347
7348 /* Return the alignment of TYPE. Existing alignment is ALIGN. HOW
7349 selects whether the alignment is ABI-mandated, optional, or
7350 both ABI-mandated and optional alignment. */
7351
7352 unsigned int
7353 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7354 {
7355 if (how != align_opt)
7356 {
7357 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7358 align = 128;
7359 }
7360
7361 if (how != align_abi)
7362 {
7363 if (TREE_CODE (type) == ARRAY_TYPE
7364 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7365 {
7366 if (align < BITS_PER_WORD)
7367 align = BITS_PER_WORD;
7368 }
7369 }
7370
7371 return align;
7372 }
7373
7374 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7375 instructions simply ignore the low bits; VSX memory instructions
7376 are aligned to 4 or 8 bytes. */
7377
7378 static bool
7379 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7380 {
7381 return (STRICT_ALIGNMENT
7382 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7383 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7384 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7385 && (int) align < VECTOR_ALIGN (mode)))));
7386 }
7387
7388 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7389
7390 bool
7391 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7392 {
7393 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7394 {
7395 if (computed != 128)
7396 {
7397 static bool warned;
7398 if (!warned && warn_psabi)
7399 {
7400 warned = true;
7401 inform (input_location,
7402 "the layout of aggregates containing vectors with"
7403 " %d-byte alignment has changed in GCC 5",
7404 computed / BITS_PER_UNIT);
7405 }
7406 }
7407 /* In current GCC there is no special case. */
7408 return false;
7409 }
7410
7411 return false;
7412 }
7413
7414 /* AIX increases natural record alignment to doubleword if the first
7415 field is an FP double while the FP fields remain word aligned. */
7416
7417 unsigned int
7418 rs6000_special_round_type_align (tree type, unsigned int computed,
7419 unsigned int specified)
7420 {
7421 unsigned int align = MAX (computed, specified);
7422 tree field = TYPE_FIELDS (type);
7423
7424 /* Skip all non-field decls. */
7425 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7426 field = DECL_CHAIN (field);
7427
7428 if (field != NULL && field != type)
7429 {
7430 type = TREE_TYPE (field);
7431 while (TREE_CODE (type) == ARRAY_TYPE)
7432 type = TREE_TYPE (type);
7433
7434 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7435 align = MAX (align, 64);
7436 }
7437
7438 return align;
7439 }
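/* Illustration (added, not in the original source): under this rule a
   structure such as struct { double d; int i; } receives doubleword
   (64-bit) alignment because its first field has DFmode, while the
   double itself stays word aligned as an interior field. */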
7440
7441 /* Darwin increases record alignment to the natural alignment of
7442 the first field. */
7443
7444 unsigned int
7445 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7446 unsigned int specified)
7447 {
7448 unsigned int align = MAX (computed, specified);
7449
7450 if (TYPE_PACKED (type))
7451 return align;
7452
7453 /* Find the first field, looking down into aggregates. */
7454 do {
7455 tree field = TYPE_FIELDS (type);
7456 /* Skip all non-field decls. */
7457 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7458 field = DECL_CHAIN (field);
7459 if (! field)
7460 break;
7461 /* A packed field does not contribute any extra alignment. */
7462 if (DECL_PACKED (field))
7463 return align;
7464 type = TREE_TYPE (field);
7465 while (TREE_CODE (type) == ARRAY_TYPE)
7466 type = TREE_TYPE (type);
7467 } while (AGGREGATE_TYPE_P (type));
7468
7469 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7470 align = MAX (align, TYPE_ALIGN (type));
7471
7472 return align;
7473 }
7474
7475 /* Return 1 for an operand in small memory on V.4/eabi. */
7476
7477 int
7478 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7479 machine_mode mode ATTRIBUTE_UNUSED)
7480 {
7481 #if TARGET_ELF
7482 rtx sym_ref;
7483
7484 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7485 return 0;
7486
7487 if (DEFAULT_ABI != ABI_V4)
7488 return 0;
7489
7490 if (SYMBOL_REF_P (op))
7491 sym_ref = op;
7492
7493 else if (GET_CODE (op) != CONST
7494 || GET_CODE (XEXP (op, 0)) != PLUS
7495 || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
7496 || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
7497 return 0;
7498
7499 else
7500 {
7501 rtx sum = XEXP (op, 0);
7502 HOST_WIDE_INT summand;
7503
7504 /* We have to be careful here, because it is the referenced address
7505 that must be 32k from _SDA_BASE_, not just the symbol. */
7506 summand = INTVAL (XEXP (sum, 1));
7507 if (summand < 0 || summand > g_switch_value)
7508 return 0;
7509
7510 sym_ref = XEXP (sum, 0);
7511 }
7512
7513 return SYMBOL_REF_SMALL_P (sym_ref);
7514 #else
7515 return 0;
7516 #endif
7517 }
7518
7519 /* Return true if either operand is a general purpose register. */
7520
7521 bool
7522 gpr_or_gpr_p (rtx op0, rtx op1)
7523 {
7524 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7525 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7526 }
7527
7528 /* Return true if this is a direct move operation between GPR registers and
7529 floating point/VSX registers. */
7530
7531 bool
7532 direct_move_p (rtx op0, rtx op1)
7533 {
7534 int regno0, regno1;
7535
7536 if (!REG_P (op0) || !REG_P (op1))
7537 return false;
7538
7539 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7540 return false;
7541
7542 regno0 = REGNO (op0);
7543 regno1 = REGNO (op1);
7544 if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
7545 return false;
7546
7547 if (INT_REGNO_P (regno0))
7548 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7549
7550 else if (INT_REGNO_P (regno1))
7551 {
7552 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7553 return true;
7554
7555 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7556 return true;
7557 }
7558
7559 return false;
7560 }
7561
7562 /* Return true if the OFFSET is valid for the quad address instructions that
7563 use d-form (register + offset) addressing. */
7564
7565 static inline bool
7566 quad_address_offset_p (HOST_WIDE_INT offset)
7567 {
7568 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7569 }
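/* Worked examples (added): 32752 is accepted (in range and a multiple
   of 16); 32756 is rejected as misaligned; 32768 is rejected as out of
   the signed 16-bit range. */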
7570
7571 /* Return true if ADDR is an acceptable address for a quad memory
7572 operation of mode MODE (either LQ/STQ for general purpose registers, or
7573 LXV/STXV for vector registers under ISA 3.0). STRICT says whether the
7574 base register must pass the strict form of INT_REG_OK_FOR_BASE_P,
7575 i.e. whether unallocated pseudos are rejected. */
7576
7577 bool
7578 quad_address_p (rtx addr, machine_mode mode, bool strict)
7579 {
7580 rtx op0, op1;
7581
7582 if (GET_MODE_SIZE (mode) != 16)
7583 return false;
7584
7585 if (legitimate_indirect_address_p (addr, strict))
7586 return true;
7587
7588 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7589 return false;
7590
7591 if (GET_CODE (addr) != PLUS)
7592 return false;
7593
7594 op0 = XEXP (addr, 0);
7595 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7596 return false;
7597
7598 op1 = XEXP (addr, 1);
7599 if (!CONST_INT_P (op1))
7600 return false;
7601
7602 return quad_address_offset_p (INTVAL (op1));
7603 }
7604
7605 /* Return true if this is a load or store quad operation. This function does
7606 not handle the atomic quad memory instructions. */
7607
7608 bool
7609 quad_load_store_p (rtx op0, rtx op1)
7610 {
7611 bool ret;
7612
7613 if (!TARGET_QUAD_MEMORY)
7614 ret = false;
7615
7616 else if (REG_P (op0) && MEM_P (op1))
7617 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7618 && quad_memory_operand (op1, GET_MODE (op1))
7619 && !reg_overlap_mentioned_p (op0, op1));
7620
7621 else if (MEM_P (op0) && REG_P (op1))
7622 ret = (quad_memory_operand (op0, GET_MODE (op0))
7623 && quad_int_reg_operand (op1, GET_MODE (op1)));
7624
7625 else
7626 ret = false;
7627
7628 if (TARGET_DEBUG_ADDR)
7629 {
7630 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7631 ret ? "true" : "false");
7632 debug_rtx (gen_rtx_SET (op0, op1));
7633 }
7634
7635 return ret;
7636 }
7637
7638 /* Given an address, return a constant offset term if one exists. */
7639
7640 static rtx
7641 address_offset (rtx op)
7642 {
7643 if (GET_CODE (op) == PRE_INC
7644 || GET_CODE (op) == PRE_DEC)
7645 op = XEXP (op, 0);
7646 else if (GET_CODE (op) == PRE_MODIFY
7647 || GET_CODE (op) == LO_SUM)
7648 op = XEXP (op, 1);
7649
7650 if (GET_CODE (op) == CONST)
7651 op = XEXP (op, 0);
7652
7653 if (GET_CODE (op) == PLUS)
7654 op = XEXP (op, 1);
7655
7656 if (CONST_INT_P (op))
7657 return op;
7658
7659 return NULL_RTX;
7660 }
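/* Examples (added): returns 8 for (plus r3 (const_int 8)), returns 4
   for (lo_sum r3 (const (plus sym 4))), and returns NULL_RTX when the
   address has no constant term. */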
7661
7662 /* Return true if the MEM operand is a memory operand suitable for use
7663 with a (full width, possibly multiple) gpr load/store. On
7664 powerpc64 this means the offset must be divisible by 4.
7665 Implements 'Y' constraint.
7666
7667 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7668 a constraint function we know the operand has satisfied a suitable
7669 memory predicate. Also accept some odd rtl generated by reload
7670 (see rs6000_legitimize_reload_address for various forms). It is
7671 important that reload rtl be accepted by appropriate constraints
7672 but not by the operand predicate.
7673
7674 Offsetting a lo_sum should not be allowed, except where we know by
7675 alignment that a 32k boundary is not crossed, but see the ???
7676 comment in rs6000_legitimize_reload_address. Note that by
7677 "offsetting" here we mean a further offset to access parts of the
7678 MEM. It's fine to have a lo_sum where the inner address is offset
7679 from a sym, since the same sym+offset will appear in the high part
7680 of the address calculation. */
7681
7682 bool
7683 mem_operand_gpr (rtx op, machine_mode mode)
7684 {
7685 unsigned HOST_WIDE_INT offset;
7686 int extra;
7687 rtx addr = XEXP (op, 0);
7688
7689 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7690 if (TARGET_UPDATE
7691 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7692 && mode_supports_pre_incdec_p (mode)
7693 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7694 return true;
7695
7696 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7697 if (!rs6000_offsettable_memref_p (op, mode, false))
7698 return false;
7699
7700 op = address_offset (addr);
7701 if (op == NULL_RTX)
7702 return true;
7703
7704 offset = INTVAL (op);
7705 if (TARGET_POWERPC64 && (offset & 3) != 0)
7706 return false;
7707
7708 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7709 if (extra < 0)
7710 extra = 0;
7711
7712 if (GET_CODE (addr) == LO_SUM)
7713 /* For lo_sum addresses, we must allow any offset except one that
7714 causes a wrap, so test only the low 16 bits. */
7715 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
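/* Worked example (added): the expression above sign-extends the low 16
   bits, so an offset of 0x18000 becomes -0x8000 and 0x17ffc becomes
   0x7ffc; only a wrap within the 16-bit displacement field can make the
   range check below fail. */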
7716
7717 return offset + 0x8000 < 0x10000u - extra;
7718 }
7719
7720 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7721 enforce an offset divisible by 4 even for 32-bit. */
7722
7723 bool
7724 mem_operand_ds_form (rtx op, machine_mode mode)
7725 {
7726 unsigned HOST_WIDE_INT offset;
7727 int extra;
7728 rtx addr = XEXP (op, 0);
7729
7730 if (!offsettable_address_p (false, mode, addr))
7731 return false;
7732
7733 op = address_offset (addr);
7734 if (op == NULL_RTX)
7735 return true;
7736
7737 offset = INTVAL (op);
7738 if ((offset & 3) != 0)
7739 return false;
7740
7741 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7742 if (extra < 0)
7743 extra = 0;
7744
7745 if (GET_CODE (addr) == LO_SUM)
7746 /* For lo_sum addresses, we must allow any offset except one that
7747 causes a wrap, so test only the low 16 bits. */
7748 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7749
7750 return offset + 0x8000 < 0x10000u - extra;
7751 }
7752 \f
7753 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7754
7755 static bool
7756 reg_offset_addressing_ok_p (machine_mode mode)
7757 {
7758 switch (mode)
7759 {
7760 case E_V16QImode:
7761 case E_V8HImode:
7762 case E_V4SFmode:
7763 case E_V4SImode:
7764 case E_V2DFmode:
7765 case E_V2DImode:
7766 case E_V1TImode:
7767 case E_TImode:
7768 case E_TFmode:
7769 case E_KFmode:
7770 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7771 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7772 a vector mode, if we want to use the VSX registers to move it around,
7773 we need to restrict ourselves to reg+reg addressing. Similarly for
7774 IEEE 128-bit floating point that is passed in a single vector
7775 register. */
7776 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7777 return mode_supports_dq_form (mode);
7778 break;
7779
7780 case E_SDmode:
7781 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7782 addressing for the LFIWZX and STFIWX instructions. */
7783 if (TARGET_NO_SDMODE_STACK)
7784 return false;
7785 break;
7786
7787 default:
7788 break;
7789 }
7790
7791 return true;
7792 }
7793
7794 static bool
7795 virtual_stack_registers_memory_p (rtx op)
7796 {
7797 int regnum;
7798
7799 if (REG_P (op))
7800 regnum = REGNO (op);
7801
7802 else if (GET_CODE (op) == PLUS
7803 && REG_P (XEXP (op, 0))
7804 && CONST_INT_P (XEXP (op, 1)))
7805 regnum = REGNO (XEXP (op, 0));
7806
7807 else
7808 return false;
7809
7810 return (regnum >= FIRST_VIRTUAL_REGISTER
7811 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7812 }
7813
7814 /* Return true if a MODE-sized memory access to OP plus OFFSET
7815 is known not to straddle a 32k boundary. This function is used
7816 to determine whether -mcmodel=medium code can use TOC pointer
7817 relative addressing for OP. This means the alignment of the TOC
7818 pointer must also be taken into account, and unfortunately that is
7819 only 8 bytes. */
7820
7821 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7822 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7823 #endif
7824
7825 static bool
7826 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7827 machine_mode mode)
7828 {
7829 tree decl;
7830 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7831
7832 if (!SYMBOL_REF_P (op))
7833 return false;
7834
7835 /* ISA 3.0 vector d-form addressing is restricted; don't allow
7836 SYMBOL_REF. */
7837 if (mode_supports_dq_form (mode))
7838 return false;
7839
7840 dsize = GET_MODE_SIZE (mode);
7841 decl = SYMBOL_REF_DECL (op);
7842 if (!decl)
7843 {
7844 if (dsize == 0)
7845 return false;
7846
7847 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7848 replacing memory addresses with an anchor plus offset. We
7849 could find the decl by rummaging around in the block->objects
7850 VEC for the given offset but that seems like too much work. */
7851 dalign = BITS_PER_UNIT;
7852 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7853 && SYMBOL_REF_ANCHOR_P (op)
7854 && SYMBOL_REF_BLOCK (op) != NULL)
7855 {
7856 struct object_block *block = SYMBOL_REF_BLOCK (op);
7857
7858 dalign = block->alignment;
7859 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7860 }
7861 else if (CONSTANT_POOL_ADDRESS_P (op))
7862 {
7863 /* It would be nice to have get_pool_align(). */
7864 machine_mode cmode = get_pool_mode (op);
7865
7866 dalign = GET_MODE_ALIGNMENT (cmode);
7867 }
7868 }
7869 else if (DECL_P (decl))
7870 {
7871 dalign = DECL_ALIGN (decl);
7872
7873 if (dsize == 0)
7874 {
7875 /* Allow BLKmode when the entire object is known to not
7876 cross a 32k boundary. */
7877 if (!DECL_SIZE_UNIT (decl))
7878 return false;
7879
7880 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7881 return false;
7882
7883 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7884 if (dsize > 32768)
7885 return false;
7886
7887 dalign /= BITS_PER_UNIT;
7888 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7889 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7890 return dalign >= dsize;
7891 }
7892 }
7893 else
7894 gcc_unreachable ();
7895
7896 /* Find how many bits of the alignment we know for this access. */
7897 dalign /= BITS_PER_UNIT;
7898 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7899 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7900 mask = dalign - 1;
7901 lsb = offset & -offset;
7902 mask &= lsb - 1;
7903 dalign = mask + 1;
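/* Worked example (added): with dalign == 8 and offset == 20, lsb is
   20 & -20 == 4, so the known alignment of the access drops to 4 and
   only accesses with dsize <= 4 are guaranteed not to cross a 32k
   boundary. */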
7904
7905 return dalign >= dsize;
7906 }
7907
7908 static bool
7909 constant_pool_expr_p (rtx op)
7910 {
7911 rtx base, offset;
7912
7913 split_const (op, &base, &offset);
7914 return (SYMBOL_REF_P (base)
7915 && CONSTANT_POOL_ADDRESS_P (base)
7916 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7917 }
7918
7919 /* These are only used to pass through from print_operand/print_operand_address
7920 to rs6000_output_addr_const_extra over the intervening function
7921 output_addr_const, which is not target code. */
7922 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7923
7924 /* Return true if OP is a toc pointer relative address (the output
7925 of create_TOC_reference). If STRICT, do not match non-split
7926 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7927 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7928 TOCREL_OFFSET_RET respectively. */
7929
7930 bool
7931 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7932 const_rtx *tocrel_offset_ret)
7933 {
7934 if (!TARGET_TOC)
7935 return false;
7936
7937 if (TARGET_CMODEL != CMODEL_SMALL)
7938 {
7939 /* When strict ensure we have everything tidy. */
7940 if (strict
7941 && !(GET_CODE (op) == LO_SUM
7942 && REG_P (XEXP (op, 0))
7943 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7944 return false;
7945
7946 /* When not strict, allow non-split TOC addresses and also allow
7947 (lo_sum (high ..)) TOC addresses created during reload. */
7948 if (GET_CODE (op) == LO_SUM)
7949 op = XEXP (op, 1);
7950 }
7951
7952 const_rtx tocrel_base = op;
7953 const_rtx tocrel_offset = const0_rtx;
7954
7955 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7956 {
7957 tocrel_base = XEXP (op, 0);
7958 tocrel_offset = XEXP (op, 1);
7959 }
7960
7961 if (tocrel_base_ret)
7962 *tocrel_base_ret = tocrel_base;
7963 if (tocrel_offset_ret)
7964 *tocrel_offset_ret = tocrel_offset;
7965
7966 return (GET_CODE (tocrel_base) == UNSPEC
7967 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7968 && REG_P (XVECEXP (tocrel_base, 0, 1))
7969 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7970 }
7971
7972 /* Return true if X is a constant pool address, and also for cmodel=medium
7973 if X is a toc-relative address known to be offsettable within MODE. */
7974
7975 bool
7976 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7977 bool strict)
7978 {
7979 const_rtx tocrel_base, tocrel_offset;
7980 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7981 && (TARGET_CMODEL != CMODEL_MEDIUM
7982 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7983 || mode == QImode
7984 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7985 INTVAL (tocrel_offset), mode)));
7986 }
7987
7988 static bool
7989 legitimate_small_data_p (machine_mode mode, rtx x)
7990 {
7991 return (DEFAULT_ABI == ABI_V4
7992 && !flag_pic && !TARGET_TOC
7993 && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
7994 && small_data_operand (x, mode));
7995 }
7996
7997 bool
7998 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7999 bool strict, bool worst_case)
8000 {
8001 unsigned HOST_WIDE_INT offset;
8002 unsigned int extra;
8003
8004 if (GET_CODE (x) != PLUS)
8005 return false;
8006 if (!REG_P (XEXP (x, 0)))
8007 return false;
8008 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8009 return false;
8010 if (mode_supports_dq_form (mode))
8011 return quad_address_p (x, mode, strict);
8012 if (!reg_offset_addressing_ok_p (mode))
8013 return virtual_stack_registers_memory_p (x);
8014 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8015 return true;
8016 if (!CONST_INT_P (XEXP (x, 1)))
8017 return false;
8018
8019 offset = INTVAL (XEXP (x, 1));
8020 extra = 0;
8021 switch (mode)
8022 {
8023 case E_DFmode:
8024 case E_DDmode:
8025 case E_DImode:
8026 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8027 addressing. */
8028 if (VECTOR_MEM_VSX_P (mode))
8029 return false;
8030
8031 if (!worst_case)
8032 break;
8033 if (!TARGET_POWERPC64)
8034 extra = 4;
8035 else if (offset & 3)
8036 return false;
8037 break;
8038
8039 case E_TFmode:
8040 case E_IFmode:
8041 case E_KFmode:
8042 case E_TDmode:
8043 case E_TImode:
8044 case E_PTImode:
8045 extra = 8;
8046 if (!worst_case)
8047 break;
8048 if (!TARGET_POWERPC64)
8049 extra = 12;
8050 else if (offset & 3)
8051 return false;
8052 break;
8053
8054 default:
8055 break;
8056 }
8057
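/* Worked example (added): EXTRA reserves room for the trailing words of
   a multi-word access. For TImode on 32-bit in the worst case, extra is
   12, so the largest accepted offset is 32755; the final word load then
   uses displacement 32755 + 12 = 32767, still a valid signed 16-bit
   value. */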
8058 offset += 0x8000;
8059 return offset < 0x10000 - extra;
8060 }
8061
8062 bool
8063 legitimate_indexed_address_p (rtx x, int strict)
8064 {
8065 rtx op0, op1;
8066
8067 if (GET_CODE (x) != PLUS)
8068 return false;
8069
8070 op0 = XEXP (x, 0);
8071 op1 = XEXP (x, 1);
8072
8073 return (REG_P (op0) && REG_P (op1)
8074 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8075 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8076 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8077 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8078 }
8079
8080 bool
8081 avoiding_indexed_address_p (machine_mode mode)
8082 {
8083 /* Avoid indexed addressing for modes that have non-indexed
8084 load/store instruction forms. */
8085 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8086 }
8087
8088 bool
8089 legitimate_indirect_address_p (rtx x, int strict)
8090 {
8091 return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
8092 }
8093
8094 bool
8095 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8096 {
8097 if (!TARGET_MACHO || !flag_pic
8098 || mode != SImode || !MEM_P (x))
8099 return false;
8100 x = XEXP (x, 0);
8101
8102 if (GET_CODE (x) != LO_SUM)
8103 return false;
8104 if (!REG_P (XEXP (x, 0)))
8105 return false;
8106 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8107 return false;
8108 x = XEXP (x, 1);
8109
8110 return CONSTANT_P (x);
8111 }
8112
8113 static bool
8114 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8115 {
8116 if (GET_CODE (x) != LO_SUM)
8117 return false;
8118 if (!REG_P (XEXP (x, 0)))
8119 return false;
8120 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8121 return false;
8122 /* Quad-word addresses are restricted, and we can't use LO_SUM. */
8123 if (mode_supports_dq_form (mode))
8124 return false;
8125 x = XEXP (x, 1);
8126
8127 if (TARGET_ELF || TARGET_MACHO)
8128 {
8129 bool large_toc_ok;
8130
8131 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8132 return false;
8133 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8134 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8135 recognizes some LO_SUM addresses as valid although this
8136 function says the opposite. In most cases, LRA through different
8137 transformations can generate correct code for address reloads.
8138 It is only some LO_SUM cases that it cannot manage. So we need
8139 to add code analogous to that in rs6000_legitimize_reload_address
8140 for LO_SUM here, saying that some addresses are still valid. */
8141 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8142 && small_toc_ref (x, VOIDmode));
8143 if (TARGET_TOC && ! large_toc_ok)
8144 return false;
8145 if (GET_MODE_NUNITS (mode) != 1)
8146 return false;
8147 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8148 && !(/* ??? Assume floating point reg based on mode? */
8149 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8150 return false;
8151
8152 return CONSTANT_P (x) || large_toc_ok;
8153 }
8154
8155 return false;
8156 }
8157
8158
8159 /* Try machine-dependent ways of modifying an illegitimate address
8160 to be legitimate. If we find one, return the new, valid address.
8161 This is used from only one place: `memory_address' in explow.c.
8162
8163 OLDX is the address as it was before break_out_memory_refs was
8164 called. In some cases it is useful to look at this to decide what
8165 needs to be done.
8166
8167 It is always safe for this function to do nothing. It exists to
8168 recognize opportunities to optimize the output.
8169
8170 On RS/6000, first check for the sum of a register with a constant
8171 integer that is out of range. If so, generate code to add the
8172 constant with the low-order 16 bits masked to the register and force
8173 this result into another register (this can be done with `cau').
8174 Then generate an address of REG+(CONST&0xffff), allowing for the
8175 possibility of bit 16 being a one.
8176
8177 Then check for the sum of a register and something not constant; try to
8178 load the non-constant part into a register and return the sum. */
8179
8180 static rtx
8181 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8182 machine_mode mode)
8183 {
8184 unsigned int extra;
8185
8186 if (!reg_offset_addressing_ok_p (mode)
8187 || mode_supports_dq_form (mode))
8188 {
8189 if (virtual_stack_registers_memory_p (x))
8190 return x;
8191
8192 /* In theory we should not be seeing addresses of the form reg+0,
8193 but just in case it is generated, optimize it away. */
8194 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8195 return force_reg (Pmode, XEXP (x, 0));
8196
8197 /* For TImode with load/store quad, restrict addresses to just a single
8198 pointer, so it works with both GPRs and VSX registers. Otherwise,
8199 make sure both operands are registers. */
8200 else if (GET_CODE (x) == PLUS
8201 && (mode != TImode || !TARGET_VSX))
8202 return gen_rtx_PLUS (Pmode,
8203 force_reg (Pmode, XEXP (x, 0)),
8204 force_reg (Pmode, XEXP (x, 1)));
8205 else
8206 return force_reg (Pmode, x);
8207 }
8208 if (SYMBOL_REF_P (x))
8209 {
8210 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8211 if (model != 0)
8212 return rs6000_legitimize_tls_address (x, model);
8213 }
8214
8215 extra = 0;
8216 switch (mode)
8217 {
8218 case E_TFmode:
8219 case E_TDmode:
8220 case E_TImode:
8221 case E_PTImode:
8222 case E_IFmode:
8223 case E_KFmode:
8224 /* As in legitimate_offset_address_p we do not assume
8225 worst-case. The mode here is just a hint as to the registers
8226 used. A TImode is usually in gprs, but may actually be in
8227 fprs. Leave worst-case scenario for reload to handle via
8228 insn constraints. PTImode is only GPRs. */
8229 extra = 8;
8230 break;
8231 default:
8232 break;
8233 }
8234
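/* Worked example (added): for X == (plus r3 0x18000), the code below
   computes low_int = -0x8000 and high_int = 0x20000, materializes
   r3 + 0x20000 with addis, and returns that sum plus the low 16-bit
   displacement -0x8000. */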
8235 if (GET_CODE (x) == PLUS
8236 && REG_P (XEXP (x, 0))
8237 && CONST_INT_P (XEXP (x, 1))
8238 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8239 >= 0x10000 - extra))
8240 {
8241 HOST_WIDE_INT high_int, low_int;
8242 rtx sum;
8243 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8244 if (low_int >= 0x8000 - extra)
8245 low_int = 0;
8246 high_int = INTVAL (XEXP (x, 1)) - low_int;
8247 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8248 GEN_INT (high_int)), 0);
8249 return plus_constant (Pmode, sum, low_int);
8250 }
8251 else if (GET_CODE (x) == PLUS
8252 && REG_P (XEXP (x, 0))
8253 && !CONST_INT_P (XEXP (x, 1))
8254 && GET_MODE_NUNITS (mode) == 1
8255 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8256 || (/* ??? Assume floating point reg based on mode? */
8257 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8258 && !avoiding_indexed_address_p (mode))
8259 {
8260 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8261 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8262 }
8263 else if ((TARGET_ELF
8264 #if TARGET_MACHO
8265 || !MACHO_DYNAMIC_NO_PIC_P
8266 #endif
8267 )
8268 && TARGET_32BIT
8269 && TARGET_NO_TOC
8270 && !flag_pic
8271 && !CONST_INT_P (x)
8272 && !CONST_WIDE_INT_P (x)
8273 && !CONST_DOUBLE_P (x)
8274 && CONSTANT_P (x)
8275 && GET_MODE_NUNITS (mode) == 1
8276 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8277 || (/* ??? Assume floating point reg based on mode? */
8278 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8279 {
8280 rtx reg = gen_reg_rtx (Pmode);
8281 if (TARGET_ELF)
8282 emit_insn (gen_elf_high (reg, x));
8283 else
8284 emit_insn (gen_macho_high (reg, x));
8285 return gen_rtx_LO_SUM (Pmode, reg, x);
8286 }
8287 else if (TARGET_TOC
8288 && SYMBOL_REF_P (x)
8289 && constant_pool_expr_p (x)
8290 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8291 return create_TOC_reference (x, NULL_RTX);
8292 else
8293 return x;
8294 }
8295
8296 /* Debug version of rs6000_legitimize_address. */
8297 static rtx
8298 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8299 {
8300 rtx ret;
8301 rtx_insn *insns;
8302
8303 start_sequence ();
8304 ret = rs6000_legitimize_address (x, oldx, mode);
8305 insns = get_insns ();
8306 end_sequence ();
8307
8308 if (ret != x)
8309 {
8310 fprintf (stderr,
8311 "\nrs6000_legitimize_address: mode %s, old code %s, "
8312 "new code %s, modified\n",
8313 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8314 GET_RTX_NAME (GET_CODE (ret)));
8315
8316 fprintf (stderr, "Original address:\n");
8317 debug_rtx (x);
8318
8319 fprintf (stderr, "oldx:\n");
8320 debug_rtx (oldx);
8321
8322 fprintf (stderr, "New address:\n");
8323 debug_rtx (ret);
8324
8325 if (insns)
8326 {
8327 fprintf (stderr, "Insns added:\n");
8328 debug_rtx_list (insns, 20);
8329 }
8330 }
8331 else
8332 {
8333 fprintf (stderr,
8334 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8335 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8336
8337 debug_rtx (x);
8338 }
8339
8340 if (insns)
8341 emit_insn (insns);
8342
8343 return ret;
8344 }
8345
8346 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8347 We need to emit DTP-relative relocations. */
8348
8349 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8350 static void
8351 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8352 {
8353 switch (size)
8354 {
8355 case 4:
8356 fputs ("\t.long\t", file);
8357 break;
8358 case 8:
8359 fputs (DOUBLE_INT_ASM_OP, file);
8360 break;
8361 default:
8362 gcc_unreachable ();
8363 }
8364 output_addr_const (file, x);
8365 if (TARGET_ELF)
8366 fputs ("@dtprel+0x8000", file);
8367 else if (TARGET_XCOFF && SYMBOL_REF_P (x))
8368 {
8369 switch (SYMBOL_REF_TLS_MODEL (x))
8370 {
8371 case 0:
8372 break;
8373 case TLS_MODEL_LOCAL_EXEC:
8374 fputs ("@le", file);
8375 break;
8376 case TLS_MODEL_INITIAL_EXEC:
8377 fputs ("@ie", file);
8378 break;
8379 case TLS_MODEL_GLOBAL_DYNAMIC:
8380 case TLS_MODEL_LOCAL_DYNAMIC:
8381 fputs ("@m", file);
8382 break;
8383 default:
8384 gcc_unreachable ();
8385 }
8386 }
8387 }
8388
8389 /* Return true if X is a symbol that refers to real (rather than emulated)
8390 TLS. */
8391
8392 static bool
8393 rs6000_real_tls_symbol_ref_p (rtx x)
8394 {
8395 return (SYMBOL_REF_P (x)
8396 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8397 }
8398
8399 /* In the name of slightly smaller debug output, and to cater to
8400 general assembler lossage, recognize various UNSPEC sequences
8401 and turn them back into a direct symbol reference. */
8402
8403 static rtx
8404 rs6000_delegitimize_address (rtx orig_x)
8405 {
8406 rtx x, y, offset;
8407
8408 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8409 orig_x = XVECEXP (orig_x, 0, 0);
8410
8411 orig_x = delegitimize_mem_from_attrs (orig_x);
8412
8413 x = orig_x;
8414 if (MEM_P (x))
8415 x = XEXP (x, 0);
8416
8417 y = x;
8418 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8419 y = XEXP (y, 1);
8420
8421 offset = NULL_RTX;
8422 if (GET_CODE (y) == PLUS
8423 && GET_MODE (y) == Pmode
8424 && CONST_INT_P (XEXP (y, 1)))
8425 {
8426 offset = XEXP (y, 1);
8427 y = XEXP (y, 0);
8428 }
8429
8430 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8431 {
8432 y = XVECEXP (y, 0, 0);
8433
8434 #ifdef HAVE_AS_TLS
8435 /* Do not associate thread-local symbols with the original
8436 constant pool symbol. */
8437 if (TARGET_XCOFF
8438 && SYMBOL_REF_P (y)
8439 && CONSTANT_POOL_ADDRESS_P (y)
8440 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8441 return orig_x;
8442 #endif
8443
8444 if (offset != NULL_RTX)
8445 y = gen_rtx_PLUS (Pmode, y, offset);
8446 if (!MEM_P (orig_x))
8447 return y;
8448 else
8449 return replace_equiv_address_nv (orig_x, y);
8450 }
8451
8452 if (TARGET_MACHO
8453 && GET_CODE (orig_x) == LO_SUM
8454 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8455 {
8456 y = XEXP (XEXP (orig_x, 1), 0);
8457 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8458 return XVECEXP (y, 0, 0);
8459 }
8460
8461 return orig_x;
8462 }
8463
8464 /* Return true if X shouldn't be emitted into the debug info.
8465 The linker doesn't like .toc section references from
8466 .debug_* sections, so reject .toc section symbols. */
8467
8468 static bool
8469 rs6000_const_not_ok_for_debug_p (rtx x)
8470 {
8471 if (GET_CODE (x) == UNSPEC)
8472 return true;
8473 if (SYMBOL_REF_P (x)
8474 && CONSTANT_POOL_ADDRESS_P (x))
8475 {
8476 rtx c = get_pool_constant (x);
8477 machine_mode cmode = get_pool_mode (x);
8478 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8479 return true;
8480 }
8481
8482 return false;
8483 }
8484
8485 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8486
8487 static bool
8488 rs6000_legitimate_combined_insn (rtx_insn *insn)
8489 {
8490 int icode = INSN_CODE (insn);
8491
8492 /* Reject creating doloop insns. Combine should not be allowed
8493 to create these for a number of reasons:
8494 1) In a nested loop, if combine creates one of these in an
8495 outer loop and the register allocator happens to allocate ctr
8496 to the outer loop insn, then the inner loop can't use ctr.
8497 Inner loops ought to be more highly optimized.
8498 2) Combine often wants to create one of these from what was
8499 originally a three insn sequence, first combining the three
8500 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8501 allocated ctr, the splitter takes us back to the three insn
8502 sequence. It's better to stop combine at the two insn
8503 sequence.
8504 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8505 insns, the register allocator sometimes uses floating point
8506 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8507 jump insn and output reloads are not implemented for jumps,
8508 the ctrsi/ctrdi splitters need to handle all possible cases.
8509 That's a pain, and it gets to be seriously difficult when a
8510 splitter that runs after reload needs memory to transfer from
8511 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8512 for the difficult case. It's better to not create problems
8513 in the first place. */
8514 if (icode != CODE_FOR_nothing
8515 && (icode == CODE_FOR_bdz_si
8516 || icode == CODE_FOR_bdz_di
8517 || icode == CODE_FOR_bdnz_si
8518 || icode == CODE_FOR_bdnz_di
8519 || icode == CODE_FOR_bdztf_si
8520 || icode == CODE_FOR_bdztf_di
8521 || icode == CODE_FOR_bdnztf_si
8522 || icode == CODE_FOR_bdnztf_di))
8523 return false;
8524
8525 return true;
8526 }
8527
8528 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8529
8530 static GTY(()) rtx rs6000_tls_symbol;
8531 static rtx
8532 rs6000_tls_get_addr (void)
8533 {
8534 if (!rs6000_tls_symbol)
8535 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8536
8537 return rs6000_tls_symbol;
8538 }
8539
8540 /* Construct the SYMBOL_REF for TLS GOT references. */
8541
8542 static GTY(()) rtx rs6000_got_symbol;
8543 static rtx
8544 rs6000_got_sym (void)
8545 {
8546 if (!rs6000_got_symbol)
8547 {
8548 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8549 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8550 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8551 }
8552
8553 return rs6000_got_symbol;
8554 }
8555
8556 /* AIX Thread-Local Address support. */
8557
8558 static rtx
8559 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8560 {
8561 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8562 const char *name;
8563 char *tlsname;
8564
8565 name = XSTR (addr, 0);
8566 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
8567 or the symbol will be in the TLS private data section. */
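/* Example (added): an unqualified symbol "foo" becomes "foo[TL]" here,
   or "foo[UL]" when it names a BSS initializer. */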
8568 if (name[strlen (name) - 1] != ']'
8569 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8570 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8571 {
8572 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8573 strcpy (tlsname, name);
8574 strcat (tlsname,
8575 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8576 tlsaddr = copy_rtx (addr);
8577 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8578 }
8579 else
8580 tlsaddr = addr;
8581
8582 /* Place addr into TOC constant pool. */
8583 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8584
8585 /* Output the TOC entry and create the MEM referencing the value. */
8586 if (constant_pool_expr_p (XEXP (sym, 0))
8587 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8588 {
8589 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8590 mem = gen_const_mem (Pmode, tocref);
8591 set_mem_alias_set (mem, get_TOC_alias_set ());
8592 }
8593 else
8594 return sym;
8595
8596 /* Use global-dynamic for local-dynamic. */
8597 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8598 || model == TLS_MODEL_LOCAL_DYNAMIC)
8599 {
8600 /* Create new TOC reference for @m symbol. */
8601 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8602 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8603 strcpy (tlsname, "*LCM");
8604 strcat (tlsname, name + 3);
8605 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8606 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8607 tocref = create_TOC_reference (modaddr, NULL_RTX);
8608 rtx modmem = gen_const_mem (Pmode, tocref);
8609 set_mem_alias_set (modmem, get_TOC_alias_set ());
8610
8611 rtx modreg = gen_reg_rtx (Pmode);
8612 emit_insn (gen_rtx_SET (modreg, modmem));
8613
8614 tmpreg = gen_reg_rtx (Pmode);
8615 emit_insn (gen_rtx_SET (tmpreg, mem));
8616
8617 dest = gen_reg_rtx (Pmode);
8618 if (TARGET_32BIT)
8619 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8620 else
8621 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8622 return dest;
8623 }
8624 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8625 else if (TARGET_32BIT)
8626 {
8627 tlsreg = gen_reg_rtx (SImode);
8628 emit_insn (gen_tls_get_tpointer (tlsreg));
8629 }
8630 else
8631 tlsreg = gen_rtx_REG (DImode, 13);
8632
8633 /* Load the TOC value into a temporary register. */
8634 tmpreg = gen_reg_rtx (Pmode);
8635 emit_insn (gen_rtx_SET (tmpreg, mem));
8636 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8637 gen_rtx_MINUS (Pmode, addr, tlsreg));
8638
8639 /* Add TOC symbol value to TLS pointer. */
8640 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8641
8642 return dest;
8643 }
8644
8645 /* Output arg setup instructions for a !TARGET_TLS_MARKERS
8646 __tls_get_addr call. */
8647
8648 void
8649 rs6000_output_tlsargs (rtx *operands)
8650 {
8651 /* Set up operands for output_asm_insn, without modifying OPERANDS. */
8652 rtx op[3];
8653
8654 /* The set dest of the call, ie. r3, which is also the first arg reg. */
8655 op[0] = operands[0];
8656 /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
8657 op[1] = XVECEXP (operands[2], 0, 0);
8658 if (XINT (operands[2], 1) == UNSPEC_TLSGD)
8659 {
8660 /* The GOT register. */
8661 op[2] = XVECEXP (operands[2], 0, 1);
8662 if (TARGET_CMODEL != CMODEL_SMALL)
8663 output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
8664 "addi %0,%0,%1@got@tlsgd@l", op);
8665 else
8666 output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
8667 }
8668 else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
8669 {
8670 if (TARGET_CMODEL != CMODEL_SMALL)
8671 output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
8672 "addi %0,%0,%&@got@tlsld@l", op);
8673 else
8674 output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
8675 }
8676 else
8677 gcc_unreachable ();
8678 }
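/* As an illustrative rendering of the templates above (assuming a TLS
   symbol x, r3 as the destination/argument register and r30 as the GOT
   register), the TLSGD case emits roughly:

       addi 3,30,x@got@tlsgd                  # -mcmodel=small
   or
       addis 3,30,x@got@tlsgd@ha
       addi 3,3,x@got@tlsgd@l                 # medium/large models  */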
8679
8680 /* Passes the tls arg value for global dynamic and local dynamic
8681 emit_library_call_value in rs6000_legitimize_tls_address to
8682 rs6000_call_aix and rs6000_call_sysv. This is used to emit the
8683 marker relocs put on __tls_get_addr calls. */
8684 static rtx global_tlsarg;
8685
8686 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8687 this (thread-local) address. */
8688
8689 static rtx
8690 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8691 {
8692 rtx dest, insn;
8693
8694 if (TARGET_XCOFF)
8695 return rs6000_legitimize_tls_address_aix (addr, model);
8696
8697 dest = gen_reg_rtx (Pmode);
8698 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8699 {
8700 rtx tlsreg;
8701
8702 if (TARGET_64BIT)
8703 {
8704 tlsreg = gen_rtx_REG (Pmode, 13);
8705 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8706 }
8707 else
8708 {
8709 tlsreg = gen_rtx_REG (Pmode, 2);
8710 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8711 }
8712 emit_insn (insn);
8713 }
8714 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8715 {
8716 rtx tlsreg, tmp;
8717
8718 tmp = gen_reg_rtx (Pmode);
8719 if (TARGET_64BIT)
8720 {
8721 tlsreg = gen_rtx_REG (Pmode, 13);
8722 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8723 }
8724 else
8725 {
8726 tlsreg = gen_rtx_REG (Pmode, 2);
8727 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8728 }
8729 emit_insn (insn);
8730 if (TARGET_64BIT)
8731 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8732 else
8733 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8734 emit_insn (insn);
8735 }
8736 else
8737 {
8738 rtx got, tga, tmp1, tmp2;
8739
8740 /* We currently use relocations like @got@tlsgd for tls, which
8741 means the linker will handle allocation of tls entries, placing
8742 them in the .got section. So use a pointer to the .got section,
8743 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8744 or to secondary GOT sections used by 32-bit -fPIC. */
8745 if (TARGET_64BIT)
8746 got = gen_rtx_REG (Pmode, 2);
8747 else
8748 {
8749 if (flag_pic == 1)
8750 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8751 else
8752 {
8753 rtx gsym = rs6000_got_sym ();
8754 got = gen_reg_rtx (Pmode);
8755 if (flag_pic == 0)
8756 rs6000_emit_move (got, gsym, Pmode);
8757 else
8758 {
8759 rtx mem, lab;
8760
8761 tmp1 = gen_reg_rtx (Pmode);
8762 tmp2 = gen_reg_rtx (Pmode);
8763 mem = gen_const_mem (Pmode, tmp1);
8764 lab = gen_label_rtx ();
8765 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8766 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8767 if (TARGET_LINK_STACK)
8768 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8769 emit_move_insn (tmp2, mem);
8770 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8771 set_unique_reg_note (last, REG_EQUAL, gsym);
8772 }
8773 }
8774 }
8775
8776 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8777 {
8778 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8779 UNSPEC_TLSGD);
8780 tga = rs6000_tls_get_addr ();
8781 global_tlsarg = arg;
8782 if (TARGET_TLS_MARKERS)
8783 {
8784 rtx argreg = gen_rtx_REG (Pmode, 3);
8785 emit_insn (gen_rtx_SET (argreg, arg));
8786 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8787 argreg, Pmode);
8788 }
8789 else
8790 emit_library_call_value (tga, dest, LCT_CONST, Pmode);
8791 global_tlsarg = NULL_RTX;
8792
8793 /* Make a note so that the result of this call can be CSEd. */
8794 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8795 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8796 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8797 }
8798 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8799 {
8800 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
8801 tga = rs6000_tls_get_addr ();
8802 tmp1 = gen_reg_rtx (Pmode);
8803 global_tlsarg = arg;
8804 if (TARGET_TLS_MARKERS)
8805 {
8806 rtx argreg = gen_rtx_REG (Pmode, 3);
8807 emit_insn (gen_rtx_SET (argreg, arg));
8808 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8809 argreg, Pmode);
8810 }
8811 else
8812 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
8813 global_tlsarg = NULL_RTX;
8814
8815 /* Make a note so that the result of this call can be CSEd. */
8816 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8817 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8818 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8819
8820 if (rs6000_tls_size == 16)
8821 {
8822 if (TARGET_64BIT)
8823 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8824 else
8825 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8826 }
8827 else if (rs6000_tls_size == 32)
8828 {
8829 tmp2 = gen_reg_rtx (Pmode);
8830 if (TARGET_64BIT)
8831 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8832 else
8833 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8834 emit_insn (insn);
8835 if (TARGET_64BIT)
8836 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8837 else
8838 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8839 }
8840 else
8841 {
8842 tmp2 = gen_reg_rtx (Pmode);
8843 if (TARGET_64BIT)
8844 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8845 else
8846 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8847 emit_insn (insn);
8848 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8849 }
8850 emit_insn (insn);
8851 }
8852 else
8853 {
8854 /* IE, or 64-bit offset LE. */
8855 tmp2 = gen_reg_rtx (Pmode);
8856 if (TARGET_64BIT)
8857 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8858 else
8859 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8860 emit_insn (insn);
8861 if (TARGET_64BIT)
8862 insn = gen_tls_tls_64 (dest, tmp2, addr);
8863 else
8864 insn = gen_tls_tls_32 (dest, tmp2, addr);
8865 emit_insn (insn);
8866 }
8867 }
8868
8869 return dest;
8870 }
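/* As a rough illustration (assuming a TLS symbol x and 64-bit code),
   the local-exec cases at the top of this function expand to sequences
   along the lines of:

       addi 9,13,x@tprel                      # -mtls-size=16
   or
       addis 9,13,x@tprel@ha
       addi 9,9,x@tprel@l                     # -mtls-size=32

   with r13 holding the thread pointer.  */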
8871
8872 /* Only create the global variable for the stack protect guard if we are using
8873 the global flavor of that guard. */
8874 static tree
8875 rs6000_init_stack_protect_guard (void)
8876 {
8877 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8878 return default_stack_protect_guard ();
8879
8880 return NULL_TREE;
8881 }
8882
8883 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8884
8885 static bool
8886 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8887 {
8888 if (GET_CODE (x) == HIGH
8889 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8890 return true;
8891
8892 /* A TLS symbol in the TOC cannot contain a sum. */
8893 if (GET_CODE (x) == CONST
8894 && GET_CODE (XEXP (x, 0)) == PLUS
8895 && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
8896 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8897 return true;
8898
8899 /* Do not place an ELF TLS symbol in the constant pool. */
8900 return TARGET_ELF && tls_referenced_p (x);
8901 }
8902
8903 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8904 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8905 can be addressed relative to the toc pointer. */
8906
8907 static bool
8908 use_toc_relative_ref (rtx sym, machine_mode mode)
8909 {
8910 return ((constant_pool_expr_p (sym)
8911 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8912 get_pool_mode (sym)))
8913 || (TARGET_CMODEL == CMODEL_MEDIUM
8914 && SYMBOL_REF_LOCAL_P (sym)
8915 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8916 }
8917
8918 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8919 replace the input X, or the original X if no replacement is called for.
8920 The output parameter *WIN is 1 if the calling macro should goto WIN,
8921 0 if it should not.
8922
8923 For RS/6000, we wish to handle large displacements off a base
8924 register by splitting the addend across an addi/addis pair and the mem insn.
8925 This cuts the number of extra insns needed from 3 to 1.
8926
8927 On Darwin, we use this to generate code for floating point constants.
8928 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8929 The Darwin code is inside #if TARGET_MACHO because only then are the
8930 machopic_* functions defined. */
8931 static rtx
8932 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8933 int opnum, int type,
8934 int ind_levels ATTRIBUTE_UNUSED, int *win)
8935 {
8936 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8937 bool quad_offset_p = mode_supports_dq_form (mode);
8938
8939 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8940 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8941 if (reg_offset_p
8942 && opnum == 1
8943 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8944 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8945 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8946 && TARGET_P9_VECTOR)
8947 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8948 && TARGET_P9_VECTOR)))
8949 reg_offset_p = false;
8950
8951 /* We must recognize output that we have already generated ourselves. */
8952 if (GET_CODE (x) == PLUS
8953 && GET_CODE (XEXP (x, 0)) == PLUS
8954 && REG_P (XEXP (XEXP (x, 0), 0))
8955 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8956 && CONST_INT_P (XEXP (x, 1)))
8957 {
8958 if (TARGET_DEBUG_ADDR)
8959 {
8960 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8961 debug_rtx (x);
8962 }
8963 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8964 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8965 opnum, (enum reload_type) type);
8966 *win = 1;
8967 return x;
8968 }
8969
8970 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8971 if (GET_CODE (x) == LO_SUM
8972 && GET_CODE (XEXP (x, 0)) == HIGH)
8973 {
8974 if (TARGET_DEBUG_ADDR)
8975 {
8976 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8977 debug_rtx (x);
8978 }
8979 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8980 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8981 opnum, (enum reload_type) type);
8982 *win = 1;
8983 return x;
8984 }
8985
8986 #if TARGET_MACHO
8987 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8988 && GET_CODE (x) == LO_SUM
8989 && GET_CODE (XEXP (x, 0)) == PLUS
8990 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8991 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8992 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8993 && machopic_operand_p (XEXP (x, 1)))
8994 {
8995 /* Result of previous invocation of this function on Darwin
8996 floating point constant. */
8997 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8998 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8999 opnum, (enum reload_type) type);
9000 *win = 1;
9001 return x;
9002 }
9003 #endif
9004
9005 if (TARGET_CMODEL != CMODEL_SMALL
9006 && reg_offset_p
9007 && !quad_offset_p
9008 && small_toc_ref (x, VOIDmode))
9009 {
9010 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9011 x = gen_rtx_LO_SUM (Pmode, hi, x);
9012 if (TARGET_DEBUG_ADDR)
9013 {
9014 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9015 debug_rtx (x);
9016 }
9017 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9018 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9019 opnum, (enum reload_type) type);
9020 *win = 1;
9021 return x;
9022 }
9023
9024 if (GET_CODE (x) == PLUS
9025 && REG_P (XEXP (x, 0))
9026 && HARD_REGISTER_P (XEXP (x, 0))
9027 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9028 && CONST_INT_P (XEXP (x, 1))
9029 && reg_offset_p
9030 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9031 {
9032 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9033 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9034 HOST_WIDE_INT high
9035 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9036
9037 /* Check for 32-bit overflow or quad addresses with one of the
9038 four least significant bits set. */
9039 if (high + low != val
9040 || (quad_offset_p && (low & 0xf)))
9041 {
9042 *win = 0;
9043 return x;
9044 }
9045
9046 /* Reload the high part into a base reg; leave the low part
9047 in the mem directly. */
9048
9049 x = gen_rtx_PLUS (GET_MODE (x),
9050 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9051 GEN_INT (high)),
9052 GEN_INT (low));
9053
9054 if (TARGET_DEBUG_ADDR)
9055 {
9056 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9057 debug_rtx (x);
9058 }
9059 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9060 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9061 opnum, (enum reload_type) type);
9062 *win = 1;
9063 return x;
9064 }
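/* A worked example of the high/low split above: for val = 0x00018765,
   low = ((0x8765 ^ 0x8000) - 0x8000) = -0x789b and
   high = 0x00018765 - (-0x789b) = 0x00020000, so the address becomes
   (reg + 0x20000) + (-0x789b); the addis covers 0x0002 and the
   displacement -0x789b fits in the 16-bit D field of the memory insn.  */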
9065
9066 if (SYMBOL_REF_P (x)
9067 && reg_offset_p
9068 && !quad_offset_p
9069 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9070 #if TARGET_MACHO
9071 && DEFAULT_ABI == ABI_DARWIN
9072 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9073 && machopic_symbol_defined_p (x)
9074 #else
9075 && DEFAULT_ABI == ABI_V4
9076 && !flag_pic
9077 #endif
9078 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9079 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9080 without fprs.
9081 ??? Assume floating point reg based on mode? This assumption is
9082 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9083 where reload ends up doing a DFmode load of a constant from
9084 mem using two gprs. Unfortunately, at this point reload
9085 hasn't yet selected regs so poking around in reload data
9086 won't help and even if we could figure out the regs reliably,
9087 we'd still want to allow this transformation when the mem is
9088 naturally aligned. Since we say the address is good here, we
9089 can't disable offsets from LO_SUMs in mem_operand_gpr.
9090 FIXME: Allow offset from lo_sum for other modes too, when
9091 mem is sufficiently aligned.
9092
9093 Also disallow this if the type can go in VMX/Altivec registers, since
9094 those registers do not have d-form (reg+offset) address modes. */
9095 && !reg_addr[mode].scalar_in_vmx_p
9096 && mode != TFmode
9097 && mode != TDmode
9098 && mode != IFmode
9099 && mode != KFmode
9100 && (mode != TImode || !TARGET_VSX)
9101 && mode != PTImode
9102 && (mode != DImode || TARGET_POWERPC64)
9103 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9104 || TARGET_HARD_FLOAT))
9105 {
9106 #if TARGET_MACHO
9107 if (flag_pic)
9108 {
9109 rtx offset = machopic_gen_offset (x);
9110 x = gen_rtx_LO_SUM (GET_MODE (x),
9111 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9112 gen_rtx_HIGH (Pmode, offset)), offset);
9113 }
9114 else
9115 #endif
9116 x = gen_rtx_LO_SUM (GET_MODE (x),
9117 gen_rtx_HIGH (Pmode, x), x);
9118
9119 if (TARGET_DEBUG_ADDR)
9120 {
9121 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9122 debug_rtx (x);
9123 }
9124 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9125 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9126 opnum, (enum reload_type) type);
9127 *win = 1;
9128 return x;
9129 }
9130
9131 /* Reload an offset address wrapped by an AND that represents the
9132 masking of the lower bits. Strip the outer AND and let reload
9133 convert the offset address into an indirect address. For VSX,
9134 force reload to create the address with an AND in a separate
9135 register, because we can't guarantee an altivec register will
9136 be used. */
9137 if (VECTOR_MEM_ALTIVEC_P (mode)
9138 && GET_CODE (x) == AND
9139 && GET_CODE (XEXP (x, 0)) == PLUS
9140 && REG_P (XEXP (XEXP (x, 0), 0))
9141 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9142 && CONST_INT_P (XEXP (x, 1))
9143 && INTVAL (XEXP (x, 1)) == -16)
9144 {
9145 x = XEXP (x, 0);
9146 *win = 1;
9147 return x;
9148 }
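/* E.g. (and:SI (plus:SI (reg:SI 3) (const_int 16)) (const_int -16))
   is rewritten here to (plus:SI (reg:SI 3) (const_int 16)); reload then
   converts the offset address into a plain indirect one, which is what
   lvx/stvx require.  */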
9149
9150 if (TARGET_TOC
9151 && reg_offset_p
9152 && !quad_offset_p
9153 && SYMBOL_REF_P (x)
9154 && use_toc_relative_ref (x, mode))
9155 {
9156 x = create_TOC_reference (x, NULL_RTX);
9157 if (TARGET_CMODEL != CMODEL_SMALL)
9158 {
9159 if (TARGET_DEBUG_ADDR)
9160 {
9161 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9162 debug_rtx (x);
9163 }
9164 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9165 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9166 opnum, (enum reload_type) type);
9167 }
9168 *win = 1;
9169 return x;
9170 }
9171 *win = 0;
9172 return x;
9173 }
9174
9175 /* Debug version of rs6000_legitimize_reload_address. */
9176 static rtx
9177 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9178 int opnum, int type,
9179 int ind_levels, int *win)
9180 {
9181 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9182 ind_levels, win);
9183 fprintf (stderr,
9184 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9185 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9186 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9187 debug_rtx (x);
9188
9189 if (x == ret)
9190 fprintf (stderr, "Same address returned\n");
9191 else if (!ret)
9192 fprintf (stderr, "NULL returned\n");
9193 else
9194 {
9195 fprintf (stderr, "New address:\n");
9196 debug_rtx (ret);
9197 }
9198
9199 return ret;
9200 }
9201
9202 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9203 that is a valid memory address for an instruction.
9204 The MODE argument is the machine mode for the MEM expression
9205 that wants to use this address.
9206
9207 On the RS/6000, there are four valid address forms: a SYMBOL_REF that
9208 refers to a constant pool entry of an address (or the sum of it
9209 plus a constant), a short (16-bit signed) constant plus a register,
9210 the sum of two registers, or a register indirect, possibly with an
9211 auto-increment. For DFmode, DDmode and DImode with a constant plus
9212 register, we must ensure that both words are addressable, or that we are
9213 on PowerPC64 with the offset word aligned.
9214
9215 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9216 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9217 because adjacent memory cells are accessed by adding word-sized offsets
9218 during assembly output. */
9219 static bool
9220 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9221 {
9222 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9223 bool quad_offset_p = mode_supports_dq_form (mode);
9224
9225 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9226 if (VECTOR_MEM_ALTIVEC_P (mode)
9227 && GET_CODE (x) == AND
9228 && CONST_INT_P (XEXP (x, 1))
9229 && INTVAL (XEXP (x, 1)) == -16)
9230 x = XEXP (x, 0);
9231
9232 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9233 return 0;
9234 if (legitimate_indirect_address_p (x, reg_ok_strict))
9235 return 1;
9236 if (TARGET_UPDATE
9237 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9238 && mode_supports_pre_incdec_p (mode)
9239 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9240 return 1;
9241 /* Handle restricted vector d-form offsets in ISA 3.0. */
9242 if (quad_offset_p)
9243 {
9244 if (quad_address_p (x, mode, reg_ok_strict))
9245 return 1;
9246 }
9247 else if (virtual_stack_registers_memory_p (x))
9248 return 1;
9249
9250 else if (reg_offset_p)
9251 {
9252 if (legitimate_small_data_p (mode, x))
9253 return 1;
9254 if (legitimate_constant_pool_address_p (x, mode,
9255 reg_ok_strict || lra_in_progress))
9256 return 1;
9257 }
9258
9259 /* For TImode, if we have TImode in VSX registers, only allow register
9260 indirect addresses. This will allow the values to go in either GPRs
9261 or VSX registers without reloading. The vector types would tend to
9262 go into VSX registers, so we allow REG+REG, while TImode seems
9263 somewhat split, in that some uses are GPR based, and some VSX based. */
9264 /* FIXME: We could loosen this by changing the following to
9265 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9266 but currently we cannot allow REG+REG addressing for TImode. See
9267 PR72827 for complete details on how this ends up hoodwinking DSE. */
9268 if (mode == TImode && TARGET_VSX)
9269 return 0;
9270 /* If not REG_OK_STRICT (before reload), let any stack offset pass. */
9271 if (! reg_ok_strict
9272 && reg_offset_p
9273 && GET_CODE (x) == PLUS
9274 && REG_P (XEXP (x, 0))
9275 && (XEXP (x, 0) == virtual_stack_vars_rtx
9276 || XEXP (x, 0) == arg_pointer_rtx)
9277 && CONST_INT_P (XEXP (x, 1)))
9278 return 1;
9279 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9280 return 1;
9281 if (!FLOAT128_2REG_P (mode)
9282 && (TARGET_HARD_FLOAT
9283 || TARGET_POWERPC64
9284 || (mode != DFmode && mode != DDmode))
9285 && (TARGET_POWERPC64 || mode != DImode)
9286 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9287 && mode != PTImode
9288 && !avoiding_indexed_address_p (mode)
9289 && legitimate_indexed_address_p (x, reg_ok_strict))
9290 return 1;
9291 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9292 && mode_supports_pre_modify_p (mode)
9293 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9294 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9295 reg_ok_strict, false)
9296 || (!avoiding_indexed_address_p (mode)
9297 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9298 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9299 return 1;
9300 if (reg_offset_p && !quad_offset_p
9301 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9302 return 1;
9303 return 0;
9304 }
9305
9306 /* Debug version of rs6000_legitimate_address_p. */
9307 static bool
9308 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9309 bool reg_ok_strict)
9310 {
9311 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9312 fprintf (stderr,
9313 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9314 "strict = %d, reload = %s, code = %s\n",
9315 ret ? "true" : "false",
9316 GET_MODE_NAME (mode),
9317 reg_ok_strict,
9318 (reload_completed ? "after" : "before"),
9319 GET_RTX_NAME (GET_CODE (x)));
9320 debug_rtx (x);
9321
9322 return ret;
9323 }
9324
9325 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9326
9327 static bool
9328 rs6000_mode_dependent_address_p (const_rtx addr,
9329 addr_space_t as ATTRIBUTE_UNUSED)
9330 {
9331 return rs6000_mode_dependent_address_ptr (addr);
9332 }
9333
9334 /* Go to LABEL if ADDR (a legitimate address expression)
9335 has an effect that depends on the machine mode it is used for.
9336
9337 On the RS/6000 this is true of all integral offsets (since AltiVec
9338 and VSX modes don't allow them) and of pre-increment or decrement addresses.
9339
9340 ??? Except that due to conceptual problems in offsettable_address_p
9341 we can't really report the problems of integral offsets. So leave
9342 this assuming that the adjustable offset must be valid for the
9343 sub-words of a TFmode operand, which is what we had before. */
9344
9345 static bool
9346 rs6000_mode_dependent_address (const_rtx addr)
9347 {
9348 switch (GET_CODE (addr))
9349 {
9350 case PLUS:
9351 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9352 is considered a legitimate address before reload, so there
9353 are no offset restrictions in that case. Note that this
9354 condition is safe in strict mode because any address involving
9355 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9356 been rejected as illegitimate. */
9357 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9358 && XEXP (addr, 0) != arg_pointer_rtx
9359 && CONST_INT_P (XEXP (addr, 1)))
9360 {
9361 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9362 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9363 }
9364 break;
9365
9366 case LO_SUM:
9367 /* Anything in the constant pool is sufficiently aligned that
9368 all bytes have the same high part address. */
9369 return !legitimate_constant_pool_address_p (addr, QImode, false);
9370
9371 /* Auto-increment cases are now treated generically in recog.c. */
9372 case PRE_MODIFY:
9373 return TARGET_UPDATE;
9374
9375 /* AND is only allowed in Altivec loads. */
9376 case AND:
9377 return true;
9378
9379 default:
9380 break;
9381 }
9382
9383 return false;
9384 }
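/* To illustrate the PLUS range check above: the worst-case TFmode access
   reads sub-words at val+0 up to val+8 (64-bit) or val+12 (32-bit), so
   with TARGET_POWERPC64 an offset val = 0x7ff8 is mode-dependent
   (0x7ff8 + 8 no longer fits the signed 16-bit D field) while
   val = 0x7ff0 is not.  */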
9385
9386 /* Debug version of rs6000_mode_dependent_address. */
9387 static bool
9388 rs6000_debug_mode_dependent_address (const_rtx addr)
9389 {
9390 bool ret = rs6000_mode_dependent_address (addr);
9391
9392 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9393 ret ? "true" : "false");
9394 debug_rtx (addr);
9395
9396 return ret;
9397 }
9398
9399 /* Implement FIND_BASE_TERM. */
9400
9401 rtx
9402 rs6000_find_base_term (rtx op)
9403 {
9404 rtx base;
9405
9406 base = op;
9407 if (GET_CODE (base) == CONST)
9408 base = XEXP (base, 0);
9409 if (GET_CODE (base) == PLUS)
9410 base = XEXP (base, 0);
9411 if (GET_CODE (base) == UNSPEC)
9412 switch (XINT (base, 1))
9413 {
9414 case UNSPEC_TOCREL:
9415 case UNSPEC_MACHOPIC_OFFSET:
9416 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9417 for aliasing purposes. */
9418 return XVECEXP (base, 0, 0);
9419 }
9420
9421 return op;
9422 }
9423
9424 /* More elaborate version of recog's offsettable_memref_p predicate
9425 that works around the ??? note of rs6000_mode_dependent_address.
9426 In particular it accepts
9427
9428 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9429
9430 in 32-bit mode, which the recog predicate rejects. */
9431
9432 static bool
9433 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9434 {
9435 bool worst_case;
9436
9437 if (!MEM_P (op))
9438 return false;
9439
9440 /* First mimic offsettable_memref_p. */
9441 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9442 return true;
9443
9444 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9445 the latter predicate knows nothing about the mode of the memory
9446 reference and, therefore, assumes that it is the largest supported
9447 mode (TFmode). As a consequence, legitimate offsettable memory
9448 references are rejected. rs6000_legitimate_offset_address_p contains
9449 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9450 at least with a little bit of help here given that we know the
9451 actual registers used. */
9452 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9453 || GET_MODE_SIZE (reg_mode) == 4);
9454 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9455 strict, worst_case);
9456 }
9457
9458 /* Determine the reassociation width to be used in reassociate_bb.
9459 This takes into account how many parallel operations we
9460 can actually do of a given type, and also the latency.
9461 P8:
9462 int add/sub 6/cycle
9463 mul 2/cycle
9464 vect add/sub/mul 2/cycle
9465 fp add/sub/mul 2/cycle
9466 dfp 1/cycle
9467 */
9468
9469 static int
9470 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9471 machine_mode mode)
9472 {
9473 switch (rs6000_tune)
9474 {
9475 case PROCESSOR_POWER8:
9476 case PROCESSOR_POWER9:
9477 if (DECIMAL_FLOAT_MODE_P (mode))
9478 return 1;
9479 if (VECTOR_MODE_P (mode))
9480 return 4;
9481 if (INTEGRAL_MODE_P (mode))
9482 return 1;
9483 if (FLOAT_MODE_P (mode))
9484 return 4;
9485 break;
9486 default:
9487 break;
9488 }
9489 return 1;
9490 }
9491
9492 /* Change register usage conditional on target flags. */
9493 static void
9494 rs6000_conditional_register_usage (void)
9495 {
9496 int i;
9497
9498 if (TARGET_DEBUG_TARGET)
9499 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9500
9501 /* Set MQ register fixed (already call_used) so that it will not be
9502 allocated. */
9503 fixed_regs[64] = 1;
9504
9505 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9506 if (TARGET_64BIT)
9507 fixed_regs[13] = call_used_regs[13]
9508 = call_really_used_regs[13] = 1;
9509
9510 /* Conditionally disable FPRs. */
9511 if (TARGET_SOFT_FLOAT)
9512 for (i = 32; i < 64; i++)
9513 fixed_regs[i] = call_used_regs[i]
9514 = call_really_used_regs[i] = 1;
9515
9516 /* The TOC register is not killed across calls in a way that is
9517 visible to the compiler. */
9518 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9519 call_really_used_regs[2] = 0;
9520
9521 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9522 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9523
9524 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9525 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9526 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9527 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9528
9529 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9530 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9531 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9532 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9533
9534 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9535 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9536 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9537
9538 if (!TARGET_ALTIVEC && !TARGET_VSX)
9539 {
9540 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9541 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9542 call_really_used_regs[VRSAVE_REGNO] = 1;
9543 }
9544
9545 if (TARGET_ALTIVEC || TARGET_VSX)
9546 global_regs[VSCR_REGNO] = 1;
9547
9548 if (TARGET_ALTIVEC_ABI)
9549 {
9550 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9551 call_used_regs[i] = call_really_used_regs[i] = 1;
9552
9553 /* AIX reserves VR20:31 in non-extended ABI mode. */
9554 if (TARGET_XCOFF)
9555 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9556 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9557 }
9558 }
9559
9560 \f
9561 /* Output insns to set DEST equal to the constant SOURCE as a series of
9562 lis, ori and shl instructions and return TRUE. */
9563
9564 bool
9565 rs6000_emit_set_const (rtx dest, rtx source)
9566 {
9567 machine_mode mode = GET_MODE (dest);
9568 rtx temp, set;
9569 rtx_insn *insn;
9570 HOST_WIDE_INT c;
9571
9572 gcc_checking_assert (CONST_INT_P (source));
9573 c = INTVAL (source);
9574 switch (mode)
9575 {
9576 case E_QImode:
9577 case E_HImode:
9578 emit_insn (gen_rtx_SET (dest, source));
9579 return true;
9580
9581 case E_SImode:
9582 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9583
9584 emit_insn (gen_rtx_SET (copy_rtx (temp),
9585 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9586 emit_insn (gen_rtx_SET (dest,
9587 gen_rtx_IOR (SImode, copy_rtx (temp),
9588 GEN_INT (c & 0xffff))));
9589 break;
9590
9591 case E_DImode:
9592 if (!TARGET_POWERPC64)
9593 {
9594 rtx hi, lo;
9595
9596 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9597 DImode);
9598 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9599 DImode);
9600 emit_move_insn (hi, GEN_INT (c >> 32));
9601 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9602 emit_move_insn (lo, GEN_INT (c));
9603 }
9604 else
9605 rs6000_emit_set_long_const (dest, c);
9606 break;
9607
9608 default:
9609 gcc_unreachable ();
9610 }
9611
9612 insn = get_last_insn ();
9613 set = single_set (insn);
9614 if (! CONSTANT_P (SET_SRC (set)))
9615 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9616
9617 return true;
9618 }
9619
9620 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9621 Output insns to set DEST equal to the constant C as a series of
9622 lis, ori and shl instructions. */
9623
9624 static void
9625 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9626 {
9627 rtx temp;
9628 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9629
9630 ud1 = c & 0xffff;
9631 c = c >> 16;
9632 ud2 = c & 0xffff;
9633 c = c >> 16;
9634 ud3 = c & 0xffff;
9635 c = c >> 16;
9636 ud4 = c & 0xffff;
9637
9638 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9639 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9640 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9641
9642 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9643 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9644 {
9645 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9646
9647 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9648 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9649 if (ud1 != 0)
9650 emit_move_insn (dest,
9651 gen_rtx_IOR (DImode, copy_rtx (temp),
9652 GEN_INT (ud1)));
9653 }
9654 else if (ud3 == 0 && ud4 == 0)
9655 {
9656 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9657
9658 gcc_assert (ud2 & 0x8000);
9659 emit_move_insn (copy_rtx (temp),
9660 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9661 if (ud1 != 0)
9662 emit_move_insn (copy_rtx (temp),
9663 gen_rtx_IOR (DImode, copy_rtx (temp),
9664 GEN_INT (ud1)));
9665 emit_move_insn (dest,
9666 gen_rtx_ZERO_EXTEND (DImode,
9667 gen_lowpart (SImode,
9668 copy_rtx (temp))));
9669 }
9670 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9671 || (ud4 == 0 && ! (ud3 & 0x8000)))
9672 {
9673 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9674
9675 emit_move_insn (copy_rtx (temp),
9676 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9677 if (ud2 != 0)
9678 emit_move_insn (copy_rtx (temp),
9679 gen_rtx_IOR (DImode, copy_rtx (temp),
9680 GEN_INT (ud2)));
9681 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9682 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9683 GEN_INT (16)));
9684 if (ud1 != 0)
9685 emit_move_insn (dest,
9686 gen_rtx_IOR (DImode, copy_rtx (temp),
9687 GEN_INT (ud1)));
9688 }
9689 else
9690 {
9691 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9692
9693 emit_move_insn (copy_rtx (temp),
9694 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9695 if (ud3 != 0)
9696 emit_move_insn (copy_rtx (temp),
9697 gen_rtx_IOR (DImode, copy_rtx (temp),
9698 GEN_INT (ud3)));
9699
9700 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9701 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9702 GEN_INT (32)));
9703 if (ud2 != 0)
9704 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9705 gen_rtx_IOR (DImode, copy_rtx (temp),
9706 GEN_INT (ud2 << 16)));
9707 if (ud1 != 0)
9708 emit_move_insn (dest,
9709 gen_rtx_IOR (DImode, copy_rtx (temp),
9710 GEN_INT (ud1)));
9711 }
9712 }
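/* For example, the general case at the end builds
   c = 0x123456789abcdef0 (ud4=0x1234 ud3=0x5678 ud2=0x9abc ud1=0xdef0)
   as, roughly:

       lis 9,0x1234          # ((ud4 << 16) ^ 0x80000000) - 0x80000000
       ori 9,9,0x5678        # IOR ud3
       sldi 9,9,32           # ASHIFT by 32
       oris 9,9,0x9abc       # IOR (ud2 << 16)
       ori 9,9,0xdef0        # IOR ud1

   (an illustrative rendering; actual register numbers will differ).  */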
9713
9714 /* Helper for the following. Get rid of [r+r] memory refs
9715 in cases where they won't work (TImode, TFmode, TDmode, PTImode). */
9716
9717 static void
9718 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9719 {
9720 if (MEM_P (operands[0])
9721 && !REG_P (XEXP (operands[0], 0))
9722 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9723 GET_MODE (operands[0]), false))
9724 operands[0]
9725 = replace_equiv_address (operands[0],
9726 copy_addr_to_reg (XEXP (operands[0], 0)));
9727
9728 if (MEM_P (operands[1])
9729 && !REG_P (XEXP (operands[1], 0))
9730 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9731 GET_MODE (operands[1]), false))
9732 operands[1]
9733 = replace_equiv_address (operands[1],
9734 copy_addr_to_reg (XEXP (operands[1], 0)));
9735 }
9736
9737 /* Generate a vector of constants to permute MODE for a little-endian
9738 storage operation by swapping the two halves of a vector. */
9739 static rtvec
9740 rs6000_const_vec (machine_mode mode)
9741 {
9742 int i, subparts;
9743 rtvec v;
9744
9745 switch (mode)
9746 {
9747 case E_V1TImode:
9748 subparts = 1;
9749 break;
9750 case E_V2DFmode:
9751 case E_V2DImode:
9752 subparts = 2;
9753 break;
9754 case E_V4SFmode:
9755 case E_V4SImode:
9756 subparts = 4;
9757 break;
9758 case E_V8HImode:
9759 subparts = 8;
9760 break;
9761 case E_V16QImode:
9762 subparts = 16;
9763 break;
9764 default:
9765 gcc_unreachable ();
9766 }
9767
9768 v = rtvec_alloc (subparts);
9769
9770 for (i = 0; i < subparts / 2; ++i)
9771 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9772 for (i = subparts / 2; i < subparts; ++i)
9773 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9774
9775 return v;
9776 }
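/* For instance, for V4SImode this returns the permutation {2, 3, 0, 1}
   and for V2DImode it returns {1, 0}, i.e. the two halves of the vector
   swapped, which is exactly the doubleword swap performed by lxvd2x and
   stxvd2x on little-endian targets.  */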
9777
9778 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9779 store operation. */
9780 void
9781 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9782 {
9783 /* Scalar permutations are easier to express in integer modes rather than
9784 floating-point modes, so cast them here. We use V1TImode instead
9785 of TImode to ensure that the values don't go through GPRs. */
9786 if (FLOAT128_VECTOR_P (mode))
9787 {
9788 dest = gen_lowpart (V1TImode, dest);
9789 source = gen_lowpart (V1TImode, source);
9790 mode = V1TImode;
9791 }
9792
9793 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9794 scalar. */
9795 if (mode == TImode || mode == V1TImode)
9796 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9797 GEN_INT (64))));
9798 else
9799 {
9800 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9801 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9802 }
9803 }
9804
9805 /* Emit a little-endian load from vector memory location SOURCE to VSX
9806 register DEST in mode MODE. The load is done with two permuting
9807 insns that represent an lxvd2x and an xxpermdi. */
9808 void
9809 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9810 {
9811 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9812 V1TImode). */
9813 if (mode == TImode || mode == V1TImode)
9814 {
9815 mode = V2DImode;
9816 dest = gen_lowpart (V2DImode, dest);
9817 source = adjust_address (source, V2DImode, 0);
9818 }
9819
9820 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9821 rs6000_emit_le_vsx_permute (tmp, source, mode);
9822 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9823 }
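/* Viewed element-wise, a V4SImode load of memory {0,1,2,3} first yields
   the doubleword-swapped image {2,3,0,1} (the lxvd2x) and the second
   permute restores {0,1,2,3} (the xxpermdi). Emitting both permutes
   explicitly at expand time lets later passes cancel adjacent swap
   pairs.  */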
9824
9825 /* Emit a little-endian store to vector memory location DEST from VSX
9826 register SOURCE in mode MODE. The store is done with two permuting
9827 insns that represent an xxpermdi and an stxvd2x. */
9828 void
9829 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9830 {
9831 /* This should never be called during or after LRA, because it does
9832 not re-permute the source register. It is intended only for use
9833 during expand. */
9834 gcc_assert (!lra_in_progress && !reload_completed);
9835
9836 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9837 V1TImode). */
9838 if (mode == TImode || mode == V1TImode)
9839 {
9840 mode = V2DImode;
9841 dest = adjust_address (dest, V2DImode, 0);
9842 source = gen_lowpart (V2DImode, source);
9843 }
9844
9845 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9846 rs6000_emit_le_vsx_permute (tmp, source, mode);
9847 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9848 }
9849
9850 /* Emit a sequence representing a little-endian VSX load or store,
9851 moving data from SOURCE to DEST in mode MODE. This is done
9852 separately from rs6000_emit_move to ensure it is called only
9853 during expand. LE VSX loads and stores introduced later are
9854 handled with a split. The expand-time RTL generation allows
9855 us to optimize away redundant pairs of register-permutes. */
9856 void
9857 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9858 {
9859 gcc_assert (!BYTES_BIG_ENDIAN
9860 && VECTOR_MEM_VSX_P (mode)
9861 && !TARGET_P9_VECTOR
9862 && !gpr_or_gpr_p (dest, source)
9863 && (MEM_P (source) ^ MEM_P (dest)));
9864
9865 if (MEM_P (source))
9866 {
9867 gcc_assert (REG_P (dest) || SUBREG_P (dest));
9868 rs6000_emit_le_vsx_load (dest, source, mode);
9869 }
9870 else
9871 {
9872 if (!REG_P (source))
9873 source = force_reg (mode, source);
9874 rs6000_emit_le_vsx_store (dest, source, mode);
9875 }
9876 }
9877
9878 /* Return whether a SFmode or SImode move can be done without converting one
9879 mode to another. This arrises when we have:
9880
9881 (SUBREG:SF (REG:SI ...))
9882 (SUBREG:SI (REG:SF ...))
9883
9884 and one of the values is in a floating point/vector register, where SFmode
9885 scalars are stored in DFmode format. */
9886
9887 bool
9888 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9889 {
9890 if (TARGET_ALLOW_SF_SUBREG)
9891 return true;
9892
9893 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9894 return true;
9895
9896 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9897 return true;
9898
9899 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9900 if (SUBREG_P (dest))
9901 {
9902 rtx dest_subreg = SUBREG_REG (dest);
9903 rtx src_subreg = SUBREG_REG (src);
9904 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9905 }
9906
9907 return false;
9908 }
9909
9910
9911 /* Helper function to change moves with:
9912
9913 (SUBREG:SF (REG:SI)) and
9914 (SUBREG:SI (REG:SF))
9915
9916 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9917 values are stored as DFmode values in the VSX registers. We need to convert
9918 the bits before we can use a direct move or operate on the bits in the
9919 vector register as an integer type.
9920
9921 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9922
9923 static bool
9924 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9925 {
9926 if (TARGET_DIRECT_MOVE_64BIT && !reload_completed
9927 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9928 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9929 {
9930 rtx inner_source = SUBREG_REG (source);
9931 machine_mode inner_mode = GET_MODE (inner_source);
9932
9933 if (mode == SImode && inner_mode == SFmode)
9934 {
9935 emit_insn (gen_movsi_from_sf (dest, inner_source));
9936 return true;
9937 }
9938
9939 if (mode == SFmode && inner_mode == SImode)
9940 {
9941 emit_insn (gen_movsf_from_si (dest, inner_source));
9942 return true;
9943 }
9944 }
9945
9946 return false;
9947 }
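/* E.g. (set (reg:SI) (subreg:SI (reg:SF))) becomes a movsi_from_sf
   unspec rather than a plain subreg move, so the DFmode-formatted bits
   in the VSX register are converted to SFmode format before they are
   reinterpreted as an integer.  */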
9948
9949 /* Emit a move from SOURCE to DEST in mode MODE. */
9950 void
9951 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9952 {
9953 rtx operands[2];
9954 operands[0] = dest;
9955 operands[1] = source;
9956
9957 if (TARGET_DEBUG_ADDR)
9958 {
9959 fprintf (stderr,
9960 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9961 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9962 GET_MODE_NAME (mode),
9963 lra_in_progress,
9964 reload_completed,
9965 can_create_pseudo_p ());
9966 debug_rtx (dest);
9967 fprintf (stderr, "source:\n");
9968 debug_rtx (source);
9969 }
9970
9971 /* Check that we get CONST_WIDE_INT only when we should. */
9972 if (CONST_WIDE_INT_P (operands[1])
9973 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9974 gcc_unreachable ();
9975
9976 #ifdef HAVE_AS_GNU_ATTRIBUTE
9977 /* If we use a long double type, set the flags in .gnu_attribute that say
9978 what the long double type is. This is to allow the linker's warning
9979 message for the wrong long double to be useful, even if the function does
9980 not do a call (for example, doing a 128-bit add on power9 if the long
9981 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128 are
9982 used and they aren't the default long double type. */
9983 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9984 {
9985 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9986 rs6000_passes_float = rs6000_passes_long_double = true;
9987
9988 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9989 rs6000_passes_float = rs6000_passes_long_double = true;
9990 }
9991 #endif
9992
9993 /* See if we need to special case SImode/SFmode SUBREG moves. */
9994 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9995 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9996 return;
9997
9998 /* Check if GCC is setting up a block move that will end up using FP
9999 registers as temporaries. We must make sure this is acceptable. */
10000 if (MEM_P (operands[0])
10001 && MEM_P (operands[1])
10002 && mode == DImode
10003 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
10004 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
10005 && ! (rs6000_slow_unaligned_access (SImode,
10006 (MEM_ALIGN (operands[0]) > 32
10007 ? 32 : MEM_ALIGN (operands[0])))
10008 || rs6000_slow_unaligned_access (SImode,
10009 (MEM_ALIGN (operands[1]) > 32
10010 ? 32 : MEM_ALIGN (operands[1]))))
10011 && ! MEM_VOLATILE_P (operands [0])
10012 && ! MEM_VOLATILE_P (operands [1]))
10013 {
10014 emit_move_insn (adjust_address (operands[0], SImode, 0),
10015 adjust_address (operands[1], SImode, 0));
10016 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10017 adjust_address (copy_rtx (operands[1]), SImode, 4));
10018 return;
10019 }
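/* I.e. a DImode mem-to-mem copy whose endpoints are only word aligned is
   split here into two SImode copies at offsets 0 and 4, rather than
   letting a doubleword FP temporary touch slow unaligned memory.  */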
10020
10021 if (can_create_pseudo_p () && MEM_P (operands[0])
10022 && !gpc_reg_operand (operands[1], mode))
10023 operands[1] = force_reg (mode, operands[1]);
10024
10025 /* Recognize the case where operand[1] is a reference to thread-local
10026 data and load its address to a register. */
10027 if (tls_referenced_p (operands[1]))
10028 {
10029 enum tls_model model;
10030 rtx tmp = operands[1];
10031 rtx addend = NULL;
10032
10033 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10034 {
10035 addend = XEXP (XEXP (tmp, 0), 1);
10036 tmp = XEXP (XEXP (tmp, 0), 0);
10037 }
10038
10039 gcc_assert (SYMBOL_REF_P (tmp));
10040 model = SYMBOL_REF_TLS_MODEL (tmp);
10041 gcc_assert (model != 0);
10042
10043 tmp = rs6000_legitimize_tls_address (tmp, model);
10044 if (addend)
10045 {
10046 tmp = gen_rtx_PLUS (mode, tmp, addend);
10047 tmp = force_operand (tmp, operands[0]);
10048 }
10049 operands[1] = tmp;
10050 }
10051
10052 /* 128-bit constant floating-point values on Darwin should really be loaded
10053 as two parts. However, this premature splitting is a problem when DFmode
10054 values can go into Altivec registers. */
10055 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
10056 && !reg_addr[DFmode].scalar_in_vmx_p)
10057 {
10058 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10059 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10060 DFmode);
10061 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10062 GET_MODE_SIZE (DFmode)),
10063 simplify_gen_subreg (DFmode, operands[1], mode,
10064 GET_MODE_SIZE (DFmode)),
10065 DFmode);
10066 return;
10067 }
10068
10069 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10070 p1:SD) if p1 is not of the floating point class and p0 is spilled,
10071 since we have no analogous movsd_store for this. */
10072 if (lra_in_progress && mode == DDmode
10073 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
10074 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10075 && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
10076 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10077 {
10078 enum reg_class cl;
10079 int regno = REGNO (SUBREG_REG (operands[1]));
10080
10081 if (!HARD_REGISTER_NUM_P (regno))
10082 {
10083 cl = reg_preferred_class (regno);
10084 regno = reg_renumber[regno];
10085 if (regno < 0)
10086 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10087 }
10088 if (regno >= 0 && ! FP_REGNO_P (regno))
10089 {
10090 mode = SDmode;
10091 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10092 operands[1] = SUBREG_REG (operands[1]);
10093 }
10094 }
10095 if (lra_in_progress
10096 && mode == SDmode
10097 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
10098 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10099 && (REG_P (operands[1])
10100 || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
10101 {
10102 int regno = reg_or_subregno (operands[1]);
10103 enum reg_class cl;
10104
10105 if (!HARD_REGISTER_NUM_P (regno))
10106 {
10107 cl = reg_preferred_class (regno);
10108 gcc_assert (cl != NO_REGS);
10109 regno = reg_renumber[regno];
10110 if (regno < 0)
10111 regno = ira_class_hard_regs[cl][0];
10112 }
10113 if (FP_REGNO_P (regno))
10114 {
10115 if (GET_MODE (operands[0]) != DDmode)
10116 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10117 emit_insn (gen_movsd_store (operands[0], operands[1]));
10118 }
10119 else if (INT_REGNO_P (regno))
10120 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10121 else
10122 gcc_unreachable ();
10123 return;
10124 }
10125 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10126 p1:DD)) if p0 is not of the floating point class and p1 is spilled,
10127 since we have no analogous movsd_load for this. */
10128 if (lra_in_progress && mode == DDmode
10129 && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
10130 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10131 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
10132 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10133 {
10134 enum reg_class cl;
10135 int regno = REGNO (SUBREG_REG (operands[0]));
10136
10137 if (!HARD_REGISTER_NUM_P (regno))
10138 {
10139 cl = reg_preferred_class (regno);
10140 regno = reg_renumber[regno];
10141 if (regno < 0)
10142 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10143 }
10144 if (regno >= 0 && ! FP_REGNO_P (regno))
10145 {
10146 mode = SDmode;
10147 operands[0] = SUBREG_REG (operands[0]);
10148 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10149 }
10150 }
10151 if (lra_in_progress
10152 && mode == SDmode
10153 && (REG_P (operands[0])
10154 || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
10155 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
10156 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10157 {
10158 int regno = reg_or_subregno (operands[0]);
10159 enum reg_class cl;
10160
10161 if (!HARD_REGISTER_NUM_P (regno))
10162 {
10163 cl = reg_preferred_class (regno);
10164 gcc_assert (cl != NO_REGS);
10165 regno = reg_renumber[regno];
10166 if (regno < 0)
10167 regno = ira_class_hard_regs[cl][0];
10168 }
10169 if (FP_REGNO_P (regno))
10170 {
10171 if (GET_MODE (operands[1]) != DDmode)
10172 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10173 emit_insn (gen_movsd_load (operands[0], operands[1]));
10174 }
10175 else if (INT_REGNO_P (regno))
10176 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10177 else
10178 gcc_unreachable ();
10179 return;
10180 }
10181
10182 /* FIXME: In the long term, this switch statement should go away
10183 and be replaced by a sequence of tests based on things like
10184 mode == Pmode. */
10185 switch (mode)
10186 {
10187 case E_HImode:
10188 case E_QImode:
10189 if (CONSTANT_P (operands[1])
10190 && !CONST_INT_P (operands[1]))
10191 operands[1] = force_const_mem (mode, operands[1]);
10192 break;
10193
10194 case E_TFmode:
10195 case E_TDmode:
10196 case E_IFmode:
10197 case E_KFmode:
10198 if (FLOAT128_2REG_P (mode))
10199 rs6000_eliminate_indexed_memrefs (operands);
10200 /* fall through */
10201
10202 case E_DFmode:
10203 case E_DDmode:
10204 case E_SFmode:
10205 case E_SDmode:
10206 if (CONSTANT_P (operands[1])
10207 && ! easy_fp_constant (operands[1], mode))
10208 operands[1] = force_const_mem (mode, operands[1]);
10209 break;
10210
10211 case E_V16QImode:
10212 case E_V8HImode:
10213 case E_V4SFmode:
10214 case E_V4SImode:
10215 case E_V2DFmode:
10216 case E_V2DImode:
10217 case E_V1TImode:
10218 if (CONSTANT_P (operands[1])
10219 && !easy_vector_constant (operands[1], mode))
10220 operands[1] = force_const_mem (mode, operands[1]);
10221 break;
10222
10223 case E_SImode:
10224 case E_DImode:
10225 /* Use the default pattern for the address of ELF small data. */
10226 if (TARGET_ELF
10227 && mode == Pmode
10228 && DEFAULT_ABI == ABI_V4
10229 && (SYMBOL_REF_P (operands[1])
10230 || GET_CODE (operands[1]) == CONST)
10231 && small_data_operand (operands[1], mode))
10232 {
10233 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10234 return;
10235 }
10236
10237 if (DEFAULT_ABI == ABI_V4
10238 && mode == Pmode && mode == SImode
10239 && flag_pic == 1 && got_operand (operands[1], mode))
10240 {
10241 emit_insn (gen_movsi_got (operands[0], operands[1]));
10242 return;
10243 }
10244
10245 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10246 && TARGET_NO_TOC
10247 && ! flag_pic
10248 && mode == Pmode
10249 && CONSTANT_P (operands[1])
10250 && GET_CODE (operands[1]) != HIGH
10251 && !CONST_INT_P (operands[1]))
10252 {
10253 rtx target = (!can_create_pseudo_p ()
10254 ? operands[0]
10255 : gen_reg_rtx (mode));
10256
10257 /* If this is a function address on -mcall-aixdesc,
10258 convert it to the address of the descriptor. */
10259 if (DEFAULT_ABI == ABI_AIX
10260 && SYMBOL_REF_P (operands[1])
10261 && XSTR (operands[1], 0)[0] == '.')
10262 {
10263 const char *name = XSTR (operands[1], 0);
10264 rtx new_ref;
10265 while (*name == '.')
10266 name++;
10267 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10268 CONSTANT_POOL_ADDRESS_P (new_ref)
10269 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10270 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10271 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10272 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10273 operands[1] = new_ref;
10274 }
10275
10276 if (DEFAULT_ABI == ABI_DARWIN)
10277 {
10278 #if TARGET_MACHO
10279 if (MACHO_DYNAMIC_NO_PIC_P)
10280 {
10281 /* Take care of any required data indirection. */
10282 operands[1] = rs6000_machopic_legitimize_pic_address (
10283 operands[1], mode, operands[0]);
10284 if (operands[0] != operands[1])
10285 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10286 return;
10287 }
10288 #endif
10289 emit_insn (gen_macho_high (target, operands[1]));
10290 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10291 return;
10292 }
10293
10294 emit_insn (gen_elf_high (target, operands[1]));
10295 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10296 return;
10297 }
10298
10299 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10300 and we have put it in the TOC, we just need to make a TOC-relative
10301 reference to it. */
10302 if (TARGET_TOC
10303 && SYMBOL_REF_P (operands[1])
10304 && use_toc_relative_ref (operands[1], mode))
10305 operands[1] = create_TOC_reference (operands[1], operands[0]);
10306 else if (mode == Pmode
10307 && CONSTANT_P (operands[1])
10308 && GET_CODE (operands[1]) != HIGH
10309 && ((REG_P (operands[0])
10310 && FP_REGNO_P (REGNO (operands[0])))
10311 || !CONST_INT_P (operands[1])
10312 || (num_insns_constant (operands[1], mode)
10313 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10314 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10315 && (TARGET_CMODEL == CMODEL_SMALL
10316 || can_create_pseudo_p ()
10317 || (REG_P (operands[0])
10318 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10319 {
10320
10321 #if TARGET_MACHO
10322 /* Darwin uses a special PIC legitimizer. */
10323 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10324 {
10325 operands[1] =
10326 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10327 operands[0]);
10328 if (operands[0] != operands[1])
10329 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10330 return;
10331 }
10332 #endif
10333
10334 /* If we are to limit the number of things we put in the TOC and
10335 this is a symbol plus a constant we can add in one insn,
10336 just put the symbol in the TOC and add the constant. */
10337 if (GET_CODE (operands[1]) == CONST
10338 && TARGET_NO_SUM_IN_TOC
10339 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10340 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10341 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10342 || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
10343 && ! side_effects_p (operands[0]))
10344 {
10345 rtx sym =
10346 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10347 rtx other = XEXP (XEXP (operands[1], 0), 1);
10348
10349 sym = force_reg (mode, sym);
10350 emit_insn (gen_add3_insn (operands[0], sym, other));
10351 return;
10352 }
10353
10354 operands[1] = force_const_mem (mode, operands[1]);
10355
10356 if (TARGET_TOC
10357 && SYMBOL_REF_P (XEXP (operands[1], 0))
10358 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10359 {
10360 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10361 operands[0]);
10362 operands[1] = gen_const_mem (mode, tocref);
10363 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10364 }
10365 }
10366 break;
10367
10368 case E_TImode:
10369 if (!VECTOR_MEM_VSX_P (TImode))
10370 rs6000_eliminate_indexed_memrefs (operands);
10371 break;
10372
10373 case E_PTImode:
10374 rs6000_eliminate_indexed_memrefs (operands);
10375 break;
10376
10377 default:
10378 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10379 }
10380
10381 /* Above, we may have called force_const_mem which may have returned
10382 an invalid address. If we can, fix this up; otherwise, reload will
10383 have to deal with it. */
10384 if (MEM_P (operands[1]))
10385 operands[1] = validize_mem (operands[1]);
10386
10387 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10388 }
10389 \f
10390 /* Nonzero if we can use a floating-point register to pass this arg. */
10391 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10392 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10393 && (CUM)->fregno <= FP_ARG_MAX_REG \
10394 && TARGET_HARD_FLOAT)
10395
10396 /* Nonzero if we can use an AltiVec register to pass this arg. */
10397 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10398 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10399 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10400 && TARGET_ALTIVEC_ABI \
10401 && (NAMED))
10402
10403 /* Walk down the type tree of TYPE counting consecutive base elements.
10404 If *MODEP is VOIDmode, then set it to the first valid floating point
10405 or vector type. If a non-floating point or vector type is found, or
10406 if a floating point or vector type that doesn't match a non-VOIDmode
10407 *MODEP is found, then return -1, otherwise return the count in the
10408 sub-tree. */
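/* Illustrative examples of the counting below: "struct { double a, b; }"
   yields 2 with *MODEP set to DFmode; "_Complex double" yields 2;
   "float v[4]" yields 4 with *MODEP set to SFmode; while
   "struct { double d; int i; }" yields -1, because the int field does
   not match the floating point mode. */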
10409
10410 static int
10411 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10412 {
10413 machine_mode mode;
10414 HOST_WIDE_INT size;
10415
10416 switch (TREE_CODE (type))
10417 {
10418 case REAL_TYPE:
10419 mode = TYPE_MODE (type);
10420 if (!SCALAR_FLOAT_MODE_P (mode))
10421 return -1;
10422
10423 if (*modep == VOIDmode)
10424 *modep = mode;
10425
10426 if (*modep == mode)
10427 return 1;
10428
10429 break;
10430
10431 case COMPLEX_TYPE:
10432 mode = TYPE_MODE (TREE_TYPE (type));
10433 if (!SCALAR_FLOAT_MODE_P (mode))
10434 return -1;
10435
10436 if (*modep == VOIDmode)
10437 *modep = mode;
10438
10439 if (*modep == mode)
10440 return 2;
10441
10442 break;
10443
10444 case VECTOR_TYPE:
10445 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10446 return -1;
10447
10448 /* Use V4SImode as representative of all 128-bit vector types. */
10449 size = int_size_in_bytes (type);
10450 switch (size)
10451 {
10452 case 16:
10453 mode = V4SImode;
10454 break;
10455 default:
10456 return -1;
10457 }
10458
10459 if (*modep == VOIDmode)
10460 *modep = mode;
10461
10462 /* Vector modes are considered to be opaque: two vectors are
10463 equivalent for the purposes of being homogeneous aggregates
10464 if they are the same size. */
10465 if (*modep == mode)
10466 return 1;
10467
10468 break;
10469
10470 case ARRAY_TYPE:
10471 {
10472 int count;
10473 tree index = TYPE_DOMAIN (type);
10474
10475 /* Can't handle incomplete types nor sizes that are not
10476 fixed. */
10477 if (!COMPLETE_TYPE_P (type)
10478 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10479 return -1;
10480
10481 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10482 if (count == -1
10483 || !index
10484 || !TYPE_MAX_VALUE (index)
10485 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10486 || !TYPE_MIN_VALUE (index)
10487 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10488 || count < 0)
10489 return -1;
10490
10491 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10492 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10493
10494 /* There must be no padding. */
10495 if (wi::to_wide (TYPE_SIZE (type))
10496 != count * GET_MODE_BITSIZE (*modep))
10497 return -1;
10498
10499 return count;
10500 }
10501
10502 case RECORD_TYPE:
10503 {
10504 int count = 0;
10505 int sub_count;
10506 tree field;
10507
10508 /* Can't handle incomplete types nor sizes that are not
10509 fixed. */
10510 if (!COMPLETE_TYPE_P (type)
10511 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10512 return -1;
10513
10514 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10515 {
10516 if (TREE_CODE (field) != FIELD_DECL)
10517 continue;
10518
10519 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10520 if (sub_count < 0)
10521 return -1;
10522 count += sub_count;
10523 }
10524
10525 /* There must be no padding. */
10526 if (wi::to_wide (TYPE_SIZE (type))
10527 != count * GET_MODE_BITSIZE (*modep))
10528 return -1;
10529
10530 return count;
10531 }
10532
10533 case UNION_TYPE:
10534 case QUAL_UNION_TYPE:
10535 {
10536 /* These aren't very interesting except in a degenerate case. */
10537 int count = 0;
10538 int sub_count;
10539 tree field;
10540
10541 /* Can't handle incomplete types nor sizes that are not
10542 fixed. */
10543 if (!COMPLETE_TYPE_P (type)
10544 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10545 return -1;
10546
10547 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10548 {
10549 if (TREE_CODE (field) != FIELD_DECL)
10550 continue;
10551
10552 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10553 if (sub_count < 0)
10554 return -1;
10555 count = count > sub_count ? count : sub_count;
10556 }
10557
10558 /* There must be no padding. */
10559 if (wi::to_wide (TYPE_SIZE (type))
10560 != count * GET_MODE_BITSIZE (*modep))
10561 return -1;
10562
10563 return count;
10564 }
10565
10566 default:
10567 break;
10568 }
10569
10570 return -1;
10571 }
10572
10573 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10574 float or vector aggregate that shall be passed in FP/vector registers
10575 according to the ELFv2 ABI, return the homogeneous element mode in
10576 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10577
10578 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
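/* For example, under the ELFv2 ABI "struct { double x, y; }" is a
   homogeneous aggregate with *ELT_MODE == DFmode and *N_ELTS == 2, and
   so is passed in two consecutive FPRs instead of GPRs or memory. */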
10579
10580 static bool
10581 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10582 machine_mode *elt_mode,
10583 int *n_elts)
10584 {
10585 /* Note that we do not accept complex types at the top level as
10586 homogeneous aggregates; these types are handled via the
10587 targetm.calls.split_complex_arg mechanism. Complex types
10588 can be elements of homogeneous aggregates, however. */
10589 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10590 && AGGREGATE_TYPE_P (type))
10591 {
10592 machine_mode field_mode = VOIDmode;
10593 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10594
10595 if (field_count > 0)
10596 {
10597 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10598 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10599
10600 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10601 up to AGGR_ARG_NUM_REG registers. */
10602 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10603 {
10604 if (elt_mode)
10605 *elt_mode = field_mode;
10606 if (n_elts)
10607 *n_elts = field_count;
10608 return true;
10609 }
10610 }
10611 }
10612
10613 if (elt_mode)
10614 *elt_mode = mode;
10615 if (n_elts)
10616 *n_elts = 1;
10617 return false;
10618 }
10619
10620 /* Return a nonzero value to say to return the function value in
10621 memory, just as large structures are always returned. TYPE will be
10622 the data type of the value, and FNTYPE will be the type of the
10623 function doing the returning, or @code{NULL} for libcalls.
10624
10625 The AIX ABI for the RS/6000 specifies that all structures are
10626 returned in memory. The Darwin ABI does the same.
10627
10628 For the Darwin 64 Bit ABI, a function result can be returned in
10629 registers or in memory, depending on the size of the return data
10630 type. If it is returned in registers, the value occupies the same
10631 registers as it would if it were the first and only function
10632 argument. Otherwise, the function places its result in memory at
10633 the location pointed to by GPR3.
10634
10635 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10636 but a draft put them in memory, and GCC used to implement the draft
10637 instead of the final standard. Therefore, aix_struct_return
10638 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10639 compatibility can change DRAFT_V4_STRUCT_RET to override the
10640 default, and -m switches get the final word. See
10641 rs6000_option_override_internal for more details.
10642
10643 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10644 long double support is enabled. These values are returned in memory.
10645
10646 int_size_in_bytes returns -1 for variable size objects, which go in
10647 memory always. The cast to unsigned makes -1 > 8. */
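/* For example, "struct { int a, b; }" (8 bytes) is returned in memory
   under the AIX ABI, in GPRs under the ELFv2 ABI (size <= 16 bytes),
   and in r3/r4 under the final SVR4 ABI when aix_struct_return is
   false. */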
10648
10649 static bool
10650 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10651 {
10652 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10653 if (TARGET_MACHO
10654 && rs6000_darwin64_abi
10655 && TREE_CODE (type) == RECORD_TYPE
10656 && int_size_in_bytes (type) > 0)
10657 {
10658 CUMULATIVE_ARGS valcum;
10659 rtx valret;
10660
10661 valcum.words = 0;
10662 valcum.fregno = FP_ARG_MIN_REG;
10663 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10664 /* Do a trial code generation as if this were going to be passed
10665 as an argument; if any part goes in memory, we return NULL. */
10666 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10667 if (valret)
10668 return false;
10669 /* Otherwise fall through to more conventional ABI rules. */
10670 }
10671
10672 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
10673 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10674 NULL, NULL))
10675 return false;
10676
10677 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
10678 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10679 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10680 return false;
10681
10682 if (AGGREGATE_TYPE_P (type)
10683 && (aix_struct_return
10684 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10685 return true;
10686
10687 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10688 modes only exist for GCC vector types if -maltivec. */
10689 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10690 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10691 return false;
10692
10693 /* Return synthetic vectors in memory. */
10694 if (TREE_CODE (type) == VECTOR_TYPE
10695 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10696 {
10697 static bool warned_for_return_big_vectors = false;
10698 if (!warned_for_return_big_vectors)
10699 {
10700 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10701 "non-standard ABI extension with no compatibility "
10702 "guarantee");
10703 warned_for_return_big_vectors = true;
10704 }
10705 return true;
10706 }
10707
10708 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10709 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10710 return true;
10711
10712 return false;
10713 }
10714
10715 /* Specify whether values returned in registers should be at the most
10716 significant end of a register. We want aggregates returned by
10717 value to match the way aggregates are passed to functions. */
10718
10719 static bool
10720 rs6000_return_in_msb (const_tree valtype)
10721 {
10722 return (DEFAULT_ABI == ABI_ELFv2
10723 && BYTES_BIG_ENDIAN
10724 && AGGREGATE_TYPE_P (valtype)
10725 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10726 == PAD_UPWARD));
10727 }
10728
10729 #ifdef HAVE_AS_GNU_ATTRIBUTE
10730 /* Return TRUE if a call to function FNDECL may be one that
10731 potentially affects the function calling ABI of the object file. */
10732
10733 static bool
10734 call_ABI_of_interest (tree fndecl)
10735 {
10736 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10737 {
10738 struct cgraph_node *c_node;
10739
10740 /* Libcalls are always interesting. */
10741 if (fndecl == NULL_TREE)
10742 return true;
10743
10744 /* Any call to an external function is interesting. */
10745 if (DECL_EXTERNAL (fndecl))
10746 return true;
10747
10748 /* Interesting functions that we are emitting in this object file. */
10749 c_node = cgraph_node::get (fndecl);
10750 c_node = c_node->ultimate_alias_target ();
10751 return !c_node->only_called_directly_p ();
10752 }
10753 return false;
10754 }
10755 #endif
10756
10757 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10758 for a call to a function whose data type is FNTYPE.
10759 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
10760
10761 For incoming args we set the number of arguments in the prototype large
10762 so we never return a PARALLEL. */
10763
10764 void
10765 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10766 rtx libname ATTRIBUTE_UNUSED, int incoming,
10767 int libcall, int n_named_args,
10768 tree fndecl,
10769 machine_mode return_mode ATTRIBUTE_UNUSED)
10770 {
10771 static CUMULATIVE_ARGS zero_cumulative;
10772
10773 *cum = zero_cumulative;
10774 cum->words = 0;
10775 cum->fregno = FP_ARG_MIN_REG;
10776 cum->vregno = ALTIVEC_ARG_MIN_REG;
10777 cum->prototype = (fntype && prototype_p (fntype));
10778 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10779 ? CALL_LIBCALL : CALL_NORMAL);
10780 cum->sysv_gregno = GP_ARG_MIN_REG;
10781 cum->stdarg = stdarg_p (fntype);
10782 cum->libcall = libcall;
10783
10784 cum->nargs_prototype = 0;
10785 if (incoming || cum->prototype)
10786 cum->nargs_prototype = n_named_args;
10787
10788 /* Check for a longcall attribute. */
10789 if ((!fntype && rs6000_default_long_calls)
10790 || (fntype
10791 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10792 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10793 cum->call_cookie |= CALL_LONG;
10794 else if (DEFAULT_ABI != ABI_DARWIN)
10795 {
10796 bool is_local = (fndecl
10797 && !DECL_EXTERNAL (fndecl)
10798 && !DECL_WEAK (fndecl)
10799 && (*targetm.binds_local_p) (fndecl));
10800 if (is_local)
10801 ;
10802 else if (flag_plt)
10803 {
10804 if (fntype
10805 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10806 cum->call_cookie |= CALL_LONG;
10807 }
10808 else
10809 {
10810 if (!(fntype
10811 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10812 cum->call_cookie |= CALL_LONG;
10813 }
10814 }
10815
10816 if (TARGET_DEBUG_ARG)
10817 {
10818 fprintf (stderr, "\ninit_cumulative_args:");
10819 if (fntype)
10820 {
10821 tree ret_type = TREE_TYPE (fntype);
10822 fprintf (stderr, " ret code = %s,",
10823 get_tree_code_name (TREE_CODE (ret_type)));
10824 }
10825
10826 if (cum->call_cookie & CALL_LONG)
10827 fprintf (stderr, " longcall,");
10828
10829 fprintf (stderr, " proto = %d, nargs = %d\n",
10830 cum->prototype, cum->nargs_prototype);
10831 }
10832
10833 #ifdef HAVE_AS_GNU_ATTRIBUTE
10834 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10835 {
10836 cum->escapes = call_ABI_of_interest (fndecl);
10837 if (cum->escapes)
10838 {
10839 tree return_type;
10840
10841 if (fntype)
10842 {
10843 return_type = TREE_TYPE (fntype);
10844 return_mode = TYPE_MODE (return_type);
10845 }
10846 else
10847 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10848
10849 if (return_type != NULL)
10850 {
10851 if (TREE_CODE (return_type) == RECORD_TYPE
10852 && TYPE_TRANSPARENT_AGGR (return_type))
10853 {
10854 return_type = TREE_TYPE (first_field (return_type));
10855 return_mode = TYPE_MODE (return_type);
10856 }
10857 if (AGGREGATE_TYPE_P (return_type)
10858 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10859 <= 8))
10860 rs6000_returns_struct = true;
10861 }
10862 if (SCALAR_FLOAT_MODE_P (return_mode))
10863 {
10864 rs6000_passes_float = true;
10865 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10866 && (FLOAT128_IBM_P (return_mode)
10867 || FLOAT128_IEEE_P (return_mode)
10868 || (return_type != NULL
10869 && (TYPE_MAIN_VARIANT (return_type)
10870 == long_double_type_node))))
10871 rs6000_passes_long_double = true;
10872
10873 /* Note if we pass or return an IEEE 128-bit type. We changed
10874 the mangling for these types, and we may need to make an alias
10875 with the old mangling. */
10876 if (FLOAT128_IEEE_P (return_mode))
10877 rs6000_passes_ieee128 = true;
10878 }
10879 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10880 rs6000_passes_vector = true;
10881 }
10882 }
10883 #endif
10884
10885 if (fntype
10886 && !TARGET_ALTIVEC
10887 && TARGET_ALTIVEC_ABI
10888 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10889 {
10890 error ("cannot return value in vector register because"
10891 " altivec instructions are disabled, use %qs"
10892 " to enable them", "-maltivec");
10893 }
10894 }
10895 \f
10896 /* The mode the ABI uses for a word. This is not the same as word_mode
10897 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10898
10899 static scalar_int_mode
10900 rs6000_abi_word_mode (void)
10901 {
10902 return TARGET_32BIT ? SImode : DImode;
10903 }
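/* For example, with -m32 -mpowerpc64 the GPRs are 64 bits wide, so
   word_mode is DImode, but the 32-bit ABI still lays out arguments and
   stack slots in 4-byte words, hence SImode is returned above. */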
10904
10905 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10906 static char *
10907 rs6000_offload_options (void)
10908 {
10909 if (TARGET_64BIT)
10910 return xstrdup ("-foffload-abi=lp64");
10911 else
10912 return xstrdup ("-foffload-abi=ilp32");
10913 }
10914
10915 /* On rs6000, function arguments are promoted, as are function return
10916 values. */
10917
10918 static machine_mode
10919 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10920 machine_mode mode,
10921 int *punsignedp ATTRIBUTE_UNUSED,
10922 const_tree, int)
10923 {
10924 PROMOTE_MODE (mode, *punsignedp, type);
10925
10926 return mode;
10927 }
10928
10929 /* Return true if TYPE must be passed on the stack and not in registers. */
10930
10931 static bool
10932 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10933 {
10934 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10935 return must_pass_in_stack_var_size (mode, type);
10936 else
10937 return must_pass_in_stack_var_size_or_pad (mode, type);
10938 }
10939
10940 static inline bool
10941 is_complex_IBM_long_double (machine_mode mode)
10942 {
10943 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10944 }
10945
10946 /* Whether ABI_V4 passes MODE args to a function in floating point
10947 registers. */
10948
10949 static bool
10950 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10951 {
10952 if (!TARGET_HARD_FLOAT)
10953 return false;
10954 if (mode == DFmode)
10955 return true;
10956 if (mode == SFmode && named)
10957 return true;
10958 /* ABI_V4 passes complex IBM long double in 8 gprs.
10959 Stupid, but we can't change the ABI now. */
10960 if (is_complex_IBM_long_double (mode))
10961 return false;
10962 if (FLOAT128_2REG_P (mode))
10963 return true;
10964 if (DECIMAL_FLOAT_MODE_P (mode))
10965 return true;
10966 return false;
10967 }
10968
10969 /* Implement TARGET_FUNCTION_ARG_PADDING.
10970
10971 For the AIX ABI structs are always stored left shifted in their
10972 argument slot. */
10973
10974 static pad_direction
10975 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10976 {
10977 #ifndef AGGREGATE_PADDING_FIXED
10978 #define AGGREGATE_PADDING_FIXED 0
10979 #endif
10980 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10981 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10982 #endif
10983
10984 if (!AGGREGATE_PADDING_FIXED)
10985 {
10986 /* GCC used to pass structures of the same size as integer types as
10987 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING;
10988 i.e., structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10989 passed padded downward, except that -mstrict-align further
10990 muddied the water in that multi-component structures of 2 and 4
10991 bytes in size were passed padded upward.
10992
10993 The following arranges for best compatibility with previous
10994 versions of gcc, but removes the -mstrict-align dependency. */
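/* E.g. on a big-endian target a 2-byte struct is padded downward
   (placed in the least significant end of its slot, like a short),
   while a 3-byte BLKmode struct falls through to PAD_UPWARD below. */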
10995 if (BYTES_BIG_ENDIAN)
10996 {
10997 HOST_WIDE_INT size = 0;
10998
10999 if (mode == BLKmode)
11000 {
11001 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11002 size = int_size_in_bytes (type);
11003 }
11004 else
11005 size = GET_MODE_SIZE (mode);
11006
11007 if (size == 1 || size == 2 || size == 4)
11008 return PAD_DOWNWARD;
11009 }
11010 return PAD_UPWARD;
11011 }
11012
11013 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11014 {
11015 if (type != 0 && AGGREGATE_TYPE_P (type))
11016 return PAD_UPWARD;
11017 }
11018
11019 /* Fall back to the default. */
11020 return default_function_arg_padding (mode, type);
11021 }
11022
11023 /* If defined, a C expression that gives the alignment boundary, in bits,
11024 of an argument with the specified mode and type. If it is not defined,
11025 PARM_BOUNDARY is used for all arguments.
11026
11027 V.4 wants long longs and doubles to be double word aligned. Just
11028 testing the mode size is a boneheaded way to do this as it means
11029 that other types such as complex int are also double word aligned.
11030 However, we're stuck with this because changing the ABI might break
11031 existing library interfaces.
11032
11033 Quadword align Altivec/VSX vectors.
11034 Quadword align large synthetic vector types. */
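/* For example, under ABI_V4 a "long long" argument (8 bytes) gets
   64-bit alignment, an AltiVec/VSX vector gets 128-bit alignment on
   all ABIs, and most scalar types fall through to PARM_BOUNDARY, with
   the over-aligned aggregate cases handled in the body below. */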
11035
11036 static unsigned int
11037 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11038 {
11039 machine_mode elt_mode;
11040 int n_elts;
11041
11042 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11043
11044 if (DEFAULT_ABI == ABI_V4
11045 && (GET_MODE_SIZE (mode) == 8
11046 || (TARGET_HARD_FLOAT
11047 && !is_complex_IBM_long_double (mode)
11048 && FLOAT128_2REG_P (mode))))
11049 return 64;
11050 else if (FLOAT128_VECTOR_P (mode))
11051 return 128;
11052 else if (type && TREE_CODE (type) == VECTOR_TYPE
11053 && int_size_in_bytes (type) >= 8
11054 && int_size_in_bytes (type) < 16)
11055 return 64;
11056 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11057 || (type && TREE_CODE (type) == VECTOR_TYPE
11058 && int_size_in_bytes (type) >= 16))
11059 return 128;
11060
11061 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11062 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11063 -mcompat-align-parm is used. */
11064 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11065 || DEFAULT_ABI == ABI_ELFv2)
11066 && type && TYPE_ALIGN (type) > 64)
11067 {
11068 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11069 or homogeneous float/vector aggregates here. We already handled
11070 vector aggregates above, but still need to check for float here. */
11071 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11072 && !SCALAR_FLOAT_MODE_P (elt_mode));
11073
11074 /* We used to check for BLKmode instead of the above aggregate type
11075 check. Warn when this results in any difference to the ABI. */
11076 if (aggregate_p != (mode == BLKmode))
11077 {
11078 static bool warned;
11079 if (!warned && warn_psabi)
11080 {
11081 warned = true;
11082 inform (input_location,
11083 "the ABI of passing aggregates with %d-byte alignment"
11084 " has changed in GCC 5",
11085 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11086 }
11087 }
11088
11089 if (aggregate_p)
11090 return 128;
11091 }
11092
11093 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11094 implement the "aggregate type" check as a BLKmode check here; this
11095 means certain aggregate types are in fact not aligned. */
11096 if (TARGET_MACHO && rs6000_darwin64_abi
11097 && mode == BLKmode
11098 && type && TYPE_ALIGN (type) > 64)
11099 return 128;
11100
11101 return PARM_BOUNDARY;
11102 }
11103
11104 /* The offset in words to the start of the parameter save area. */
11105
11106 static unsigned int
11107 rs6000_parm_offset (void)
11108 {
11109 return (DEFAULT_ABI == ABI_V4 ? 2
11110 : DEFAULT_ABI == ABI_ELFv2 ? 4
11111 : 6);
11112 }
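/* These offsets correspond to the stack frame header each ABI reserves
   below the parameter save area: 2 words for SVR4 (back chain and LR
   save), 4 for ELFv2 (back chain, CR, LR and TOC saves), and 6 for
   AIX/ELFv1, which adds the two reserved compiler/linker words. */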
11113
11114 /* For a function parm of MODE and TYPE, return the starting word in
11115 the parameter area. NWORDS of the parameter area are already used. */
11116
11117 static unsigned int
11118 rs6000_parm_start (machine_mode mode, const_tree type,
11119 unsigned int nwords)
11120 {
11121 unsigned int align;
11122
11123 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11124 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11125 }
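/* Worked example: under ELFv2 (offset 4) on a 64-bit target, an
   argument requiring 16-byte alignment that arrives when NWORDS == 1
   has align == 128 / 64 - 1 == 1, so it starts at word
   1 + (-(4 + 1) & 1) == 2, i.e. on an even doubleword relative to the
   16-byte-aligned start of the parameter save area. */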
11126
11127 /* Compute the size (in words) of a function argument. */
11128
11129 static unsigned long
11130 rs6000_arg_size (machine_mode mode, const_tree type)
11131 {
11132 unsigned long size;
11133
11134 if (mode != BLKmode)
11135 size = GET_MODE_SIZE (mode);
11136 else
11137 size = int_size_in_bytes (type);
11138
11139 if (TARGET_32BIT)
11140 return (size + 3) >> 2;
11141 else
11142 return (size + 7) >> 3;
11143 }
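/* For example, a DFmode argument occupies (8 + 3) >> 2 == 2 words on a
   32-bit target, but only (8 + 7) >> 3 == 1 word on a 64-bit target. */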
11144 \f
11145 /* Use this to flush pending int fields. */
11146
11147 static void
11148 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11149 HOST_WIDE_INT bitpos, int final)
11150 {
11151 unsigned int startbit, endbit;
11152 int intregs, intoffset;
11153
11154 /* Handle the situations where a float is taking up the first half
11155 of the GPR, and the other half is empty (typically due to
11156 alignment restrictions). We can detect this by an 8-byte-aligned
11157 int field, or by seeing that this is the final flush for this
11158 argument. Count the word and continue on. */
11159 if (cum->floats_in_gpr == 1
11160 && (cum->intoffset % 64 == 0
11161 || (cum->intoffset == -1 && final)))
11162 {
11163 cum->words++;
11164 cum->floats_in_gpr = 0;
11165 }
11166
11167 if (cum->intoffset == -1)
11168 return;
11169
11170 intoffset = cum->intoffset;
11171 cum->intoffset = -1;
11172 cum->floats_in_gpr = 0;
11173
11174 if (intoffset % BITS_PER_WORD != 0)
11175 {
11176 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11177 if (!int_mode_for_size (bits, 0).exists ())
11178 {
11179 /* We couldn't find an appropriate mode, which happens,
11180 e.g., in packed structs when there are 3 bytes to load.
11181 Move intoffset back to the beginning of the word in this
11182 case. */
11183 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11184 }
11185 }
11186
11187 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11188 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11189 intregs = (endbit - startbit) / BITS_PER_WORD;
11190 cum->words += intregs;
11191 /* words should be unsigned. */
11192 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11193 {
11194 int pad = (endbit/BITS_PER_WORD) - cum->words;
11195 cum->words += pad;
11196 }
11197 }
11198
11199 /* The darwin64 ABI calls for us to recurse down through structs,
11200 looking for elements passed in registers. Unfortunately, we have
11201 to track int register count here also because of misalignments
11202 in powerpc alignment mode. */
11203
11204 static void
11205 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11206 const_tree type,
11207 HOST_WIDE_INT startbitpos)
11208 {
11209 tree f;
11210
11211 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11212 if (TREE_CODE (f) == FIELD_DECL)
11213 {
11214 HOST_WIDE_INT bitpos = startbitpos;
11215 tree ftype = TREE_TYPE (f);
11216 machine_mode mode;
11217 if (ftype == error_mark_node)
11218 continue;
11219 mode = TYPE_MODE (ftype);
11220
11221 if (DECL_SIZE (f) != 0
11222 && tree_fits_uhwi_p (bit_position (f)))
11223 bitpos += int_bit_position (f);
11224
11225 /* ??? FIXME: else assume zero offset. */
11226
11227 if (TREE_CODE (ftype) == RECORD_TYPE)
11228 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11229 else if (USE_FP_FOR_ARG_P (cum, mode))
11230 {
11231 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11232 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11233 cum->fregno += n_fpregs;
11234 /* Single-precision floats present a special problem for
11235 us, because they are smaller than an 8-byte GPR, and so
11236 the structure-packing rules combined with the standard
11237 varargs behavior mean that we want to pack float/float
11238 and float/int combinations into a single register's
11239 space. This is complicated by the arg advance flushing,
11240 which works on arbitrarily large groups of int-type
11241 fields. */
11242 if (mode == SFmode)
11243 {
11244 if (cum->floats_in_gpr == 1)
11245 {
11246 /* Two floats in a word; count the word and reset
11247 the float count. */
11248 cum->words++;
11249 cum->floats_in_gpr = 0;
11250 }
11251 else if (bitpos % 64 == 0)
11252 {
11253 /* A float at the beginning of an 8-byte word;
11254 count it and put off adjusting cum->words until
11255 we see if an arg advance flush is going to do it
11256 for us. */
11257 cum->floats_in_gpr++;
11258 }
11259 else
11260 {
11261 /* The float is at the end of a word, preceded
11262 by integer fields, so the arg advance flush
11263 just above has already set cum->words and
11264 everything is taken care of. */
11265 }
11266 }
11267 else
11268 cum->words += n_fpregs;
11269 }
11270 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11271 {
11272 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11273 cum->vregno++;
11274 cum->words += 2;
11275 }
11276 else if (cum->intoffset == -1)
11277 cum->intoffset = bitpos;
11278 }
11279 }
11280
11281 /* Check for an item that needs to be considered specially under the Darwin
11282 64-bit ABI. These are record types where the mode is BLK or the structure is
11283 8 bytes in size. */
11284 static int
11285 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11286 {
11287 return rs6000_darwin64_abi
11288 && ((mode == BLKmode
11289 && TREE_CODE (type) == RECORD_TYPE
11290 && int_size_in_bytes (type) > 0)
11291 || (type && TREE_CODE (type) == RECORD_TYPE
11292 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11293 }
11294
11295 /* Update the data in CUM to advance over an argument
11296 of mode MODE and data type TYPE.
11297 (TYPE is null for libcalls where that information may not be available.)
11298
11299 Note that for args passed by reference, function_arg will be called
11300 with MODE and TYPE set to that of the pointer to the arg, not the arg
11301 itself. */
11302
11303 static void
11304 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11305 const_tree type, bool named, int depth)
11306 {
11307 machine_mode elt_mode;
11308 int n_elts;
11309
11310 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11311
11312 /* Only tick off an argument if we're not recursing. */
11313 if (depth == 0)
11314 cum->nargs_prototype--;
11315
11316 #ifdef HAVE_AS_GNU_ATTRIBUTE
11317 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11318 && cum->escapes)
11319 {
11320 if (SCALAR_FLOAT_MODE_P (mode))
11321 {
11322 rs6000_passes_float = true;
11323 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11324 && (FLOAT128_IBM_P (mode)
11325 || FLOAT128_IEEE_P (mode)
11326 || (type != NULL
11327 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11328 rs6000_passes_long_double = true;
11329
11330 /* Note if we pass or return an IEEE 128-bit type. We changed the
11331 mangling for these types, and we may need to make an alias with
11332 the old mangling. */
11333 if (FLOAT128_IEEE_P (mode))
11334 rs6000_passes_ieee128 = true;
11335 }
11336 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11337 rs6000_passes_vector = true;
11338 }
11339 #endif
11340
11341 if (TARGET_ALTIVEC_ABI
11342 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11343 || (type && TREE_CODE (type) == VECTOR_TYPE
11344 && int_size_in_bytes (type) == 16)))
11345 {
11346 bool stack = false;
11347
11348 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11349 {
11350 cum->vregno += n_elts;
11351
11352 if (!TARGET_ALTIVEC)
11353 error ("cannot pass argument in vector register because"
11354 " altivec instructions are disabled, use %qs"
11355 " to enable them", "-maltivec");
11356
11357 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11358 even if it is going to be passed in a vector register.
11359 Darwin does the same for variable-argument functions. */
11360 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11361 && TARGET_64BIT)
11362 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11363 stack = true;
11364 }
11365 else
11366 stack = true;
11367
11368 if (stack)
11369 {
11370 int align;
11371
11372 /* Vector parameters must be 16-byte aligned. In 32-bit
11373 mode this means we need to take into account the offset
11374 to the parameter save area. In 64-bit mode, they just
11375 have to start on an even word, since the parameter save
11376 area is 16-byte aligned. */
11377 if (TARGET_32BIT)
11378 align = -(rs6000_parm_offset () + cum->words) & 3;
11379 else
11380 align = cum->words & 1;
11381 cum->words += align + rs6000_arg_size (mode, type);
11382
11383 if (TARGET_DEBUG_ARG)
11384 {
11385 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11386 cum->words, align);
11387 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11388 cum->nargs_prototype, cum->prototype,
11389 GET_MODE_NAME (mode));
11390 }
11391 }
11392 }
11393 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11394 {
11395 int size = int_size_in_bytes (type);
11396 /* Variable sized types have size == -1 and are
11397 treated as if consisting entirely of ints.
11398 Pad to 16 byte boundary if needed. */
11399 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11400 && (cum->words % 2) != 0)
11401 cum->words++;
11402 /* For varargs, we can just go up by the size of the struct. */
11403 if (!named)
11404 cum->words += (size + 7) / 8;
11405 else
11406 {
11407 /* It is tempting to say int register count just goes up by
11408 sizeof(type)/8, but this is wrong in a case such as
11409 { int; double; int; } [powerpc alignment]. We have to
11410 grovel through the fields for these too. */
11411 cum->intoffset = 0;
11412 cum->floats_in_gpr = 0;
11413 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11414 rs6000_darwin64_record_arg_advance_flush (cum,
11415 size * BITS_PER_UNIT, 1);
11416 }
11417 if (TARGET_DEBUG_ARG)
11418 {
11419 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11420 cum->words, TYPE_ALIGN (type), size);
11421 fprintf (stderr,
11422 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11423 cum->nargs_prototype, cum->prototype,
11424 GET_MODE_NAME (mode));
11425 }
11426 }
11427 else if (DEFAULT_ABI == ABI_V4)
11428 {
11429 if (abi_v4_pass_in_fpr (mode, named))
11430 {
11431 /* _Decimal128 must use an even/odd register pair. This assumes
11432 that the register number is odd when fregno is odd. */
11433 if (mode == TDmode && (cum->fregno % 2) == 1)
11434 cum->fregno++;
11435
11436 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11437 <= FP_ARG_V4_MAX_REG)
11438 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11439 else
11440 {
11441 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11442 if (mode == DFmode || FLOAT128_IBM_P (mode)
11443 || mode == DDmode || mode == TDmode)
11444 cum->words += cum->words & 1;
11445 cum->words += rs6000_arg_size (mode, type);
11446 }
11447 }
11448 else
11449 {
11450 int n_words = rs6000_arg_size (mode, type);
11451 int gregno = cum->sysv_gregno;
11452
11453 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11454 As does any other 2 word item such as complex int due to a
11455 historical mistake. */
11456 if (n_words == 2)
11457 gregno += (1 - gregno) & 1;
11458
11459 /* Multi-reg args are not split between registers and stack. */
11460 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11461 {
11462 /* Long long is aligned on the stack. So are other 2 word
11463 items such as complex int due to a historical mistake. */
11464 if (n_words == 2)
11465 cum->words += cum->words & 1;
11466 cum->words += n_words;
11467 }
11468
11469 /* Note: we continue to accumulate gregno even after we have started
11470 spilling to the stack; expand_builtin_saveregs uses this to detect
11471 that spilling has begun. */
11472 cum->sysv_gregno = gregno + n_words;
11473 }
11474
11475 if (TARGET_DEBUG_ARG)
11476 {
11477 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11478 cum->words, cum->fregno);
11479 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11480 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11481 fprintf (stderr, "mode = %4s, named = %d\n",
11482 GET_MODE_NAME (mode), named);
11483 }
11484 }
11485 else
11486 {
11487 int n_words = rs6000_arg_size (mode, type);
11488 int start_words = cum->words;
11489 int align_words = rs6000_parm_start (mode, type, start_words);
11490
11491 cum->words = align_words + n_words;
11492
11493 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11494 {
11495 /* _Decimal128 must be passed in an even/odd float register pair.
11496 This assumes that the register number is odd when fregno is
11497 odd. */
11498 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11499 cum->fregno++;
11500 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11501 }
11502
11503 if (TARGET_DEBUG_ARG)
11504 {
11505 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11506 cum->words, cum->fregno);
11507 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11508 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11509 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11510 named, align_words - start_words, depth);
11511 }
11512 }
11513 }
11514
11515 static void
11516 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11517 const_tree type, bool named)
11518 {
11519 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11520 0);
11521 }
11522
11523 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11524 structure between cum->intoffset and bitpos to integer registers. */
11525
11526 static void
11527 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11528 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11529 {
11530 machine_mode mode;
11531 unsigned int regno;
11532 unsigned int startbit, endbit;
11533 int this_regno, intregs, intoffset;
11534 rtx reg;
11535
11536 if (cum->intoffset == -1)
11537 return;
11538
11539 intoffset = cum->intoffset;
11540 cum->intoffset = -1;
11541
11542 /* If this is the trailing part of a word, try to only load that
11543 much into the register. Otherwise load the whole register. Note
11544 that in the latter case we may pick up unwanted bits. It's not a
11545 problem at the moment, but we may wish to revisit this. */
11546
11547 if (intoffset % BITS_PER_WORD != 0)
11548 {
11549 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11550 if (!int_mode_for_size (bits, 0).exists (&mode))
11551 {
11552 /* We couldn't find an appropriate mode, which happens,
11553 e.g., in packed structs when there are 3 bytes to load.
11554 Move intoffset back to the beginning of the word in this
11555 case. */
11556 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11557 mode = word_mode;
11558 }
11559 }
11560 else
11561 mode = word_mode;
11562
11563 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11564 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11565 intregs = (endbit - startbit) / BITS_PER_WORD;
11566 this_regno = cum->words + intoffset / BITS_PER_WORD;
11567
11568 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11569 cum->use_stack = 1;
11570
11571 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11572 if (intregs <= 0)
11573 return;
11574
11575 intoffset /= BITS_PER_UNIT;
11576 do
11577 {
11578 regno = GP_ARG_MIN_REG + this_regno;
11579 reg = gen_rtx_REG (mode, regno);
11580 rvec[(*k)++] =
11581 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11582
11583 this_regno += 1;
11584 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11585 mode = word_mode;
11586 intregs -= 1;
11587 }
11588 while (intregs > 0);
11589 }
11590
11591 /* Recursive workhorse for the following. */
11592
11593 static void
11594 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11595 HOST_WIDE_INT startbitpos, rtx rvec[],
11596 int *k)
11597 {
11598 tree f;
11599
11600 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11601 if (TREE_CODE (f) == FIELD_DECL)
11602 {
11603 HOST_WIDE_INT bitpos = startbitpos;
11604 tree ftype = TREE_TYPE (f);
11605 machine_mode mode;
11606 if (ftype == error_mark_node)
11607 continue;
11608 mode = TYPE_MODE (ftype);
11609
11610 if (DECL_SIZE (f) != 0
11611 && tree_fits_uhwi_p (bit_position (f)))
11612 bitpos += int_bit_position (f);
11613
11614 /* ??? FIXME: else assume zero offset. */
11615
11616 if (TREE_CODE (ftype) == RECORD_TYPE)
11617 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11618 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11619 {
11620 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11621 #if 0
11622 switch (mode)
11623 {
11624 case E_SCmode: mode = SFmode; break;
11625 case E_DCmode: mode = DFmode; break;
11626 case E_TCmode: mode = TFmode; break;
11627 default: break;
11628 }
11629 #endif
11630 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11631 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11632 {
11633 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11634 && (mode == TFmode || mode == TDmode));
11635 /* Long double or _Decimal128 split over regs and memory. */
11636 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11637 cum->use_stack = 1;
11638 }
11639 rvec[(*k)++]
11640 = gen_rtx_EXPR_LIST (VOIDmode,
11641 gen_rtx_REG (mode, cum->fregno++),
11642 GEN_INT (bitpos / BITS_PER_UNIT));
11643 if (FLOAT128_2REG_P (mode))
11644 cum->fregno++;
11645 }
11646 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11647 {
11648 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11649 rvec[(*k)++]
11650 = gen_rtx_EXPR_LIST (VOIDmode,
11651 gen_rtx_REG (mode, cum->vregno++),
11652 GEN_INT (bitpos / BITS_PER_UNIT));
11653 }
11654 else if (cum->intoffset == -1)
11655 cum->intoffset = bitpos;
11656 }
11657 }
11658
11659 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11660 the register(s) to be used for each field and subfield of a struct
11661 being passed by value, along with the offset of where the
11662 register's value may be found in the block. FP fields go in FP
11663 registers, vector fields go in vector registers, and everything
11664 else goes in int registers, packed as in memory.
11665
11666 This code is also used for function return values. RETVAL indicates
11667 whether this is the case.
11668
11669 Much of this is taken from the SPARC V9 port, which has a similar
11670 calling convention. */
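/* For example, "struct { double d; int i; }" passed by value produces a
   PARALLEL whose first element puts D in an FPR at byte offset 0 and
   whose second covers I with (part of) a GPR at byte offset 8. */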
11671
11672 static rtx
11673 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11674 bool named, bool retval)
11675 {
11676 rtx rvec[FIRST_PSEUDO_REGISTER];
11677 int k = 1, kbase = 1;
11678 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11679 /* This is a copy; modifications are not visible to our caller. */
11680 CUMULATIVE_ARGS copy_cum = *orig_cum;
11681 CUMULATIVE_ARGS *cum = &copy_cum;
11682
11683 /* Pad to 16 byte boundary if needed. */
11684 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11685 && (cum->words % 2) != 0)
11686 cum->words++;
11687
11688 cum->intoffset = 0;
11689 cum->use_stack = 0;
11690 cum->named = named;
11691
11692 /* Put entries into rvec[] for individual FP and vector fields, and
11693 for the chunks of memory that go in int regs. Note we start at
11694 element 1; 0 is reserved for an indication of using memory, and
11695 may or may not be filled in below. */
11696 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11697 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11698
11699 /* If any part of the struct went on the stack put all of it there.
11700 This hack is because the generic code for
11701 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11702 parts of the struct are not at the beginning. */
11703 if (cum->use_stack)
11704 {
11705 if (retval)
11706 return NULL_RTX; /* doesn't go in registers at all */
11707 kbase = 0;
11708 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11709 }
11710 if (k > 1 || cum->use_stack)
11711 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11712 else
11713 return NULL_RTX;
11714 }
11715
11716 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11717
11718 static rtx
11719 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11720 int align_words)
11721 {
11722 int n_units;
11723 int i, k;
11724 rtx rvec[GP_ARG_NUM_REG + 1];
11725
11726 if (align_words >= GP_ARG_NUM_REG)
11727 return NULL_RTX;
11728
11729 n_units = rs6000_arg_size (mode, type);
11730
11731 /* Optimize the simple case where the arg fits in one gpr, except in
11732 the case of BLKmode due to assign_parms assuming that registers are
11733 BITS_PER_WORD wide. */
11734 if (n_units == 0
11735 || (n_units == 1 && mode != BLKmode))
11736 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11737
11738 k = 0;
11739 if (align_words + n_units > GP_ARG_NUM_REG)
11740 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11741 using a magic NULL_RTX component.
11742 This is not strictly correct. Only some of the arg belongs in
11743 memory, not all of it. However, the normal scheme using
11744 function_arg_partial_nregs can result in unusual subregs, e.g.
11745 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11746 store the whole arg to memory is often more efficient than code
11747 to store pieces, and we know that space is available in the right
11748 place for the whole arg. */
11749 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11750
11751 i = 0;
11752 do
11753 {
11754 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11755 rtx off = GEN_INT (i++ * 4);
11756 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11757 }
11758 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11759
11760 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11761 }
11762
11763 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11764 but must also be copied into the parameter save area starting at
11765 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11766 to the GPRs and/or memory. Return the number of elements used. */
11767
11768 static int
11769 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11770 int align_words, rtx *rvec)
11771 {
11772 int k = 0;
11773
11774 if (align_words < GP_ARG_NUM_REG)
11775 {
11776 int n_words = rs6000_arg_size (mode, type);
11777
11778 if (align_words + n_words > GP_ARG_NUM_REG
11779 || mode == BLKmode
11780 || (TARGET_32BIT && TARGET_POWERPC64))
11781 {
11782 /* If this is partially on the stack, then we only
11783 include the portion actually in registers here. */
11784 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11785 int i = 0;
11786
11787 if (align_words + n_words > GP_ARG_NUM_REG)
11788 {
11789 /* Not all of the arg fits in gprs. Say that it goes in memory
11790 too, using a magic NULL_RTX component. Also see comment in
11791 rs6000_mixed_function_arg for why the normal
11792 function_arg_partial_nregs scheme doesn't work in this case. */
11793 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11794 }
11795
11796 do
11797 {
11798 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11799 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11800 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11801 }
11802 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11803 }
11804 else
11805 {
11806 /* The whole arg fits in gprs. */
11807 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11808 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11809 }
11810 }
11811 else
11812 {
11813 /* It's entirely in memory. */
11814 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11815 }
11816
11817 return k;
11818 }
11819
11820 /* RVEC is a vector of K components of an argument of mode MODE.
11821 Construct the final function_arg return value from it. */
11822
11823 static rtx
11824 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11825 {
11826 gcc_assert (k >= 1);
11827
11828 /* Avoid returning a PARALLEL in the trivial cases. */
11829 if (k == 1)
11830 {
11831 if (XEXP (rvec[0], 0) == NULL_RTX)
11832 return NULL_RTX;
11833
11834 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11835 return XEXP (rvec[0], 0);
11836 }
11837
11838 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11839 }
11840
11841 /* Determine where to put an argument to a function.
11842 Value is zero to push the argument on the stack,
11843 or a hard register in which to store the argument.
11844
11845 MODE is the argument's machine mode.
11846 TYPE is the data type of the argument (as a tree).
11847 This is null for libcalls where that information may
11848 not be available.
11849 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11850 the preceding args and about the function being called. It is
11851 not modified in this routine.
11852 NAMED is nonzero if this argument is a named parameter
11853 (otherwise it is an extra parameter matching an ellipsis).
11854
11855 On RS/6000 the first eight words of non-FP arguments are normally
11856 passed in registers and the rest are pushed. Under AIX, the first 13 FP
11857 args are in registers. Under V.4, the first 8 FP args are in registers.
11858
11859 If this is floating-point and no prototype is specified, we use
11860 both an FP and integer register (or possibly FP reg and stack). Library
11861 functions (when CALL_LIBCALL is set) always have the proper types for args,
11862 so we can pass the FP value just in one register. emit_library_function
11863 doesn't support PARALLEL anyway.
11864
11865 Note that for args passed by reference, function_arg will be called
11866 with MODE and TYPE set to that of the pointer to the arg, not the arg
11867 itself. */
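/* Illustrative 64-bit AIX/ELF example: for "f (int a, double b)", A is
   passed in r3 and B in f1; if F is unprototyped, B is additionally
   passed in its GPR slot (r4 here) so that a callee expecting integer
   registers still finds the value. */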
11868
11869 static rtx
11870 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11871 const_tree type, bool named)
11872 {
11873 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11874 enum rs6000_abi abi = DEFAULT_ABI;
11875 machine_mode elt_mode;
11876 int n_elts;
11877
11878 /* Return a marker to indicate whether the bit in CR1 that V.4 uses
11879 to say fp args were passed in registers needs to be set or cleared.
11880 Assume that we don't need the marker for software floating point,
11881 or compiler generated library calls. */
11882 if (mode == VOIDmode)
11883 {
11884 if (abi == ABI_V4
11885 && (cum->call_cookie & CALL_LIBCALL) == 0
11886 && (cum->stdarg
11887 || (cum->nargs_prototype < 0
11888 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11889 && TARGET_HARD_FLOAT)
11890 return GEN_INT (cum->call_cookie
11891 | ((cum->fregno == FP_ARG_MIN_REG)
11892 ? CALL_V4_SET_FP_ARGS
11893 : CALL_V4_CLEAR_FP_ARGS));
11894
11895 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11896 }
11897
11898 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11899
11900 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11901 {
11902 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11903 if (rslt != NULL_RTX)
11904 return rslt;
11905 /* Else fall through to usual handling. */
11906 }
11907
11908 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11909 {
11910 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11911 rtx r, off;
11912 int i, k = 0;
11913
11914 /* Do we also need to pass this argument in the parameter save area?
11915 Library support functions for IEEE 128-bit are assumed to not need the
11916 value passed both in GPRs and in vector registers. */
11917 if (TARGET_64BIT && !cum->prototype
11918 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11919 {
11920 int align_words = ROUND_UP (cum->words, 2);
11921 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11922 }
11923
11924 /* Describe where this argument goes in the vector registers. */
11925 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11926 {
11927 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11928 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11929 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11930 }
11931
11932 return rs6000_finish_function_arg (mode, rvec, k);
11933 }
11934 else if (TARGET_ALTIVEC_ABI
11935 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11936 || (type && TREE_CODE (type) == VECTOR_TYPE
11937 && int_size_in_bytes (type) == 16)))
11938 {
11939 if (named || abi == ABI_V4)
11940 return NULL_RTX;
11941 else
11942 {
11943 /* Vector parameters to varargs functions under AIX or Darwin
11944 get passed in memory and possibly also in GPRs. */
11945 int align, align_words, n_words;
11946 machine_mode part_mode;
11947
11948 /* Vector parameters must be 16-byte aligned. In 32-bit
11949 mode this means we need to take into account the offset
11950 to the parameter save area. In 64-bit mode, they just
11951 have to start on an even word, since the parameter save
11952 area is 16-byte aligned. */
11953 if (TARGET_32BIT)
11954 align = -(rs6000_parm_offset () + cum->words) & 3;
11955 else
11956 align = cum->words & 1;
11957 align_words = cum->words + align;
11958
11959 /* Out of registers? Memory, then. */
11960 if (align_words >= GP_ARG_NUM_REG)
11961 return NULL_RTX;
11962
11963 if (TARGET_32BIT && TARGET_POWERPC64)
11964 return rs6000_mixed_function_arg (mode, type, align_words);
11965
11966 /* The vector value goes in GPRs. Only the part of the
11967 value in GPRs is reported here. */
11968 part_mode = mode;
11969 n_words = rs6000_arg_size (mode, type);
11970 if (align_words + n_words > GP_ARG_NUM_REG)
11971 /* Fortunately, there are only two possibilities: the value
11972 is either wholly in GPRs or half in GPRs and half not. */
11973 part_mode = DImode;
11974
11975 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11976 }
11977 }
11978
11979 else if (abi == ABI_V4)
11980 {
11981 if (abi_v4_pass_in_fpr (mode, named))
11982 {
11983 /* _Decimal128 must use an even/odd register pair. This assumes
11984 that the register number is odd when fregno is odd. */
11985 if (mode == TDmode && (cum->fregno % 2) == 1)
11986 cum->fregno++;
11987
11988 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11989 <= FP_ARG_V4_MAX_REG)
11990 return gen_rtx_REG (mode, cum->fregno);
11991 else
11992 return NULL_RTX;
11993 }
11994 else
11995 {
11996 int n_words = rs6000_arg_size (mode, type);
11997 int gregno = cum->sysv_gregno;
11998
11999 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12000 As does any other 2 word item such as complex int due to a
12001 historical mistake. */
12002 if (n_words == 2)
12003 gregno += (1 - gregno) & 1;
12004
12005 /* Multi-reg args are not split between registers and stack. */
12006 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12007 return NULL_RTX;
12008
12009 if (TARGET_32BIT && TARGET_POWERPC64)
12010 return rs6000_mixed_function_arg (mode, type,
12011 gregno - GP_ARG_MIN_REG);
12012 return gen_rtx_REG (mode, gregno);
12013 }
12014 }
12015 else
12016 {
12017 int align_words = rs6000_parm_start (mode, type, cum->words);
12018
12019 /* _Decimal128 must be passed in an even/odd float register pair.
12020 This assumes that the register number is odd when fregno is odd. */
12021 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12022 cum->fregno++;
12023
12024 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12025 && !(TARGET_AIX && !TARGET_ELF
12026 && type != NULL && AGGREGATE_TYPE_P (type)))
12027 {
12028 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12029 rtx r, off;
12030 int i, k = 0;
12031 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12032 int fpr_words;
12033
12034 /* Do we also need to pass this argument in the parameter
12035 save area? */
12036 if (type && (cum->nargs_prototype <= 0
12037 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12038 && TARGET_XL_COMPAT
12039 && align_words >= GP_ARG_NUM_REG)))
12040 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12041
12042 /* Describe where this argument goes in the fprs. */
12043 for (i = 0; i < n_elts
12044 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12045 {
12046 /* Check if the argument is split over registers and memory.
12047 This can only ever happen for long double or _Decimal128;
12048 complex types are handled via split_complex_arg. */
12049 machine_mode fmode = elt_mode;
12050 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12051 {
12052 gcc_assert (FLOAT128_2REG_P (fmode));
12053 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12054 }
12055
12056 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12057 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12058 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12059 }
12060
12061 /* If there were not enough FPRs to hold the argument, the rest
12062 usually goes into memory. However, if the current position
12063 is still within the register parameter area, a portion may
12064 actually have to go into GPRs.
12065
12066 Note that it may happen that the portion of the argument
12067 passed in the first "half" of the first GPR was already
12068 passed in the last FPR as well.
12069
12070 For unnamed arguments, we already set up GPRs to cover the
12071 whole argument in rs6000_psave_function_arg, so there is
12072 nothing further to do at this point. */
12073 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12074 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12075 && cum->nargs_prototype > 0)
12076 {
12077 static bool warned;
12078
12079 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12080 int n_words = rs6000_arg_size (mode, type);
12081
12082 align_words += fpr_words;
12083 n_words -= fpr_words;
12084
12085 do
12086 {
12087 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12088 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12089 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12090 }
12091 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12092
12093 if (!warned && warn_psabi)
12094 {
12095 warned = true;
12096 inform (input_location,
12097 "the ABI of passing homogeneous float aggregates"
12098 " has changed in GCC 5");
12099 }
12100 }
12101
12102 return rs6000_finish_function_arg (mode, rvec, k);
12103 }
12104 else if (align_words < GP_ARG_NUM_REG)
12105 {
12106 if (TARGET_32BIT && TARGET_POWERPC64)
12107 return rs6000_mixed_function_arg (mode, type, align_words);
12108
12109 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12110 }
12111 else
12112 return NULL_RTX;
12113 }
12114 }
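
/* Illustrative sketch (an example, not ABI documentation): under the
   64-bit AIX-style ABIs with FPRs f1..f13 available for arguments, a
   homogeneous aggregate of doubles such as

     struct hfa { double a, b, c; };

   is described above with one EXPR_LIST per element, each naming the
   next FPR.  If only one FPR remains when the aggregate is reached,
   the loop emits a single FPR element, and when the position is still
   inside the register parameter area the do/while loop above adds GPR
   elements for the spilled words; that changed layout is what the
   GCC 5 -Wpsabi note refers to.  */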
12115 \f
12116 /* For an arg passed partly in registers and partly in memory, this is
12117 the number of bytes passed in registers. For args passed entirely in
12118 registers or entirely in memory, zero. When an arg is described by a
12119 PARALLEL, perhaps using more than one register type, this function
12120 returns the number of bytes used by the first element of the PARALLEL. */
12121
12122 static int
12123 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12124 tree type, bool named)
12125 {
12126 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12127 bool passed_in_gprs = true;
12128 int ret = 0;
12129 int align_words;
12130 machine_mode elt_mode;
12131 int n_elts;
12132
12133 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12134
12135 if (DEFAULT_ABI == ABI_V4)
12136 return 0;
12137
12138 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12139 {
12140 /* If we are passing this arg in the fixed parameter save area (gprs or
12141 memory) as well as VRs, we do not use the partial bytes mechanism;
12142 instead, rs6000_function_arg will return a PARALLEL including a memory
12143 element as necessary. Library support functions for IEEE 128-bit are
12144 assumed to not need the value passed both in GPRs and in vector
12145 registers. */
12146 if (TARGET_64BIT && !cum->prototype
12147 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12148 return 0;
12149
12150 /* Otherwise, we pass in VRs only. Check for partial copies. */
12151 passed_in_gprs = false;
12152 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12153 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12154 }
12155
12156 /* In this complicated case we just disable the partial_nregs code. */
12157 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12158 return 0;
12159
12160 align_words = rs6000_parm_start (mode, type, cum->words);
12161
12162 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12163 && !(TARGET_AIX && !TARGET_ELF
12164 && type != NULL && AGGREGATE_TYPE_P (type)))
12165 {
12166 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12167
12168 /* If we are passing this arg in the fixed parameter save area
12169 (gprs or memory) as well as FPRs, we do not use the partial
12170 bytes mechanism; instead, rs6000_function_arg will return a
12171 PARALLEL including a memory element as necessary. */
12172 if (type
12173 && (cum->nargs_prototype <= 0
12174 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12175 && TARGET_XL_COMPAT
12176 && align_words >= GP_ARG_NUM_REG)))
12177 return 0;
12178
12179 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12180 passed_in_gprs = false;
12181 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12182 {
12183 /* Compute number of bytes / words passed in FPRs. If there
12184 is still space available in the register parameter area
12185 *after* that amount, a part of the argument will be passed
12186 in GPRs. In that case, the total amount passed in any
12187 registers is equal to the amount that would have been passed
12188 in GPRs if everything were passed there, so we fall back to
12189 the GPR code below to compute the appropriate value. */
12190 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12191 * MIN (8, GET_MODE_SIZE (elt_mode)));
12192 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12193
12194 if (align_words + fpr_words < GP_ARG_NUM_REG)
12195 passed_in_gprs = true;
12196 else
12197 ret = fpr;
12198 }
12199 }
12200
12201 if (passed_in_gprs
12202 && align_words < GP_ARG_NUM_REG
12203 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12204 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12205
12206 if (ret != 0 && TARGET_DEBUG_ARG)
12207 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12208
12209 return ret;
12210 }
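
/* Worked example (a sketch under assumed register counts): with the
   64-bit AIX-style ABIs, FPRs f1..f13 and eight GPR argument words,
   suppose a prototyped 16-byte IBM long double arrives when only f13
   is free.  n_fpreg is (16 + 7) >> 3 == 2, so the argument does not
   fit entirely and fpr == 1 * 8 == 8 bytes go in the last FPR.  If
   align_words + 1 is still below GP_ARG_NUM_REG, the remainder
   travels in GPRs and the GPR code at the end computes the result;
   otherwise the function returns 8.  */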
12211 \f
12212 /* A C expression that indicates when an argument must be passed by
12213 reference. If nonzero for an argument, a copy of that argument is
12214 made in memory and a pointer to the argument is passed instead of
12215 the argument itself. The pointer is passed in whatever way is
12216 appropriate for passing a pointer to that type.
12217
12218 Under V.4, aggregates and long double are passed by reference.
12219
12220 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12221 reference unless the AltiVec vector extension ABI is in force.
12222
12223 As an extension to all ABIs, variable sized types are passed by
12224 reference. */
12225
12226 static bool
12227 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12228 machine_mode mode, const_tree type,
12229 bool named ATTRIBUTE_UNUSED)
12230 {
12231 if (!type)
12232 return 0;
12233
12234 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12235 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12236 {
12237 if (TARGET_DEBUG_ARG)
12238 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12239 return 1;
12240 }
12241
12242 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12243 {
12244 if (TARGET_DEBUG_ARG)
12245 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12246 return 1;
12247 }
12248
12249 if (int_size_in_bytes (type) < 0)
12250 {
12251 if (TARGET_DEBUG_ARG)
12252 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12253 return 1;
12254 }
12255
12256 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12257 modes only exist for GCC vector types if -maltivec. */
12258 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12259 {
12260 if (TARGET_DEBUG_ARG)
12261 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12262 return 1;
12263 }
12264
12265 /* Pass synthetic vectors in memory. */
12266 if (TREE_CODE (type) == VECTOR_TYPE
12267 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12268 {
12269 static bool warned_for_pass_big_vectors = false;
12270 if (TARGET_DEBUG_ARG)
12271 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12272 if (!warned_for_pass_big_vectors)
12273 {
12274 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12275 "non-standard ABI extension with no compatibility "
12276 "guarantee");
12277 warned_for_pass_big_vectors = true;
12278 }
12279 return 1;
12280 }
12281
12282 return 0;
12283 }
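
/* For illustration (a sketch, assuming a 32-bit SVR4 target):

     struct point { int x, y; };                      // aggregate
     typedef int v8si __attribute__ ((vector_size (32)));

   A call f (p, v) with these types passes, for each argument, a
   pointer to a copy the caller makes: the struct because V.4 passes
   all aggregates by reference, and the 32-byte vector because it is
   larger than what the ABI in force passes directly (16 bytes with
   the AltiVec ABI, 8 otherwise), triggering the one-time -Wpsabi
   warning above.  */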
12284
12285 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12286 already processed. Return true if the parameter must be passed
12287 (fully or partially) on the stack. */
12288
12289 static bool
12290 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12291 {
12292 machine_mode mode;
12293 int unsignedp;
12294 rtx entry_parm;
12295
12296 /* Catch errors. */
12297 if (type == NULL || type == error_mark_node)
12298 return true;
12299
12300 /* Handle types with no storage requirement. */
12301 if (TYPE_MODE (type) == VOIDmode)
12302 return false;
12303
12304 /* Handle complex types. */
12305 if (TREE_CODE (type) == COMPLEX_TYPE)
12306 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12307 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12308
12309 /* Handle transparent aggregates. */
12310 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12311 && TYPE_TRANSPARENT_AGGR (type))
12312 type = TREE_TYPE (first_field (type));
12313
12314 /* See if this arg was passed by invisible reference. */
12315 if (pass_by_reference (get_cumulative_args (args_so_far),
12316 TYPE_MODE (type), type, true))
12317 type = build_pointer_type (type);
12318
12319 /* Find mode as it is passed by the ABI. */
12320 unsignedp = TYPE_UNSIGNED (type);
12321 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12322
12323 /* If we must pass in stack, we need a stack. */
12324 if (rs6000_must_pass_in_stack (mode, type))
12325 return true;
12326
12327 /* If there is no incoming register, we need a stack. */
12328 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12329 if (entry_parm == NULL)
12330 return true;
12331
12332 /* Likewise if we need to pass both in registers and on the stack. */
12333 if (GET_CODE (entry_parm) == PARALLEL
12334 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12335 return true;
12336
12337 /* Also true if we're partially in registers and partially not. */
12338 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12339 return true;
12340
12341 /* Update info on where next arg arrives in registers. */
12342 rs6000_function_arg_advance (args_so_far, mode, type, true);
12343 return false;
12344 }
12345
12346 /* Return true if FUN has no prototype, has a variable argument
12347 list, or passes any parameter in memory. */
12348
12349 static bool
12350 rs6000_function_parms_need_stack (tree fun, bool incoming)
12351 {
12352 tree fntype, result;
12353 CUMULATIVE_ARGS args_so_far_v;
12354 cumulative_args_t args_so_far;
12355
12356 if (!fun)
12357 /* Must be a libcall, all of which only use reg parms. */
12358 return false;
12359
12360 fntype = fun;
12361 if (!TYPE_P (fun))
12362 fntype = TREE_TYPE (fun);
12363
12364 /* Varargs functions need the parameter save area. */
12365 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12366 return true;
12367
12368 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12369 args_so_far = pack_cumulative_args (&args_so_far_v);
12370
12371 /* When incoming, we will have been passed the function decl.
12372 It is necessary to use the decl to handle K&R style functions,
12373 where TYPE_ARG_TYPES may not be available. */
12374 if (incoming)
12375 {
12376 gcc_assert (DECL_P (fun));
12377 result = DECL_RESULT (fun);
12378 }
12379 else
12380 result = TREE_TYPE (fntype);
12381
12382 if (result && aggregate_value_p (result, fntype))
12383 {
12384 if (!TYPE_P (result))
12385 result = TREE_TYPE (result);
12386 result = build_pointer_type (result);
12387 rs6000_parm_needs_stack (args_so_far, result);
12388 }
12389
12390 if (incoming)
12391 {
12392 tree parm;
12393
12394 for (parm = DECL_ARGUMENTS (fun);
12395 parm && parm != void_list_node;
12396 parm = TREE_CHAIN (parm))
12397 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12398 return true;
12399 }
12400 else
12401 {
12402 function_args_iterator args_iter;
12403 tree arg_type;
12404
12405 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12406 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12407 return true;
12408 }
12409
12410 return false;
12411 }
12412
12413 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12414 usually a constant depending on the ABI. However, in the ELFv2 ABI
12415 the register parameter area is optional when calling a function that
12416 has a prototype in scope, has no variable argument list, and passes
12417 all parameters in registers. */
12418
12419 int
12420 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12421 {
12422 int reg_parm_stack_space;
12423
12424 switch (DEFAULT_ABI)
12425 {
12426 default:
12427 reg_parm_stack_space = 0;
12428 break;
12429
12430 case ABI_AIX:
12431 case ABI_DARWIN:
12432 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12433 break;
12434
12435 case ABI_ELFv2:
12436 /* ??? Recomputing this every time is a bit expensive. Is there
12437 a place to cache this information? */
12438 if (rs6000_function_parms_need_stack (fun, incoming))
12439 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12440 else
12441 reg_parm_stack_space = 0;
12442 break;
12443 }
12444
12445 return reg_parm_stack_space;
12446 }
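
/* Concrete reading of the above (a sketch, not new ABI data): with
   -m64 under ABI_AIX the area is always 64 bytes, room for the eight
   GPR argument words r3..r10.  Under ABI_ELFv2 those 64 bytes are
   reserved only when the callee may actually look at the parameter
   save area, for example:

     void vf (const char *fmt, ...);   // variadic: 64 bytes
     int  add (int a, int b);          // regs only: 0 bytes
   */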
12447
12448 static void
12449 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12450 {
12451 int i;
12452 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12453
12454 if (nregs == 0)
12455 return;
12456
12457 for (i = 0; i < nregs; i++)
12458 {
12459 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12460 if (reload_completed)
12461 {
12462 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12463 tem = NULL_RTX;
12464 else
12465 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12466 i * GET_MODE_SIZE (reg_mode));
12467 }
12468 else
12469 tem = replace_equiv_address (tem, XEXP (tem, 0));
12470
12471 gcc_assert (tem);
12472
12473 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12474 }
12475 }
12476 \f
12477 /* Perform any actions needed for a function that is receiving a
12478 variable number of arguments.
12479
12480 CUM is as above.
12481
12482 MODE and TYPE are the mode and type of the current parameter.
12483
12484 PRETEND_SIZE is a variable that should be set to the amount of stack
12485 that must be pushed by the prolog to pretend that our caller pushed
12486 it.
12487
12488 Normally, this macro will push all remaining incoming registers on the
12489 stack and set PRETEND_SIZE to the length of the registers pushed. */
12490
12491 static void
12492 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12493 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12494 int no_rtl)
12495 {
12496 CUMULATIVE_ARGS next_cum;
12497 int reg_size = TARGET_32BIT ? 4 : 8;
12498 rtx save_area = NULL_RTX, mem;
12499 int first_reg_offset;
12500 alias_set_type set;
12501
12502 /* Skip the last named argument. */
12503 next_cum = *get_cumulative_args (cum);
12504 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12505
12506 if (DEFAULT_ABI == ABI_V4)
12507 {
12508 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12509
12510 if (! no_rtl)
12511 {
12512 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12513 HOST_WIDE_INT offset = 0;
12514
12515 /* Try to optimize the size of the varargs save area.
12516 The ABI requires that ap.reg_save_area is doubleword
12517 aligned, but we don't need to allocate space for all
12518 the bytes, only those that will actually have
12519 something saved to them. */
12520 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12521 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12522 if (TARGET_HARD_FLOAT
12523 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12524 && cfun->va_list_fpr_size)
12525 {
12526 if (gpr_reg_num)
12527 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12528 * UNITS_PER_FP_WORD;
12529 if (cfun->va_list_fpr_size
12530 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12531 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12532 else
12533 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12534 * UNITS_PER_FP_WORD;
12535 }
12536 if (gpr_reg_num)
12537 {
12538 offset = -((first_reg_offset * reg_size) & ~7);
12539 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12540 {
12541 gpr_reg_num = cfun->va_list_gpr_size;
12542 if (reg_size == 4 && (first_reg_offset & 1))
12543 gpr_reg_num++;
12544 }
12545 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12546 }
12547 else if (fpr_size)
12548 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12549 * UNITS_PER_FP_WORD
12550 - (int) (GP_ARG_NUM_REG * reg_size);
12551
12552 if (gpr_size + fpr_size)
12553 {
12554 rtx reg_save_area
12555 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12556 gcc_assert (MEM_P (reg_save_area));
12557 reg_save_area = XEXP (reg_save_area, 0);
12558 if (GET_CODE (reg_save_area) == PLUS)
12559 {
12560 gcc_assert (XEXP (reg_save_area, 0)
12561 == virtual_stack_vars_rtx);
12562 gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
12563 offset += INTVAL (XEXP (reg_save_area, 1));
12564 }
12565 else
12566 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12567 }
12568
12569 cfun->machine->varargs_save_offset = offset;
12570 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12571 }
12572 }
12573 else
12574 {
12575 first_reg_offset = next_cum.words;
12576 save_area = crtl->args.internal_arg_pointer;
12577
12578 if (targetm.calls.must_pass_in_stack (mode, type))
12579 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12580 }
12581
12582 set = get_varargs_alias_set ();
12583 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12584 && cfun->va_list_gpr_size)
12585 {
12586 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12587
12588 if (va_list_gpr_counter_field)
12589 /* V4 va_list_gpr_size counts number of registers needed. */
12590 n_gpr = cfun->va_list_gpr_size;
12591 else
12592 /* char * va_list instead counts number of bytes needed. */
12593 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12594
12595 if (nregs > n_gpr)
12596 nregs = n_gpr;
12597
12598 mem = gen_rtx_MEM (BLKmode,
12599 plus_constant (Pmode, save_area,
12600 first_reg_offset * reg_size));
12601 MEM_NOTRAP_P (mem) = 1;
12602 set_mem_alias_set (mem, set);
12603 set_mem_align (mem, BITS_PER_WORD);
12604
12605 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12606 nregs);
12607 }
12608
12609 /* Save FP registers if needed. */
12610 if (DEFAULT_ABI == ABI_V4
12611 && TARGET_HARD_FLOAT
12612 && ! no_rtl
12613 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12614 && cfun->va_list_fpr_size)
12615 {
12616 int fregno = next_cum.fregno, nregs;
12617 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12618 rtx lab = gen_label_rtx ();
12619 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12620 * UNITS_PER_FP_WORD);
12621
12622 emit_jump_insn
12623 (gen_rtx_SET (pc_rtx,
12624 gen_rtx_IF_THEN_ELSE (VOIDmode,
12625 gen_rtx_NE (VOIDmode, cr1,
12626 const0_rtx),
12627 gen_rtx_LABEL_REF (VOIDmode, lab),
12628 pc_rtx)));
12629
12630 for (nregs = 0;
12631 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12632 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12633 {
12634 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12635 plus_constant (Pmode, save_area, off));
12636 MEM_NOTRAP_P (mem) = 1;
12637 set_mem_alias_set (mem, set);
12638 set_mem_align (mem, GET_MODE_ALIGNMENT (
12639 TARGET_HARD_FLOAT ? DFmode : SFmode));
12640 emit_move_insn (mem, gen_rtx_REG (
12641 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12642 }
12643
12644 emit_label (lab);
12645 }
12646 }
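
/* Register save area layout sketch for ABI_V4 (derived from the code
   above; offsets assume -m32, so reg_size == 4 and
   UNITS_PER_FP_WORD == 8):

     reg_save_area + 0  : up to 8 GPR words for r3..r10 (32 bytes)
     reg_save_area + 32 : up to 8 doubles for f1..f8, with hard float

   Only registers that va_arg can still consume are stored, and the
   FPR stores sit behind a runtime branch on CR1 so they can be
   skipped when the caller passed nothing in FPRs.  */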
12647
12648 /* Create the va_list data type. */
12649
12650 static tree
12651 rs6000_build_builtin_va_list (void)
12652 {
12653 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12654
12655 /* For AIX, prefer 'char *' because that's what the system
12656 header files like. */
12657 if (DEFAULT_ABI != ABI_V4)
12658 return build_pointer_type (char_type_node);
12659
12660 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12661 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12662 get_identifier ("__va_list_tag"), record);
12663
12664 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12665 unsigned_char_type_node);
12666 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12667 unsigned_char_type_node);
12668 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12669 every user file. */
12670 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12671 get_identifier ("reserved"), short_unsigned_type_node);
12672 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12673 get_identifier ("overflow_arg_area"),
12674 ptr_type_node);
12675 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12676 get_identifier ("reg_save_area"),
12677 ptr_type_node);
12678
12679 va_list_gpr_counter_field = f_gpr;
12680 va_list_fpr_counter_field = f_fpr;
12681
12682 DECL_FIELD_CONTEXT (f_gpr) = record;
12683 DECL_FIELD_CONTEXT (f_fpr) = record;
12684 DECL_FIELD_CONTEXT (f_res) = record;
12685 DECL_FIELD_CONTEXT (f_ovf) = record;
12686 DECL_FIELD_CONTEXT (f_sav) = record;
12687
12688 TYPE_STUB_DECL (record) = type_decl;
12689 TYPE_NAME (record) = type_decl;
12690 TYPE_FIELDS (record) = f_gpr;
12691 DECL_CHAIN (f_gpr) = f_fpr;
12692 DECL_CHAIN (f_fpr) = f_res;
12693 DECL_CHAIN (f_res) = f_ovf;
12694 DECL_CHAIN (f_ovf) = f_sav;
12695
12696 layout_type (record);
12697
12698 /* The correct type is an array type of one element. */
12699 return build_array_type (record, build_index_type (size_zero_node));
12700 }
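
/* The record built above corresponds to this C declaration (a
   transcription of the fields created here; the typedef name is
   illustrative, only the layout matters):

     typedef struct __va_list_tag
     {
       unsigned char gpr;         // GPR argument registers consumed
       unsigned char fpr;         // FPR argument registers consumed
       unsigned short reserved;   // named padding, silences -Wpadded
       void *overflow_arg_area;   // next argument on the stack
       void *reg_save_area;       // base of the register save area
     } __gnuc_va_list[1];         // array type of one element
   */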
12701
12702 /* Implement va_start. */
12703
12704 static void
12705 rs6000_va_start (tree valist, rtx nextarg)
12706 {
12707 HOST_WIDE_INT words, n_gpr, n_fpr;
12708 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12709 tree gpr, fpr, ovf, sav, t;
12710
12711 /* Only SVR4 needs something special. */
12712 if (DEFAULT_ABI != ABI_V4)
12713 {
12714 std_expand_builtin_va_start (valist, nextarg);
12715 return;
12716 }
12717
12718 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12719 f_fpr = DECL_CHAIN (f_gpr);
12720 f_res = DECL_CHAIN (f_fpr);
12721 f_ovf = DECL_CHAIN (f_res);
12722 f_sav = DECL_CHAIN (f_ovf);
12723
12724 valist = build_simple_mem_ref (valist);
12725 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12726 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12727 f_fpr, NULL_TREE);
12728 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12729 f_ovf, NULL_TREE);
12730 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12731 f_sav, NULL_TREE);
12732
12733 /* Count number of gp and fp argument registers used. */
12734 words = crtl->args.info.words;
12735 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12736 GP_ARG_NUM_REG);
12737 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12738 FP_ARG_NUM_REG);
12739
12740 if (TARGET_DEBUG_ARG)
12741 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12742 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12743 words, n_gpr, n_fpr);
12744
12745 if (cfun->va_list_gpr_size)
12746 {
12747 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12748 build_int_cst (NULL_TREE, n_gpr));
12749 TREE_SIDE_EFFECTS (t) = 1;
12750 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12751 }
12752
12753 if (cfun->va_list_fpr_size)
12754 {
12755 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12756 build_int_cst (NULL_TREE, n_fpr));
12757 TREE_SIDE_EFFECTS (t) = 1;
12758 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12759
12760 #ifdef HAVE_AS_GNU_ATTRIBUTE
12761 if (call_ABI_of_interest (cfun->decl))
12762 rs6000_passes_float = true;
12763 #endif
12764 }
12765
12766 /* Find the overflow area. */
12767 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12768 if (words != 0)
12769 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12770 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12771 TREE_SIDE_EFFECTS (t) = 1;
12772 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12773
12774 /* If there were no va_arg invocations, don't set up the register
12775 save area. */
12776 if (!cfun->va_list_gpr_size
12777 && !cfun->va_list_fpr_size
12778 && n_gpr < GP_ARG_NUM_REG
12779 && n_fpr < FP_ARG_V4_MAX_REG)
12780 return;
12781
12782 /* Find the register save area. */
12783 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12784 if (cfun->machine->varargs_save_offset)
12785 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12786 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12787 TREE_SIDE_EFFECTS (t) = 1;
12788 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12789 }
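
/* Net effect for ABI_V4, restated as pseudo-C against the va_list
   layout shown earlier (a sketch; ap is the va_list):

     ap->gpr = GPRs r3..r10 already used by named arguments;
     ap->fpr = FPRs f1..f8 already used by named arguments;
     ap->overflow_arg_area = incoming arg pointer + named stack words;
     ap->reg_save_area = frame base + varargs_save_offset;

   The counter stores and the reg_save_area store are elided when no
   va_arg in the body would read them.  */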
12790
12791 /* Implement va_arg. */
12792
12793 static tree
12794 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12795 gimple_seq *post_p)
12796 {
12797 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12798 tree gpr, fpr, ovf, sav, reg, t, u;
12799 int size, rsize, n_reg, sav_ofs, sav_scale;
12800 tree lab_false, lab_over, addr;
12801 int align;
12802 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12803 int regalign = 0;
12804 gimple *stmt;
12805
12806 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12807 {
12808 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12809 return build_va_arg_indirect_ref (t);
12810 }
12811
12812 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12813 earlier version of gcc, with the property that it always applied alignment
12814 adjustments to the va-args (even for zero-sized types). The cheapest way
12815 to deal with this is to replicate the effect of the part of
12816 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12817 of relevance.
12818 We don't need to check for pass-by-reference because of the test above.
12819 We can return a simplified answer, since we know there's no offset to add. */
12820
12821 if (((TARGET_MACHO
12822 && rs6000_darwin64_abi)
12823 || DEFAULT_ABI == ABI_ELFv2
12824 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12825 && integer_zerop (TYPE_SIZE (type)))
12826 {
12827 unsigned HOST_WIDE_INT align, boundary;
12828 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12829 align = PARM_BOUNDARY / BITS_PER_UNIT;
12830 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12831 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12832 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12833 boundary /= BITS_PER_UNIT;
12834 if (boundary > align)
12835 {
12836 tree t;
12837 /* This updates arg ptr by the amount that would be necessary
12838 to align the zero-sized (but not zero-alignment) item. */
12839 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12840 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12841 gimplify_and_add (t, pre_p);
12842
12843 t = fold_convert (sizetype, valist_tmp);
12844 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12845 fold_convert (TREE_TYPE (valist),
12846 fold_build2 (BIT_AND_EXPR, sizetype, t,
12847 size_int (-boundary))));
12848 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12849 gimplify_and_add (t, pre_p);
12850 }
12851 /* Since it is zero-sized there's no increment for the item itself. */
12852 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12853 return build_va_arg_indirect_ref (valist_tmp);
12854 }
12855
12856 if (DEFAULT_ABI != ABI_V4)
12857 {
12858 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12859 {
12860 tree elem_type = TREE_TYPE (type);
12861 machine_mode elem_mode = TYPE_MODE (elem_type);
12862 int elem_size = GET_MODE_SIZE (elem_mode);
12863
12864 if (elem_size < UNITS_PER_WORD)
12865 {
12866 tree real_part, imag_part;
12867 gimple_seq post = NULL;
12868
12869 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12870 &post);
12871 /* Copy the value into a temporary, lest the formal temporary
12872 be reused out from under us. */
12873 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12874 gimple_seq_add_seq (pre_p, post);
12875
12876 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12877 post_p);
12878
12879 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12880 }
12881 }
12882
12883 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12884 }
12885
12886 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12887 f_fpr = DECL_CHAIN (f_gpr);
12888 f_res = DECL_CHAIN (f_fpr);
12889 f_ovf = DECL_CHAIN (f_res);
12890 f_sav = DECL_CHAIN (f_ovf);
12891
12892 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12893 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12894 f_fpr, NULL_TREE);
12895 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12896 f_ovf, NULL_TREE);
12897 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12898 f_sav, NULL_TREE);
12899
12900 size = int_size_in_bytes (type);
12901 rsize = (size + 3) / 4;
12902 int pad = 4 * rsize - size;
12903 align = 1;
12904
12905 machine_mode mode = TYPE_MODE (type);
12906 if (abi_v4_pass_in_fpr (mode, false))
12907 {
12908 /* FP args go in FP registers, if present. */
12909 reg = fpr;
12910 n_reg = (size + 7) / 8;
12911 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12912 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12913 if (mode != SFmode && mode != SDmode)
12914 align = 8;
12915 }
12916 else
12917 {
12918 /* Otherwise into GP registers. */
12919 reg = gpr;
12920 n_reg = rsize;
12921 sav_ofs = 0;
12922 sav_scale = 4;
12923 if (n_reg == 2)
12924 align = 8;
12925 }
12926
12927 /* Pull the value out of the saved registers.... */
12928
12929 lab_over = NULL;
12930 addr = create_tmp_var (ptr_type_node, "addr");
12931
12932 /* AltiVec vectors never go in registers when -mabi=altivec. */
12933 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12934 align = 16;
12935 else
12936 {
12937 lab_false = create_artificial_label (input_location);
12938 lab_over = create_artificial_label (input_location);
12939
12940 /* Long long is aligned in the registers, as is any other 2-GPR
12941 item such as complex int, due to a historical mistake. */
12942 u = reg;
12943 if (n_reg == 2 && reg == gpr)
12944 {
12945 regalign = 1;
12946 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12947 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12948 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12949 unshare_expr (reg), u);
12950 }
12951 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12952 reg number is 0 for f1, so we want to make it odd. */
12953 else if (reg == fpr && mode == TDmode)
12954 {
12955 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12956 build_int_cst (TREE_TYPE (reg), 1));
12957 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12958 }
12959
12960 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12961 t = build2 (GE_EXPR, boolean_type_node, u, t);
12962 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12963 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12964 gimplify_and_add (t, pre_p);
12965
12966 t = sav;
12967 if (sav_ofs)
12968 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12969
12970 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12971 build_int_cst (TREE_TYPE (reg), n_reg));
12972 u = fold_convert (sizetype, u);
12973 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12974 t = fold_build_pointer_plus (t, u);
12975
12976 /* _Decimal32 varargs are located in the second word of the 64-bit
12977 FP register for 32-bit binaries. */
12978 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12979 t = fold_build_pointer_plus_hwi (t, size);
12980
12981 /* Args are passed right-aligned. */
12982 if (BYTES_BIG_ENDIAN)
12983 t = fold_build_pointer_plus_hwi (t, pad);
12984
12985 gimplify_assign (addr, t, pre_p);
12986
12987 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12988
12989 stmt = gimple_build_label (lab_false);
12990 gimple_seq_add_stmt (pre_p, stmt);
12991
12992 if ((n_reg == 2 && !regalign) || n_reg > 2)
12993 {
12994 /* Ensure that we don't find any more args in regs.
12995 Alignment has been taken care of for special cases. */
12996 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12997 }
12998 }
12999
13000 /* ... otherwise out of the overflow area. */
13001
13002 /* Care for on-stack alignment if needed. */
13003 t = ovf;
13004 if (align != 1)
13005 {
13006 t = fold_build_pointer_plus_hwi (t, align - 1);
13007 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13008 build_int_cst (TREE_TYPE (t), -align));
13009 }
13010
13011 /* Args are passed right-aligned. */
13012 if (BYTES_BIG_ENDIAN)
13013 t = fold_build_pointer_plus_hwi (t, pad);
13014
13015 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13016
13017 gimplify_assign (unshare_expr (addr), t, pre_p);
13018
13019 t = fold_build_pointer_plus_hwi (t, size);
13020 gimplify_assign (unshare_expr (ovf), t, pre_p);
13021
13022 if (lab_over)
13023 {
13024 stmt = gimple_build_label (lab_over);
13025 gimple_seq_add_stmt (pre_p, stmt);
13026 }
13027
13028 if (STRICT_ALIGNMENT
13029 && (TYPE_ALIGN (type)
13030 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13031 {
13032 /* The value (of type complex double, for example) may not be
13033 aligned in memory in the saved registers, so copy via a
13034 temporary. (This is the same code as used for SPARC.) */
13035 tree tmp = create_tmp_var (type, "va_arg_tmp");
13036 tree dest_addr = build_fold_addr_expr (tmp);
13037
13038 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13039 3, dest_addr, addr, size_int (rsize * 4));
13040 TREE_ADDRESSABLE (tmp) = 1;
13041
13042 gimplify_and_add (copy, pre_p);
13043 addr = dest_addr;
13044 }
13045
13046 addr = fold_convert (ptrtype, addr);
13047 return build_va_arg_indirect_ref (addr);
13048 }
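
/* Shape of the ABI_V4 register-case code gimplified above, written
   out as simplified pseudo-C (a sketch; 8 is the argument register
   count used by both the GPR and the FPR paths):

     if (reg >= 8 - n_reg + 1)
       goto lab_false;                    // registers exhausted
     addr = sav + sav_ofs + reg * sav_scale;
     reg += n_reg;
     goto lab_over;
   lab_false:
     reg = 8;                             // multi-reg values only
     addr = align (ovf);                  // overflow (stack) area
     ovf = addr + size;
   lab_over:
     result = *(type *) addr;

   The big-endian right-alignment and _Decimal32/_Decimal128 register
   adjustments are omitted from the sketch.  */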
13049
13050 /* Builtins. */
13051
13052 static void
13053 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13054 {
13055 tree t;
13056 unsigned classify = rs6000_builtin_info[(int)code].attr;
13057 const char *attr_string = "";
13058
13059 gcc_assert (name != NULL);
13060 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13061
13062 if (rs6000_builtin_decls[(int)code])
13063 fatal_error (input_location,
13064 "internal error: builtin function %qs already processed",
13065 name);
13066
13067 rs6000_builtin_decls[(int)code] = t =
13068 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13069
13070 /* Set any special attributes. */
13071 if ((classify & RS6000_BTC_CONST) != 0)
13072 {
13073 /* const function, function only depends on the inputs. */
13074 TREE_READONLY (t) = 1;
13075 TREE_NOTHROW (t) = 1;
13076 attr_string = ", const";
13077 }
13078 else if ((classify & RS6000_BTC_PURE) != 0)
13079 {
13080 /* pure function, function can read global memory, but does not set any
13081 external state. */
13082 DECL_PURE_P (t) = 1;
13083 TREE_NOTHROW (t) = 1;
13084 attr_string = ", pure";
13085 }
13086 else if ((classify & RS6000_BTC_FP) != 0)
13087 {
13088 /* Function is a math function. If rounding mode is on, then treat the
13089 function as not reading global memory, but it can have arbitrary side
13090 effects. If it is off, then assume the function is a const function.
13091 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13092 builtin-attribute.def that is used for the math functions. */
13093 TREE_NOTHROW (t) = 1;
13094 if (flag_rounding_math)
13095 {
13096 DECL_PURE_P (t) = 1;
13097 DECL_IS_NOVOPS (t) = 1;
13098 attr_string = ", fp, pure";
13099 }
13100 else
13101 {
13102 TREE_READONLY (t) = 1;
13103 attr_string = ", fp, const";
13104 }
13105 }
13106 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13107 gcc_unreachable ();
13108
13109 if (TARGET_DEBUG_BUILTIN)
13110 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13111 (int)code, name, attr_string);
13112 }
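
/* Hypothetical usage sketch (the builtin name, enum value and type
   below are invented for illustration; real registrations are driven
   by rs6000-builtin.def):

     tree ftype = build_function_type_list (V4SI_type_node,
                                            V4SI_type_node,
                                            V4SI_type_node, NULL_TREE);
     def_builtin ("__builtin_example_vadd", ftype, EXAMPLE_BUILTIN_VADD);

   An entry whose .attr carries RS6000_BTC_CONST then comes back with
   TREE_READONLY set, so the optimizers may CSE calls to it.  */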
13113
13114 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13115
13116 #undef RS6000_BUILTIN_0
13117 #undef RS6000_BUILTIN_1
13118 #undef RS6000_BUILTIN_2
13119 #undef RS6000_BUILTIN_3
13120 #undef RS6000_BUILTIN_A
13121 #undef RS6000_BUILTIN_D
13122 #undef RS6000_BUILTIN_H
13123 #undef RS6000_BUILTIN_P
13124 #undef RS6000_BUILTIN_X
13125
13126 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13127 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13128 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13129 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13130 { MASK, ICODE, NAME, ENUM },
13131
13132 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13133 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13134 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13135 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13136 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13137
13138 static const struct builtin_description bdesc_3arg[] =
13139 {
13140 #include "rs6000-builtin.def"
13141 };
13142
13143 /* DST operations: void foo (void *, const int, const char). */
13144
13145 #undef RS6000_BUILTIN_0
13146 #undef RS6000_BUILTIN_1
13147 #undef RS6000_BUILTIN_2
13148 #undef RS6000_BUILTIN_3
13149 #undef RS6000_BUILTIN_A
13150 #undef RS6000_BUILTIN_D
13151 #undef RS6000_BUILTIN_H
13152 #undef RS6000_BUILTIN_P
13153 #undef RS6000_BUILTIN_X
13154
13155 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13156 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13157 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13158 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13159 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13160 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13161 { MASK, ICODE, NAME, ENUM },
13162
13163 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13164 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13165 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13166
13167 static const struct builtin_description bdesc_dst[] =
13168 {
13169 #include "rs6000-builtin.def"
13170 };
13171
13172 /* Simple binary operations: VECc = foo (VECa, VECb). */
13173
13174 #undef RS6000_BUILTIN_0
13175 #undef RS6000_BUILTIN_1
13176 #undef RS6000_BUILTIN_2
13177 #undef RS6000_BUILTIN_3
13178 #undef RS6000_BUILTIN_A
13179 #undef RS6000_BUILTIN_D
13180 #undef RS6000_BUILTIN_H
13181 #undef RS6000_BUILTIN_P
13182 #undef RS6000_BUILTIN_X
13183
13184 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13185 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13186 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13187 { MASK, ICODE, NAME, ENUM },
13188
13189 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13190 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13191 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13192 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13193 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13194 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13195
13196 static const struct builtin_description bdesc_2arg[] =
13197 {
13198 #include "rs6000-builtin.def"
13199 };
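
/* Expansion sketch: with the definitions above in effect, a made-up
   two-operand line in rs6000-builtin.def such as

     RS6000_BUILTIN_2 (EXAMPLE_VADDX, "vaddx", RS6000_BTM_ALTIVEC,
                       RS6000_BTC_CONST, CODE_FOR_nothing)

   contributes exactly one initializer to bdesc_2arg:

     { RS6000_BTM_ALTIVEC, CODE_FOR_nothing, "vaddx", EXAMPLE_VADDX },

   while every other RS6000_BUILTIN_* line expands to nothing; this is
   how one .def file feeds each of these tables in turn.  */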
13200
13201 #undef RS6000_BUILTIN_0
13202 #undef RS6000_BUILTIN_1
13203 #undef RS6000_BUILTIN_2
13204 #undef RS6000_BUILTIN_3
13205 #undef RS6000_BUILTIN_A
13206 #undef RS6000_BUILTIN_D
13207 #undef RS6000_BUILTIN_H
13208 #undef RS6000_BUILTIN_P
13209 #undef RS6000_BUILTIN_X
13210
13211 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13212 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13213 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13214 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13215 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13216 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13217 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13218 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13219 { MASK, ICODE, NAME, ENUM },
13220
13221 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13222
13223 /* AltiVec predicates. */
13224
13225 static const struct builtin_description bdesc_altivec_preds[] =
13226 {
13227 #include "rs6000-builtin.def"
13228 };
13229
13230 /* ABS* operations. */
13231
13232 #undef RS6000_BUILTIN_0
13233 #undef RS6000_BUILTIN_1
13234 #undef RS6000_BUILTIN_2
13235 #undef RS6000_BUILTIN_3
13236 #undef RS6000_BUILTIN_A
13237 #undef RS6000_BUILTIN_D
13238 #undef RS6000_BUILTIN_H
13239 #undef RS6000_BUILTIN_P
13240 #undef RS6000_BUILTIN_X
13241
13242 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13243 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13244 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13245 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13246 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13247 { MASK, ICODE, NAME, ENUM },
13248
13249 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13250 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13251 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13252 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13253
13254 static const struct builtin_description bdesc_abs[] =
13255 {
13256 #include "rs6000-builtin.def"
13257 };
13258
13259 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13260 foo (VECa). */
13261
13262 #undef RS6000_BUILTIN_0
13263 #undef RS6000_BUILTIN_1
13264 #undef RS6000_BUILTIN_2
13265 #undef RS6000_BUILTIN_3
13266 #undef RS6000_BUILTIN_A
13267 #undef RS6000_BUILTIN_D
13268 #undef RS6000_BUILTIN_H
13269 #undef RS6000_BUILTIN_P
13270 #undef RS6000_BUILTIN_X
13271
13272 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13273 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13274 { MASK, ICODE, NAME, ENUM },
13275
13276 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13277 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13278 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13279 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13280 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13281 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13282 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13283
13284 static const struct builtin_description bdesc_1arg[] =
13285 {
13286 #include "rs6000-builtin.def"
13287 };
13288
13289 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13290
13291 #undef RS6000_BUILTIN_0
13292 #undef RS6000_BUILTIN_1
13293 #undef RS6000_BUILTIN_2
13294 #undef RS6000_BUILTIN_3
13295 #undef RS6000_BUILTIN_A
13296 #undef RS6000_BUILTIN_D
13297 #undef RS6000_BUILTIN_H
13298 #undef RS6000_BUILTIN_P
13299 #undef RS6000_BUILTIN_X
13300
13301 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13302 { MASK, ICODE, NAME, ENUM },
13303
13304 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13305 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13306 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13307 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13308 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13309 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13310 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13311 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13312
13313 static const struct builtin_description bdesc_0arg[] =
13314 {
13315 #include "rs6000-builtin.def"
13316 };
13317
13318 /* HTM builtins. */
13319 #undef RS6000_BUILTIN_0
13320 #undef RS6000_BUILTIN_1
13321 #undef RS6000_BUILTIN_2
13322 #undef RS6000_BUILTIN_3
13323 #undef RS6000_BUILTIN_A
13324 #undef RS6000_BUILTIN_D
13325 #undef RS6000_BUILTIN_H
13326 #undef RS6000_BUILTIN_P
13327 #undef RS6000_BUILTIN_X
13328
13329 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13330 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13331 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13332 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13333 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13334 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13335 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13336 { MASK, ICODE, NAME, ENUM },
13337
13338 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13339 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13340
13341 static const struct builtin_description bdesc_htm[] =
13342 {
13343 #include "rs6000-builtin.def"
13344 };
13345
13346 #undef RS6000_BUILTIN_0
13347 #undef RS6000_BUILTIN_1
13348 #undef RS6000_BUILTIN_2
13349 #undef RS6000_BUILTIN_3
13350 #undef RS6000_BUILTIN_A
13351 #undef RS6000_BUILTIN_D
13352 #undef RS6000_BUILTIN_H
13353 #undef RS6000_BUILTIN_P
13354
13355 /* Return true if a builtin function is overloaded. */
13356 bool
13357 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13358 {
13359 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13360 }
13361
13362 const char *
13363 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13364 {
13365 return rs6000_builtin_info[(int)fncode].name;
13366 }
13367
13368 /* Expand an expression EXP that calls a builtin without arguments. */
13369 static rtx
13370 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13371 {
13372 rtx pat;
13373 machine_mode tmode = insn_data[icode].operand[0].mode;
13374
13375 if (icode == CODE_FOR_nothing)
13376 /* Builtin not supported on this processor. */
13377 return 0;
13378
13379 if (icode == CODE_FOR_rs6000_mffsl
13380 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13381 {
13382 error ("%<__builtin_mffsl%> not supported with %<-msoft-float%>");
13383 return const0_rtx;
13384 }
13385
13386 if (target == 0
13387 || GET_MODE (target) != tmode
13388 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13389 target = gen_reg_rtx (tmode);
13390
13391 pat = GEN_FCN (icode) (target);
13392 if (! pat)
13393 return 0;
13394 emit_insn (pat);
13395
13396 return target;
13397 }
13398
13399
13400 static rtx
13401 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13402 {
13403 rtx pat;
13404 tree arg0 = CALL_EXPR_ARG (exp, 0);
13405 tree arg1 = CALL_EXPR_ARG (exp, 1);
13406 rtx op0 = expand_normal (arg0);
13407 rtx op1 = expand_normal (arg1);
13408 machine_mode mode0 = insn_data[icode].operand[0].mode;
13409 machine_mode mode1 = insn_data[icode].operand[1].mode;
13410
13411 if (icode == CODE_FOR_nothing)
13412 /* Builtin not supported on this processor. */
13413 return 0;
13414
13415 /* If we got invalid arguments bail out before generating bad rtl. */
13416 if (arg0 == error_mark_node || arg1 == error_mark_node)
13417 return const0_rtx;
13418
13419 if (!CONST_INT_P (op0)
13420 || INTVAL (op0) > 255
13421 || INTVAL (op0) < 0)
13422 {
13423 error ("argument 1 must be an 8-bit field value");
13424 return const0_rtx;
13425 }
13426
13427 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13428 op0 = copy_to_mode_reg (mode0, op0);
13429
13430 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13431 op1 = copy_to_mode_reg (mode1, op1);
13432
13433 pat = GEN_FCN (icode) (op0, op1);
13434 if (!pat)
13435 return const0_rtx;
13436 emit_insn (pat);
13437
13438 return NULL_RTX;
13439 }
13440
13441 static rtx
13442 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13443 {
13444 rtx pat;
13445 tree arg0 = CALL_EXPR_ARG (exp, 0);
13446 rtx op0 = expand_normal (arg0);
13447
13448 if (icode == CODE_FOR_nothing)
13449 /* Builtin not supported on this processor. */
13450 return 0;
13451
13452 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13453 {
13454 error ("%<__builtin_mtfsb0%> and %<__builtin_mtfsb1%> not supported with "
13455 "%<-msoft-float%>");
13456 return const0_rtx;
13457 }
13458
13459 /* If we got invalid arguments bail out before generating bad rtl. */
13460 if (arg0 == error_mark_node)
13461 return const0_rtx;
13462
13463 /* Only allow bit numbers 0 to 31. */
13464 if (!u5bit_cint_operand (op0, VOIDmode))
13465 {
13466 error ("Argument must be a constant between 0 and 31.");
13467 return const0_rtx;
13468 }
13469
13470 pat = GEN_FCN (icode) (op0);
13471 if (!pat)
13472 return const0_rtx;
13473 emit_insn (pat);
13474
13475 return NULL_RTX;
13476 }
13477
13478 static rtx
13479 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13480 {
13481 rtx pat;
13482 tree arg0 = CALL_EXPR_ARG (exp, 0);
13483 rtx op0 = expand_normal (arg0);
13484 machine_mode mode0 = insn_data[icode].operand[0].mode;
13485
13486 if (icode == CODE_FOR_nothing)
13487 /* Builtin not supported on this processor. */
13488 return 0;
13489
13490 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13491 {
13492 error ("%<__builtin_set_fpscr_rn%> not supported with %<-msoft-float%>");
13493 return const0_rtx;
13494 }
13495
13496 /* If we got invalid arguments bail out before generating bad rtl. */
13497 if (arg0 == error_mark_node)
13498 return const0_rtx;
13499
13500 /* If the argument is a constant, check the range. The argument can only
13501 be a 2-bit value. Unfortunately, we can't check the range of the value
13502 at compile time if the argument is a variable. The least significant
13503 two bits of the argument, regardless of type, are used to set the
13504 rounding mode. All other bits are ignored. */
13505 if (CONST_INT_P (op0) && !const_0_to_3_operand(op0, VOIDmode))
13506 {
13507 error ("Argument must be a value between 0 and 3.");
13508 return const0_rtx;
13509 }
13510
13511 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13512 op0 = copy_to_mode_reg (mode0, op0);
13513
13514 pat = GEN_FCN (icode) (op0);
13515 if (!pat)
13516 return const0_rtx;
13517 emit_insn (pat);
13518
13519 return NULL_RTX;
13520 }

13521 static rtx
13522 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13523 {
13524 rtx pat;
13525 tree arg0 = CALL_EXPR_ARG (exp, 0);
13526 rtx op0 = expand_normal (arg0);
13527 machine_mode mode0 = insn_data[icode].operand[0].mode;
13528
13529 if (TARGET_32BIT)
13530 /* Builtin not supported in 32-bit mode. */
13531 fatal_error (input_location,
13532 "%<__builtin_set_fpscr_drn%> is not supported "
13533 "in 32-bit mode.");
13534
13535 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13536 {
13537 error ("%<__builtin_set_fpscr_drn%> not supported with %<-msoft-float%>");
13538 return const0_rtx;
13539 }
13540
13541 if (icode == CODE_FOR_nothing)
13542 /* Builtin not supported on this processor. */
13543 return 0;
13544
13545 /* If we got invalid arguments bail out before generating bad rtl. */
13546 if (arg0 == error_mark_node)
13547 return const0_rtx;
13548
13549 /* If the argument is a constant, check the range. The argument can only
13550 be a 3-bit value. Unfortunately, we can't check the range of the value
13551 at compile time if the argument is a variable. The least significant
13552 three bits of the argument, regardless of type, are used to set the
13553 decimal rounding mode. All other bits are ignored. */
13554 if (CONST_INT_P (op0) && !const_0_to_7_operand(op0, VOIDmode))
13555 {
13556 error ("Argument must be a value between 0 and 7.");
13557 return const0_rtx;
13558 }
13559
13560 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13561 op0 = copy_to_mode_reg (mode0, op0);
13562
13563 pat = GEN_FCN (icode) (op0);
13564 if (! pat)
13565 return const0_rtx;
13566 emit_insn (pat);
13567
13568 return NULL_RTX;
13569 }
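
/* User-level sketch of the two builtins expanded above (values are
   illustrative; as described, only the low 2 or 3 bits are used):

     __builtin_set_fpscr_rn (3);    // set the FPSCR RN field to 3
     __builtin_set_fpscr_drn (2);   // set the FPSCR DRN field to 2

   Non-constant arguments are accepted; the range can only be checked
   at compile time for constants.  */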
13570
13571 static rtx
13572 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13573 {
13574 rtx pat;
13575 tree arg0 = CALL_EXPR_ARG (exp, 0);
13576 rtx op0 = expand_normal (arg0);
13577 machine_mode tmode = insn_data[icode].operand[0].mode;
13578 machine_mode mode0 = insn_data[icode].operand[1].mode;
13579
13580 if (icode == CODE_FOR_nothing)
13581 /* Builtin not supported on this processor. */
13582 return 0;
13583
13584 /* If we got invalid arguments bail out before generating bad rtl. */
13585 if (arg0 == error_mark_node)
13586 return const0_rtx;
13587
13588 if (icode == CODE_FOR_altivec_vspltisb
13589 || icode == CODE_FOR_altivec_vspltish
13590 || icode == CODE_FOR_altivec_vspltisw)
13591 {
13592 /* Only allow 5-bit *signed* literals. */
13593 if (!CONST_INT_P (op0)
13594 || INTVAL (op0) > 15
13595 || INTVAL (op0) < -16)
13596 {
13597 error ("argument 1 must be a 5-bit signed literal");
13598 return CONST0_RTX (tmode);
13599 }
13600 }
13601
13602 if (target == 0
13603 || GET_MODE (target) != tmode
13604 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13605 target = gen_reg_rtx (tmode);
13606
13607 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13608 op0 = copy_to_mode_reg (mode0, op0);
13609
13610 pat = GEN_FCN (icode) (target, op0);
13611 if (! pat)
13612 return 0;
13613 emit_insn (pat);
13614
13615 return target;
13616 }
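
/* For example (user level, illustrative): the AltiVec splat-immediate
   builtins routed through here take a 5-bit signed literal, so

     vector signed char v = __builtin_altivec_vspltisb (-16);  // ok
     vector signed char w = __builtin_altivec_vspltisb (31);   // error

   the second call is rejected with "argument 1 must be a 5-bit signed
   literal" instead of generating bad RTL.  */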
13617
13618 static rtx
13619 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13620 {
13621 rtx pat, scratch1, scratch2;
13622 tree arg0 = CALL_EXPR_ARG (exp, 0);
13623 rtx op0 = expand_normal (arg0);
13624 machine_mode tmode = insn_data[icode].operand[0].mode;
13625 machine_mode mode0 = insn_data[icode].operand[1].mode;
13626
13627 /* If we have invalid arguments, bail out before generating bad rtl. */
13628 if (arg0 == error_mark_node)
13629 return const0_rtx;
13630
13631 if (target == 0
13632 || GET_MODE (target) != tmode
13633 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13634 target = gen_reg_rtx (tmode);
13635
13636 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13637 op0 = copy_to_mode_reg (mode0, op0);
13638
13639 scratch1 = gen_reg_rtx (mode0);
13640 scratch2 = gen_reg_rtx (mode0);
13641
13642 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13643 if (! pat)
13644 return 0;
13645 emit_insn (pat);
13646
13647 return target;
13648 }
13649
13650 static rtx
13651 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13652 {
13653 rtx pat;
13654 tree arg0 = CALL_EXPR_ARG (exp, 0);
13655 tree arg1 = CALL_EXPR_ARG (exp, 1);
13656 rtx op0 = expand_normal (arg0);
13657 rtx op1 = expand_normal (arg1);
13658 machine_mode tmode = insn_data[icode].operand[0].mode;
13659 machine_mode mode0 = insn_data[icode].operand[1].mode;
13660 machine_mode mode1 = insn_data[icode].operand[2].mode;
13661
13662 if (icode == CODE_FOR_nothing)
13663 /* Builtin not supported on this processor. */
13664 return 0;
13665
13666 /* If we got invalid arguments bail out before generating bad rtl. */
13667 if (arg0 == error_mark_node || arg1 == error_mark_node)
13668 return const0_rtx;
13669
13670 if (icode == CODE_FOR_unpackv1ti
13671 || icode == CODE_FOR_unpackkf
13672 || icode == CODE_FOR_unpacktf
13673 || icode == CODE_FOR_unpackif
13674 || icode == CODE_FOR_unpacktd)
13675 {
13676 /* Only allow 1-bit unsigned literals. */
13677 STRIP_NOPS (arg1);
13678 if (TREE_CODE (arg1) != INTEGER_CST
13679 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13680 {
13681 error ("argument 2 must be a 1-bit unsigned literal");
13682 return CONST0_RTX (tmode);
13683 }
13684 }
13685 else if (icode == CODE_FOR_altivec_vspltw)
13686 {
13687 /* Only allow 2-bit unsigned literals. */
13688 STRIP_NOPS (arg1);
13689 if (TREE_CODE (arg1) != INTEGER_CST
13690 || TREE_INT_CST_LOW (arg1) & ~3)
13691 {
13692 error ("argument 2 must be a 2-bit unsigned literal");
13693 return CONST0_RTX (tmode);
13694 }
13695 }
13696 else if (icode == CODE_FOR_altivec_vsplth)
13697 {
13698 /* Only allow 3-bit unsigned literals. */
13699 STRIP_NOPS (arg1);
13700 if (TREE_CODE (arg1) != INTEGER_CST
13701 || TREE_INT_CST_LOW (arg1) & ~7)
13702 {
13703 error ("argument 2 must be a 3-bit unsigned literal");
13704 return CONST0_RTX (tmode);
13705 }
13706 }
13707 else if (icode == CODE_FOR_altivec_vspltb)
13708 {
13709 /* Only allow 4-bit unsigned literals. */
13710 STRIP_NOPS (arg1);
13711 if (TREE_CODE (arg1) != INTEGER_CST
13712 || TREE_INT_CST_LOW (arg1) & ~15)
13713 {
13714 error ("argument 2 must be a 4-bit unsigned literal");
13715 return CONST0_RTX (tmode);
13716 }
13717 }
13718 else if (icode == CODE_FOR_altivec_vcfux
13719 || icode == CODE_FOR_altivec_vcfsx
13720 || icode == CODE_FOR_altivec_vctsxs
13721 || icode == CODE_FOR_altivec_vctuxs)
13722 {
13723 /* Only allow 5-bit unsigned literals. */
13724 STRIP_NOPS (arg1);
13725 if (TREE_CODE (arg1) != INTEGER_CST
13726 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13727 {
13728 error ("argument 2 must be a 5-bit unsigned literal");
13729 return CONST0_RTX (tmode);
13730 }
13731 }
13732 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13733 || icode == CODE_FOR_dfptstsfi_lt_dd
13734 || icode == CODE_FOR_dfptstsfi_gt_dd
13735 || icode == CODE_FOR_dfptstsfi_unordered_dd
13736 || icode == CODE_FOR_dfptstsfi_eq_td
13737 || icode == CODE_FOR_dfptstsfi_lt_td
13738 || icode == CODE_FOR_dfptstsfi_gt_td
13739 || icode == CODE_FOR_dfptstsfi_unordered_td)
13740 {
13741 /* Only allow 6-bit unsigned literals. */
13742 STRIP_NOPS (arg0);
13743 if (TREE_CODE (arg0) != INTEGER_CST
13744 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13745 {
13746 error ("argument 1 must be a 6-bit unsigned literal");
13747 return CONST0_RTX (tmode);
13748 }
13749 }
13750 else if (icode == CODE_FOR_xststdcqp_kf
13751 || icode == CODE_FOR_xststdcqp_tf
13752 || icode == CODE_FOR_xststdcdp
13753 || icode == CODE_FOR_xststdcsp
13754 || icode == CODE_FOR_xvtstdcdp
13755 || icode == CODE_FOR_xvtstdcsp)
13756 {
13757 /* Only allow 7-bit unsigned literals. */
13758 STRIP_NOPS (arg1);
13759 if (TREE_CODE (arg1) != INTEGER_CST
13760 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13761 {
13762 error ("argument 2 must be a 7-bit unsigned literal");
13763 return CONST0_RTX (tmode);
13764 }
13765 }
13766
13767 if (target == 0
13768 || GET_MODE (target) != tmode
13769 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13770 target = gen_reg_rtx (tmode);
13771
13772 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13773 op0 = copy_to_mode_reg (mode0, op0);
13774 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13775 op1 = copy_to_mode_reg (mode1, op1);
13776
13777 pat = GEN_FCN (icode) (target, op0, op1);
13778 if (! pat)
13779 return 0;
13780 emit_insn (pat);
13781
13782 return target;
13783 }
13784
13785 static rtx
13786 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13787 {
13788 rtx pat, scratch;
13789 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13790 tree arg0 = CALL_EXPR_ARG (exp, 1);
13791 tree arg1 = CALL_EXPR_ARG (exp, 2);
13792 rtx op0 = expand_normal (arg0);
13793 rtx op1 = expand_normal (arg1);
13794 machine_mode tmode = SImode;
13795 machine_mode mode0 = insn_data[icode].operand[1].mode;
13796 machine_mode mode1 = insn_data[icode].operand[2].mode;
13797 int cr6_form_int;
13798
13799 if (TREE_CODE (cr6_form) != INTEGER_CST)
13800 {
13801 error ("argument 1 of %qs must be a constant",
13802 "__builtin_altivec_predicate");
13803 return const0_rtx;
13804 }
13805 else
13806 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13807
13808 gcc_assert (mode0 == mode1);
13809
13810 /* If we have invalid arguments, bail out before generating bad rtl. */
13811 if (arg0 == error_mark_node || arg1 == error_mark_node)
13812 return const0_rtx;
13813
13814 if (target == 0
13815 || GET_MODE (target) != tmode
13816 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13817 target = gen_reg_rtx (tmode);
13818
13819 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13820 op0 = copy_to_mode_reg (mode0, op0);
13821 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13822 op1 = copy_to_mode_reg (mode1, op1);
13823
13824 /* Note that for many of the relevant operations (e.g. cmpne or
13825 cmpeq) with float or double operands, it would make more sense for
13826 the mode of the allocated scratch register to be a vector of
13827 integers. But the choice to copy the mode of operand 0 was made
13828 long ago and there are no plans to change it. */
13829 scratch = gen_reg_rtx (mode0);
13830
13831 pat = GEN_FCN (icode) (scratch, op0, op1);
13832 if (! pat)
13833 return 0;
13834 emit_insn (pat);
13835
13836 /* The vec_any* and vec_all* predicates use the same opcodes for two
13837 different operations, but the bits in CR6 will be different
13838 depending on what information we want. So we have to play tricks
13839 with CR6 to get the right bits out.
13840
13841 If you think this is disgusting, look at the specs for the
13842 AltiVec predicates. */
13843
13844 switch (cr6_form_int)
13845 {
13846 case 0:
13847 emit_insn (gen_cr6_test_for_zero (target));
13848 break;
13849 case 1:
13850 emit_insn (gen_cr6_test_for_zero_reverse (target));
13851 break;
13852 case 2:
13853 emit_insn (gen_cr6_test_for_lt (target));
13854 break;
13855 case 3:
13856 emit_insn (gen_cr6_test_for_lt_reverse (target));
13857 break;
13858 default:
13859 error ("argument 1 of %qs is out of range",
13860 "__builtin_altivec_predicate");
13861 break;
13862 }
13863
13864 return target;
13865 }
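
/* An illustrative sketch of how the cr6_form values above are used.
   The mapping shown here is an assumption for exposition; altivec.h
   holds the authoritative definitions:

     vec_all_eq (a, b)  =>  predicate built-in with cr6_form 2
     vec_any_eq (a, b)  =>  predicate built-in with cr6_form 1

   Form 2 tests the CR6 "all elements compared true" bit directly,
   while form 1 tests the complement of the "no elements compared
   true" bit.  */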
13866
13867 rtx
13868 swap_endian_selector_for_mode (machine_mode mode)
13869 {
13870 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13871 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13872 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13873 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13874
13875 unsigned int *swaparray, i;
13876 rtx perm[16];
13877
13878 switch (mode)
13879 {
13880 case E_V1TImode:
13881 swaparray = swap1;
13882 break;
13883 case E_V2DFmode:
13884 case E_V2DImode:
13885 swaparray = swap2;
13886 break;
13887 case E_V4SFmode:
13888 case E_V4SImode:
13889 swaparray = swap4;
13890 break;
13891 case E_V8HImode:
13892 swaparray = swap8;
13893 break;
13894 default:
13895 gcc_unreachable ();
13896 }
13897
13898 for (i = 0; i < 16; ++i)
13899 perm[i] = GEN_INT (swaparray[i]);
13900
13901 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13902 gen_rtvec_v (16, perm)));
13903 }
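
/* For example (illustrative): for V4SImode the selector returned is
   the V16QI constant { 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12 },
   a vperm control vector that reverses the bytes within each 4-byte
   element and thus flips every element between big-endian and
   little-endian byte order.  */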
13904
13905 static rtx
13906 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13907 {
13908 rtx pat, addr;
13909 tree arg0 = CALL_EXPR_ARG (exp, 0);
13910 tree arg1 = CALL_EXPR_ARG (exp, 1);
13911 machine_mode tmode = insn_data[icode].operand[0].mode;
13912 machine_mode mode0 = Pmode;
13913 machine_mode mode1 = Pmode;
13914 rtx op0 = expand_normal (arg0);
13915 rtx op1 = expand_normal (arg1);
13916
13917 if (icode == CODE_FOR_nothing)
13918 /* Builtin not supported on this processor. */
13919 return 0;
13920
13921 /* If we got invalid arguments bail out before generating bad rtl. */
13922 if (arg0 == error_mark_node || arg1 == error_mark_node)
13923 return const0_rtx;
13924
13925 if (target == 0
13926 || GET_MODE (target) != tmode
13927 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13928 target = gen_reg_rtx (tmode);
13929
13930 op1 = copy_to_mode_reg (mode1, op1);
13931
13932 /* For LVX, express the RTL accurately by ANDing the address with -16.
13933 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13934 so the raw address is fine. */
13935 if (icode == CODE_FOR_altivec_lvx_v1ti
13936 || icode == CODE_FOR_altivec_lvx_v2df
13937 || icode == CODE_FOR_altivec_lvx_v2di
13938 || icode == CODE_FOR_altivec_lvx_v4sf
13939 || icode == CODE_FOR_altivec_lvx_v4si
13940 || icode == CODE_FOR_altivec_lvx_v8hi
13941 || icode == CODE_FOR_altivec_lvx_v16qi)
13942 {
13943 rtx rawaddr;
13944 if (op0 == const0_rtx)
13945 rawaddr = op1;
13946 else
13947 {
13948 op0 = copy_to_mode_reg (mode0, op0);
13949 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13950 }
13951 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13952 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13953
13954 emit_insn (gen_rtx_SET (target, addr));
13955 }
13956 else
13957 {
13958 if (op0 == const0_rtx)
13959 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13960 else
13961 {
13962 op0 = copy_to_mode_reg (mode0, op0);
13963 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13964 gen_rtx_PLUS (Pmode, op1, op0));
13965 }
13966
13967 pat = GEN_FCN (icode) (target, addr);
13968 if (! pat)
13969 return 0;
13970 emit_insn (pat);
13971 }
13972
13973 return target;
13974 }
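
/* To illustrate the masking above: the lvx instruction ignores the
   low four bits of its effective address, so a user-level call such
   as (example usage, assumed for exposition)

     v = vec_ld (7, ptr);   // EA is ptr + 7

   loads the 16 aligned bytes at (ptr + 7) & -16.  Emitting the AND
   explicitly in the RTL makes that behavior visible to the RTL
   optimizers instead of hiding it behind an UNSPEC.  */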
13975
13976 static rtx
13977 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13978 {
13979 rtx pat;
13980 tree arg0 = CALL_EXPR_ARG (exp, 0);
13981 tree arg1 = CALL_EXPR_ARG (exp, 1);
13982 tree arg2 = CALL_EXPR_ARG (exp, 2);
13983 rtx op0 = expand_normal (arg0);
13984 rtx op1 = expand_normal (arg1);
13985 rtx op2 = expand_normal (arg2);
13986 machine_mode mode0 = insn_data[icode].operand[0].mode;
13987 machine_mode mode1 = insn_data[icode].operand[1].mode;
13988 machine_mode mode2 = insn_data[icode].operand[2].mode;
13989
13990 if (icode == CODE_FOR_nothing)
13991 /* Builtin not supported on this processor. */
13992 return NULL_RTX;
13993
13994 /* If we got invalid arguments bail out before generating bad rtl. */
13995 if (arg0 == error_mark_node
13996 || arg1 == error_mark_node
13997 || arg2 == error_mark_node)
13998 return NULL_RTX;
13999
14000 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14001 op0 = copy_to_mode_reg (mode0, op0);
14002 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14003 op1 = copy_to_mode_reg (mode1, op1);
14004 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14005 op2 = copy_to_mode_reg (mode2, op2);
14006
14007 pat = GEN_FCN (icode) (op0, op1, op2);
14008 if (pat)
14009 emit_insn (pat);
14010
14011 return NULL_RTX;
14012 }
14013
14014 static rtx
14015 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14016 {
14017 tree arg0 = CALL_EXPR_ARG (exp, 0);
14018 tree arg1 = CALL_EXPR_ARG (exp, 1);
14019 tree arg2 = CALL_EXPR_ARG (exp, 2);
14020 rtx op0 = expand_normal (arg0);
14021 rtx op1 = expand_normal (arg1);
14022 rtx op2 = expand_normal (arg2);
14023 rtx pat, addr, rawaddr;
14024 machine_mode tmode = insn_data[icode].operand[0].mode;
14025 machine_mode smode = insn_data[icode].operand[1].mode;
14026 machine_mode mode1 = Pmode;
14027 machine_mode mode2 = Pmode;
14028
14029 /* If we got invalid arguments, bail out before generating bad rtl. */
14030 if (arg0 == error_mark_node
14031 || arg1 == error_mark_node
14032 || arg2 == error_mark_node)
14033 return const0_rtx;
14034
14035 op2 = copy_to_mode_reg (mode2, op2);
14036
14037 /* For STVX, express the RTL accurately by ANDing the address with -16.
14038 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14039 so the raw address is fine. */
14040 if (icode == CODE_FOR_altivec_stvx_v2df
14041 || icode == CODE_FOR_altivec_stvx_v2di
14042 || icode == CODE_FOR_altivec_stvx_v4sf
14043 || icode == CODE_FOR_altivec_stvx_v4si
14044 || icode == CODE_FOR_altivec_stvx_v8hi
14045 || icode == CODE_FOR_altivec_stvx_v16qi)
14046 {
14047 if (op1 == const0_rtx)
14048 rawaddr = op2;
14049 else
14050 {
14051 op1 = copy_to_mode_reg (mode1, op1);
14052 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14053 }
14054
14055 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14056 addr = gen_rtx_MEM (tmode, addr);
14057
14058 op0 = copy_to_mode_reg (tmode, op0);
14059
14060 emit_insn (gen_rtx_SET (addr, op0));
14061 }
14062 else
14063 {
14064 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14065 op0 = copy_to_mode_reg (smode, op0);
14066
14067 if (op1 == const0_rtx)
14068 addr = gen_rtx_MEM (tmode, op2);
14069 else
14070 {
14071 op1 = copy_to_mode_reg (mode1, op1);
14072 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14073 }
14074
14075 pat = GEN_FCN (icode) (addr, op0);
14076 if (pat)
14077 emit_insn (pat);
14078 }
14079
14080 return NULL_RTX;
14081 }
14082
14083 /* Return the appropriate SPR number associated with the given builtin. */
14084 static inline HOST_WIDE_INT
14085 htm_spr_num (enum rs6000_builtins code)
14086 {
14087 if (code == HTM_BUILTIN_GET_TFHAR
14088 || code == HTM_BUILTIN_SET_TFHAR)
14089 return TFHAR_SPR;
14090 else if (code == HTM_BUILTIN_GET_TFIAR
14091 || code == HTM_BUILTIN_SET_TFIAR)
14092 return TFIAR_SPR;
14093 else if (code == HTM_BUILTIN_GET_TEXASR
14094 || code == HTM_BUILTIN_SET_TEXASR)
14095 return TEXASR_SPR;
14096 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14097 || code == HTM_BUILTIN_SET_TEXASRU);
14098 return TEXASRU_SPR;
14099 }
14100
14101 /* Return the appropriate SPR regno associated with the given builtin. */
14102 static inline HOST_WIDE_INT
14103 htm_spr_regno (enum rs6000_builtins code)
14104 {
14105 if (code == HTM_BUILTIN_GET_TFHAR
14106 || code == HTM_BUILTIN_SET_TFHAR)
14107 return TFHAR_REGNO;
14108 else if (code == HTM_BUILTIN_GET_TFIAR
14109 || code == HTM_BUILTIN_SET_TFIAR)
14110 return TFIAR_REGNO;
14111 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14112 || code == HTM_BUILTIN_SET_TEXASR
14113 || code == HTM_BUILTIN_GET_TEXASRU
14114 || code == HTM_BUILTIN_SET_TEXASRU);
14115 return TEXASR_REGNO;
14116 }
14117
14118 /* Return the correct ICODE value depending on whether we are
14119 setting or reading the HTM SPRs. */
14120 static inline enum insn_code
14121 rs6000_htm_spr_icode (bool nonvoid)
14122 {
14123 if (nonvoid)
14124 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14125 else
14126 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14127 }
14128
14129 /* Expand the HTM builtin in EXP and store the result in TARGET.
14130 Store true in *EXPANDEDP if we found a builtin to expand. */
14131 static rtx
14132 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14133 {
14134 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14135 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14136 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14137 const struct builtin_description *d;
14138 size_t i;
14139
14140 *expandedp = true;
14141
14142 if (!TARGET_POWERPC64
14143 && (fcode == HTM_BUILTIN_TABORTDC
14144 || fcode == HTM_BUILTIN_TABORTDCI))
14145 {
14146 size_t uns_fcode = (size_t) fcode;
14147 const char *name = rs6000_builtin_info[uns_fcode].name;
14148 error ("builtin %qs is only valid in 64-bit mode", name);
14149 return const0_rtx;
14150 }
14151
14152 /* Expand the HTM builtins. */
14153 d = bdesc_htm;
14154 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14155 if (d->code == fcode)
14156 {
14157 rtx op[MAX_HTM_OPERANDS], pat;
14158 int nopnds = 0;
14159 tree arg;
14160 call_expr_arg_iterator iter;
14161 unsigned attr = rs6000_builtin_info[fcode].attr;
14162 enum insn_code icode = d->icode;
14163 const struct insn_operand_data *insn_op;
14164 bool uses_spr = (attr & RS6000_BTC_SPR);
14165 rtx cr = NULL_RTX;
14166
14167 if (uses_spr)
14168 icode = rs6000_htm_spr_icode (nonvoid);
14169 insn_op = &insn_data[icode].operand[0];
14170
14171 if (nonvoid)
14172 {
14173 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14174 if (!target
14175 || GET_MODE (target) != tmode
14176 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14177 target = gen_reg_rtx (tmode);
14178 if (uses_spr)
14179 op[nopnds++] = target;
14180 }
14181
14182 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14183 {
14184 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14185 return const0_rtx;
14186
14187 insn_op = &insn_data[icode].operand[nopnds];
14188
14189 op[nopnds] = expand_normal (arg);
14190
14191 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14192 {
14193 if (!strcmp (insn_op->constraint, "n"))
14194 {
14195 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14196 if (!CONST_INT_P (op[nopnds]))
14197 error ("argument %d must be an unsigned literal", arg_num);
14198 else
14199 error ("argument %d is an unsigned literal that is "
14200 "out of range", arg_num);
14201 return const0_rtx;
14202 }
14203 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14204 }
14205
14206 nopnds++;
14207 }
14208
14209 /* Handle the builtins for extended mnemonics. These accept
14210 no arguments, but map to builtins that take arguments. */
14211 switch (fcode)
14212 {
14213 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14214 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14215 op[nopnds++] = GEN_INT (1);
14216 if (flag_checking)
14217 attr |= RS6000_BTC_UNARY;
14218 break;
14219 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14220 op[nopnds++] = GEN_INT (0);
14221 if (flag_checking)
14222 attr |= RS6000_BTC_UNARY;
14223 break;
14224 default:
14225 break;
14226 }
14227
14228 /* If this builtin accesses SPRs, then pass in the appropriate
14229 SPR number and SPR regno as the last two operands. */
14230 if (uses_spr)
14231 {
14232 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14233 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14234 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14235 }
14236 /* If this builtin accesses a CR, then pass in a scratch
14237 CR as the last operand. */
14238 else if (attr & RS6000_BTC_CR)
14239 {
14239 cr = gen_reg_rtx (CCmode);
14240 op[nopnds++] = cr;
14241 }
14242
14243 if (flag_checking)
14244 {
14245 int expected_nopnds = 0;
14246 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14247 expected_nopnds = 1;
14248 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14249 expected_nopnds = 2;
14250 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14251 expected_nopnds = 3;
14252 if (!(attr & RS6000_BTC_VOID))
14253 expected_nopnds += 1;
14254 if (uses_spr)
14255 expected_nopnds += 2;
14256
14257 gcc_assert (nopnds == expected_nopnds
14258 && nopnds <= MAX_HTM_OPERANDS);
14259 }
14260
14261 switch (nopnds)
14262 {
14263 case 1:
14264 pat = GEN_FCN (icode) (op[0]);
14265 break;
14266 case 2:
14267 pat = GEN_FCN (icode) (op[0], op[1]);
14268 break;
14269 case 3:
14270 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14271 break;
14272 case 4:
14273 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14274 break;
14275 default:
14276 gcc_unreachable ();
14277 }
14278 if (!pat)
14279 return NULL_RTX;
14280 emit_insn (pat);
14281
14282 if (attr & RS6000_BTC_CR)
14283 {
14284 if (fcode == HTM_BUILTIN_TBEGIN)
14285 {
14286 /* Emit code to set TARGET to true or false depending on
14287 whether the tbegin. instruction succeeded or failed
14288 to start a transaction. We do this by placing the 1's
14289 complement of CR's EQ bit into TARGET. */
14290 rtx scratch = gen_reg_rtx (SImode);
14291 emit_insn (gen_rtx_SET (scratch,
14292 gen_rtx_EQ (SImode, cr,
14293 const0_rtx)));
14294 emit_insn (gen_rtx_SET (target,
14295 gen_rtx_XOR (SImode, scratch,
14296 GEN_INT (1))));
14297 }
14298 else
14299 {
14300 /* Emit code to copy the 4-bit condition register field
14301 CR into the least significant end of register TARGET. */
14302 rtx scratch1 = gen_reg_rtx (SImode);
14303 rtx scratch2 = gen_reg_rtx (SImode);
14304 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14305 emit_insn (gen_movcc (subreg, cr));
14306 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14307 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14308 }
14309 }
14310
14311 if (nonvoid)
14312 return target;
14313 return const0_rtx;
14314 }
14315
14316 *expandedp = false;
14317 return NULL_RTX;
14318 }
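
/* A sketch of the RS6000_BTC_CR handling above for the tbegin case
   (the user-level usage is an example, not taken from this file):

     if (__builtin_tbegin (0))
       ... transaction started ...

   The generated RTL is roughly

     scratch = (cr == 0);    // extract the EQ bit of the CR field
     target  = scratch ^ 1;  // one's complement gives the result

   so TARGET holds the one's complement of CR's EQ bit, as described
   in the comment above.  */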
14319
14320 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14321
14322 static rtx
14323 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14324 rtx target)
14325 {
14326 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14327 if (fcode == RS6000_BUILTIN_CPU_INIT)
14328 return const0_rtx;
14329
14330 if (target == 0 || GET_MODE (target) != SImode)
14331 target = gen_reg_rtx (SImode);
14332
14333 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14334 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14335 /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back
14336 to a STRING_CST. */
14337 if (TREE_CODE (arg) == ARRAY_REF
14338 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14339 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14340 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14341 arg = TREE_OPERAND (arg, 0);
14342
14343 if (TREE_CODE (arg) != STRING_CST)
14344 {
14345 error ("builtin %qs only accepts a string argument",
14346 rs6000_builtin_info[(size_t) fcode].name);
14347 return const0_rtx;
14348 }
14349
14350 if (fcode == RS6000_BUILTIN_CPU_IS)
14351 {
14352 const char *cpu = TREE_STRING_POINTER (arg);
14353 rtx cpuid = NULL_RTX;
14354 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14355 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14356 {
14357 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14358 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14359 break;
14360 }
14361 if (cpuid == NULL_RTX)
14362 {
14363 /* Invalid CPU argument. */
14364 error ("cpu %qs is an invalid argument to builtin %qs",
14365 cpu, rs6000_builtin_info[(size_t) fcode].name);
14366 return const0_rtx;
14367 }
14368
14369 rtx platform = gen_reg_rtx (SImode);
14370 rtx tcbmem = gen_const_mem (SImode,
14371 gen_rtx_PLUS (Pmode,
14372 gen_rtx_REG (Pmode, TLS_REGNUM),
14373 GEN_INT (TCB_PLATFORM_OFFSET)));
14374 emit_move_insn (platform, tcbmem);
14375 emit_insn (gen_eqsi3 (target, platform, cpuid));
14376 }
14377 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14378 {
14379 const char *hwcap = TREE_STRING_POINTER (arg);
14380 rtx mask = NULL_RTX;
14381 int hwcap_offset;
14382 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14383 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14384 {
14385 mask = GEN_INT (cpu_supports_info[i].mask);
14386 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14387 break;
14388 }
14389 if (mask == NULL_RTX)
14390 {
14391 /* Invalid HWCAP argument. */
14392 error ("%s %qs is an invalid argument to builtin %qs",
14393 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14394 return const0_rtx;
14395 }
14396
14397 rtx tcb_hwcap = gen_reg_rtx (SImode);
14398 rtx tcbmem = gen_const_mem (SImode,
14399 gen_rtx_PLUS (Pmode,
14400 gen_rtx_REG (Pmode, TLS_REGNUM),
14401 GEN_INT (hwcap_offset)));
14402 emit_move_insn (tcb_hwcap, tcbmem);
14403 rtx scratch1 = gen_reg_rtx (SImode);
14404 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14405 rtx scratch2 = gen_reg_rtx (SImode);
14406 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14407 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14408 }
14409 else
14410 gcc_unreachable ();
14411
14412 /* Record that we have expanded a CPU builtin, so that we can later
14413 emit a reference to the special symbol exported by LIBC to ensure we
14414 do not link against an old LIBC that doesn't support this feature. */
14415 cpu_builtin_p = true;
14416
14417 #else
14418 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14419 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14420
14421 /* For old LIBCs, always return FALSE. */
14422 emit_move_insn (target, GEN_INT (0));
14423 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14424
14425 return target;
14426 }
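
/* Illustrative expansion of the TCB path above (the user-level call
   is an example; mask and hwcap_offset come from the matching
   cpu_supports_info entry):

     __builtin_cpu_supports ("vsx")

   becomes, in effect,

     word = *(unsigned int *) (thread_pointer + hwcap_offset);
     target = (word & mask) != 0;

   where thread_pointer stands for the register TLS_REGNUM.  */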
14427
14428 static rtx
14429 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14430 {
14431 rtx pat;
14432 tree arg0 = CALL_EXPR_ARG (exp, 0);
14433 tree arg1 = CALL_EXPR_ARG (exp, 1);
14434 tree arg2 = CALL_EXPR_ARG (exp, 2);
14435 rtx op0 = expand_normal (arg0);
14436 rtx op1 = expand_normal (arg1);
14437 rtx op2 = expand_normal (arg2);
14438 machine_mode tmode = insn_data[icode].operand[0].mode;
14439 machine_mode mode0 = insn_data[icode].operand[1].mode;
14440 machine_mode mode1 = insn_data[icode].operand[2].mode;
14441 machine_mode mode2 = insn_data[icode].operand[3].mode;
14442
14443 if (icode == CODE_FOR_nothing)
14444 /* Builtin not supported on this processor. */
14445 return 0;
14446
14447 /* If we got invalid arguments bail out before generating bad rtl. */
14448 if (arg0 == error_mark_node
14449 || arg1 == error_mark_node
14450 || arg2 == error_mark_node)
14451 return const0_rtx;
14452
14453 /* Check and prepare arguments depending on the instruction code.
14454
14455 Note that a switch statement instead of the sequence of tests
14456 would be incorrect as many of the CODE_FOR values could be
14457 CODE_FOR_nothing and that would yield multiple alternatives
14458 with identical values. We'd never reach here at runtime in
14459 this case. */
14460 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14461 || icode == CODE_FOR_altivec_vsldoi_v2df
14462 || icode == CODE_FOR_altivec_vsldoi_v4si
14463 || icode == CODE_FOR_altivec_vsldoi_v8hi
14464 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14465 {
14466 /* Only allow 4-bit unsigned literals. */
14467 STRIP_NOPS (arg2);
14468 if (TREE_CODE (arg2) != INTEGER_CST
14469 || TREE_INT_CST_LOW (arg2) & ~0xf)
14470 {
14471 error ("argument 3 must be a 4-bit unsigned literal");
14472 return CONST0_RTX (tmode);
14473 }
14474 }
14475 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14476 || icode == CODE_FOR_vsx_xxpermdi_v2di
14477 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14478 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14479 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14480 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14481 || icode == CODE_FOR_vsx_xxpermdi_v4si
14482 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14483 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14484 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14485 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14486 || icode == CODE_FOR_vsx_xxsldwi_v4si
14487 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14488 || icode == CODE_FOR_vsx_xxsldwi_v2di
14489 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14490 {
14491 /* Only allow 2-bit unsigned literals. */
14492 STRIP_NOPS (arg2);
14493 if (TREE_CODE (arg2) != INTEGER_CST
14494 || TREE_INT_CST_LOW (arg2) & ~0x3)
14495 {
14496 error ("argument 3 must be a 2-bit unsigned literal");
14497 return CONST0_RTX (tmode);
14498 }
14499 }
14500 else if (icode == CODE_FOR_vsx_set_v2df
14501 || icode == CODE_FOR_vsx_set_v2di
14502 || icode == CODE_FOR_bcdadd
14503 || icode == CODE_FOR_bcdadd_lt
14504 || icode == CODE_FOR_bcdadd_eq
14505 || icode == CODE_FOR_bcdadd_gt
14506 || icode == CODE_FOR_bcdsub
14507 || icode == CODE_FOR_bcdsub_lt
14508 || icode == CODE_FOR_bcdsub_eq
14509 || icode == CODE_FOR_bcdsub_gt)
14510 {
14511 /* Only allow 1-bit unsigned literals. */
14512 STRIP_NOPS (arg2);
14513 if (TREE_CODE (arg2) != INTEGER_CST
14514 || TREE_INT_CST_LOW (arg2) & ~0x1)
14515 {
14516 error ("argument 3 must be a 1-bit unsigned literal");
14517 return CONST0_RTX (tmode);
14518 }
14519 }
14520 else if (icode == CODE_FOR_dfp_ddedpd_dd
14521 || icode == CODE_FOR_dfp_ddedpd_td)
14522 {
14523 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14524 STRIP_NOPS (arg0);
14525 if (TREE_CODE (arg0) != INTEGER_CST
14526 || TREE_INT_CST_LOW (arg0) & ~0x3)
14527 {
14528 error ("argument 1 must be 0 or 2");
14529 return CONST0_RTX (tmode);
14530 }
14531 }
14532 else if (icode == CODE_FOR_dfp_denbcd_dd
14533 || icode == CODE_FOR_dfp_denbcd_td)
14534 {
14535 /* Only allow 1-bit unsigned literals. */
14536 STRIP_NOPS (arg0);
14537 if (TREE_CODE (arg0) != INTEGER_CST
14538 || TREE_INT_CST_LOW (arg0) & ~0x1)
14539 {
14540 error ("argument 1 must be a 1-bit unsigned literal");
14541 return CONST0_RTX (tmode);
14542 }
14543 }
14544 else if (icode == CODE_FOR_dfp_dscli_dd
14545 || icode == CODE_FOR_dfp_dscli_td
14546 || icode == CODE_FOR_dfp_dscri_dd
14547 || icode == CODE_FOR_dfp_dscri_td)
14548 {
14549 /* Only allow 6-bit unsigned literals. */
14550 STRIP_NOPS (arg1);
14551 if (TREE_CODE (arg1) != INTEGER_CST
14552 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14553 {
14554 error ("argument 2 must be a 6-bit unsigned literal");
14555 return CONST0_RTX (tmode);
14556 }
14557 }
14558 else if (icode == CODE_FOR_crypto_vshasigmaw
14559 || icode == CODE_FOR_crypto_vshasigmad)
14560 {
14561 /* Check whether the 2nd and 3rd arguments are integer constants in
14562 range, and prepare the arguments. */
14563 STRIP_NOPS (arg1);
14564 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14565 {
14566 error ("argument 2 must be 0 or 1");
14567 return CONST0_RTX (tmode);
14568 }
14569
14570 STRIP_NOPS (arg2);
14571 if (TREE_CODE (arg2) != INTEGER_CST
14572 || wi::geu_p (wi::to_wide (arg2), 16))
14573 {
14574 error ("argument 3 must be in the range 0..15");
14575 return CONST0_RTX (tmode);
14576 }
14577 }
14578
14579 if (target == 0
14580 || GET_MODE (target) != tmode
14581 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14582 target = gen_reg_rtx (tmode);
14583
14584 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14585 op0 = copy_to_mode_reg (mode0, op0);
14586 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14587 op1 = copy_to_mode_reg (mode1, op1);
14588 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14589 op2 = copy_to_mode_reg (mode2, op2);
14590
14591 pat = GEN_FCN (icode) (target, op0, op1, op2);
14592 if (! pat)
14593 return 0;
14594 emit_insn (pat);
14595
14596 return target;
14597 }
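
/* For example (illustrative, assuming the vec_xxpermdi spelling from
   altivec.h): the xxpermdi selector must be a compile-time literal,
   so

     r = vec_xxpermdi (a, b, 2);   // accepted: 2-bit unsigned literal
     r = vec_xxpermdi (a, b, n);   // rejected by the check above

   A variable selector fails the INTEGER_CST test and is diagnosed
   instead of silently generating bad rtl.  */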
14598
14599
14600 /* Expand the dst builtins. */
14601 static rtx
14602 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14603 bool *expandedp)
14604 {
14605 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14606 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14607 tree arg0, arg1, arg2;
14608 machine_mode mode0, mode1;
14609 rtx pat, op0, op1, op2;
14610 const struct builtin_description *d;
14611 size_t i;
14612
14613 *expandedp = false;
14614
14615 /* Handle DST variants. */
14616 d = bdesc_dst;
14617 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14618 if (d->code == fcode)
14619 {
14620 arg0 = CALL_EXPR_ARG (exp, 0);
14621 arg1 = CALL_EXPR_ARG (exp, 1);
14622 arg2 = CALL_EXPR_ARG (exp, 2);
14623 op0 = expand_normal (arg0);
14624 op1 = expand_normal (arg1);
14625 op2 = expand_normal (arg2);
14626 mode0 = insn_data[d->icode].operand[0].mode;
14627 mode1 = insn_data[d->icode].operand[1].mode;
14628
14629 /* Invalid arguments, bail out before generating bad rtl. */
14630 if (arg0 == error_mark_node
14631 || arg1 == error_mark_node
14632 || arg2 == error_mark_node)
14633 return const0_rtx;
14634
14635 *expandedp = true;
14636 STRIP_NOPS (arg2);
14637 if (TREE_CODE (arg2) != INTEGER_CST
14638 || TREE_INT_CST_LOW (arg2) & ~0x3)
14639 {
14640 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14641 return const0_rtx;
14642 }
14643
14644 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14645 op0 = copy_to_mode_reg (Pmode, op0);
14646 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14647 op1 = copy_to_mode_reg (mode1, op1);
14648
14649 pat = GEN_FCN (d->icode) (op0, op1, op2);
14650 if (pat != 0)
14651 emit_insn (pat);
14652
14653 return NULL_RTX;
14654 }
14655
14656 return NULL_RTX;
14657 }
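
/* Example of the literal check above (user-level usage assumed for
   exposition): the data-stream touch built-ins take a 2-bit stream
   tag as their last argument, so

     vec_dst (addr, ctl, 0);   // accepted: tags 0..3 are valid
     vec_dst (addr, ctl, 4);   // rejected: 4 fails the ~0x3 mask test

   and the invalid case is diagnosed before any rtl is generated.  */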
14658
14659 /* Expand vec_init builtin. */
14660 static rtx
14661 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14662 {
14663 machine_mode tmode = TYPE_MODE (type);
14664 machine_mode inner_mode = GET_MODE_INNER (tmode);
14665 int i, n_elt = GET_MODE_NUNITS (tmode);
14666
14667 gcc_assert (VECTOR_MODE_P (tmode));
14668 gcc_assert (n_elt == call_expr_nargs (exp));
14669
14670 if (!target || !register_operand (target, tmode))
14671 target = gen_reg_rtx (tmode);
14672
14673 /* If we have a vector composed of a single element, such as V1TImode, do
14674 the initialization directly. */
14675 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14676 {
14677 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14678 emit_move_insn (target, gen_lowpart (tmode, x));
14679 }
14680 else
14681 {
14682 rtvec v = rtvec_alloc (n_elt);
14683
14684 for (i = 0; i < n_elt; ++i)
14685 {
14686 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14687 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14688 }
14689
14690 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14691 }
14692
14693 return target;
14694 }
14695
14696 /* Return the integer constant in ARG. Constrain it to be in the range
14697 of the subparts of VEC_TYPE; issue an error if not. */
14698
14699 static int
14700 get_element_number (tree vec_type, tree arg)
14701 {
14702 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14703
14704 if (!tree_fits_uhwi_p (arg)
14705 || (elt = tree_to_uhwi (arg), elt > max))
14706 {
14707 error ("selector must be an integer constant in the range 0..%wi", max);
14708 return 0;
14709 }
14710
14711 return elt;
14712 }
14713
14714 /* Expand vec_set builtin. */
14715 static rtx
14716 altivec_expand_vec_set_builtin (tree exp)
14717 {
14718 machine_mode tmode, mode1;
14719 tree arg0, arg1, arg2;
14720 int elt;
14721 rtx op0, op1;
14722
14723 arg0 = CALL_EXPR_ARG (exp, 0);
14724 arg1 = CALL_EXPR_ARG (exp, 1);
14725 arg2 = CALL_EXPR_ARG (exp, 2);
14726
14727 tmode = TYPE_MODE (TREE_TYPE (arg0));
14728 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14729 gcc_assert (VECTOR_MODE_P (tmode));
14730
14731 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14732 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14733 elt = get_element_number (TREE_TYPE (arg0), arg2);
14734
14735 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14736 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14737
14738 op0 = force_reg (tmode, op0);
14739 op1 = force_reg (mode1, op1);
14740
14741 rs6000_expand_vector_set (op0, op1, elt);
14742
14743 return op0;
14744 }
14745
14746 /* Expand vec_ext builtin. */
14747 static rtx
14748 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14749 {
14750 machine_mode tmode, mode0;
14751 tree arg0, arg1;
14752 rtx op0;
14753 rtx op1;
14754
14755 arg0 = CALL_EXPR_ARG (exp, 0);
14756 arg1 = CALL_EXPR_ARG (exp, 1);
14757
14758 op0 = expand_normal (arg0);
14759 op1 = expand_normal (arg1);
14760
14761 if (TREE_CODE (arg1) == INTEGER_CST)
14762 {
14763 unsigned HOST_WIDE_INT elt;
14764 unsigned HOST_WIDE_INT size = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
14765 unsigned int truncated_selector;
14766 /* Even if !tree_fits_uhwi_p (arg1), TREE_INT_CST_LOW (arg1)
14767 returns the low-order bits of the INTEGER_CST for modulo indexing. */
14768 elt = TREE_INT_CST_LOW (arg1);
14769 truncated_selector = elt % size;
14770 op1 = GEN_INT (truncated_selector);
14771 }
14772
14773 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14774 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14775 gcc_assert (VECTOR_MODE_P (mode0));
14776
14777 op0 = force_reg (mode0, op0);
14778
14779 if (optimize || !target || !register_operand (target, tmode))
14780 target = gen_reg_rtx (tmode);
14781
14782 rs6000_expand_vector_extract (target, op0, op1);
14783
14784 return target;
14785 }
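
/* To illustrate the modulo indexing above: for a V4SI argument,
   TYPE_VECTOR_SUBPARTS is 4, so a constant selector of 5 is reduced
   to 5 % 4 == 1 and the extraction behaves as if element 1 had been
   requested.  This mirrors the truncated_selector computation in the
   code above.  */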
14786
14787 /* Expand the builtin in EXP and store the result in TARGET. Store
14788 true in *EXPANDEDP if we found a builtin to expand. */
14789 static rtx
14790 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14791 {
14792 const struct builtin_description *d;
14793 size_t i;
14794 enum insn_code icode;
14795 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14796 tree arg0, arg1, arg2;
14797 rtx op0, pat;
14798 machine_mode tmode, mode0;
14799 enum rs6000_builtins fcode
14800 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14801
14802 if (rs6000_overloaded_builtin_p (fcode))
14803 {
14804 *expandedp = true;
14805 error ("unresolved overload for Altivec builtin %qF", fndecl);
14806
14807 /* Given it is invalid, just generate a normal call. */
14808 return expand_call (exp, target, false);
14809 }
14810
14811 target = altivec_expand_dst_builtin (exp, target, expandedp);
14812 if (*expandedp)
14813 return target;
14814
14815 *expandedp = true;
14816
14817 switch (fcode)
14818 {
14819 case ALTIVEC_BUILTIN_STVX_V2DF:
14820 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14821 case ALTIVEC_BUILTIN_STVX_V2DI:
14822 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14823 case ALTIVEC_BUILTIN_STVX_V4SF:
14824 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14825 case ALTIVEC_BUILTIN_STVX:
14826 case ALTIVEC_BUILTIN_STVX_V4SI:
14827 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14828 case ALTIVEC_BUILTIN_STVX_V8HI:
14829 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14830 case ALTIVEC_BUILTIN_STVX_V16QI:
14831 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14832 case ALTIVEC_BUILTIN_STVEBX:
14833 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14834 case ALTIVEC_BUILTIN_STVEHX:
14835 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14836 case ALTIVEC_BUILTIN_STVEWX:
14837 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14838 case ALTIVEC_BUILTIN_STVXL_V2DF:
14839 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14840 case ALTIVEC_BUILTIN_STVXL_V2DI:
14841 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14842 case ALTIVEC_BUILTIN_STVXL_V4SF:
14843 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14844 case ALTIVEC_BUILTIN_STVXL:
14845 case ALTIVEC_BUILTIN_STVXL_V4SI:
14846 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14847 case ALTIVEC_BUILTIN_STVXL_V8HI:
14848 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14849 case ALTIVEC_BUILTIN_STVXL_V16QI:
14850 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14851
14852 case ALTIVEC_BUILTIN_STVLX:
14853 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14854 case ALTIVEC_BUILTIN_STVLXL:
14855 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14856 case ALTIVEC_BUILTIN_STVRX:
14857 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14858 case ALTIVEC_BUILTIN_STVRXL:
14859 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14860
14861 case P9V_BUILTIN_STXVL:
14862 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14863
14864 case P9V_BUILTIN_XST_LEN_R:
14865 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14866
14867 case VSX_BUILTIN_STXVD2X_V1TI:
14868 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14869 case VSX_BUILTIN_STXVD2X_V2DF:
14870 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14871 case VSX_BUILTIN_STXVD2X_V2DI:
14872 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14873 case VSX_BUILTIN_STXVW4X_V4SF:
14874 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14875 case VSX_BUILTIN_STXVW4X_V4SI:
14876 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14877 case VSX_BUILTIN_STXVW4X_V8HI:
14878 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14879 case VSX_BUILTIN_STXVW4X_V16QI:
14880 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14881
14882 /* For the following on big endian, it's ok to use any appropriate
14883 unaligned-supporting store, so use a generic expander. For
14884 little-endian, the exact element-reversing instruction must
14885 be used. */
14886 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14887 {
14888 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14889 : CODE_FOR_vsx_st_elemrev_v1ti);
14890 return altivec_expand_stv_builtin (code, exp);
14891 }
14892 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14893 {
14894 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14895 : CODE_FOR_vsx_st_elemrev_v2df);
14896 return altivec_expand_stv_builtin (code, exp);
14897 }
14898 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14899 {
14900 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14901 : CODE_FOR_vsx_st_elemrev_v2di);
14902 return altivec_expand_stv_builtin (code, exp);
14903 }
14904 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14905 {
14906 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14907 : CODE_FOR_vsx_st_elemrev_v4sf);
14908 return altivec_expand_stv_builtin (code, exp);
14909 }
14910 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14911 {
14912 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14913 : CODE_FOR_vsx_st_elemrev_v4si);
14914 return altivec_expand_stv_builtin (code, exp);
14915 }
14916 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14917 {
14918 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14919 : CODE_FOR_vsx_st_elemrev_v8hi);
14920 return altivec_expand_stv_builtin (code, exp);
14921 }
14922 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14923 {
14924 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14925 : CODE_FOR_vsx_st_elemrev_v16qi);
14926 return altivec_expand_stv_builtin (code, exp);
14927 }
14928
14929 case ALTIVEC_BUILTIN_MFVSCR:
14930 icode = CODE_FOR_altivec_mfvscr;
14931 tmode = insn_data[icode].operand[0].mode;
14932
14933 if (target == 0
14934 || GET_MODE (target) != tmode
14935 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14936 target = gen_reg_rtx (tmode);
14937
14938 pat = GEN_FCN (icode) (target);
14939 if (! pat)
14940 return 0;
14941 emit_insn (pat);
14942 return target;
14943
14944 case ALTIVEC_BUILTIN_MTVSCR:
14945 icode = CODE_FOR_altivec_mtvscr;
14946 arg0 = CALL_EXPR_ARG (exp, 0);
14947 op0 = expand_normal (arg0);
14948 mode0 = insn_data[icode].operand[0].mode;
14949
14950 /* If we got invalid arguments bail out before generating bad rtl. */
14951 if (arg0 == error_mark_node)
14952 return const0_rtx;
14953
14954 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14955 op0 = copy_to_mode_reg (mode0, op0);
14956
14957 pat = GEN_FCN (icode) (op0);
14958 if (pat)
14959 emit_insn (pat);
14960 return NULL_RTX;
14961
14962 case ALTIVEC_BUILTIN_DSSALL:
14963 emit_insn (gen_altivec_dssall ());
14964 return NULL_RTX;
14965
14966 case ALTIVEC_BUILTIN_DSS:
14967 icode = CODE_FOR_altivec_dss;
14968 arg0 = CALL_EXPR_ARG (exp, 0);
14969 STRIP_NOPS (arg0);
14970 op0 = expand_normal (arg0);
14971 mode0 = insn_data[icode].operand[0].mode;
14972
14973 /* If we got invalid arguments bail out before generating bad rtl. */
14974 if (arg0 == error_mark_node)
14975 return const0_rtx;
14976
14977 if (TREE_CODE (arg0) != INTEGER_CST
14978 || TREE_INT_CST_LOW (arg0) & ~0x3)
14979 {
14980 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14981 return const0_rtx;
14982 }
14983
14984 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14985 op0 = copy_to_mode_reg (mode0, op0);
14986
14987 emit_insn (gen_altivec_dss (op0));
14988 return NULL_RTX;
14989
14990 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14991 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14992 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14993 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14994 case VSX_BUILTIN_VEC_INIT_V2DF:
14995 case VSX_BUILTIN_VEC_INIT_V2DI:
14996 case VSX_BUILTIN_VEC_INIT_V1TI:
14997 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14998
14999 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15000 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15001 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15002 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15003 case VSX_BUILTIN_VEC_SET_V2DF:
15004 case VSX_BUILTIN_VEC_SET_V2DI:
15005 case VSX_BUILTIN_VEC_SET_V1TI:
15006 return altivec_expand_vec_set_builtin (exp);
15007
15008 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15009 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15010 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15011 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15012 case VSX_BUILTIN_VEC_EXT_V2DF:
15013 case VSX_BUILTIN_VEC_EXT_V2DI:
15014 case VSX_BUILTIN_VEC_EXT_V1TI:
15015 return altivec_expand_vec_ext_builtin (exp, target);
15016
15017 case P9V_BUILTIN_VEC_EXTRACT4B:
15018 arg1 = CALL_EXPR_ARG (exp, 1);
15019 STRIP_NOPS (arg1);
15020
15021 /* Generate a normal call if the argument is invalid. */
15022 if (arg1 == error_mark_node)
15023 return expand_call (exp, target, false);
15024
15025 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15026 {
15027 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15028 return expand_call (exp, target, false);
15029 }
15030 break;
15031
15032 case P9V_BUILTIN_VEC_INSERT4B:
15033 arg2 = CALL_EXPR_ARG (exp, 2);
15034 STRIP_NOPS (arg2);
15035
15036 /* Generate a normal call if the argument is invalid. */
15037 if (arg2 == error_mark_node)
15038 return expand_call (exp, target, false);
15039
15040 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15041 {
15042 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15043 return expand_call (exp, target, false);
15044 }
15045 break;
15046
15047 default:
15048 /* Fall through to the table-driven expanders below. */
15049 break;
15050 }
15051
15052 /* Expand abs* operations. */
15053 d = bdesc_abs;
15054 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15055 if (d->code == fcode)
15056 return altivec_expand_abs_builtin (d->icode, exp, target);
15057
15058 /* Expand the AltiVec predicates. */
15059 d = bdesc_altivec_preds;
15060 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15061 if (d->code == fcode)
15062 return altivec_expand_predicate_builtin (d->icode, exp, target);
15063
15064 /* The LV* built-ins were initialized differently, so handle them here. */
15065 switch (fcode)
15066 {
15067 case ALTIVEC_BUILTIN_LVSL:
15068 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15069 exp, target, false);
15070 case ALTIVEC_BUILTIN_LVSR:
15071 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15072 exp, target, false);
15073 case ALTIVEC_BUILTIN_LVEBX:
15074 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15075 exp, target, false);
15076 case ALTIVEC_BUILTIN_LVEHX:
15077 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15078 exp, target, false);
15079 case ALTIVEC_BUILTIN_LVEWX:
15080 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15081 exp, target, false);
15082 case ALTIVEC_BUILTIN_LVXL_V2DF:
15083 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15084 exp, target, false);
15085 case ALTIVEC_BUILTIN_LVXL_V2DI:
15086 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15087 exp, target, false);
15088 case ALTIVEC_BUILTIN_LVXL_V4SF:
15089 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15090 exp, target, false);
15091 case ALTIVEC_BUILTIN_LVXL:
15092 case ALTIVEC_BUILTIN_LVXL_V4SI:
15093 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15094 exp, target, false);
15095 case ALTIVEC_BUILTIN_LVXL_V8HI:
15096 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15097 exp, target, false);
15098 case ALTIVEC_BUILTIN_LVXL_V16QI:
15099 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15100 exp, target, false);
15101 case ALTIVEC_BUILTIN_LVX_V1TI:
15102 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15103 exp, target, false);
15104 case ALTIVEC_BUILTIN_LVX_V2DF:
15105 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15106 exp, target, false);
15107 case ALTIVEC_BUILTIN_LVX_V2DI:
15108 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15109 exp, target, false);
15110 case ALTIVEC_BUILTIN_LVX_V4SF:
15111 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15112 exp, target, false);
15113 case ALTIVEC_BUILTIN_LVX:
15114 case ALTIVEC_BUILTIN_LVX_V4SI:
15115 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15116 exp, target, false);
15117 case ALTIVEC_BUILTIN_LVX_V8HI:
15118 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15119 exp, target, false);
15120 case ALTIVEC_BUILTIN_LVX_V16QI:
15121 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15122 exp, target, false);
15123 case ALTIVEC_BUILTIN_LVLX:
15124 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15125 exp, target, true);
15126 case ALTIVEC_BUILTIN_LVLXL:
15127 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15128 exp, target, true);
15129 case ALTIVEC_BUILTIN_LVRX:
15130 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15131 exp, target, true);
15132 case ALTIVEC_BUILTIN_LVRXL:
15133 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15134 exp, target, true);
15135 case VSX_BUILTIN_LXVD2X_V1TI:
15136 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15137 exp, target, false);
15138 case VSX_BUILTIN_LXVD2X_V2DF:
15139 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15140 exp, target, false);
15141 case VSX_BUILTIN_LXVD2X_V2DI:
15142 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15143 exp, target, false);
15144 case VSX_BUILTIN_LXVW4X_V4SF:
15145 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15146 exp, target, false);
15147 case VSX_BUILTIN_LXVW4X_V4SI:
15148 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15149 exp, target, false);
15150 case VSX_BUILTIN_LXVW4X_V8HI:
15151 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15152 exp, target, false);
15153 case VSX_BUILTIN_LXVW4X_V16QI:
15154 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15155 exp, target, false);
15156 /* For the following on big endian, it's ok to use any appropriate
15157 unaligned-supporting load, so use a generic expander. For
15158 little-endian, the exact element-reversing instruction must
15159 be used. */
15160 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15161 {
15162 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15163 : CODE_FOR_vsx_ld_elemrev_v2df);
15164 return altivec_expand_lv_builtin (code, exp, target, false);
15165 }
15166 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15167 {
15168 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15169 : CODE_FOR_vsx_ld_elemrev_v1ti);
15170 return altivec_expand_lv_builtin (code, exp, target, false);
15171 }
15172 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15173 {
15174 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15175 : CODE_FOR_vsx_ld_elemrev_v2di);
15176 return altivec_expand_lv_builtin (code, exp, target, false);
15177 }
15178 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15179 {
15180 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15181 : CODE_FOR_vsx_ld_elemrev_v4sf);
15182 return altivec_expand_lv_builtin (code, exp, target, false);
15183 }
15184 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15185 {
15186 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15187 : CODE_FOR_vsx_ld_elemrev_v4si);
15188 return altivec_expand_lv_builtin (code, exp, target, false);
15189 }
15190 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15191 {
15192 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15193 : CODE_FOR_vsx_ld_elemrev_v8hi);
15194 return altivec_expand_lv_builtin (code, exp, target, false);
15195 }
15196 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15197 {
15198 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15199 : CODE_FOR_vsx_ld_elemrev_v16qi);
15200 return altivec_expand_lv_builtin (code, exp, target, false);
15201 }
15202 break;
15203 default:
15204 /* Fall through to the unexpanded-builtin handling below. */
15205 break;
15206 }
15207
15208 *expandedp = false;
15209 return NULL_RTX;
15210 }
15211
15212 /* Check whether a builtin function is supported in this target
15213 configuration. */
15214 bool
15215 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15216 {
15217 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
15218 return (fnmask & rs6000_builtin_mask) == fnmask;
15222 }
15223
15224 /* Raise an error message for a builtin function that is called without the
15225 appropriate target options being set. */
15226
15227 static void
15228 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15229 {
15230 size_t uns_fncode = (size_t) fncode;
15231 const char *name = rs6000_builtin_info[uns_fncode].name;
15232 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15233
15234 gcc_assert (name != NULL);
15235 if ((fnmask & RS6000_BTM_CELL) != 0)
15236 error ("builtin function %qs is only valid for the cell processor", name);
15237 else if ((fnmask & RS6000_BTM_VSX) != 0)
15238 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15239 else if ((fnmask & RS6000_BTM_HTM) != 0)
15240 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15241 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15242 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15243 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15244 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15245 error ("builtin function %qs requires the %qs and %qs options",
15246 name, "-mhard-dfp", "-mpower8-vector");
15247 else if ((fnmask & RS6000_BTM_DFP) != 0)
15248 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15249 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15250 error ("builtin function %qs requires the %qs option", name,
15251 "-mpower8-vector");
15252 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15253 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15254 error ("builtin function %qs requires the %qs and %qs options",
15255 name, "-mcpu=power9", "-m64");
15256 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15257 error ("builtin function %qs requires the %qs option", name,
15258 "-mcpu=power9");
15259 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15260 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15261 error ("builtin function %qs requires the %qs and %qs options",
15262 name, "-mcpu=power9", "-m64");
15263 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15264 error ("builtin function %qs requires the %qs option", name,
15265 "-mcpu=power9");
15266 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15267 {
15268 if (!TARGET_HARD_FLOAT)
15269 error ("builtin function %qs requires the %qs option", name,
15270 "-mhard-float");
15271 else
15272 error ("builtin function %qs requires the %qs option", name,
15273 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15274 }
15275 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15276 error ("builtin function %qs requires the %qs option", name,
15277 "-mhard-float");
15278 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15279 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15280 name);
15281 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15282 error ("builtin function %qs requires the %qs option", name,
15283 "%<-mfloat128%>");
15284 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15285 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15286 error ("builtin function %qs requires the %qs (or newer), and "
15287 "%qs or %qs options",
15288 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15289 else
15290 error ("builtin function %qs is not supported with the current options",
15291 name);
15292 }
15293
15294 /* Target hook for early folding of built-ins, adapted from ia64.c. */
15296
15297 static tree
15298 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15299 int n_args ATTRIBUTE_UNUSED,
15300 tree *args ATTRIBUTE_UNUSED,
15301 bool ignore ATTRIBUTE_UNUSED)
15302 {
15303 #ifdef SUBTARGET_FOLD_BUILTIN
15304 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15305 #else
15306 return NULL_TREE;
15307 #endif
15308 }
15309
15310 /* Helper function to sort out which built-ins may be valid without having
15311 an LHS. */
15312 static bool
15313 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15314 {
15315 switch (fn_code)
15316 {
15317 case ALTIVEC_BUILTIN_STVX_V16QI:
15318 case ALTIVEC_BUILTIN_STVX_V8HI:
15319 case ALTIVEC_BUILTIN_STVX_V4SI:
15320 case ALTIVEC_BUILTIN_STVX_V4SF:
15321 case ALTIVEC_BUILTIN_STVX_V2DI:
15322 case ALTIVEC_BUILTIN_STVX_V2DF:
15323 case VSX_BUILTIN_STXVW4X_V16QI:
15324 case VSX_BUILTIN_STXVW4X_V8HI:
15325 case VSX_BUILTIN_STXVW4X_V4SF:
15326 case VSX_BUILTIN_STXVW4X_V4SI:
15327 case VSX_BUILTIN_STXVD2X_V2DF:
15328 case VSX_BUILTIN_STXVD2X_V2DI:
15329 return true;
15330 default:
15331 return false;
15332 }
15333 }
15334
15335 /* Helper function to handle the gimple folding of a vector compare
15336 operation. This sets up true/false vectors, and uses the
15337 VEC_COND_EXPR operation.
15338 CODE indicates which comparison is to be made. (EQ, GT, ...).
15339 TYPE indicates the type of the result. */
15340 static tree
15341 fold_build_vec_cmp (tree_code code, tree type,
15342 tree arg0, tree arg1)
15343 {
15344 tree cmp_type = build_same_sized_truth_vector_type (type);
15345 tree zero_vec = build_zero_cst (type);
15346 tree minus_one_vec = build_minus_one_cst (type);
15347 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15348 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15349 }
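
/* A sketch of the folding above for a V4SI comparison (illustrative):

     cmp = arg0 == arg1;                          // boolean vector
     res = cmp ? { -1,-1,-1,-1 } : { 0,0,0,0 };   // VEC_COND_EXPR

   Each lane of the result is all-ones where the comparison holds and
   all-zeros where it does not, matching the AltiVec convention for
   vector compare results.  */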
15350
15351 /* Helper function to handle the in-between steps for the
15352 vector compare built-ins. */
15353 static void
15354 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15355 {
15356 tree arg0 = gimple_call_arg (stmt, 0);
15357 tree arg1 = gimple_call_arg (stmt, 1);
15358 tree lhs = gimple_call_lhs (stmt);
15359 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15360 gimple *g = gimple_build_assign (lhs, cmp);
15361 gimple_set_location (g, gimple_location (stmt));
15362 gsi_replace (gsi, g, true);
15363 }
15364
15365 /* Helper function to map V2DF and V4SF types to their
15366 integral equivalents (V2DI and V4SI). */
15367 tree
15367 map_to_integral_tree_type (tree input_tree_type)
15368 {
15369 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15370 return input_tree_type;
15371 else
15372 {
15373 if (types_compatible_p (TREE_TYPE (input_tree_type),
15374 TREE_TYPE (V2DF_type_node)))
15375 return V2DI_type_node;
15376 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15377 TREE_TYPE (V4SF_type_node)))
15378 return V4SI_type_node;
15379 else
15380 gcc_unreachable ();
15381 }
15382 }
15383
15384 /* Helper function to handle the vector merge[hl] built-ins. The
15385 implementation difference between the h and l versions is in the
15386 values used when building the permute vector for the high-word
15387 versus low-word merge, keyed off the use_high parameter. */
15388 static void
15389 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15390 {
15391 tree arg0 = gimple_call_arg (stmt, 0);
15392 tree arg1 = gimple_call_arg (stmt, 1);
15393 tree lhs = gimple_call_lhs (stmt);
15394 tree lhs_type = TREE_TYPE (lhs);
15395 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15396 int midpoint = n_elts / 2;
15397 int offset = 0;
15398
15399 if (use_high == 1)
15400 offset = midpoint;
15401
15402 /* The permute_type will match the lhs for integral types. For double and
15403 float types, the permute type needs to map to the V2 or V4 type that
15404 matches size. */
15405 tree permute_type;
15406 permute_type = map_to_integral_tree_type (lhs_type);
15407 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15408
15409 for (int i = 0; i < midpoint; i++)
15410 {
15411 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15412 offset + i));
15413 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15414 offset + n_elts + i));
15415 }
15416
15417 tree permute = elts.build ();
15418
15419 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15420 gimple_set_location (g, gimple_location (stmt));
15421 gsi_replace (gsi, g, true);
15422 }
15423
15424 /* Helper function to handle the vector merge[eo] built-ins. */
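/* For example (illustrative): merging two V4SI vectors with
   use_odd == 0 (vec_mergee) builds the permute vector { 0, 4, 2, 6 },
   while use_odd == 1 (vec_mergeo) builds { 1, 5, 3, 7 }. */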
15425 static void
15426 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15427 {
15428 tree arg0 = gimple_call_arg (stmt, 0);
15429 tree arg1 = gimple_call_arg (stmt, 1);
15430 tree lhs = gimple_call_lhs (stmt);
15431 tree lhs_type = TREE_TYPE (lhs);
15432 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15433
15434 /* The permute_type will match the lhs for integral types. For double and
15435 float types, the permute type needs to map to the V2 or V4 type that
15436 matches size. */
15437 tree permute_type;
15438 permute_type = map_to_integral_tree_type (lhs_type);
15439
15440 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15441
15442 /* Build the permute vector. */
15443 for (int i = 0; i < n_elts / 2; i++)
15444 {
15445 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15446 2*i + use_odd));
15447 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15448 2*i + use_odd + n_elts));
15449 }
15450
15451 tree permute = elts.build ();
15452
15453 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15454 gimple_set_location (g, gimple_location (stmt));
15455 gsi_replace (gsi, g, true);
15456 }
15457
15458 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15459 a constant, use rs6000_fold_builtin.) */
15460
15461 bool
15462 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15463 {
15464 gimple *stmt = gsi_stmt (*gsi);
15465 tree fndecl = gimple_call_fndecl (stmt);
15466 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15467 enum rs6000_builtins fn_code
15468 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15469 tree arg0, arg1, lhs, temp;
15470 enum tree_code bcode;
15471 gimple *g;
15472
15473 size_t uns_fncode = (size_t) fn_code;
15474 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15475 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15476 const char *fn_name2 = (icode != CODE_FOR_nothing)
15477 ? get_insn_name ((int) icode)
15478 : "nothing";
15479
15480 if (TARGET_DEBUG_BUILTIN)
15481 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15482 fn_code, fn_name1, fn_name2);
15483
15484 if (!rs6000_fold_gimple)
15485 return false;
15486
15487 /* Prevent gimple folding for code that does not have a LHS, unless it is
15488 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15489 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15490 return false;
15491
15492 /* Don't fold invalid builtins; let rs6000_expand_builtin diagnose them. */
15493 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15494 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15495 if (!func_valid_p)
15496 return false;
15497
15498 switch (fn_code)
15499 {
15500 /* Flavors of vec_add. We deliberately don't expand
15501 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15502 TImode, resulting in much poorer code generation. */
15503 case ALTIVEC_BUILTIN_VADDUBM:
15504 case ALTIVEC_BUILTIN_VADDUHM:
15505 case ALTIVEC_BUILTIN_VADDUWM:
15506 case P8V_BUILTIN_VADDUDM:
15507 case ALTIVEC_BUILTIN_VADDFP:
15508 case VSX_BUILTIN_XVADDDP:
15509 bcode = PLUS_EXPR;
15510 do_binary:
15511 arg0 = gimple_call_arg (stmt, 0);
15512 arg1 = gimple_call_arg (stmt, 1);
15513 lhs = gimple_call_lhs (stmt);
15514 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15515 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15516 {
15517 /* Ensure the binary operation is performed in a type
15518 that wraps if it is an integral type. */
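/* A sketch of the GIMPLE emitted here for a signed V4SI add
   (illustrative only):
     u0 = VIEW_CONVERT_EXPR <vector unsigned int> (arg0);
     u1 = VIEW_CONVERT_EXPR <vector unsigned int> (arg1);
     res = u0 + u1;
     lhs = VIEW_CONVERT_EXPR <vector signed int> (res); */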
15519 gimple_seq stmts = NULL;
15520 tree type = unsigned_type_for (TREE_TYPE (lhs));
15521 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15522 type, arg0);
15523 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15524 type, arg1);
15525 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15526 type, uarg0, uarg1);
15527 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15528 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15529 build1 (VIEW_CONVERT_EXPR,
15530 TREE_TYPE (lhs), res));
15531 gsi_replace (gsi, g, true);
15532 return true;
15533 }
15534 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15535 gimple_set_location (g, gimple_location (stmt));
15536 gsi_replace (gsi, g, true);
15537 return true;
15538 /* Flavors of vec_sub. We deliberately don't expand
15539 P8V_BUILTIN_VSUBUQM. */
15540 case ALTIVEC_BUILTIN_VSUBUBM:
15541 case ALTIVEC_BUILTIN_VSUBUHM:
15542 case ALTIVEC_BUILTIN_VSUBUWM:
15543 case P8V_BUILTIN_VSUBUDM:
15544 case ALTIVEC_BUILTIN_VSUBFP:
15545 case VSX_BUILTIN_XVSUBDP:
15546 bcode = MINUS_EXPR;
15547 goto do_binary;
15548 case VSX_BUILTIN_XVMULSP:
15549 case VSX_BUILTIN_XVMULDP:
15550 arg0 = gimple_call_arg (stmt, 0);
15551 arg1 = gimple_call_arg (stmt, 1);
15552 lhs = gimple_call_lhs (stmt);
15553 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15554 gimple_set_location (g, gimple_location (stmt));
15555 gsi_replace (gsi, g, true);
15556 return true;
15557 /* Even element flavors of vec_mul (signed). */
15558 case ALTIVEC_BUILTIN_VMULESB:
15559 case ALTIVEC_BUILTIN_VMULESH:
15560 case P8V_BUILTIN_VMULESW:
15561 /* Even element flavors of vec_mul (unsigned). */
15562 case ALTIVEC_BUILTIN_VMULEUB:
15563 case ALTIVEC_BUILTIN_VMULEUH:
15564 case P8V_BUILTIN_VMULEUW:
15565 arg0 = gimple_call_arg (stmt, 0);
15566 arg1 = gimple_call_arg (stmt, 1);
15567 lhs = gimple_call_lhs (stmt);
15568 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15569 gimple_set_location (g, gimple_location (stmt));
15570 gsi_replace (gsi, g, true);
15571 return true;
15572 /* Odd element flavors of vec_mul (signed). */
15573 case ALTIVEC_BUILTIN_VMULOSB:
15574 case ALTIVEC_BUILTIN_VMULOSH:
15575 case P8V_BUILTIN_VMULOSW:
15576 /* Odd element flavors of vec_mul (unsigned). */
15577 case ALTIVEC_BUILTIN_VMULOUB:
15578 case ALTIVEC_BUILTIN_VMULOUH:
15579 case P8V_BUILTIN_VMULOUW:
15580 arg0 = gimple_call_arg (stmt, 0);
15581 arg1 = gimple_call_arg (stmt, 1);
15582 lhs = gimple_call_lhs (stmt);
15583 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15584 gimple_set_location (g, gimple_location (stmt));
15585 gsi_replace (gsi, g, true);
15586 return true;
15587 /* Flavors of vec_div (Integer). */
15588 case VSX_BUILTIN_DIV_V2DI:
15589 case VSX_BUILTIN_UDIV_V2DI:
15590 arg0 = gimple_call_arg (stmt, 0);
15591 arg1 = gimple_call_arg (stmt, 1);
15592 lhs = gimple_call_lhs (stmt);
15593 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15594 gimple_set_location (g, gimple_location (stmt));
15595 gsi_replace (gsi, g, true);
15596 return true;
15597 /* Flavors of vec_div (Float). */
15598 case VSX_BUILTIN_XVDIVSP:
15599 case VSX_BUILTIN_XVDIVDP:
15600 arg0 = gimple_call_arg (stmt, 0);
15601 arg1 = gimple_call_arg (stmt, 1);
15602 lhs = gimple_call_lhs (stmt);
15603 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15604 gimple_set_location (g, gimple_location (stmt));
15605 gsi_replace (gsi, g, true);
15606 return true;
15607 /* Flavors of vec_and. */
15608 case ALTIVEC_BUILTIN_VAND:
15609 arg0 = gimple_call_arg (stmt, 0);
15610 arg1 = gimple_call_arg (stmt, 1);
15611 lhs = gimple_call_lhs (stmt);
15612 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15613 gimple_set_location (g, gimple_location (stmt));
15614 gsi_replace (gsi, g, true);
15615 return true;
15616 /* Flavors of vec_andc. */
15617 case ALTIVEC_BUILTIN_VANDC:
15618 arg0 = gimple_call_arg (stmt, 0);
15619 arg1 = gimple_call_arg (stmt, 1);
15620 lhs = gimple_call_lhs (stmt);
15621 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15622 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15623 gimple_set_location (g, gimple_location (stmt));
15624 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15625 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15626 gimple_set_location (g, gimple_location (stmt));
15627 gsi_replace (gsi, g, true);
15628 return true;
15629 /* Flavors of vec_nand. */
15630 case P8V_BUILTIN_VEC_NAND:
15631 case P8V_BUILTIN_NAND_V16QI:
15632 case P8V_BUILTIN_NAND_V8HI:
15633 case P8V_BUILTIN_NAND_V4SI:
15634 case P8V_BUILTIN_NAND_V4SF:
15635 case P8V_BUILTIN_NAND_V2DF:
15636 case P8V_BUILTIN_NAND_V2DI:
15637 arg0 = gimple_call_arg (stmt, 0);
15638 arg1 = gimple_call_arg (stmt, 1);
15639 lhs = gimple_call_lhs (stmt);
15640 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15641 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15642 gimple_set_location (g, gimple_location (stmt));
15643 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15644 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15645 gimple_set_location (g, gimple_location (stmt));
15646 gsi_replace (gsi, g, true);
15647 return true;
15648 /* Flavors of vec_or. */
15649 case ALTIVEC_BUILTIN_VOR:
15650 arg0 = gimple_call_arg (stmt, 0);
15651 arg1 = gimple_call_arg (stmt, 1);
15652 lhs = gimple_call_lhs (stmt);
15653 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15654 gimple_set_location (g, gimple_location (stmt));
15655 gsi_replace (gsi, g, true);
15656 return true;
15657 /* Flavors of vec_orc. */
15658 case P8V_BUILTIN_ORC_V16QI:
15659 case P8V_BUILTIN_ORC_V8HI:
15660 case P8V_BUILTIN_ORC_V4SI:
15661 case P8V_BUILTIN_ORC_V4SF:
15662 case P8V_BUILTIN_ORC_V2DF:
15663 case P8V_BUILTIN_ORC_V2DI:
15664 arg0 = gimple_call_arg (stmt, 0);
15665 arg1 = gimple_call_arg (stmt, 1);
15666 lhs = gimple_call_lhs (stmt);
15667 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15668 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15669 gimple_set_location (g, gimple_location (stmt));
15670 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15671 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15672 gimple_set_location (g, gimple_location (stmt));
15673 gsi_replace (gsi, g, true);
15674 return true;
15675 /* Flavors of vec_xor. */
15676 case ALTIVEC_BUILTIN_VXOR:
15677 arg0 = gimple_call_arg (stmt, 0);
15678 arg1 = gimple_call_arg (stmt, 1);
15679 lhs = gimple_call_lhs (stmt);
15680 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15681 gimple_set_location (g, gimple_location (stmt));
15682 gsi_replace (gsi, g, true);
15683 return true;
15684 /* Flavors of vec_nor. */
15685 case ALTIVEC_BUILTIN_VNOR:
15686 arg0 = gimple_call_arg (stmt, 0);
15687 arg1 = gimple_call_arg (stmt, 1);
15688 lhs = gimple_call_lhs (stmt);
15689 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15690 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15691 gimple_set_location (g, gimple_location (stmt));
15692 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15693 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15694 gimple_set_location (g, gimple_location (stmt));
15695 gsi_replace (gsi, g, true);
15696 return true;
15697 /* Flavors of vec_abs. */
15698 case ALTIVEC_BUILTIN_ABS_V16QI:
15699 case ALTIVEC_BUILTIN_ABS_V8HI:
15700 case ALTIVEC_BUILTIN_ABS_V4SI:
15701 case ALTIVEC_BUILTIN_ABS_V4SF:
15702 case P8V_BUILTIN_ABS_V2DI:
15703 case VSX_BUILTIN_XVABSDP:
15704 arg0 = gimple_call_arg (stmt, 0);
15705 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15706 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15707 return false;
15708 lhs = gimple_call_lhs (stmt);
15709 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15710 gimple_set_location (g, gimple_location (stmt));
15711 gsi_replace (gsi, g, true);
15712 return true;
15713 /* Flavors of vec_min. */
15714 case VSX_BUILTIN_XVMINDP:
15715 case P8V_BUILTIN_VMINSD:
15716 case P8V_BUILTIN_VMINUD:
15717 case ALTIVEC_BUILTIN_VMINSB:
15718 case ALTIVEC_BUILTIN_VMINSH:
15719 case ALTIVEC_BUILTIN_VMINSW:
15720 case ALTIVEC_BUILTIN_VMINUB:
15721 case ALTIVEC_BUILTIN_VMINUH:
15722 case ALTIVEC_BUILTIN_VMINUW:
15723 case ALTIVEC_BUILTIN_VMINFP:
15724 arg0 = gimple_call_arg (stmt, 0);
15725 arg1 = gimple_call_arg (stmt, 1);
15726 lhs = gimple_call_lhs (stmt);
15727 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15728 gimple_set_location (g, gimple_location (stmt));
15729 gsi_replace (gsi, g, true);
15730 return true;
15731 /* Flavors of vec_max. */
15732 case VSX_BUILTIN_XVMAXDP:
15733 case P8V_BUILTIN_VMAXSD:
15734 case P8V_BUILTIN_VMAXUD:
15735 case ALTIVEC_BUILTIN_VMAXSB:
15736 case ALTIVEC_BUILTIN_VMAXSH:
15737 case ALTIVEC_BUILTIN_VMAXSW:
15738 case ALTIVEC_BUILTIN_VMAXUB:
15739 case ALTIVEC_BUILTIN_VMAXUH:
15740 case ALTIVEC_BUILTIN_VMAXUW:
15741 case ALTIVEC_BUILTIN_VMAXFP:
15742 arg0 = gimple_call_arg (stmt, 0);
15743 arg1 = gimple_call_arg (stmt, 1);
15744 lhs = gimple_call_lhs (stmt);
15745 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15746 gimple_set_location (g, gimple_location (stmt));
15747 gsi_replace (gsi, g, true);
15748 return true;
15749 /* Flavors of vec_eqv. */
15750 case P8V_BUILTIN_EQV_V16QI:
15751 case P8V_BUILTIN_EQV_V8HI:
15752 case P8V_BUILTIN_EQV_V4SI:
15753 case P8V_BUILTIN_EQV_V4SF:
15754 case P8V_BUILTIN_EQV_V2DF:
15755 case P8V_BUILTIN_EQV_V2DI:
15756 arg0 = gimple_call_arg (stmt, 0);
15757 arg1 = gimple_call_arg (stmt, 1);
15758 lhs = gimple_call_lhs (stmt);
15759 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15760 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15761 gimple_set_location (g, gimple_location (stmt));
15762 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15763 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15764 gimple_set_location (g, gimple_location (stmt));
15765 gsi_replace (gsi, g, true);
15766 return true;
15767 /* Flavors of vec_rotate_left. */
15768 case ALTIVEC_BUILTIN_VRLB:
15769 case ALTIVEC_BUILTIN_VRLH:
15770 case ALTIVEC_BUILTIN_VRLW:
15771 case P8V_BUILTIN_VRLD:
15772 arg0 = gimple_call_arg (stmt, 0);
15773 arg1 = gimple_call_arg (stmt, 1);
15774 lhs = gimple_call_lhs (stmt);
15775 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15776 gimple_set_location (g, gimple_location (stmt));
15777 gsi_replace (gsi, g, true);
15778 return true;
15779 /* Flavors of vector shift right algebraic.
15780 vec_sra{b,h,w} -> vsra{b,h,w}. */
15781 case ALTIVEC_BUILTIN_VSRAB:
15782 case ALTIVEC_BUILTIN_VSRAH:
15783 case ALTIVEC_BUILTIN_VSRAW:
15784 case P8V_BUILTIN_VSRAD:
15785 {
15786 arg0 = gimple_call_arg (stmt, 0);
15787 arg1 = gimple_call_arg (stmt, 1);
15788 lhs = gimple_call_lhs (stmt);
15789 tree arg1_type = TREE_TYPE (arg1);
15790 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15791 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15792 location_t loc = gimple_location (stmt);
15793 /* Force arg1 into the valid range for the arg0 type. */
15794 /* Build a vector consisting of the max valid bit-size values. */
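/* E.g. for vsraw (V4SI), 128 / 4 == 32, so the modulo vector built
   below is { 32, 32, 32, 32 } and the shift count is reduced mod 32
   (an illustrative example). */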
15795 int n_elts = VECTOR_CST_NELTS (arg1);
15796 tree element_size = build_int_cst (unsigned_element_type,
15797 128 / n_elts);
15798 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15799 for (int i = 0; i < n_elts; i++)
15800 elts.safe_push (element_size);
15801 tree modulo_tree = elts.build ();
15802 /* Modulo the provided shift value against that vector. */
15803 gimple_seq stmts = NULL;
15804 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15805 unsigned_arg1_type, arg1);
15806 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15807 unsigned_arg1_type, unsigned_arg1,
15808 modulo_tree);
15809 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15810 /* And finally, do the shift. */
15811 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
15812 gimple_set_location (g, loc);
15813 gsi_replace (gsi, g, true);
15814 return true;
15815 }
15816 /* Flavors of vector shift left.
15817 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15818 case ALTIVEC_BUILTIN_VSLB:
15819 case ALTIVEC_BUILTIN_VSLH:
15820 case ALTIVEC_BUILTIN_VSLW:
15821 case P8V_BUILTIN_VSLD:
15822 {
15823 location_t loc;
15824 gimple_seq stmts = NULL;
15825 arg0 = gimple_call_arg (stmt, 0);
15826 tree arg0_type = TREE_TYPE (arg0);
15827 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15828 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15829 return false;
15830 arg1 = gimple_call_arg (stmt, 1);
15831 tree arg1_type = TREE_TYPE (arg1);
15832 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15833 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15834 loc = gimple_location (stmt);
15835 lhs = gimple_call_lhs (stmt);
15836 /* Force arg1 into the valid range for the arg0 type. */
15837 /* Build a vector consisting of the max valid bit-size values. */
15838 int n_elts = VECTOR_CST_NELTS (arg1);
15839 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15840 * BITS_PER_UNIT;
15841 tree element_size = build_int_cst (unsigned_element_type,
15842 tree_size_in_bits / n_elts);
15843 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15844 for (int i = 0; i < n_elts; i++)
15845 elts.safe_push (element_size);
15846 tree modulo_tree = elts.build ();
15847 /* Modulo the provided shift value against that vector. */
15848 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15849 unsigned_arg1_type, arg1);
15850 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15851 unsigned_arg1_type, unsigned_arg1,
15852 modulo_tree);
15853 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15854 /* And finally, do the shift. */
15855 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15856 gimple_set_location (g, gimple_location (stmt));
15857 gsi_replace (gsi, g, true);
15858 return true;
15859 }
15860 /* Flavors of vector shift right. */
15861 case ALTIVEC_BUILTIN_VSRB:
15862 case ALTIVEC_BUILTIN_VSRH:
15863 case ALTIVEC_BUILTIN_VSRW:
15864 case P8V_BUILTIN_VSRD:
15865 {
15866 arg0 = gimple_call_arg (stmt, 0);
15867 arg1 = gimple_call_arg (stmt, 1);
15868 lhs = gimple_call_lhs (stmt);
15869 tree arg1_type = TREE_TYPE (arg1);
15870 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15871 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15872 location_t loc = gimple_location (stmt);
15873 gimple_seq stmts = NULL;
15874 /* Convert arg0 to unsigned. */
15875 tree arg0_unsigned
15876 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15877 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15878 /* Force arg1 into the valid range for the arg0 type. */
15879 /* Build a vector consisting of the max valid bit-size values. */
15880 int n_elts = VECTOR_CST_NELTS (arg1);
15881 tree element_size = build_int_cst (unsigned_element_type,
15882 128 / n_elts);
15883 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15884 for (int i = 0; i < n_elts; i++)
15885 elts.safe_push (element_size);
15886 tree modulo_tree = elts.build ();
15887 /* Modulo the provided shift value against that vector. */
15888 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15889 unsigned_arg1_type, arg1);
15890 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15891 unsigned_arg1_type, unsigned_arg1,
15892 modulo_tree);
15893 /* Do the shift. */
15894 tree res
15895 = gimple_build (&stmts, RSHIFT_EXPR,
15896 TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
15897 /* Convert result back to the lhs type. */
15898 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15899 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15900 update_call_from_tree (gsi, res);
15901 return true;
15902 }
15903 /* Vector loads. */
15904 case ALTIVEC_BUILTIN_LVX_V16QI:
15905 case ALTIVEC_BUILTIN_LVX_V8HI:
15906 case ALTIVEC_BUILTIN_LVX_V4SI:
15907 case ALTIVEC_BUILTIN_LVX_V4SF:
15908 case ALTIVEC_BUILTIN_LVX_V2DI:
15909 case ALTIVEC_BUILTIN_LVX_V2DF:
15910 case ALTIVEC_BUILTIN_LVX_V1TI:
15911 {
15912 arg0 = gimple_call_arg (stmt, 0); // offset
15913 arg1 = gimple_call_arg (stmt, 1); // address
15914 lhs = gimple_call_lhs (stmt);
15915 location_t loc = gimple_location (stmt);
15916 /* Since arg1 may be cast to a different type, just use ptr_type_node
15917 here instead of trying to enforce TBAA on pointer types. */
15918 tree arg1_type = ptr_type_node;
15919 tree lhs_type = TREE_TYPE (lhs);
15920 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15921 the tree using the value from arg0. The resulting type will match
15922 the type of arg1. */
15923 gimple_seq stmts = NULL;
15924 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15925 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15926 arg1_type, arg1, temp_offset);
15927 /* Mask off any lower bits from the address. */
15928 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15929 arg1_type, temp_addr,
15930 build_int_cst (arg1_type, -16));
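/* (Illustrative) The BIT_AND with -16 clears the low four address
   bits, e.g. an address of 0x1003 becomes 0x1000, mirroring lvx's
   hardware behavior of ignoring the low-order bits of the effective
   address. */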
15931 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15932 if (!is_gimple_mem_ref_addr (aligned_addr))
15933 {
15934 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15935 gimple *g = gimple_build_assign (t, aligned_addr);
15936 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15937 aligned_addr = t;
15938 }
15939 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15940 take an offset, but since we've already incorporated the offset
15941 above, here we just pass in a zero. */
15942 gimple *g
15943 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15944 build_int_cst (arg1_type, 0)));
15945 gimple_set_location (g, loc);
15946 gsi_replace (gsi, g, true);
15947 return true;
15948 }
15949 /* Vector stores. */
15950 case ALTIVEC_BUILTIN_STVX_V16QI:
15951 case ALTIVEC_BUILTIN_STVX_V8HI:
15952 case ALTIVEC_BUILTIN_STVX_V4SI:
15953 case ALTIVEC_BUILTIN_STVX_V4SF:
15954 case ALTIVEC_BUILTIN_STVX_V2DI:
15955 case ALTIVEC_BUILTIN_STVX_V2DF:
15956 {
15957 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15958 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15959 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15960 location_t loc = gimple_location (stmt);
15961 tree arg0_type = TREE_TYPE (arg0);
15962 /* Use ptr_type_node (no TBAA) for the arg2_type.
15963 FIXME: (Richard) "A proper fix would be to transition this type as
15964 seen from the frontend to GIMPLE, for example in a similar way we
15965 do for MEM_REFs by piggy-backing that on an extra argument, a
15966 constant zero pointer of the alias pointer type to use (which would
15967 also serve as a type indicator of the store itself). I'd use a
15968 target specific internal function for this (not sure if we can have
15969 those target specific, but I guess if it's folded away then that's
15970 fine) and get away with the overload set." */
15971 tree arg2_type = ptr_type_node;
15972 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15973 the tree using the value from arg1. The resulting type will match
15974 the type of arg2. */
15975 gimple_seq stmts = NULL;
15976 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15977 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15978 arg2_type, arg2, temp_offset);
15979 /* Mask off any lower bits from the address. */
15980 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15981 arg2_type, temp_addr,
15982 build_int_cst (arg2_type, -16));
15983 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15984 if (!is_gimple_mem_ref_addr (aligned_addr))
15985 {
15986 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15987 gimple *g = gimple_build_assign (t, aligned_addr);
15988 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15989 aligned_addr = t;
15990 }
15991 /* The desired gimple result should be similar to:
15992 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15993 gimple *g
15994 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15995 build_int_cst (arg2_type, 0)), arg0);
15996 gimple_set_location (g, loc);
15997 gsi_replace (gsi, g, true);
15998 return true;
15999 }
16000
16001 /* Unaligned vector loads. */
16002 case VSX_BUILTIN_LXVW4X_V16QI:
16003 case VSX_BUILTIN_LXVW4X_V8HI:
16004 case VSX_BUILTIN_LXVW4X_V4SF:
16005 case VSX_BUILTIN_LXVW4X_V4SI:
16006 case VSX_BUILTIN_LXVD2X_V2DF:
16007 case VSX_BUILTIN_LXVD2X_V2DI:
16008 {
16009 arg0 = gimple_call_arg (stmt, 0); // offset
16010 arg1 = gimple_call_arg (stmt, 1); // address
16011 lhs = gimple_call_lhs (stmt);
16012 location_t loc = gimple_location (stmt);
16013 /* Since arg1 may be cast to a different type, just use ptr_type_node
16014 here instead of trying to enforce TBAA on pointer types. */
16015 tree arg1_type = ptr_type_node;
16016 tree lhs_type = TREE_TYPE (lhs);
16017 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
16018 required alignment (on POWER) is 4 bytes regardless of data type. */
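/* E.g. for a V4SF load, align_ltype below is a variant of the vector
   type with its alignment lowered to 4 bytes, so the MEM_REF promises
   only 4-byte alignment and expansion uses an unaligned-capable
   access (an illustrative note). */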
16019 tree align_ltype = build_aligned_type (lhs_type, 4);
16020 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16021 the tree using the value from arg0. The resulting type will match
16022 the type of arg1. */
16023 gimple_seq stmts = NULL;
16024 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
16025 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16026 arg1_type, arg1, temp_offset);
16027 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16028 if (!is_gimple_mem_ref_addr (temp_addr))
16029 {
16030 tree t = make_ssa_name (TREE_TYPE (temp_addr));
16031 gimple *g = gimple_build_assign (t, temp_addr);
16032 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16033 temp_addr = t;
16034 }
16035 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
16036 take an offset, but since we've already incorporated the offset
16037 above, here we just pass in a zero. */
16038 gimple *g;
16039 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
16040 build_int_cst (arg1_type, 0)));
16041 gimple_set_location (g, loc);
16042 gsi_replace (gsi, g, true);
16043 return true;
16044 }
16045
16046 /* Unaligned vector stores. */
16047 case VSX_BUILTIN_STXVW4X_V16QI:
16048 case VSX_BUILTIN_STXVW4X_V8HI:
16049 case VSX_BUILTIN_STXVW4X_V4SF:
16050 case VSX_BUILTIN_STXVW4X_V4SI:
16051 case VSX_BUILTIN_STXVD2X_V2DF:
16052 case VSX_BUILTIN_STXVD2X_V2DI:
16053 {
16054 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
16055 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
16056 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
16057 location_t loc = gimple_location (stmt);
16058 tree arg0_type = TREE_TYPE (arg0);
16059 /* Use ptr_type_node (no TBAA) for the arg2_type. */
16060 tree arg2_type = ptr_type_node;
16061 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
16062 required alignment (on POWER) is 4 bytes regardless of data type. */
16063 tree align_stype = build_aligned_type (arg0_type, 4);
16064 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16065 the tree using the value from arg1. */
16066 gimple_seq stmts = NULL;
16067 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
16068 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16069 arg2_type, arg2, temp_offset);
16070 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16071 if (!is_gimple_mem_ref_addr (temp_addr))
16072 {
16073 tree t = make_ssa_name (TREE_TYPE (temp_addr));
16074 gimple *g = gimple_build_assign (t, temp_addr);
16075 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16076 temp_addr = t;
16077 }
16078 gimple *g;
16079 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
16080 build_int_cst (arg2_type, 0)), arg0);
16081 gimple_set_location (g, loc);
16082 gsi_replace (gsi, g, true);
16083 return true;
16084 }
16085
16086 /* Vector Fused multiply-add (fma). */
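/* E.g. vec_madd (a, b, c) becomes a call to the internal function
   IFN_FMA (printed as .FMA (a, b, c) in GIMPLE dumps), which is later
   expanded to the fused multiply-add instruction (an illustrative
   note). */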
16087 case ALTIVEC_BUILTIN_VMADDFP:
16088 case VSX_BUILTIN_XVMADDDP:
16089 case ALTIVEC_BUILTIN_VMLADDUHM:
16090 {
16091 arg0 = gimple_call_arg (stmt, 0);
16092 arg1 = gimple_call_arg (stmt, 1);
16093 tree arg2 = gimple_call_arg (stmt, 2);
16094 lhs = gimple_call_lhs (stmt);
16095 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
16096 gimple_call_set_lhs (g, lhs);
16097 gimple_call_set_nothrow (g, true);
16098 gimple_set_location (g, gimple_location (stmt));
16099 gsi_replace (gsi, g, true);
16100 return true;
16101 }
16102
16103 /* Vector compares; EQ, NE, GE, GT, LE. */
16104 case ALTIVEC_BUILTIN_VCMPEQUB:
16105 case ALTIVEC_BUILTIN_VCMPEQUH:
16106 case ALTIVEC_BUILTIN_VCMPEQUW:
16107 case P8V_BUILTIN_VCMPEQUD:
16108 fold_compare_helper (gsi, EQ_EXPR, stmt);
16109 return true;
16110
16111 case P9V_BUILTIN_CMPNEB:
16112 case P9V_BUILTIN_CMPNEH:
16113 case P9V_BUILTIN_CMPNEW:
16114 fold_compare_helper (gsi, NE_EXPR, stmt);
16115 return true;
16116
16117 case VSX_BUILTIN_CMPGE_16QI:
16118 case VSX_BUILTIN_CMPGE_U16QI:
16119 case VSX_BUILTIN_CMPGE_8HI:
16120 case VSX_BUILTIN_CMPGE_U8HI:
16121 case VSX_BUILTIN_CMPGE_4SI:
16122 case VSX_BUILTIN_CMPGE_U4SI:
16123 case VSX_BUILTIN_CMPGE_2DI:
16124 case VSX_BUILTIN_CMPGE_U2DI:
16125 fold_compare_helper (gsi, GE_EXPR, stmt);
16126 return true;
16127
16128 case ALTIVEC_BUILTIN_VCMPGTSB:
16129 case ALTIVEC_BUILTIN_VCMPGTUB:
16130 case ALTIVEC_BUILTIN_VCMPGTSH:
16131 case ALTIVEC_BUILTIN_VCMPGTUH:
16132 case ALTIVEC_BUILTIN_VCMPGTSW:
16133 case ALTIVEC_BUILTIN_VCMPGTUW:
16134 case P8V_BUILTIN_VCMPGTUD:
16135 case P8V_BUILTIN_VCMPGTSD:
16136 fold_compare_helper (gsi, GT_EXPR, stmt);
16137 return true;
16138
16139 case VSX_BUILTIN_CMPLE_16QI:
16140 case VSX_BUILTIN_CMPLE_U16QI:
16141 case VSX_BUILTIN_CMPLE_8HI:
16142 case VSX_BUILTIN_CMPLE_U8HI:
16143 case VSX_BUILTIN_CMPLE_4SI:
16144 case VSX_BUILTIN_CMPLE_U4SI:
16145 case VSX_BUILTIN_CMPLE_2DI:
16146 case VSX_BUILTIN_CMPLE_U2DI:
16147 fold_compare_helper (gsi, LE_EXPR, stmt);
16148 return true;
16149
16150 /* Flavors of vec_splat_[us]{8,16,32}. */
16151 case ALTIVEC_BUILTIN_VSPLTISB:
16152 case ALTIVEC_BUILTIN_VSPLTISH:
16153 case ALTIVEC_BUILTIN_VSPLTISW:
16154 {
16155 arg0 = gimple_call_arg (stmt, 0);
16156 lhs = gimple_call_lhs (stmt);
16157
16158 /* Only fold the vec_splat_*() if arg0 is a 5-bit signed constant
16159 in the range -16 to +15. */
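/* E.g. vec_splat_s32 (5) becomes lhs = { 5, 5, 5, 5 }
   (an illustrative example). */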
16160 if (TREE_CODE (arg0) != INTEGER_CST
16161 || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
16162 return false;
16163 gimple_seq stmts = NULL;
16164 location_t loc = gimple_location (stmt);
16165 tree splat_value = gimple_convert (&stmts, loc,
16166 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16167 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16168 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16169 g = gimple_build_assign (lhs, splat_tree);
16170 gimple_set_location (g, gimple_location (stmt));
16171 gsi_replace (gsi, g, true);
16172 return true;
16173 }
16174
16175 /* Flavors of vec_splat. */
16176 /* a = vec_splat (b, 0x3) becomes a = { b[3], b[3], b[3], ... }; */
16177 case ALTIVEC_BUILTIN_VSPLTB:
16178 case ALTIVEC_BUILTIN_VSPLTH:
16179 case ALTIVEC_BUILTIN_VSPLTW:
16180 case VSX_BUILTIN_XXSPLTD_V2DI:
16181 case VSX_BUILTIN_XXSPLTD_V2DF:
16182 {
16183 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
16184 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
16185 /* Only fold the vec_splat () if arg1 is both a constant value and
16186 a valid index into the arg0 vector. */
16187 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
16188 if (TREE_CODE (arg1) != INTEGER_CST
16189 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
16190 return false;
16191 lhs = gimple_call_lhs (stmt);
16192 tree lhs_type = TREE_TYPE (lhs);
16193 tree arg0_type = TREE_TYPE (arg0);
16194 tree splat;
16195 if (TREE_CODE (arg0) == VECTOR_CST)
16196 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
16197 else
16198 {
16199 /* Determine (in bits) the length and start location of the
16200 splat value for a call to the tree_vec_extract helper. */
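/* E.g. splatting element 2 of a V4SI variable extracts the 32 bits
   starting at bit position 64 (an illustrative example). */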
16201 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
16202 * BITS_PER_UNIT / n_elts;
16203 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
16204 tree len = build_int_cst (bitsizetype, splat_elem_size);
16205 tree start = build_int_cst (bitsizetype, splat_start_bit);
16206 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
16207 len, start);
16208 }
16209 /* And finally, build the new vector. */
16210 tree splat_tree = build_vector_from_val (lhs_type, splat);
16211 g = gimple_build_assign (lhs, splat_tree);
16212 gimple_set_location (g, gimple_location (stmt));
16213 gsi_replace (gsi, g, true);
16214 return true;
16215 }
16216
16217 /* vec_mergel (integral and float). */
16218 case ALTIVEC_BUILTIN_VMRGLH:
16219 case ALTIVEC_BUILTIN_VMRGLW:
16220 case VSX_BUILTIN_XXMRGLW_4SI:
16221 case ALTIVEC_BUILTIN_VMRGLB:
16222 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16223 case VSX_BUILTIN_XXMRGLW_4SF:
16224 case VSX_BUILTIN_VEC_MERGEL_V2DF:
16225 fold_mergehl_helper (gsi, stmt, 1);
16226 return true;
16227 /* vec_mergeh (integral and float). */
16228 case ALTIVEC_BUILTIN_VMRGHH:
16229 case ALTIVEC_BUILTIN_VMRGHW:
16230 case VSX_BUILTIN_XXMRGHW_4SI:
16231 case ALTIVEC_BUILTIN_VMRGHB:
16232 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16233 case VSX_BUILTIN_XXMRGHW_4SF:
16234 case VSX_BUILTIN_VEC_MERGEH_V2DF:
16235 fold_mergehl_helper (gsi, stmt, 0);
16236 return true;
16237
16238 /* Flavors of vec_mergee. */
16239 case P8V_BUILTIN_VMRGEW_V4SI:
16240 case P8V_BUILTIN_VMRGEW_V2DI:
16241 case P8V_BUILTIN_VMRGEW_V4SF:
16242 case P8V_BUILTIN_VMRGEW_V2DF:
16243 fold_mergeeo_helper (gsi, stmt, 0);
16244 return true;
16245 /* Flavors of vec_mergeo. */
16246 case P8V_BUILTIN_VMRGOW_V4SI:
16247 case P8V_BUILTIN_VMRGOW_V2DI:
16248 case P8V_BUILTIN_VMRGOW_V4SF:
16249 case P8V_BUILTIN_VMRGOW_V2DF:
16250 fold_mergeeo_helper (gsi, stmt, 1);
16251 return true;
16252
16253 /* d = vec_pack (a, b) */
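/* E.g. vpkudum packs two V2DI inputs into one V4SI result by
   truncating each doubleword element to a word, which is what
   VEC_PACK_TRUNC_EXPR expresses (an illustrative note). */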
16254 case P8V_BUILTIN_VPKUDUM:
16255 case ALTIVEC_BUILTIN_VPKUHUM:
16256 case ALTIVEC_BUILTIN_VPKUWUM:
16257 {
16258 arg0 = gimple_call_arg (stmt, 0);
16259 arg1 = gimple_call_arg (stmt, 1);
16260 lhs = gimple_call_lhs (stmt);
16261 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
16262 gimple_set_location (g, gimple_location (stmt));
16263 gsi_replace (gsi, g, true);
16264 return true;
16265 }
16266
16267 /* d = vec_unpackh (a) */
16268 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
16269 in this code is sensitive to endianness, and needs to be inverted to
16270 handle both LE and BE targets. */
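/* E.g. vec_unpackh maps to VEC_UNPACK_HI_EXPR on big-endian but to
   VEC_UNPACK_LO_EXPR on little-endian, since the tree codes number
   lanes in the opposite sense from the AltiVec high/low naming there
   (an illustrative restatement of the code below). */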
16271 case ALTIVEC_BUILTIN_VUPKHSB:
16272 case ALTIVEC_BUILTIN_VUPKHSH:
16273 case P8V_BUILTIN_VUPKHSW:
16274 {
16275 arg0 = gimple_call_arg (stmt, 0);
16276 lhs = gimple_call_lhs (stmt);
16277 if (BYTES_BIG_ENDIAN)
16278 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16279 else
16280 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16281 gimple_set_location (g, gimple_location (stmt));
16282 gsi_replace (gsi, g, true);
16283 return true;
16284 }
16285 /* d = vec_unpackl (a) */
16286 case ALTIVEC_BUILTIN_VUPKLSB:
16287 case ALTIVEC_BUILTIN_VUPKLSH:
16288 case P8V_BUILTIN_VUPKLSW:
16289 {
16290 arg0 = gimple_call_arg (stmt, 0);
16291 lhs = gimple_call_lhs (stmt);
16292 if (BYTES_BIG_ENDIAN)
16293 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16294 else
16295 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16296 gimple_set_location (g, gimple_location (stmt));
16297 gsi_replace (gsi, g, true);
16298 return true;
16299 }
16300 /* There is no gimple type corresponding to pixel, so just return. */
16301 case ALTIVEC_BUILTIN_VUPKHPX:
16302 case ALTIVEC_BUILTIN_VUPKLPX:
16303 return false;
16304
16305 /* vec_perm. */
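/* E.g. d = vec_perm (a, b, c) selects bytes from the 32-byte
   concatenation of a and b under control of c; below this becomes a
   VEC_PERM_EXPR once both inputs are converted to the permute
   vector's type (an illustrative note). */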
16306 case ALTIVEC_BUILTIN_VPERM_16QI:
16307 case ALTIVEC_BUILTIN_VPERM_8HI:
16308 case ALTIVEC_BUILTIN_VPERM_4SI:
16309 case ALTIVEC_BUILTIN_VPERM_2DI:
16310 case ALTIVEC_BUILTIN_VPERM_4SF:
16311 case ALTIVEC_BUILTIN_VPERM_2DF:
16312 {
16313 arg0 = gimple_call_arg (stmt, 0);
16314 arg1 = gimple_call_arg (stmt, 1);
16315 tree permute = gimple_call_arg (stmt, 2);
16316 lhs = gimple_call_lhs (stmt);
16317 location_t loc = gimple_location (stmt);
16318 gimple_seq stmts = NULL;
16319 // Convert arg0 and arg1 to match the type of the permute
16320 // for the VEC_PERM_EXPR operation.
16321 tree permute_type = (TREE_TYPE (permute));
16322 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
16323 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
16324 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
16325 permute_type, arg0_ptype, arg1_ptype,
16326 permute);
16327 // Convert the result back to the desired lhs type upon completion.
16328 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
16329 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16330 g = gimple_build_assign (lhs, temp);
16331 gimple_set_location (g, loc);
16332 gsi_replace (gsi, g, true);
16333 return true;
16334 }
16335
16336 default:
16337 if (TARGET_DEBUG_BUILTIN)
16338 fprintf (stderr, "gimple builtin intrinsic not matched: %d %s %s\n",
16339 fn_code, fn_name1, fn_name2);
16340 break;
16341 }
16342
16343 return false;
16344 }
16345
16346 /* Expand an expression EXP that calls a built-in function,
16347 with result going to TARGET if that's convenient
16348 (and in mode MODE if that's convenient).
16349 SUBTARGET may be used as the target for computing one of EXP's operands.
16350 IGNORE is nonzero if the value is to be ignored. */
16351
16352 static rtx
16353 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16354 machine_mode mode ATTRIBUTE_UNUSED,
16355 int ignore ATTRIBUTE_UNUSED)
16356 {
16357 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16358 enum rs6000_builtins fcode
16359 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16360 size_t uns_fcode = (size_t)fcode;
16361 const struct builtin_description *d;
16362 size_t i;
16363 rtx ret;
16364 bool success;
16365 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16366 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16367 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16368
16369 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16370 floating point type, depending on whether long double is the IBM extended
16371 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16372 we only define one variant of the built-in function, and switch the code
16373 when defining it, rather than defining two built-ins and using the
16374 overload table in rs6000-c.c to switch between the two. If we don't have
16375 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16376 CODE_FOR_*tf* will be CODE_FOR_nothing. */
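/* E.g. with -mabi=ieeelongdouble (so TFmode is the IEEE 128-bit
   mode), CODE_FOR_sqrtkf2_odd is remapped to CODE_FOR_sqrttf2_odd,
   letting the single built-in definition reach the TFmode pattern
   (an illustrative restatement of the switch below). */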
16377 if (FLOAT128_IEEE_P (TFmode))
16378 switch (icode)
16379 {
16380 default:
16381 break;
16382
16383 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16384 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16385 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16386 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16387 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16388 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16389 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16390 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16391 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16392 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16393 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16394 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16395 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16396 }
16397
16398 if (TARGET_DEBUG_BUILTIN)
16399 {
16400 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16401 const char *name2 = (icode != CODE_FOR_nothing)
16402 ? get_insn_name ((int) icode)
16403 : "nothing";
16404 const char *name3;
16405
16406 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16407 {
16408 default: name3 = "unknown"; break;
16409 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16410 case RS6000_BTC_UNARY: name3 = "unary"; break;
16411 case RS6000_BTC_BINARY: name3 = "binary"; break;
16412 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16413 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16414 case RS6000_BTC_ABS: name3 = "abs"; break;
16415 case RS6000_BTC_DST: name3 = "dst"; break;
16416 }
16417
16418
16419 fprintf (stderr,
16420 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16421 (name1) ? name1 : "---", fcode,
16422 (name2) ? name2 : "---", (int) icode,
16423 name3,
16424 func_valid_p ? "" : ", not valid");
16425 }
16426
16427 if (!func_valid_p)
16428 {
16429 rs6000_invalid_builtin (fcode);
16430
16431 /* Given it is invalid, just generate a normal call. */
16432 return expand_call (exp, target, ignore);
16433 }
16434
16435 switch (fcode)
16436 {
16437 case RS6000_BUILTIN_RECIP:
16438 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16439
16440 case RS6000_BUILTIN_RECIPF:
16441 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16442
16443 case RS6000_BUILTIN_RSQRTF:
16444 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16445
16446 case RS6000_BUILTIN_RSQRT:
16447 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16448
16449 case POWER7_BUILTIN_BPERMD:
16450 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16451 ? CODE_FOR_bpermd_di
16452 : CODE_FOR_bpermd_si), exp, target);
16453
16454 case RS6000_BUILTIN_GET_TB:
16455 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16456 target);
16457
16458 case RS6000_BUILTIN_MFTB:
16459 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16460 ? CODE_FOR_rs6000_mftb_di
16461 : CODE_FOR_rs6000_mftb_si),
16462 target);
16463
16464 case RS6000_BUILTIN_MFFS:
16465 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16466
16467 case RS6000_BUILTIN_MTFSB0:
16468 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16469
16470 case RS6000_BUILTIN_MTFSB1:
16471 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16472
16473 case RS6000_BUILTIN_SET_FPSCR_RN:
16474 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16475 exp);
16476
16477 case RS6000_BUILTIN_SET_FPSCR_DRN:
16478 return
16479 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16480 exp);
16481
16482 case RS6000_BUILTIN_MFFSL:
16483 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16484
16485 case RS6000_BUILTIN_MTFSF:
16486 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16487
16488 case RS6000_BUILTIN_CPU_INIT:
16489 case RS6000_BUILTIN_CPU_IS:
16490 case RS6000_BUILTIN_CPU_SUPPORTS:
16491 return cpu_expand_builtin (fcode, exp, target);
16492
16493 case MISC_BUILTIN_SPEC_BARRIER:
16494 {
16495 emit_insn (gen_speculation_barrier ());
16496 return NULL_RTX;
16497 }
16498
16499 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16500 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16501 {
16502 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16503 : (int) CODE_FOR_altivec_lvsl_direct);
16504 machine_mode tmode = insn_data[icode2].operand[0].mode;
16505 machine_mode mode = insn_data[icode2].operand[1].mode;
16506 tree arg;
16507 rtx op, addr, pat;
16508
16509 gcc_assert (TARGET_ALTIVEC);
16510
16511 arg = CALL_EXPR_ARG (exp, 0);
16512 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16513 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16514 addr = memory_address (mode, op);
16515 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16516 op = addr;
16517 else
16518 {
16519 /* For the load case we need to negate the address. */
16520 op = gen_reg_rtx (GET_MODE (addr));
16521 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16522 }
16523 op = gen_rtx_MEM (mode, op);
16524
16525 if (target == 0
16526 || GET_MODE (target) != tmode
16527 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16528 target = gen_reg_rtx (tmode);
16529
16530 pat = GEN_FCN (icode2) (target, op);
16531 if (!pat)
16532 return 0;
16533 emit_insn (pat);
16534
16535 return target;
16536 }
16537
16538 case ALTIVEC_BUILTIN_VCFUX:
16539 case ALTIVEC_BUILTIN_VCFSX:
16540 case ALTIVEC_BUILTIN_VCTUXS:
16541 case ALTIVEC_BUILTIN_VCTSXS:
16542 /* FIXME: There's got to be a nicer way to handle this case than
16543 constructing a new CALL_EXPR. */
16544 if (call_expr_nargs (exp) == 1)
16545 {
16546 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16547 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16548 }
16549 break;
16550
16551 /* For the pack and unpack int128 routines, fix up the builtin so it
16552 uses the correct IBM128 type. */
16553 case MISC_BUILTIN_PACK_IF:
16554 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16555 {
16556 icode = CODE_FOR_packtf;
16557 fcode = MISC_BUILTIN_PACK_TF;
16558 uns_fcode = (size_t)fcode;
16559 }
16560 break;
16561
16562 case MISC_BUILTIN_UNPACK_IF:
16563 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16564 {
16565 icode = CODE_FOR_unpacktf;
16566 fcode = MISC_BUILTIN_UNPACK_TF;
16567 uns_fcode = (size_t)fcode;
16568 }
16569 break;
16570
16571 default:
16572 break;
16573 }
16574
16575 if (TARGET_ALTIVEC)
16576 {
16577 ret = altivec_expand_builtin (exp, target, &success);
16578
16579 if (success)
16580 return ret;
16581 }
16582 if (TARGET_HTM)
16583 {
16584 ret = htm_expand_builtin (exp, target, &success);
16585
16586 if (success)
16587 return ret;
16588 }
16589
16590 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16591 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16592 gcc_assert (attr == RS6000_BTC_UNARY
16593 || attr == RS6000_BTC_BINARY
16594 || attr == RS6000_BTC_TERNARY
16595 || attr == RS6000_BTC_SPECIAL);
16596
16597 /* Handle simple unary operations. */
16598 d = bdesc_1arg;
16599 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16600 if (d->code == fcode)
16601 return rs6000_expand_unop_builtin (icode, exp, target);
16602
16603 /* Handle simple binary operations. */
16604 d = bdesc_2arg;
16605 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16606 if (d->code == fcode)
16607 return rs6000_expand_binop_builtin (icode, exp, target);
16608
16609 /* Handle simple ternary operations. */
16610 d = bdesc_3arg;
16611 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16612 if (d->code == fcode)
16613 return rs6000_expand_ternop_builtin (icode, exp, target);
16614
16615 /* Handle simple no-argument operations. */
16616 d = bdesc_0arg;
16617 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16618 if (d->code == fcode)
16619 return rs6000_expand_zeroop_builtin (icode, target);
16620
16621 gcc_unreachable ();
16622 }
16623
16624 /* Create a builtin vector type with a name, taking care not to give
16625 the canonical type a name. */
16626
16627 static tree
16628 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16629 {
16630 tree result = build_vector_type (elt_type, num_elts);
16631
16632 /* Copy so we don't give the canonical type a name. */
16633 result = build_variant_type_copy (result);
16634
16635 add_builtin_type (name, result);
16636
16637 return result;
16638 }
16639
16640 static void
16641 rs6000_init_builtins (void)
16642 {
16643 tree tdecl;
16644 tree ftype;
16645 machine_mode mode;
16646
16647 if (TARGET_DEBUG_BUILTIN)
16648 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16649 (TARGET_ALTIVEC) ? ", altivec" : "",
16650 (TARGET_VSX) ? ", vsx" : "");
16651
16652 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16653 : "__vector long long",
16654 intDI_type_node, 2);
16655 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16656 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16657 intSI_type_node, 4);
16658 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16659 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16660 intHI_type_node, 8);
16661 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16662 intQI_type_node, 16);
16663
16664 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16665 unsigned_intQI_type_node, 16);
16666 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16667 unsigned_intHI_type_node, 8);
16668 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16669 unsigned_intSI_type_node, 4);
16670 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16671 ? "__vector unsigned long"
16672 : "__vector unsigned long long",
16673 unsigned_intDI_type_node, 2);
16674
16675 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16676
16677 const_str_type_node
16678 = build_pointer_type (build_qualified_type (char_type_node,
16679 TYPE_QUAL_CONST));
16680
16681 /* We use V1TI mode as a special container to hold __int128_t items that
16682 must live in VSX registers. */
16683 if (intTI_type_node)
16684 {
16685 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16686 intTI_type_node, 1);
16687 unsigned_V1TI_type_node
16688 = rs6000_vector_type ("__vector unsigned __int128",
16689 unsigned_intTI_type_node, 1);
16690 }
16691
16692 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16693 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16694 'vector unsigned short'. */
16695
16696 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16697 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16698 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16699 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16700 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16701
16702 long_integer_type_internal_node = long_integer_type_node;
16703 long_unsigned_type_internal_node = long_unsigned_type_node;
16704 long_long_integer_type_internal_node = long_long_integer_type_node;
16705 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16706 intQI_type_internal_node = intQI_type_node;
16707 uintQI_type_internal_node = unsigned_intQI_type_node;
16708 intHI_type_internal_node = intHI_type_node;
16709 uintHI_type_internal_node = unsigned_intHI_type_node;
16710 intSI_type_internal_node = intSI_type_node;
16711 uintSI_type_internal_node = unsigned_intSI_type_node;
16712 intDI_type_internal_node = intDI_type_node;
16713 uintDI_type_internal_node = unsigned_intDI_type_node;
16714 intTI_type_internal_node = intTI_type_node;
16715 uintTI_type_internal_node = unsigned_intTI_type_node;
16716 float_type_internal_node = float_type_node;
16717 double_type_internal_node = double_type_node;
16718 long_double_type_internal_node = long_double_type_node;
16719 dfloat64_type_internal_node = dfloat64_type_node;
16720 dfloat128_type_internal_node = dfloat128_type_node;
16721 void_type_internal_node = void_type_node;
16722
16723 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16724 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16725 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16726 format that uses a pair of doubles, depending on the switches and
16727 defaults.
16728
16729 If we don't support either 128-bit IBM double double or IEEE 128-bit
16730 floating point, we need to make sure the type is non-zero or else self-test
16731 fails during bootstrap.
16732
16733 Always create __ibm128 as a separate type, even if the current long double
16734 format is IBM extended double.
16735
16736 For IEEE 128-bit floating point, always create the type __ieee128. If the
16737 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16738 __ieee128. */
16739 if (TARGET_FLOAT128_TYPE)
16740 {
16741 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16742 ibm128_float_type_node = long_double_type_node;
16743 else
16744 {
16745 ibm128_float_type_node = make_node (REAL_TYPE);
16746 TYPE_PRECISION (ibm128_float_type_node) = 128;
16747 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16748 layout_type (ibm128_float_type_node);
16749 }
16750
16751 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16752 "__ibm128");
16753
16754 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16755 ieee128_float_type_node = long_double_type_node;
16756 else
16757 ieee128_float_type_node = float128_type_node;
16758
16759 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16760 "__ieee128");
16761 }
16762
16763 else
16764 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16765
16766 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16767 a tree type node. */
16768 builtin_mode_to_type[QImode][0] = integer_type_node;
16769 builtin_mode_to_type[HImode][0] = integer_type_node;
16770 builtin_mode_to_type[SImode][0] = intSI_type_node;
16771 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16772 builtin_mode_to_type[DImode][0] = intDI_type_node;
16773 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16774 builtin_mode_to_type[TImode][0] = intTI_type_node;
16775 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16776 builtin_mode_to_type[SFmode][0] = float_type_node;
16777 builtin_mode_to_type[DFmode][0] = double_type_node;
16778 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16779 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16780 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16781 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16782 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16783 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16784 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16785 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16786 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16787 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16788 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16789 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16790 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16791 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16792 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16793 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16794 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16795
16796 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16797 TYPE_NAME (bool_char_type_node) = tdecl;
16798
16799 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16800 TYPE_NAME (bool_short_type_node) = tdecl;
16801
16802 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16803 TYPE_NAME (bool_int_type_node) = tdecl;
16804
16805 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16806 TYPE_NAME (pixel_type_node) = tdecl;
16807
16808 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16809 bool_char_type_node, 16);
16810 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16811 bool_short_type_node, 8);
16812 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16813 bool_int_type_node, 4);
16814 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16815 ? "__vector __bool long"
16816 : "__vector __bool long long",
16817 bool_long_long_type_node, 2);
16818 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16819 pixel_type_node, 8);
16820
16821 /* Create Altivec and VSX builtins on machines with at least the
16822 general purpose extensions (970 and newer) to allow the use of
16823 the target attribute. */
16824 if (TARGET_EXTRA_BUILTINS)
16825 altivec_init_builtins ();
16826 if (TARGET_HTM)
16827 htm_init_builtins ();
16828
16829 if (TARGET_EXTRA_BUILTINS)
16830 rs6000_common_init_builtins ();
16831
16832 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16833 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16834 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16835
16836 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16837 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16838 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16839
16840 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16841 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16842 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16843
16844 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16845 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16846 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16847
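/* Usage sketch for the four builtins above (editorial; assumes a PowerPC
   target with the corresponding RECIP/RSQRT support enabled):

     double q = __builtin_recipdiv (x, y);    approximates x / y
     float  r = __builtin_rsqrtf (x);         approximates 1.0f / sqrtf (x)

   using the hardware reciprocal-estimate instructions with refinement
   rather than a full-precision divide or square root.  */
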
16848 mode = (TARGET_64BIT) ? DImode : SImode;
16849 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16850 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16851 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16852
16853 ftype = build_function_type_list (unsigned_intDI_type_node,
16854 NULL_TREE);
16855 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16856
16857 if (TARGET_64BIT)
16858 ftype = build_function_type_list (unsigned_intDI_type_node,
16859 NULL_TREE);
16860 else
16861 ftype = build_function_type_list (unsigned_intSI_type_node,
16862 NULL_TREE);
16863 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16864
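/* Usage sketch (editorial): both builtins above read the time base
   register, e.g.

     unsigned long long t0 = __builtin_ppc_get_timebase ();
     ... code being timed ...
     unsigned long long t1 = __builtin_ppc_get_timebase ();

   __builtin_ppc_get_timebase always yields the full 64-bit value, while
   __builtin_ppc_mftb returns only 32 bits on 32-bit targets, matching the
   function types chosen above.  */
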
16865 ftype = build_function_type_list (double_type_node, NULL_TREE);
16866 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16867
16868 ftype = build_function_type_list (double_type_node, NULL_TREE);
16869 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16870
16871 ftype = build_function_type_list (void_type_node,
16872 intSI_type_node,
16873 NULL_TREE);
16874 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16875
16876 ftype = build_function_type_list (void_type_node,
16877 intSI_type_node,
16878 NULL_TREE);
16879 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16880
16881 ftype = build_function_type_list (void_type_node,
16882 intDI_type_node,
16883 NULL_TREE);
16884 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16885
16886 ftype = build_function_type_list (void_type_node,
16887 intDI_type_node,
16888 NULL_TREE);
16889 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16890
16891 ftype = build_function_type_list (void_type_node,
16892 intSI_type_node, double_type_node,
16893 NULL_TREE);
16894 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16895
16896 ftype = build_function_type_list (void_type_node, NULL_TREE);
16897 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16898 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16899 MISC_BUILTIN_SPEC_BARRIER);
16900
16901 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16902 NULL_TREE);
16903 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16904 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16905
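/* Usage sketch (editorial):

     __builtin_cpu_init ();   a nop on PowerPC, kept for x86 API parity
     if (__builtin_cpu_is ("power9") || __builtin_cpu_supports ("vsx"))
       ... use a tuned code path ...

   On PowerPC the strings are resolved against the platform and hardware
   capability bits that glibc records from the AT_PLATFORM / AT_HWCAP
   auxiliary vector entries.  */
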
16906 /* AIX libm provides clog as __clog. */
16907 if (TARGET_XCOFF
16908 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16909 set_user_assembler_name (tdecl, "__clog");
16910
16911 #ifdef SUBTARGET_INIT_BUILTINS
16912 SUBTARGET_INIT_BUILTINS;
16913 #endif
16914 }
16915
16916 /* Return the rs6000 builtin decl for CODE, or error_mark_node on failure. */
16917
16918 static tree
16919 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16920 {
16921 HOST_WIDE_INT fnmask;
16922
16923 if (code >= RS6000_BUILTIN_COUNT)
16924 return error_mark_node;
16925
16926 fnmask = rs6000_builtin_info[code].mask;
16927 if ((fnmask & rs6000_builtin_mask) != fnmask)
16928 {
16929 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16930 return error_mark_node;
16931 }
16932
16933 return rs6000_builtin_decls[code];
16934 }
16935
16936 static void
16937 altivec_init_builtins (void)
16938 {
16939 const struct builtin_description *d;
16940 size_t i;
16941 tree ftype;
16942 tree decl;
16943 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16944
16945 tree pvoid_type_node = build_pointer_type (void_type_node);
16946
16947 tree pcvoid_type_node
16948 = build_pointer_type (build_qualified_type (void_type_node,
16949 TYPE_QUAL_CONST));
16950
16951 tree int_ftype_opaque
16952 = build_function_type_list (integer_type_node,
16953 opaque_V4SI_type_node, NULL_TREE);
16954 tree opaque_ftype_opaque
16955 = build_function_type_list (integer_type_node, NULL_TREE);
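/* Editorial note: despite its name, the type above is built as int (void);
   the builtins that use it (vec_splats, vec_promote) are fully overloaded
   and are resolved by the front end before this placeholder signature
   would ever be type-checked against real arguments.  */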
16956 tree opaque_ftype_opaque_int
16957 = build_function_type_list (opaque_V4SI_type_node,
16958 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16959 tree opaque_ftype_opaque_opaque_int
16960 = build_function_type_list (opaque_V4SI_type_node,
16961 opaque_V4SI_type_node, opaque_V4SI_type_node,
16962 integer_type_node, NULL_TREE);
16963 tree opaque_ftype_opaque_opaque_opaque
16964 = build_function_type_list (opaque_V4SI_type_node,
16965 opaque_V4SI_type_node, opaque_V4SI_type_node,
16966 opaque_V4SI_type_node, NULL_TREE);
16967 tree opaque_ftype_opaque_opaque
16968 = build_function_type_list (opaque_V4SI_type_node,
16969 opaque_V4SI_type_node, opaque_V4SI_type_node,
16970 NULL_TREE);
16971 tree int_ftype_int_opaque_opaque
16972 = build_function_type_list (integer_type_node,
16973 integer_type_node, opaque_V4SI_type_node,
16974 opaque_V4SI_type_node, NULL_TREE);
16975 tree int_ftype_int_v4si_v4si
16976 = build_function_type_list (integer_type_node,
16977 integer_type_node, V4SI_type_node,
16978 V4SI_type_node, NULL_TREE);
16979 tree int_ftype_int_v2di_v2di
16980 = build_function_type_list (integer_type_node,
16981 integer_type_node, V2DI_type_node,
16982 V2DI_type_node, NULL_TREE);
16983 tree void_ftype_v4si
16984 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16985 tree v8hi_ftype_void
16986 = build_function_type_list (V8HI_type_node, NULL_TREE);
16987 tree void_ftype_void
16988 = build_function_type_list (void_type_node, NULL_TREE);
16989 tree void_ftype_int
16990 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16991
16992 tree opaque_ftype_long_pcvoid
16993 = build_function_type_list (opaque_V4SI_type_node,
16994 long_integer_type_node, pcvoid_type_node,
16995 NULL_TREE);
16996 tree v16qi_ftype_long_pcvoid
16997 = build_function_type_list (V16QI_type_node,
16998 long_integer_type_node, pcvoid_type_node,
16999 NULL_TREE);
17000 tree v8hi_ftype_long_pcvoid
17001 = build_function_type_list (V8HI_type_node,
17002 long_integer_type_node, pcvoid_type_node,
17003 NULL_TREE);
17004 tree v4si_ftype_long_pcvoid
17005 = build_function_type_list (V4SI_type_node,
17006 long_integer_type_node, pcvoid_type_node,
17007 NULL_TREE);
17008 tree v4sf_ftype_long_pcvoid
17009 = build_function_type_list (V4SF_type_node,
17010 long_integer_type_node, pcvoid_type_node,
17011 NULL_TREE);
17012 tree v2df_ftype_long_pcvoid
17013 = build_function_type_list (V2DF_type_node,
17014 long_integer_type_node, pcvoid_type_node,
17015 NULL_TREE);
17016 tree v2di_ftype_long_pcvoid
17017 = build_function_type_list (V2DI_type_node,
17018 long_integer_type_node, pcvoid_type_node,
17019 NULL_TREE);
17020 tree v1ti_ftype_long_pcvoid
17021 = build_function_type_list (V1TI_type_node,
17022 long_integer_type_node, pcvoid_type_node,
17023 NULL_TREE);
17024
17025 tree void_ftype_opaque_long_pvoid
17026 = build_function_type_list (void_type_node,
17027 opaque_V4SI_type_node, long_integer_type_node,
17028 pvoid_type_node, NULL_TREE);
17029 tree void_ftype_v4si_long_pvoid
17030 = build_function_type_list (void_type_node,
17031 V4SI_type_node, long_integer_type_node,
17032 pvoid_type_node, NULL_TREE);
17033 tree void_ftype_v16qi_long_pvoid
17034 = build_function_type_list (void_type_node,
17035 V16QI_type_node, long_integer_type_node,
17036 pvoid_type_node, NULL_TREE);
17037
17038 tree void_ftype_v16qi_pvoid_long
17039 = build_function_type_list (void_type_node,
17040 V16QI_type_node, pvoid_type_node,
17041 long_integer_type_node, NULL_TREE);
17042
17043 tree void_ftype_v8hi_long_pvoid
17044 = build_function_type_list (void_type_node,
17045 V8HI_type_node, long_integer_type_node,
17046 pvoid_type_node, NULL_TREE);
17047 tree void_ftype_v4sf_long_pvoid
17048 = build_function_type_list (void_type_node,
17049 V4SF_type_node, long_integer_type_node,
17050 pvoid_type_node, NULL_TREE);
17051 tree void_ftype_v2df_long_pvoid
17052 = build_function_type_list (void_type_node,
17053 V2DF_type_node, long_integer_type_node,
17054 pvoid_type_node, NULL_TREE);
17055 tree void_ftype_v1ti_long_pvoid
17056 = build_function_type_list (void_type_node,
17057 V1TI_type_node, long_integer_type_node,
17058 pvoid_type_node, NULL_TREE);
17059 tree void_ftype_v2di_long_pvoid
17060 = build_function_type_list (void_type_node,
17061 V2DI_type_node, long_integer_type_node,
17062 pvoid_type_node, NULL_TREE);
17063 tree int_ftype_int_v8hi_v8hi
17064 = build_function_type_list (integer_type_node,
17065 integer_type_node, V8HI_type_node,
17066 V8HI_type_node, NULL_TREE);
17067 tree int_ftype_int_v16qi_v16qi
17068 = build_function_type_list (integer_type_node,
17069 integer_type_node, V16QI_type_node,
17070 V16QI_type_node, NULL_TREE);
17071 tree int_ftype_int_v4sf_v4sf
17072 = build_function_type_list (integer_type_node,
17073 integer_type_node, V4SF_type_node,
17074 V4SF_type_node, NULL_TREE);
17075 tree int_ftype_int_v2df_v2df
17076 = build_function_type_list (integer_type_node,
17077 integer_type_node, V2DF_type_node,
17078 V2DF_type_node, NULL_TREE);
17079 tree v2di_ftype_v2di
17080 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17081 tree v4si_ftype_v4si
17082 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17083 tree v8hi_ftype_v8hi
17084 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17085 tree v16qi_ftype_v16qi
17086 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17087 tree v4sf_ftype_v4sf
17088 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17089 tree v2df_ftype_v2df
17090 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17091 tree void_ftype_pcvoid_int_int
17092 = build_function_type_list (void_type_node,
17093 pcvoid_type_node, integer_type_node,
17094 integer_type_node, NULL_TREE);
17095
17096 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17097 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17098 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17099 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17100 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17101 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17102 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17103 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17104 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17105 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17106 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17107 ALTIVEC_BUILTIN_LVXL_V2DF);
17108 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17109 ALTIVEC_BUILTIN_LVXL_V2DI);
17110 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17111 ALTIVEC_BUILTIN_LVXL_V4SF);
17112 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17113 ALTIVEC_BUILTIN_LVXL_V4SI);
17114 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17115 ALTIVEC_BUILTIN_LVXL_V8HI);
17116 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17117 ALTIVEC_BUILTIN_LVXL_V16QI);
17118 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17119 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
17120 ALTIVEC_BUILTIN_LVX_V1TI);
17121 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17122 ALTIVEC_BUILTIN_LVX_V2DF);
17123 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17124 ALTIVEC_BUILTIN_LVX_V2DI);
17125 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17126 ALTIVEC_BUILTIN_LVX_V4SF);
17127 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17128 ALTIVEC_BUILTIN_LVX_V4SI);
17129 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17130 ALTIVEC_BUILTIN_LVX_V8HI);
17131 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17132 ALTIVEC_BUILTIN_LVX_V16QI);
17133 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17134 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17135 ALTIVEC_BUILTIN_STVX_V2DF);
17136 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17137 ALTIVEC_BUILTIN_STVX_V2DI);
17138 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17139 ALTIVEC_BUILTIN_STVX_V4SF);
17140 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17141 ALTIVEC_BUILTIN_STVX_V4SI);
17142 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17143 ALTIVEC_BUILTIN_STVX_V8HI);
17144 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17145 ALTIVEC_BUILTIN_STVX_V16QI);
17146 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17147 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17148 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17149 ALTIVEC_BUILTIN_STVXL_V2DF);
17150 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17151 ALTIVEC_BUILTIN_STVXL_V2DI);
17152 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17153 ALTIVEC_BUILTIN_STVXL_V4SF);
17154 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17155 ALTIVEC_BUILTIN_STVXL_V4SI);
17156 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17157 ALTIVEC_BUILTIN_STVXL_V8HI);
17158 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17159 ALTIVEC_BUILTIN_STVXL_V16QI);
17160 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17161 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17162 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17163 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17164 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17165 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17166 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17167 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17168 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17169 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17170 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17171 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17172 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17173 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17174 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17175 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17176
17177 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17178 VSX_BUILTIN_LXVD2X_V2DF);
17179 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17180 VSX_BUILTIN_LXVD2X_V2DI);
17181 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17182 VSX_BUILTIN_LXVW4X_V4SF);
17183 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17184 VSX_BUILTIN_LXVW4X_V4SI);
17185 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17186 VSX_BUILTIN_LXVW4X_V8HI);
17187 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17188 VSX_BUILTIN_LXVW4X_V16QI);
17189 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17190 VSX_BUILTIN_STXVD2X_V2DF);
17191 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17192 VSX_BUILTIN_STXVD2X_V2DI);
17193 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17194 VSX_BUILTIN_STXVW4X_V4SF);
17195 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17196 VSX_BUILTIN_STXVW4X_V4SI);
17197 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17198 VSX_BUILTIN_STXVW4X_V8HI);
17199 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17200 VSX_BUILTIN_STXVW4X_V16QI);
17201
17202 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17203 VSX_BUILTIN_LD_ELEMREV_V2DF);
17204 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17205 VSX_BUILTIN_LD_ELEMREV_V2DI);
17206 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17207 VSX_BUILTIN_LD_ELEMREV_V4SF);
17208 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17209 VSX_BUILTIN_LD_ELEMREV_V4SI);
17210 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17211 VSX_BUILTIN_LD_ELEMREV_V8HI);
17212 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17213 VSX_BUILTIN_LD_ELEMREV_V16QI);
17214 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17215 VSX_BUILTIN_ST_ELEMREV_V2DF);
17216 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17217 VSX_BUILTIN_ST_ELEMREV_V1TI);
17218 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17219 VSX_BUILTIN_ST_ELEMREV_V2DI);
17220 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17221 VSX_BUILTIN_ST_ELEMREV_V4SF);
17222 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17223 VSX_BUILTIN_ST_ELEMREV_V4SI);
17224 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17225 VSX_BUILTIN_ST_ELEMREV_V8HI);
17226 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17227 VSX_BUILTIN_ST_ELEMREV_V16QI);
17228
17229 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17230 VSX_BUILTIN_VEC_LD);
17231 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17232 VSX_BUILTIN_VEC_ST);
17233 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17234 VSX_BUILTIN_VEC_XL);
17235 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17236 VSX_BUILTIN_VEC_XL_BE);
17237 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17238 VSX_BUILTIN_VEC_XST);
17239 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17240 VSX_BUILTIN_VEC_XST_BE);
17241
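/* Usage sketch (editorial): the overloaded loads and stores above are
   normally reached through altivec.h, e.g.

     vector int v = vec_xl (0, ptr);      possibly unaligned vector load
     vec_xst (v, 16, ptr);                store at byte offset 16

   where the long argument is a byte offset added to the pointer, and the
   _be variants keep big-endian element order regardless of the target's
   endianness.  */
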
17242 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17243 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17244 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17245
17246 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17247 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17248 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17249 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17250 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17251 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17252 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17253 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17254 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17255 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17256 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17257 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17258
17259 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17260 ALTIVEC_BUILTIN_VEC_ADDE);
17261 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17262 ALTIVEC_BUILTIN_VEC_ADDEC);
17263 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17264 ALTIVEC_BUILTIN_VEC_CMPNE);
17265 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17266 ALTIVEC_BUILTIN_VEC_MUL);
17267 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17268 ALTIVEC_BUILTIN_VEC_SUBE);
17269 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17270 ALTIVEC_BUILTIN_VEC_SUBEC);
17271
17272 /* Cell builtins. */
17273 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17274 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17275 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17276 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17277
17278 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17279 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17280 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17281 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17282
17283 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17284 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17285 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17286 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17287
17288 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17289 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17290 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17291 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17292
17293 if (TARGET_P9_VECTOR)
17294 {
17295 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17296 P9V_BUILTIN_STXVL);
17297 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17298 P9V_BUILTIN_XST_LEN_R);
17299 }
17300
17301 /* Add the DST variants. */
17302 d = bdesc_dst;
17303 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17304 {
17305 HOST_WIDE_INT mask = d->mask;
17306
17307 /* It is expected that these dst built-in functions may have
17308 d->icode equal to CODE_FOR_nothing. */
17309 if ((mask & builtin_mask) != mask)
17310 {
17311 if (TARGET_DEBUG_BUILTIN)
17312 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17313 d->name);
17314 continue;
17315 }
17316 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17317 }
17318
17319 /* Initialize the predicates. */
17320 d = bdesc_altivec_preds;
17321 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17322 {
17323 machine_mode mode1;
17324 tree type;
17325 HOST_WIDE_INT mask = d->mask;
17326
17327 if ((mask & builtin_mask) != mask)
17328 {
17329 if (TARGET_DEBUG_BUILTIN)
17330 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17331 d->name);
17332 continue;
17333 }
17334
17335 if (rs6000_overloaded_builtin_p (d->code))
17336 mode1 = VOIDmode;
17337 else
17338 {
17339 /* Cannot define builtin if the instruction is disabled. */
17340 gcc_assert (d->icode != CODE_FOR_nothing);
17341 mode1 = insn_data[d->icode].operand[1].mode;
17342 }
17343
17344 switch (mode1)
17345 {
17346 case E_VOIDmode:
17347 type = int_ftype_int_opaque_opaque;
17348 break;
17349 case E_V2DImode:
17350 type = int_ftype_int_v2di_v2di;
17351 break;
17352 case E_V4SImode:
17353 type = int_ftype_int_v4si_v4si;
17354 break;
17355 case E_V8HImode:
17356 type = int_ftype_int_v8hi_v8hi;
17357 break;
17358 case E_V16QImode:
17359 type = int_ftype_int_v16qi_v16qi;
17360 break;
17361 case E_V4SFmode:
17362 type = int_ftype_int_v4sf_v4sf;
17363 break;
17364 case E_V2DFmode:
17365 type = int_ftype_int_v2df_v2df;
17366 break;
17367 default:
17368 gcc_unreachable ();
17369 }
17370
17371 def_builtin (d->name, type, d->code);
17372 }
17373
17374 /* Initialize the abs* operators. */
17375 d = bdesc_abs;
17376 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17377 {
17378 machine_mode mode0;
17379 tree type;
17380 HOST_WIDE_INT mask = d->mask;
17381
17382 if ((mask & builtin_mask) != mask)
17383 {
17384 if (TARGET_DEBUG_BUILTIN)
17385 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17386 d->name);
17387 continue;
17388 }
17389
17390 /* Cannot define builtin if the instruction is disabled. */
17391 gcc_assert (d->icode != CODE_FOR_nothing);
17392 mode0 = insn_data[d->icode].operand[0].mode;
17393
17394 switch (mode0)
17395 {
17396 case E_V2DImode:
17397 type = v2di_ftype_v2di;
17398 break;
17399 case E_V4SImode:
17400 type = v4si_ftype_v4si;
17401 break;
17402 case E_V8HImode:
17403 type = v8hi_ftype_v8hi;
17404 break;
17405 case E_V16QImode:
17406 type = v16qi_ftype_v16qi;
17407 break;
17408 case E_V4SFmode:
17409 type = v4sf_ftype_v4sf;
17410 break;
17411 case E_V2DFmode:
17412 type = v2df_ftype_v2df;
17413 break;
17414 default:
17415 gcc_unreachable ();
17416 }
17417
17418 def_builtin (d->name, type, d->code);
17419 }
17420
17421 /* Initialize target builtin that implements
17422 targetm.vectorize.builtin_mask_for_load. */
17423
17424 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17425 v16qi_ftype_long_pcvoid,
17426 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17427 BUILT_IN_MD, NULL, NULL_TREE);
17428 TREE_READONLY (decl) = 1;
17429 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17430 altivec_builtin_mask_for_load = decl;
17431
17432 /* Access to the vec_init patterns. */
17433 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17434 integer_type_node, integer_type_node,
17435 integer_type_node, NULL_TREE);
17436 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17437
17438 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17439 short_integer_type_node,
17440 short_integer_type_node,
17441 short_integer_type_node,
17442 short_integer_type_node,
17443 short_integer_type_node,
17444 short_integer_type_node,
17445 short_integer_type_node, NULL_TREE);
17446 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17447
17448 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17449 char_type_node, char_type_node,
17450 char_type_node, char_type_node,
17451 char_type_node, char_type_node,
17452 char_type_node, char_type_node,
17453 char_type_node, char_type_node,
17454 char_type_node, char_type_node,
17455 char_type_node, char_type_node,
17456 char_type_node, NULL_TREE);
17457 def_builtin ("__builtin_vec_init_v16qi", ftype,
17458 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17459
17460 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17461 float_type_node, float_type_node,
17462 float_type_node, NULL_TREE);
17463 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17464
17465 /* VSX builtins. */
17466 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17467 double_type_node, NULL_TREE);
17468 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17469
17470 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17471 intDI_type_node, NULL_TREE);
17472 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17473
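/* Editorial note: user code rarely calls these vec_init builtins by name;
   they exist so vector constructors can be expanded through the vec_init
   patterns, making a call such as

     __builtin_vec_init_v4si (1, 2, 3, 4)

   behave like the literal (vector int){1, 2, 3, 4}.  The vec_set and
   vec_extract builtins below play the same role for single-element
   assignment and extraction.  */
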
17474 /* Access to the vec_set patterns. */
17475 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17476 intSI_type_node,
17477 integer_type_node, NULL_TREE);
17478 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17479
17480 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17481 intHI_type_node,
17482 integer_type_node, NULL_TREE);
17483 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17484
17485 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17486 intQI_type_node,
17487 integer_type_node, NULL_TREE);
17488 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17489
17490 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17491 float_type_node,
17492 integer_type_node, NULL_TREE);
17493 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17494
17495 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17496 double_type_node,
17497 integer_type_node, NULL_TREE);
17498 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17499
17500 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17501 intDI_type_node,
17502 integer_type_node, NULL_TREE);
17503 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17504
17505 /* Access to the vec_extract patterns. */
17506 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17507 integer_type_node, NULL_TREE);
17508 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17509
17510 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17511 integer_type_node, NULL_TREE);
17512 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17513
17514 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17515 integer_type_node, NULL_TREE);
17516 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17517
17518 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17519 integer_type_node, NULL_TREE);
17520 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17521
17522 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17523 integer_type_node, NULL_TREE);
17524 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17525
17526 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17527 integer_type_node, NULL_TREE);
17528 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17529
17530
17531 if (V1TI_type_node)
17532 {
17533 tree v1ti_ftype_long_pcvoid
17534 = build_function_type_list (V1TI_type_node,
17535 long_integer_type_node, pcvoid_type_node,
17536 NULL_TREE);
17537 tree void_ftype_v1ti_long_pvoid
17538 = build_function_type_list (void_type_node,
17539 V1TI_type_node, long_integer_type_node,
17540 pvoid_type_node, NULL_TREE);
17541 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17542 VSX_BUILTIN_LD_ELEMREV_V1TI);
17543 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17544 VSX_BUILTIN_LXVD2X_V1TI);
17545 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17546 VSX_BUILTIN_STXVD2X_V1TI);
17547 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17548 NULL_TREE);
17549 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17550 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17551 intTI_type_node,
17552 integer_type_node, NULL_TREE);
17553 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17554 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17555 integer_type_node, NULL_TREE);
17556 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17557 }
17558
17559 }
17560
17561 static void
17562 htm_init_builtins (void)
17563 {
17564 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17565 const struct builtin_description *d;
17566 size_t i;
17567
17568 d = bdesc_htm;
17569 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17570 {
17571 tree op[MAX_HTM_OPERANDS], type;
17572 HOST_WIDE_INT mask = d->mask;
17573 unsigned attr = rs6000_builtin_info[d->code].attr;
17574 bool void_func = (attr & RS6000_BTC_VOID);
17575 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17576 int nopnds = 0;
17577 tree gpr_type_node;
17578 tree rettype;
17579 tree argtype;
17580
17581 /* It is expected that these htm built-in functions may have
17582 d->icode equal to CODE_FOR_nothing. */
17583
17584 if (TARGET_32BIT && TARGET_POWERPC64)
17585 gpr_type_node = long_long_unsigned_type_node;
17586 else
17587 gpr_type_node = long_unsigned_type_node;
17588
17589 if (attr & RS6000_BTC_SPR)
17590 {
17591 rettype = gpr_type_node;
17592 argtype = gpr_type_node;
17593 }
17594 else if (d->code == HTM_BUILTIN_TABORTDC
17595 || d->code == HTM_BUILTIN_TABORTDCI)
17596 {
17597 rettype = unsigned_type_node;
17598 argtype = gpr_type_node;
17599 }
17600 else
17601 {
17602 rettype = unsigned_type_node;
17603 argtype = unsigned_type_node;
17604 }
17605
17606 if ((mask & builtin_mask) != mask)
17607 {
17608 if (TARGET_DEBUG_BUILTIN)
17609 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17610 continue;
17611 }
17612
17613 if (d->name == 0)
17614 {
17615 if (TARGET_DEBUG_BUILTIN)
17616 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17617 (long unsigned) i);
17618 continue;
17619 }
17620
17621 op[nopnds++] = (void_func) ? void_type_node : rettype;
17622
17623 if (attr_args == RS6000_BTC_UNARY)
17624 op[nopnds++] = argtype;
17625 else if (attr_args == RS6000_BTC_BINARY)
17626 {
17627 op[nopnds++] = argtype;
17628 op[nopnds++] = argtype;
17629 }
17630 else if (attr_args == RS6000_BTC_TERNARY)
17631 {
17632 op[nopnds++] = argtype;
17633 op[nopnds++] = argtype;
17634 op[nopnds++] = argtype;
17635 }
17636
17637 switch (nopnds)
17638 {
17639 case 1:
17640 type = build_function_type_list (op[0], NULL_TREE);
17641 break;
17642 case 2:
17643 type = build_function_type_list (op[0], op[1], NULL_TREE);
17644 break;
17645 case 3:
17646 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17647 break;
17648 case 4:
17649 type = build_function_type_list (op[0], op[1], op[2], op[3],
17650 NULL_TREE);
17651 break;
17652 default:
17653 gcc_unreachable ();
17654 }
17655
17656 def_builtin (d->name, type, d->code);
17657 }
17658 }
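
/* Usage sketch (editorial) for the HTM builtins defined above:

     if (__builtin_tbegin (0))
       {
         ... transactional region ...
         __builtin_tend (0);
       }
     else
       ... fallback path when the transaction fails to start ...

   Note how gpr_type_node above widens the SPR accessors to long long when
   -m32 is combined with -mpowerpc64.  */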
17659
17660 /* Hash function for builtin functions with up to 3 arguments and a return
17661 type. */
17662 hashval_t
17663 builtin_hasher::hash (builtin_hash_struct *bh)
17664 {
17665 unsigned ret = 0;
17666 int i;
17667
17668 for (i = 0; i < 4; i++)
17669 {
17670 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17671 ret = (ret * 2) + bh->uns_p[i];
17672 }
17673
17674 return ret;
17675 }
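
/* Worked example (editorial): a DFmode = DFmode op DFmode builtin such as
   __builtin_recipdiv presents the pairs (DFmode,0) (DFmode,0) (DFmode,0)
   (VOIDmode,0) to the loop above, which folds them into a single integer;
   two prototypes are treated as the same entry only when every mode and
   signedness flag agrees, which builtin_hasher::equal below verifies on
   lookup.  */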
17676
17677 /* Compare builtin hash entries P1 and P2 for equivalence. */
17678 bool
17679 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17680 {
17681 return ((p1->mode[0] == p2->mode[0])
17682 && (p1->mode[1] == p2->mode[1])
17683 && (p1->mode[2] == p2->mode[2])
17684 && (p1->mode[3] == p2->mode[3])
17685 && (p1->uns_p[0] == p2->uns_p[0])
17686 && (p1->uns_p[1] == p2->uns_p[1])
17687 && (p1->uns_p[2] == p2->uns_p[2])
17688 && (p1->uns_p[3] == p2->uns_p[3]));
17689 }
17690
17691 /* Map types for builtin functions with an explicit return type and up to 3
17692 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
17693 of the missing arguments. */
17694 static tree
17695 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17696 machine_mode mode_arg1, machine_mode mode_arg2,
17697 enum rs6000_builtins builtin, const char *name)
17698 {
17699 struct builtin_hash_struct h;
17700 struct builtin_hash_struct *h2;
17701 int num_args = 3;
17702 int i;
17703 tree ret_type = NULL_TREE;
17704 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17705
17706 /* Create builtin_hash_table. */
17707 if (builtin_hash_table == NULL)
17708 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17709
17710 h.type = NULL_TREE;
17711 h.mode[0] = mode_ret;
17712 h.mode[1] = mode_arg0;
17713 h.mode[2] = mode_arg1;
17714 h.mode[3] = mode_arg2;
17715 h.uns_p[0] = 0;
17716 h.uns_p[1] = 0;
17717 h.uns_p[2] = 0;
17718 h.uns_p[3] = 0;
17719
17720 /* If the builtin function produces unsigned results or takes unsigned
17721 arguments, and it is returned as a decl for the vectorizer (such as
17722 widening multiplies and permutes), make sure the arguments and return
17723 value are correctly typed. */
17724 switch (builtin)
17725 {
17726 /* unsigned 1 argument functions. */
17727 case CRYPTO_BUILTIN_VSBOX:
17728 case CRYPTO_BUILTIN_VSBOX_BE:
17729 case P8V_BUILTIN_VGBBD:
17730 case MISC_BUILTIN_CDTBCD:
17731 case MISC_BUILTIN_CBCDTD:
17732 h.uns_p[0] = 1;
17733 h.uns_p[1] = 1;
17734 break;
17735
17736 /* unsigned 2 argument functions. */
17737 case ALTIVEC_BUILTIN_VMULEUB:
17738 case ALTIVEC_BUILTIN_VMULEUH:
17739 case P8V_BUILTIN_VMULEUW:
17740 case ALTIVEC_BUILTIN_VMULOUB:
17741 case ALTIVEC_BUILTIN_VMULOUH:
17742 case P8V_BUILTIN_VMULOUW:
17743 case CRYPTO_BUILTIN_VCIPHER:
17744 case CRYPTO_BUILTIN_VCIPHER_BE:
17745 case CRYPTO_BUILTIN_VCIPHERLAST:
17746 case CRYPTO_BUILTIN_VCIPHERLAST_BE:
17747 case CRYPTO_BUILTIN_VNCIPHER:
17748 case CRYPTO_BUILTIN_VNCIPHER_BE:
17749 case CRYPTO_BUILTIN_VNCIPHERLAST:
17750 case CRYPTO_BUILTIN_VNCIPHERLAST_BE:
17751 case CRYPTO_BUILTIN_VPMSUMB:
17752 case CRYPTO_BUILTIN_VPMSUMH:
17753 case CRYPTO_BUILTIN_VPMSUMW:
17754 case CRYPTO_BUILTIN_VPMSUMD:
17755 case CRYPTO_BUILTIN_VPMSUM:
17756 case MISC_BUILTIN_ADDG6S:
17757 case MISC_BUILTIN_DIVWEU:
17758 case MISC_BUILTIN_DIVDEU:
17759 case VSX_BUILTIN_UDIV_V2DI:
17760 case ALTIVEC_BUILTIN_VMAXUB:
17761 case ALTIVEC_BUILTIN_VMINUB:
17762 case ALTIVEC_BUILTIN_VMAXUH:
17763 case ALTIVEC_BUILTIN_VMINUH:
17764 case ALTIVEC_BUILTIN_VMAXUW:
17765 case ALTIVEC_BUILTIN_VMINUW:
17766 case P8V_BUILTIN_VMAXUD:
17767 case P8V_BUILTIN_VMINUD:
17768 h.uns_p[0] = 1;
17769 h.uns_p[1] = 1;
17770 h.uns_p[2] = 1;
17771 break;
17772
17773 /* unsigned 3 argument functions. */
17774 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17775 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17776 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17777 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17778 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17779 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17780 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17781 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17782 case VSX_BUILTIN_VPERM_16QI_UNS:
17783 case VSX_BUILTIN_VPERM_8HI_UNS:
17784 case VSX_BUILTIN_VPERM_4SI_UNS:
17785 case VSX_BUILTIN_VPERM_2DI_UNS:
17786 case VSX_BUILTIN_XXSEL_16QI_UNS:
17787 case VSX_BUILTIN_XXSEL_8HI_UNS:
17788 case VSX_BUILTIN_XXSEL_4SI_UNS:
17789 case VSX_BUILTIN_XXSEL_2DI_UNS:
17790 case CRYPTO_BUILTIN_VPERMXOR:
17791 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17792 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17793 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17794 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17795 case CRYPTO_BUILTIN_VSHASIGMAW:
17796 case CRYPTO_BUILTIN_VSHASIGMAD:
17797 case CRYPTO_BUILTIN_VSHASIGMA:
17798 h.uns_p[0] = 1;
17799 h.uns_p[1] = 1;
17800 h.uns_p[2] = 1;
17801 h.uns_p[3] = 1;
17802 break;
17803
17804 /* signed permute functions with unsigned char mask. */
17805 case ALTIVEC_BUILTIN_VPERM_16QI:
17806 case ALTIVEC_BUILTIN_VPERM_8HI:
17807 case ALTIVEC_BUILTIN_VPERM_4SI:
17808 case ALTIVEC_BUILTIN_VPERM_4SF:
17809 case ALTIVEC_BUILTIN_VPERM_2DI:
17810 case ALTIVEC_BUILTIN_VPERM_2DF:
17811 case VSX_BUILTIN_VPERM_16QI:
17812 case VSX_BUILTIN_VPERM_8HI:
17813 case VSX_BUILTIN_VPERM_4SI:
17814 case VSX_BUILTIN_VPERM_4SF:
17815 case VSX_BUILTIN_VPERM_2DI:
17816 case VSX_BUILTIN_VPERM_2DF:
17817 h.uns_p[3] = 1;
17818 break;
17819
17820 /* unsigned args, signed return. */
17821 case VSX_BUILTIN_XVCVUXDSP:
17822 case VSX_BUILTIN_XVCVUXDDP_UNS:
17823 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17824 h.uns_p[1] = 1;
17825 break;
17826
17827 /* signed args, unsigned return. */
17828 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17829 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17830 case MISC_BUILTIN_UNPACK_TD:
17831 case MISC_BUILTIN_UNPACK_V1TI:
17832 h.uns_p[0] = 1;
17833 break;
17834
17835 /* unsigned arguments, bool return (compares). */
17836 case ALTIVEC_BUILTIN_VCMPEQUB:
17837 case ALTIVEC_BUILTIN_VCMPEQUH:
17838 case ALTIVEC_BUILTIN_VCMPEQUW:
17839 case P8V_BUILTIN_VCMPEQUD:
17840 case VSX_BUILTIN_CMPGE_U16QI:
17841 case VSX_BUILTIN_CMPGE_U8HI:
17842 case VSX_BUILTIN_CMPGE_U4SI:
17843 case VSX_BUILTIN_CMPGE_U2DI:
17844 case ALTIVEC_BUILTIN_VCMPGTUB:
17845 case ALTIVEC_BUILTIN_VCMPGTUH:
17846 case ALTIVEC_BUILTIN_VCMPGTUW:
17847 case P8V_BUILTIN_VCMPGTUD:
17848 h.uns_p[1] = 1;
17849 h.uns_p[2] = 1;
17850 break;
17851
17852 /* unsigned arguments for 128-bit pack instructions. */
17853 case MISC_BUILTIN_PACK_TD:
17854 case MISC_BUILTIN_PACK_V1TI:
17855 h.uns_p[1] = 1;
17856 h.uns_p[2] = 1;
17857 break;
17858
17859 /* unsigned second arguments (vector shift right). */
17860 case ALTIVEC_BUILTIN_VSRB:
17861 case ALTIVEC_BUILTIN_VSRH:
17862 case ALTIVEC_BUILTIN_VSRW:
17863 case P8V_BUILTIN_VSRD:
17864 h.uns_p[2] = 1;
17865 break;
17866
17867 default:
17868 break;
17869 }
17870
17871 /* Figure out how many args are present. */
17872 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17873 num_args--;
17874
17875 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17876 if (!ret_type && h.uns_p[0])
17877 ret_type = builtin_mode_to_type[h.mode[0]][0];
17878
17879 if (!ret_type)
17880 fatal_error (input_location,
17881 "internal error: builtin function %qs had an unexpected "
17882 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17883
17884 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17885 arg_type[i] = NULL_TREE;
17886
17887 for (i = 0; i < num_args; i++)
17888 {
17889 int m = (int) h.mode[i+1];
17890 int uns_p = h.uns_p[i+1];
17891
17892 arg_type[i] = builtin_mode_to_type[m][uns_p];
17893 if (!arg_type[i] && uns_p)
17894 arg_type[i] = builtin_mode_to_type[m][0];
17895
17896 if (!arg_type[i])
17897 fatal_error (input_location,
17898 "internal error: builtin function %qs, argument %d "
17899 "had unexpected argument type %qs", name, i,
17900 GET_MODE_NAME (m));
17901 }
17902
17903 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17904 if (*found == NULL)
17905 {
17906 h2 = ggc_alloc<builtin_hash_struct> ();
17907 *h2 = h;
17908 *found = h2;
17909
17910 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17911 arg_type[2], NULL_TREE);
17912 }
17913
17914 return (*found)->type;
17915 }
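
/* Worked example (editorial): __builtin_altivec_vmuleub falls into the
   "unsigned 2 argument functions" case above, so uns_p[0..2] are all set
   and the constructed type is effectively

     vector unsigned short f (vector unsigned char, vector unsigned char);

   taken from builtin_mode_to_type[V8HImode][1] and
   builtin_mode_to_type[V16QImode][1].  */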
17916
17917 static void
17918 rs6000_common_init_builtins (void)
17919 {
17920 const struct builtin_description *d;
17921 size_t i;
17922
17923 tree opaque_ftype_opaque = NULL_TREE;
17924 tree opaque_ftype_opaque_opaque = NULL_TREE;
17925 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17926 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17927
17928 /* Create Altivec and VSX builtins on machines with at least the
17929 general purpose extensions (970 and newer) to allow the use of
17930 the target attribute. */
17931
17932 if (TARGET_EXTRA_BUILTINS)
17933 builtin_mask |= RS6000_BTM_COMMON;
17934
17935 /* Add the ternary operators. */
17936 d = bdesc_3arg;
17937 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17938 {
17939 tree type;
17940 HOST_WIDE_INT mask = d->mask;
17941
17942 if ((mask & builtin_mask) != mask)
17943 {
17944 if (TARGET_DEBUG_BUILTIN)
17945 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17946 continue;
17947 }
17948
17949 if (rs6000_overloaded_builtin_p (d->code))
17950 {
17951 if (! (type = opaque_ftype_opaque_opaque_opaque))
17952 type = opaque_ftype_opaque_opaque_opaque
17953 = build_function_type_list (opaque_V4SI_type_node,
17954 opaque_V4SI_type_node,
17955 opaque_V4SI_type_node,
17956 opaque_V4SI_type_node,
17957 NULL_TREE);
17958 }
17959 else
17960 {
17961 enum insn_code icode = d->icode;
17962 if (d->name == 0)
17963 {
17964 if (TARGET_DEBUG_BUILTIN)
17965 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17966 (long unsigned)i);
17967
17968 continue;
17969 }
17970
17971 if (icode == CODE_FOR_nothing)
17972 {
17973 if (TARGET_DEBUG_BUILTIN)
17974 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17975 d->name);
17976
17977 continue;
17978 }
17979
17980 type = builtin_function_type (insn_data[icode].operand[0].mode,
17981 insn_data[icode].operand[1].mode,
17982 insn_data[icode].operand[2].mode,
17983 insn_data[icode].operand[3].mode,
17984 d->code, d->name);
17985 }
17986
17987 def_builtin (d->name, type, d->code);
17988 }
17989
17990 /* Add the binary operators. */
17991 d = bdesc_2arg;
17992 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17993 {
17994 machine_mode mode0, mode1, mode2;
17995 tree type;
17996 HOST_WIDE_INT mask = d->mask;
17997
17998 if ((mask & builtin_mask) != mask)
17999 {
18000 if (TARGET_DEBUG_BUILTIN)
18001 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18002 continue;
18003 }
18004
18005 if (rs6000_overloaded_builtin_p (d->code))
18006 {
18007 if (! (type = opaque_ftype_opaque_opaque))
18008 type = opaque_ftype_opaque_opaque
18009 = build_function_type_list (opaque_V4SI_type_node,
18010 opaque_V4SI_type_node,
18011 opaque_V4SI_type_node,
18012 NULL_TREE);
18013 }
18014 else
18015 {
18016 enum insn_code icode = d->icode;
18017 if (d->name == 0)
18018 {
18019 if (TARGET_DEBUG_BUILTIN)
18020 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
18021 (long unsigned)i);
18022
18023 continue;
18024 }
18025
18026 if (icode == CODE_FOR_nothing)
18027 {
18028 if (TARGET_DEBUG_BUILTIN)
18029 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18030 d->name);
18031
18032 continue;
18033 }
18034
18035 mode0 = insn_data[icode].operand[0].mode;
18036 mode1 = insn_data[icode].operand[1].mode;
18037 mode2 = insn_data[icode].operand[2].mode;
18038
18039 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18040 d->code, d->name);
18041 }
18042
18043 def_builtin (d->name, type, d->code);
18044 }
18045
18046 /* Add the simple unary operators. */
18047 d = bdesc_1arg;
18048 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18049 {
18050 machine_mode mode0, mode1;
18051 tree type;
18052 HOST_WIDE_INT mask = d->mask;
18053
18054 if ((mask & builtin_mask) != mask)
18055 {
18056 if (TARGET_DEBUG_BUILTIN)
18057 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18058 continue;
18059 }
18060
18061 if (rs6000_overloaded_builtin_p (d->code))
18062 {
18063 if (! (type = opaque_ftype_opaque))
18064 type = opaque_ftype_opaque
18065 = build_function_type_list (opaque_V4SI_type_node,
18066 opaque_V4SI_type_node,
18067 NULL_TREE);
18068 }
18069 else
18070 {
18071 enum insn_code icode = d->icode;
18072 if (d->name == 0)
18073 {
18074 if (TARGET_DEBUG_BUILTIN)
18075 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
18076 (long unsigned)i);
18077
18078 continue;
18079 }
18080
18081 if (icode == CODE_FOR_nothing)
18082 {
18083 if (TARGET_DEBUG_BUILTIN)
18084 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18085 d->name);
18086
18087 continue;
18088 }
18089
18090 mode0 = insn_data[icode].operand[0].mode;
18091 mode1 = insn_data[icode].operand[1].mode;
18092
18093 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18094 d->code, d->name);
18095 }
18096
18097 def_builtin (d->name, type, d->code);
18098 }
18099
18100 /* Add the simple no-argument operators. */
18101 d = bdesc_0arg;
18102 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18103 {
18104 machine_mode mode0;
18105 tree type;
18106 HOST_WIDE_INT mask = d->mask;
18107
18108 if ((mask & builtin_mask) != mask)
18109 {
18110 if (TARGET_DEBUG_BUILTIN)
18111 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18112 continue;
18113 }
18114 if (rs6000_overloaded_builtin_p (d->code))
18115 {
18116 if (!opaque_ftype_opaque)
18117 opaque_ftype_opaque
18118 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18119 type = opaque_ftype_opaque;
18120 }
18121 else
18122 {
18123 enum insn_code icode = d->icode;
18124 if (d->name == 0)
18125 {
18126 if (TARGET_DEBUG_BUILTIN)
18127 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18128 (long unsigned) i);
18129 continue;
18130 }
18131 if (icode == CODE_FOR_nothing)
18132 {
18133 if (TARGET_DEBUG_BUILTIN)
18134 fprintf (stderr,
18135 "rs6000_builtin, skip no-argument %s (no code)\n",
18136 d->name);
18137 continue;
18138 }
18139 mode0 = insn_data[icode].operand[0].mode;
18140 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18141 d->code, d->name);
18142 }
18143 def_builtin (d->name, type, d->code);
18144 }
18145 }
18146
18147 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18148 static void
18149 init_float128_ibm (machine_mode mode)
18150 {
18151 if (!TARGET_XL_COMPAT)
18152 {
18153 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18154 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18155 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18156 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18157
18158 if (!TARGET_HARD_FLOAT)
18159 {
18160 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18161 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18162 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18163 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18164 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18165 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18166 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18167 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18168
18169 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18170 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18171 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18172 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18173 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18174 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18175 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18176 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18177 }
18178 }
18179 else
18180 {
18181 set_optab_libfunc (add_optab, mode, "_xlqadd");
18182 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18183 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18184 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18185 }
18186
18187 /* Add various conversions for IFmode to use the traditional TFmode
18188 names. */
18189 if (mode == IFmode)
18190 {
18191 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
18192 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
18193 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
18194 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
18195 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
18196 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
18197
18198 if (TARGET_POWERPC64)
18199 {
18200 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18201 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18202 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18203 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18204 }
18205 }
18206 }
18207
18208 /* Create a decl for either complex long double multiply or complex long double
18209 divide when long double is IEEE 128-bit floating point. We can't use
18210 __multc3 and __divtc3 because the original long double using IBM extended
18211 double used those names. The complex multiply/divide functions are encoded
18212 as builtin functions with a complex result and 4 scalar inputs. */
18213
18214 static void
18215 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
18216 {
18217 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
18218 name, NULL_TREE);
18219
18220 set_builtin_decl (fncode, fndecl, true);
18221
18222 if (TARGET_DEBUG_BUILTIN)
18223 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
18226 }
18227
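/* Editorial sketch of the routines registered through this helper:

     _Complex long double __mulkc3 (long double a, long double b,
                                    long double c, long double d);

   returns (a + b*i) * (c + d*i), and __divkc3 is the analogous quotient,
   with all four scalar inputs passed separately as described above.  */
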
18228 /* Set up IEEE 128-bit floating point routines. Use different names if the
18229 arguments can be passed in a vector register. The historical PowerPC
18230 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18231 continue to use that if we aren't using vector registers to pass IEEE
18232 128-bit floating point. */
18233
18234 static void
18235 init_float128_ieee (machine_mode mode)
18236 {
18237 if (FLOAT128_VECTOR_P (mode))
18238 {
18239 static bool complex_muldiv_init_p = false;
18240
18241 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
18242 we have clone or target attributes, this will be called a second
18243 time. We want to create the built-in function only once. */
18244 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
18245 {
18246 complex_muldiv_init_p = true;
18247 built_in_function fncode_mul =
18248 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
18249 - MIN_MODE_COMPLEX_FLOAT);
18250 built_in_function fncode_div =
18251 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
18252 - MIN_MODE_COMPLEX_FLOAT);
18253
18254 tree fntype = build_function_type_list (complex_long_double_type_node,
18255 long_double_type_node,
18256 long_double_type_node,
18257 long_double_type_node,
18258 long_double_type_node,
18259 NULL_TREE);
18260
18261 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
18262 create_complex_muldiv ("__divkc3", fncode_div, fntype);
18263 }
18264
18265 set_optab_libfunc (add_optab, mode, "__addkf3");
18266 set_optab_libfunc (sub_optab, mode, "__subkf3");
18267 set_optab_libfunc (neg_optab, mode, "__negkf2");
18268 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18269 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18270 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18271 set_optab_libfunc (abs_optab, mode, "__abskf2");
18272 set_optab_libfunc (powi_optab, mode, "__powikf2");
18273
18274 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18275 set_optab_libfunc (ne_optab, mode, "__nekf2");
18276 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18277 set_optab_libfunc (ge_optab, mode, "__gekf2");
18278 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18279 set_optab_libfunc (le_optab, mode, "__lekf2");
18280 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18281
18282 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18283 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18284 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18285 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18286
18287 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
18288 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18289 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
18290
18291 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
18292 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18293 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
18294
18295 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
18296 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
18297 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
18298 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
18299 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
18300 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
18301
18302 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18303 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18304 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18305 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18306
18307 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18308 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18309 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18310 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18311
18312 if (TARGET_POWERPC64)
18313 {
18314 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18315 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18316 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18317 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18318 }
18319 }
18320
18321 else
18322 {
18323 set_optab_libfunc (add_optab, mode, "_q_add");
18324 set_optab_libfunc (sub_optab, mode, "_q_sub");
18325 set_optab_libfunc (neg_optab, mode, "_q_neg");
18326 set_optab_libfunc (smul_optab, mode, "_q_mul");
18327 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18328 if (TARGET_PPC_GPOPT)
18329 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18330
18331 set_optab_libfunc (eq_optab, mode, "_q_feq");
18332 set_optab_libfunc (ne_optab, mode, "_q_fne");
18333 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18334 set_optab_libfunc (ge_optab, mode, "_q_fge");
18335 set_optab_libfunc (lt_optab, mode, "_q_flt");
18336 set_optab_libfunc (le_optab, mode, "_q_fle");
18337
18338 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18339 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18340 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18341 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18342 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18343 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18344 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18345 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18346 }
18347 }
18348
18349 static void
18350 rs6000_init_libfuncs (void)
18351 {
18352 /* __float128 support. */
18353 if (TARGET_FLOAT128_TYPE)
18354 {
18355 init_float128_ibm (IFmode);
18356 init_float128_ieee (KFmode);
18357 }
18358
18359 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18360 if (TARGET_LONG_DOUBLE_128)
18361 {
18362 if (!TARGET_IEEEQUAD)
18363 init_float128_ibm (TFmode);
18364
18365 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18366 else
18367 init_float128_ieee (TFmode);
18368 }
18369 }
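
/* As an illustration (hedged, for the reader's orientation): with the IEEE
   128-bit names registered above, a TFmode multiply on a target where long
   double is IEEE 128-bit expands to a call to __mulkf3, and a comparison
   such as >= becomes a call to __gekf2 whose integer result is then tested
   against zero.  */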
18370
18371 /* Emit a potentially record-form instruction, setting DST from SRC.
18372 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18373 signed comparison of DST with zero. If DOT is 1, the generated RTL
18374 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18375 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18376 a separate COMPARE. */
18377
18378 void
18379 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18380 {
18381 if (dot == 0)
18382 {
18383 emit_move_insn (dst, src);
18384 return;
18385 }
18386
18387 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18388 {
18389 emit_move_insn (dst, src);
18390 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18391 return;
18392 }
18393
18394 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18395 if (dot == 1)
18396 {
18397 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18398 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18399 }
18400 else
18401 {
18402 rtx set = gen_rtx_SET (dst, src);
18403 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18404 }
18405 }
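
/* For example (a sketch of the emitted RTL, not a specific insn): with
   CCREG being CR0 and DOT == 1, the code above emits the single PARALLEL

     (parallel [(set ccreg (compare:CC src (const_int 0)))
		(clobber dst)])

   which matches a record-form ("dot") instruction; DOT == 2 replaces the
   CLOBBER with (set dst src) so that the DST result is kept as well.  */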
18406
18407 \f
18408 /* A validation routine: say whether CODE, a condition code, and MODE
18409 match. The other alternatives either don't make sense or should
18410 never be generated. */
18411
18412 void
18413 validate_condition_mode (enum rtx_code code, machine_mode mode)
18414 {
18415 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18416 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18417 && GET_MODE_CLASS (mode) == MODE_CC);
18418
18419 /* These don't make sense. */
18420 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18421 || mode != CCUNSmode);
18422
18423 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18424 || mode == CCUNSmode);
18425
18426 gcc_assert (mode == CCFPmode
18427 || (code != ORDERED && code != UNORDERED
18428 && code != UNEQ && code != LTGT
18429 && code != UNGT && code != UNLT
18430 && code != UNGE && code != UNLE));
18431
18432 /* These should never be generated except for
18433 flag_finite_math_only. */
18434 gcc_assert (mode != CCFPmode
18435 || flag_finite_math_only
18436 || (code != LE && code != GE
18437 && code != UNEQ && code != LTGT
18438 && code != UNGT && code != UNLT));
18439
18440 /* These are invalid; the information is not there. */
18441 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18442 }
18443
18444 \f
18445 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18446 rldicl, rldicr, or rldic instruction in mode MODE. If so, and if E is
18447 non-null, store there the bit offset (counted from the right) where
18448 the single stretch of 1 bits begins; and similarly for B, the bit
18449 offset where it ends. */
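
/* For example (illustrative, worked by hand): in DImode the mask 0x1ff8 is
   a single run of ones from bit 3 through bit 12, so rs6000_is_valid_mask
   succeeds with *E = 3 and *B = 12; the SImode mask 0xff0000ff is a single
   run that wraps around through the most significant bit and is accepted
   with *E = 24 and *B = 7.  */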
18450
18451 bool
18452 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18453 {
18454 unsigned HOST_WIDE_INT val = INTVAL (mask);
18455 unsigned HOST_WIDE_INT bit;
18456 int nb, ne;
18457 int n = GET_MODE_PRECISION (mode);
18458
18459 if (mode != DImode && mode != SImode)
18460 return false;
18461
18462 if (INTVAL (mask) >= 0)
18463 {
18464 bit = val & -val;
18465 ne = exact_log2 (bit);
18466 nb = exact_log2 (val + bit);
18467 }
18468 else if (val + 1 == 0)
18469 {
18470 nb = n;
18471 ne = 0;
18472 }
18473 else if (val & 1)
18474 {
18475 val = ~val;
18476 bit = val & -val;
18477 nb = exact_log2 (bit);
18478 ne = exact_log2 (val + bit);
18479 }
18480 else
18481 {
18482 bit = val & -val;
18483 ne = exact_log2 (bit);
18484 if (val + bit == 0)
18485 nb = n;
18486 else
18487 nb = 0;
18488 }
18489
18490 nb--;
18491
18492 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18493 return false;
18494
18495 if (b)
18496 *b = nb;
18497 if (e)
18498 *e = ne;
18499
18500 return true;
18501 }
18502
18503 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18504 or rldicr instruction, to implement an AND with it in mode MODE. */
18505
18506 bool
18507 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18508 {
18509 int nb, ne;
18510
18511 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18512 return false;
18513
18514 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18515 does not wrap. */
18516 if (mode == DImode)
18517 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18518
18519 /* For SImode, rlwinm can do everything. */
18520 if (mode == SImode)
18521 return (nb < 32 && ne < 32);
18522
18523 return false;
18524 }
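
/* Some illustrative DImode cases (worked by hand, not exhaustive): 0xff
   (ne == 0) can be done with rldicl, 0xffffffff00000000 (nb == 63) with
   rldicr, and 0x00fffff0 (nb == 23, ne == 4) with rlwinm; by contrast
   0x0ffffffffffffff0 (nb == 59, ne == 4) fits none of them, so this
   function returns false for it.  */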
18525
18526 /* Return the instruction template for an AND with mask in mode MODE, with
18527 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18528
18529 const char *
18530 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18531 {
18532 int nb, ne;
18533
18534 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18535 gcc_unreachable ();
18536
18537 if (mode == DImode && ne == 0)
18538 {
18539 operands[3] = GEN_INT (63 - nb);
18540 if (dot)
18541 return "rldicl. %0,%1,0,%3";
18542 return "rldicl %0,%1,0,%3";
18543 }
18544
18545 if (mode == DImode && nb == 63)
18546 {
18547 operands[3] = GEN_INT (63 - ne);
18548 if (dot)
18549 return "rldicr. %0,%1,0,%3";
18550 return "rldicr %0,%1,0,%3";
18551 }
18552
18553 if (nb < 32 && ne < 32)
18554 {
18555 operands[3] = GEN_INT (31 - nb);
18556 operands[4] = GEN_INT (31 - ne);
18557 if (dot)
18558 return "rlwinm. %0,%1,0,%3,%4";
18559 return "rlwinm %0,%1,0,%3,%4";
18560 }
18561
18562 gcc_unreachable ();
18563 }
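
/* For instance (illustrative): a DImode AND with 0xff has ne == 0 and
   nb == 7, so the first case above yields "rldicl %0,%1,0,56"; an SImode
   AND with 0x00fffff0 (nb == 23, ne == 4) takes the last case and yields
   "rlwinm %0,%1,0,8,27".  */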
18564
18565 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18566 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18567 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18568
18569 bool
18570 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18571 {
18572 int nb, ne;
18573
18574 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18575 return false;
18576
18577 int n = GET_MODE_PRECISION (mode);
18578 int sh = -1;
18579
18580 if (CONST_INT_P (XEXP (shift, 1)))
18581 {
18582 sh = INTVAL (XEXP (shift, 1));
18583 if (sh < 0 || sh >= n)
18584 return false;
18585 }
18586
18587 rtx_code code = GET_CODE (shift);
18588
18589 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18590 if (sh == 0)
18591 code = ROTATE;
18592
18593 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18594 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18595 code = ASHIFT;
18596 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18597 {
18598 code = LSHIFTRT;
18599 sh = n - sh;
18600 }
18601
18602 /* DImode rotates need rld*. */
18603 if (mode == DImode && code == ROTATE)
18604 return (nb == 63 || ne == 0 || ne == sh);
18605
18606 /* SImode rotates need rlw*. */
18607 if (mode == SImode && code == ROTATE)
18608 return (nb < 32 && ne < 32 && sh < 32);
18609
18610 /* Wrap-around masks are only okay for rotates. */
18611 if (ne > nb)
18612 return false;
18613
18614 /* Variable shifts are only okay for rotates. */
18615 if (sh < 0)
18616 return false;
18617
18618 /* Don't allow ASHIFT if the mask is wrong for that. */
18619 if (code == ASHIFT && ne < sh)
18620 return false;
18621
18622 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18623 if the mask is wrong for that. */
18624 if (nb < 32 && ne < 32 && sh < 32
18625 && !(code == LSHIFTRT && nb >= 32 - sh))
18626 return true;
18627
18628 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18629 if the mask is wrong for that. */
18630 if (code == LSHIFTRT)
18631 sh = 64 - sh;
18632 if (nb == 63 || ne == 0 || ne == sh)
18633 return !(code == LSHIFTRT && nb >= sh);
18634
18635 return false;
18636 }
18637
18638 /* Return the instruction template for a shift with mask in mode MODE, with
18639 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18640
18641 const char *
18642 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18643 {
18644 int nb, ne;
18645
18646 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18647 gcc_unreachable ();
18648
18649 if (mode == DImode && ne == 0)
18650 {
18651 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18652 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18653 operands[3] = GEN_INT (63 - nb);
18654 if (dot)
18655 return "rld%I2cl. %0,%1,%2,%3";
18656 return "rld%I2cl %0,%1,%2,%3";
18657 }
18658
18659 if (mode == DImode && nb == 63)
18660 {
18661 operands[3] = GEN_INT (63 - ne);
18662 if (dot)
18663 return "rld%I2cr. %0,%1,%2,%3";
18664 return "rld%I2cr %0,%1,%2,%3";
18665 }
18666
18667 if (mode == DImode
18668 && GET_CODE (operands[4]) != LSHIFTRT
18669 && CONST_INT_P (operands[2])
18670 && ne == INTVAL (operands[2]))
18671 {
18672 operands[3] = GEN_INT (63 - nb);
18673 if (dot)
18674 return "rld%I2c. %0,%1,%2,%3";
18675 return "rld%I2c %0,%1,%2,%3";
18676 }
18677
18678 if (nb < 32 && ne < 32)
18679 {
18680 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18681 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18682 operands[3] = GEN_INT (31 - nb);
18683 operands[4] = GEN_INT (31 - ne);
18684 /* This insn can also be a 64-bit rotate with mask that really makes
18685 it just a shift right (with mask); the %h below adjusts for that
18686 situation (the shift count is >= 32 in that case). */
18687 if (dot)
18688 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18689 return "rlw%I2nm %0,%1,%h2,%3,%4";
18690 }
18691
18692 gcc_unreachable ();
18693 }
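
/* As a worked example (illustrative): an SImode (and (ashift x 4)
   0xfffffff0) has nb == 31 and ne == 4, passes rs6000_is_valid_shift_mask,
   and the rlwinm case above yields "rlwinm %0,%1,4,0,27", which is the
   rlwinm spelling of slwi %0,%1,4.  */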
18694
18695 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18696 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18697 ASHIFT, or LSHIFTRT) in mode MODE. */
18698
18699 bool
18700 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18701 {
18702 int nb, ne;
18703
18704 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18705 return false;
18706
18707 int n = GET_MODE_PRECISION (mode);
18708
18709 int sh = INTVAL (XEXP (shift, 1));
18710 if (sh < 0 || sh >= n)
18711 return false;
18712
18713 rtx_code code = GET_CODE (shift);
18714
18715 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18716 if (sh == 0)
18717 code = ROTATE;
18718
18719 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18720 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18721 code = ASHIFT;
18722 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18723 {
18724 code = LSHIFTRT;
18725 sh = n - sh;
18726 }
18727
18728 /* DImode rotates need rldimi. */
18729 if (mode == DImode && code == ROTATE)
18730 return (ne == sh);
18731
18732 /* SImode rotates need rlwimi. */
18733 if (mode == SImode && code == ROTATE)
18734 return (nb < 32 && ne < 32 && sh < 32);
18735
18736 /* Wrap-around masks are only okay for rotates. */
18737 if (ne > nb)
18738 return false;
18739
18740 /* Don't allow ASHIFT if the mask is wrong for that. */
18741 if (code == ASHIFT && ne < sh)
18742 return false;
18743
18744 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18745 if the mask is wrong for that. */
18746 if (nb < 32 && ne < 32 && sh < 32
18747 && !(code == LSHIFTRT && nb >= 32 - sh))
18748 return true;
18749
18750 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18751 if the mask is wrong for that. */
18752 if (code == LSHIFTRT)
18753 sh = 64 - sh;
18754 if (ne == sh)
18755 return !(code == LSHIFTRT && nb >= sh);
18756
18757 return false;
18758 }
18759
18760 /* Return the instruction template for an insert with mask in mode MODE, with
18761 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18762
18763 const char *
18764 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18765 {
18766 int nb, ne;
18767
18768 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18769 gcc_unreachable ();
18770
18771 /* Prefer rldimi because rlwimi is cracked. */
18772 if (TARGET_POWERPC64
18773 && (!dot || mode == DImode)
18774 && GET_CODE (operands[4]) != LSHIFTRT
18775 && ne == INTVAL (operands[2]))
18776 {
18777 operands[3] = GEN_INT (63 - nb);
18778 if (dot)
18779 return "rldimi. %0,%1,%2,%3";
18780 return "rldimi %0,%1,%2,%3";
18781 }
18782
18783 if (nb < 32 && ne < 32)
18784 {
18785 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18786 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18787 operands[3] = GEN_INT (31 - nb);
18788 operands[4] = GEN_INT (31 - ne);
18789 if (dot)
18790 return "rlwimi. %0,%1,%2,%3,%4";
18791 return "rlwimi %0,%1,%2,%3,%4";
18792 }
18793
18794 gcc_unreachable ();
18795 }
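
/* For example (illustrative): inserting an 8-bit field at bit 8, i.e. mask
   0x0000ff00 (nb == 15, ne == 8) with an ASHIFT by 8, yields
   "rlwimi %0,%1,8,16,23" from the second case above, or the equivalent
   "rldimi %0,%1,8,48" when the first (64-bit) case applies.  */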
18796
18797 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18798 using two machine instructions. */
18799
18800 bool
18801 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18802 {
18803 /* There are two kinds of AND we can handle with two insns:
18804 1) those we can do with two rl* insn;
18805 2) ori[s];xori[s].
18806
18807 We do not handle that last case yet. */
18808
18809 /* If there is just one stretch of ones, we can do it. */
18810 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18811 return true;
18812
18813 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18814 one insn, we can do the whole thing with two. */
18815 unsigned HOST_WIDE_INT val = INTVAL (c);
18816 unsigned HOST_WIDE_INT bit1 = val & -val;
18817 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18818 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18819 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18820 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18821 }
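
/* A worked example of the bit tricks above (illustrative): for
   val = 0x00ff0ff0 we get bit1 = 0x10 (lowest set bit),
   bit2 = 0x1000 (lowest bit of the lowest "hole"), val1 = 0x00ff0000 and
   bit3 = 0x10000 (lowest set bit above that hole), so
   val + bit3 - bit2 = 0x00fffff0: the hole is filled in, the result is a
   single run of ones and hence a valid AND mask, and the original AND can
   be done in two insns.  */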
18822
18823 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18824 If EXPAND is true, split rotate-and-mask instructions we generate to
18825 their constituent parts as well (this is used during expand); if DOT
18826 is 1, make the last insn a record-form instruction clobbering the
18827 destination GPR and setting the CC reg (from operands[3]); if 2, set
18828 that GPR as well as the CC reg. */
18829
18830 void
18831 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18832 {
18833 gcc_assert (!(expand && dot));
18834
18835 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18836
18837 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18838 shift right. This generates better code than doing the masks without
18839 shifts, or shifting first right and then left. */
18840 int nb, ne;
18841 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18842 {
18843 gcc_assert (mode == DImode);
18844
18845 int shift = 63 - nb;
18846 if (expand)
18847 {
18848 rtx tmp1 = gen_reg_rtx (DImode);
18849 rtx tmp2 = gen_reg_rtx (DImode);
18850 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18851 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18852 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18853 }
18854 else
18855 {
18856 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18857 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18858 emit_move_insn (operands[0], tmp);
18859 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18860 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18861 }
18862 return;
18863 }
18864
18865 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18866 that does the rest. */
18867 unsigned HOST_WIDE_INT bit1 = val & -val;
18868 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18869 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18870 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18871
18872 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18873 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18874
18875 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18876
18877 /* Two "no-rotate"-and-mask instructions, for SImode. */
18878 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18879 {
18880 gcc_assert (mode == SImode);
18881
18882 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18883 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18884 emit_move_insn (reg, tmp);
18885 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18886 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18887 return;
18888 }
18889
18890 gcc_assert (mode == DImode);
18891
18892 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18893 insns; we have to do the first in SImode, because it wraps. */
18894 if (mask2 <= 0xffffffff
18895 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18896 {
18897 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18898 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18899 GEN_INT (mask1));
18900 rtx reg_low = gen_lowpart (SImode, reg);
18901 emit_move_insn (reg_low, tmp);
18902 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18903 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18904 return;
18905 }
18906
18907 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18908 at the top end), rotate back and clear the other hole. */
18909 int right = exact_log2 (bit3);
18910 int left = 64 - right;
18911
18912 /* Rotate the mask too. */
18913 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18914
18915 if (expand)
18916 {
18917 rtx tmp1 = gen_reg_rtx (DImode);
18918 rtx tmp2 = gen_reg_rtx (DImode);
18919 rtx tmp3 = gen_reg_rtx (DImode);
18920 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18921 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18922 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18923 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18924 }
18925 else
18926 {
18927 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18928 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18929 emit_move_insn (operands[0], tmp);
18930 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18931 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18932 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18933 }
18934 }
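
/* Sketch of the DImode single-run case above (illustrative, assuming the
   usual insn patterns are matched): an AND with 0x0ffffffffffffff0 has
   nb == 59 and ne == 4, so shift == 4 and the non-expand path emits a
   shift-left-with-mask (roughly "rldicr %0,%1,4,55") followed by a shift
   right (roughly "rldicl %0,%0,60,4", i.e. srdi %0,%0,4); two machine
   insns instead of three separate shift/and/shift operations.  */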
18935 \f
18936 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
18937 for lfq and stfq insns iff the registers are hard registers. */
18938
18939 int
18940 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18941 {
18942 /* We might have been passed a SUBREG. */
18943 if (!REG_P (reg1) || !REG_P (reg2))
18944 return 0;
18945
18946 /* We might have been passed non-floating-point registers. */
18947 if (!FP_REGNO_P (REGNO (reg1))
18948 || !FP_REGNO_P (REGNO (reg2)))
18949 return 0;
18950
18951 return (REGNO (reg1) == REGNO (reg2) - 1);
18952 }
18953
18954 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18955 addr1 and addr2 must be in consecutive memory locations
18956 (addr2 == addr1 + 8). */
18957
18958 int
18959 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18960 {
18961 rtx addr1, addr2;
18962 unsigned int reg1, reg2;
18963 int offset1, offset2;
18964
18965 /* The mems cannot be volatile. */
18966 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18967 return 0;
18968
18969 addr1 = XEXP (mem1, 0);
18970 addr2 = XEXP (mem2, 0);
18971
18972 /* Extract an offset (if used) from the first addr. */
18973 if (GET_CODE (addr1) == PLUS)
18974 {
18975 /* If not a REG, return zero. */
18976 if (!REG_P (XEXP (addr1, 0)))
18977 return 0;
18978 else
18979 {
18980 reg1 = REGNO (XEXP (addr1, 0));
18981 /* The offset must be constant! */
18982 if (!CONST_INT_P (XEXP (addr1, 1)))
18983 return 0;
18984 offset1 = INTVAL (XEXP (addr1, 1));
18985 }
18986 }
18987 else if (!REG_P (addr1))
18988 return 0;
18989 else
18990 {
18991 reg1 = REGNO (addr1);
18992 /* This was a simple (mem (reg)) expression. Offset is 0. */
18993 offset1 = 0;
18994 }
18995
18996 /* And now for the second addr. */
18997 if (GET_CODE (addr2) == PLUS)
18998 {
18999 /* If not a REG, return zero. */
19000 if (!REG_P (XEXP (addr2, 0)))
19001 return 0;
19002 else
19003 {
19004 reg2 = REGNO (XEXP (addr2, 0));
19005 /* The offset must be constant. */
19006 if (!CONST_INT_P (XEXP (addr2, 1)))
19007 return 0;
19008 offset2 = INTVAL (XEXP (addr2, 1));
19009 }
19010 }
19011 else if (!REG_P (addr2))
19012 return 0;
19013 else
19014 {
19015 reg2 = REGNO (addr2);
19016 /* This was a simple (mem (reg)) expression. Offset is 0. */
19017 offset2 = 0;
19018 }
19019
19020 /* Both of these must have the same base register. */
19021 if (reg1 != reg2)
19022 return 0;
19023
19024 /* The offset for the second addr must be 8 more than the first addr. */
19025 if (offset2 != offset1 + 8)
19026 return 0;
19027
19028 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19029 instructions. */
19030 return 1;
19031 }
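
/* For example (illustrative): (mem (plus (reg 3) (const_int 8))) followed
   by (mem (plus (reg 3) (const_int 16))) qualifies, since both use the
   same base register and the offsets differ by exactly 8; a pair based on
   different registers, or with any other offset difference, returns 0.  */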
19032 \f
19033 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
19034 need to use DDmode; in all other cases we can use the same mode. */
19035 static machine_mode
19036 rs6000_secondary_memory_needed_mode (machine_mode mode)
19037 {
19038 if (lra_in_progress && mode == SDmode)
19039 return DDmode;
19040 return mode;
19041 }
19042
19043 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19044 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19045 only work on the traditional Altivec registers, note if an Altivec register
19046 was chosen. */
19047
19048 static enum rs6000_reg_type
19049 register_to_reg_type (rtx reg, bool *is_altivec)
19050 {
19051 HOST_WIDE_INT regno;
19052 enum reg_class rclass;
19053
19054 if (SUBREG_P (reg))
19055 reg = SUBREG_REG (reg);
19056
19057 if (!REG_P (reg))
19058 return NO_REG_TYPE;
19059
19060 regno = REGNO (reg);
19061 if (!HARD_REGISTER_NUM_P (regno))
19062 {
19063 if (!lra_in_progress && !reload_completed)
19064 return PSEUDO_REG_TYPE;
19065
19066 regno = true_regnum (reg);
19067 if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
19068 return PSEUDO_REG_TYPE;
19069 }
19070
19071 gcc_assert (regno >= 0);
19072
19073 if (is_altivec && ALTIVEC_REGNO_P (regno))
19074 *is_altivec = true;
19075
19076 rclass = rs6000_regno_regclass[regno];
19077 return reg_class_to_reg_type[(int)rclass];
19078 }
19079
19080 /* Helper function to return the cost of adding a TOC entry address. */
19081
19082 static inline int
19083 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19084 {
19085 int ret;
19086
19087 if (TARGET_CMODEL != CMODEL_SMALL)
19088 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19089
19090 else
19091 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19092
19093 return ret;
19094 }
19095
19096 /* Helper function for rs6000_secondary_reload to determine whether the memory
19097 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19098 needs reloading. Return negative if the memory is not handled by the memory
19099 helper functions and to try a different reload method, 0 if no additional
19100 instructions are need, and positive to give the extra cost for the
19101 memory. */
19102
19103 static int
19104 rs6000_secondary_reload_memory (rtx addr,
19105 enum reg_class rclass,
19106 machine_mode mode)
19107 {
19108 int extra_cost = 0;
19109 rtx reg, and_arg, plus_arg0, plus_arg1;
19110 addr_mask_type addr_mask;
19111 const char *type = NULL;
19112 const char *fail_msg = NULL;
19113
19114 if (GPR_REG_CLASS_P (rclass))
19115 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19116
19117 else if (rclass == FLOAT_REGS)
19118 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19119
19120 else if (rclass == ALTIVEC_REGS)
19121 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19122
19123 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19124 else if (rclass == VSX_REGS)
19125 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19126 & ~RELOAD_REG_AND_M16);
19127
19128 /* If the register allocator hasn't made up its mind yet on the register
19129 class to use, settle on defaults to use. */
19130 else if (rclass == NO_REGS)
19131 {
19132 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19133 & ~RELOAD_REG_AND_M16);
19134
19135 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19136 addr_mask &= ~(RELOAD_REG_INDEXED
19137 | RELOAD_REG_PRE_INCDEC
19138 | RELOAD_REG_PRE_MODIFY);
19139 }
19140
19141 else
19142 addr_mask = 0;
19143
19144 /* If the register isn't valid in this register class, just return now. */
19145 if ((addr_mask & RELOAD_REG_VALID) == 0)
19146 {
19147 if (TARGET_DEBUG_ADDR)
19148 {
19149 fprintf (stderr,
19150 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19151 "not valid in class\n",
19152 GET_MODE_NAME (mode), reg_class_names[rclass]);
19153 debug_rtx (addr);
19154 }
19155
19156 return -1;
19157 }
19158
19159 switch (GET_CODE (addr))
19160 {
19161 /* Does the register class support auto update forms for this mode? We
19162 don't need a scratch register, since the powerpc only supports
19163 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19164 case PRE_INC:
19165 case PRE_DEC:
19166 reg = XEXP (addr, 0);
19167 if (!base_reg_operand (reg, GET_MODE (reg)))
19168 {
19169 fail_msg = "no base register #1";
19170 extra_cost = -1;
19171 }
19172
19173 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19174 {
19175 extra_cost = 1;
19176 type = "update";
19177 }
19178 break;
19179
19180 case PRE_MODIFY:
19181 reg = XEXP (addr, 0);
19182 plus_arg1 = XEXP (addr, 1);
19183 if (!base_reg_operand (reg, GET_MODE (reg))
19184 || GET_CODE (plus_arg1) != PLUS
19185 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19186 {
19187 fail_msg = "bad PRE_MODIFY";
19188 extra_cost = -1;
19189 }
19190
19191 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19192 {
19193 extra_cost = 1;
19194 type = "update";
19195 }
19196 break;
19197
19198 /* Do we need to simulate AND -16 to clear the bottom address bits used
19199 in VMX load/stores? Only allow the AND for vector sizes. */
19200 case AND:
19201 and_arg = XEXP (addr, 0);
19202 if (GET_MODE_SIZE (mode) != 16
19203 || !CONST_INT_P (XEXP (addr, 1))
19204 || INTVAL (XEXP (addr, 1)) != -16)
19205 {
19206 fail_msg = "bad Altivec AND #1";
19207 extra_cost = -1;
19208 }
19209
19210 if (rclass != ALTIVEC_REGS)
19211 {
19212 if (legitimate_indirect_address_p (and_arg, false))
19213 extra_cost = 1;
19214
19215 else if (legitimate_indexed_address_p (and_arg, false))
19216 extra_cost = 2;
19217
19218 else
19219 {
19220 fail_msg = "bad Altivec AND #2";
19221 extra_cost = -1;
19222 }
19223
19224 type = "and";
19225 }
19226 break;
19227
19228 /* If this is an indirect address, make sure it is a base register. */
19229 case REG:
19230 case SUBREG:
19231 if (!legitimate_indirect_address_p (addr, false))
19232 {
19233 extra_cost = 1;
19234 type = "move";
19235 }
19236 break;
19237
19238 /* If this is an indexed address, make sure the register class can handle
19239 indexed addresses for this mode. */
19240 case PLUS:
19241 plus_arg0 = XEXP (addr, 0);
19242 plus_arg1 = XEXP (addr, 1);
19243
19244 /* (plus (plus (reg) (constant)) (constant)) is generated during
19245 push_reload processing, so handle it now. */
19246 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19247 {
19248 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19249 {
19250 extra_cost = 1;
19251 type = "offset";
19252 }
19253 }
19254
19255 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19256 push_reload processing, so handle it now. */
19257 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19258 {
19259 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19260 {
19261 extra_cost = 1;
19262 type = "indexed #2";
19263 }
19264 }
19265
19266 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19267 {
19268 fail_msg = "no base register #2";
19269 extra_cost = -1;
19270 }
19271
19272 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19273 {
19274 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19275 || !legitimate_indexed_address_p (addr, false))
19276 {
19277 extra_cost = 1;
19278 type = "indexed";
19279 }
19280 }
19281
19282 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19283 && CONST_INT_P (plus_arg1))
19284 {
19285 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19286 {
19287 extra_cost = 1;
19288 type = "vector d-form offset";
19289 }
19290 }
19291
19292 /* Make sure the register class can handle offset addresses. */
19293 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19294 {
19295 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19296 {
19297 extra_cost = 1;
19298 type = "offset #2";
19299 }
19300 }
19301
19302 else
19303 {
19304 fail_msg = "bad PLUS";
19305 extra_cost = -1;
19306 }
19307
19308 break;
19309
19310 case LO_SUM:
19311 /* Quad offsets are restricted and can't handle normal addresses. */
19312 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19313 {
19314 extra_cost = -1;
19315 type = "vector d-form lo_sum";
19316 }
19317
19318 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19319 {
19320 fail_msg = "bad LO_SUM";
19321 extra_cost = -1;
19322 }
19323
19324 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19325 {
19326 extra_cost = 1;
19327 type = "lo_sum";
19328 }
19329 break;
19330
19331 /* Static addresses need to create a TOC entry. */
19332 case CONST:
19333 case SYMBOL_REF:
19334 case LABEL_REF:
19335 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19336 {
19337 extra_cost = -1;
19338 type = "vector d-form lo_sum #2";
19339 }
19340
19341 else
19342 {
19343 type = "address";
19344 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19345 }
19346 break;
19347
19348 /* TOC references look like offsetable memory. */
19349 case UNSPEC:
19350 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19351 {
19352 fail_msg = "bad UNSPEC";
19353 extra_cost = -1;
19354 }
19355
19356 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19357 {
19358 extra_cost = -1;
19359 type = "vector d-form lo_sum #3";
19360 }
19361
19362 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19363 {
19364 extra_cost = 1;
19365 type = "toc reference";
19366 }
19367 break;
19368
19369 default:
19370 {
19371 fail_msg = "bad address";
19372 extra_cost = -1;
19373 }
19374 }
19375
19376 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19377 {
19378 if (extra_cost < 0)
19379 fprintf (stderr,
19380 "rs6000_secondary_reload_memory error: mode = %s, "
19381 "class = %s, addr_mask = '%s', %s\n",
19382 GET_MODE_NAME (mode),
19383 reg_class_names[rclass],
19384 rs6000_debug_addr_mask (addr_mask, false),
19385 (fail_msg != NULL) ? fail_msg : "<bad address>");
19386
19387 else
19388 fprintf (stderr,
19389 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19390 "addr_mask = '%s', extra cost = %d, %s\n",
19391 GET_MODE_NAME (mode),
19392 reg_class_names[rclass],
19393 rs6000_debug_addr_mask (addr_mask, false),
19394 extra_cost,
19395 (type) ? type : "<none>");
19396
19397 debug_rtx (addr);
19398 }
19399
19400 return extra_cost;
19401 }
19402
19403 /* Helper function for rs6000_secondary_reload to return true if a move to a
19404 different register class is really a simple move. */
19405
19406 static bool
19407 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19408 enum rs6000_reg_type from_type,
19409 machine_mode mode)
19410 {
19411 int size = GET_MODE_SIZE (mode);
19412
19413 /* Handle the various direct moves available. In this function, we only
19414 look at cases where we don't need any extra registers, and one or more
19415 simple move insns are issued. Small integers are not normally allowed
19416 in FPR/VSX registers. Single-precision binary floating point is not a
19417 simple move, because we need to convert to the single-precision memory
19418 layout. The 4-byte SDmode can be moved. TDmode values are disallowed
19419 since they need special direct move handling, which we do not support yet. */
19420 if (TARGET_DIRECT_MOVE
19421 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19422 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19423 {
19424 if (TARGET_POWERPC64)
19425 {
19426 /* ISA 2.07: MTVSRD or MFVSRD. */
19427 if (size == 8)
19428 return true;
19429
19430 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19431 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19432 return true;
19433 }
19434
19435 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19436 if (TARGET_P8_VECTOR)
19437 {
19438 if (mode == SImode)
19439 return true;
19440
19441 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19442 return true;
19443 }
19444
19445 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19446 if (mode == SDmode)
19447 return true;
19448 }
19449
19450 /* Power6+: MFTGPR or MFFGPR. */
19451 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19452 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19453 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19454 return true;
19455
19456 /* Move to/from SPR. */
19457 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19458 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19459 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19460 return true;
19461
19462 return false;
19463 }
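
/* For instance (illustrative): on a 64-bit ISA 2.07 target, a DImode move
   between a GPR and a VSX register is a single mtvsrd or mfvsrd, so this
   function returns true; a 16-byte TDmode move is deliberately excluded
   and returns false.  */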
19464
19465 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19466 special direct moves that involve allocating an extra register. Return true
19467 if there is such a move, filling in SRI with the insn code and extra cost of
19468 the helper function; return false if not. */
19469
19470 static bool
19471 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19472 enum rs6000_reg_type from_type,
19473 machine_mode mode,
19474 secondary_reload_info *sri,
19475 bool altivec_p)
19476 {
19477 bool ret = false;
19478 enum insn_code icode = CODE_FOR_nothing;
19479 int cost = 0;
19480 int size = GET_MODE_SIZE (mode);
19481
19482 if (TARGET_POWERPC64 && size == 16)
19483 {
19484 /* Handle moving 128-bit values from GPRs to VSX registers on
19485 ISA 2.07 (power8, power9) when running in 64-bit mode using
19486 XXPERMDI to glue the two 64-bit values back together. */
19487 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19488 {
19489 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19490 icode = reg_addr[mode].reload_vsx_gpr;
19491 }
19492
19493 /* Handle moving 128-bit values from VSX registers to GPRs on
19494 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19495 bottom 64-bit value. */
19496 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19497 {
19498 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19499 icode = reg_addr[mode].reload_gpr_vsx;
19500 }
19501 }
19502
19503 else if (TARGET_POWERPC64 && mode == SFmode)
19504 {
19505 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19506 {
19507 cost = 3; /* xscvdpspn, mfvsrd, and. */
19508 icode = reg_addr[mode].reload_gpr_vsx;
19509 }
19510
19511 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19512 {
19513 cost = 2; /* mtvsrwz, xscvspdpn. */
19514 icode = reg_addr[mode].reload_vsx_gpr;
19515 }
19516 }
19517
19518 else if (!TARGET_POWERPC64 && size == 8)
19519 {
19520 /* Handle moving 64-bit values from GPRs to floating point registers on
19521 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19522 32-bit values back together. Altivec register classes must be handled
19523 specially since a different instruction is used, and the secondary
19524 reload support requires a single instruction class in the scratch
19525 register constraint. However, right now TFmode is not allowed in
19526 Altivec registers, so the pattern will never match. */
19527 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19528 {
19529 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19530 icode = reg_addr[mode].reload_fpr_gpr;
19531 }
19532 }
19533
19534 if (icode != CODE_FOR_nothing)
19535 {
19536 ret = true;
19537 if (sri)
19538 {
19539 sri->icode = icode;
19540 sri->extra_cost = cost;
19541 }
19542 }
19543
19544 return ret;
19545 }
19546
19547 /* Return whether a move between two register classes can be done either
19548 directly (simple move) or via a pattern that uses a single extra temporary
19549 (using ISA 2.07's direct move in this case). */
19550
19551 static bool
19552 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19553 enum rs6000_reg_type from_type,
19554 machine_mode mode,
19555 secondary_reload_info *sri,
19556 bool altivec_p)
19557 {
19558 /* Fall back to load/store reloads if either type is not a register. */
19559 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19560 return false;
19561
19562 /* If we haven't allocated registers yet, assume the move can be done for the
19563 standard register types. */
19564 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19565 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19566 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19567 return true;
19568
19569 /* A move within the same set of registers is a simple move for
19570 non-specialized registers. */
19571 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19572 return true;
19573
19574 /* Check whether a simple move can be done directly. */
19575 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19576 {
19577 if (sri)
19578 {
19579 sri->icode = CODE_FOR_nothing;
19580 sri->extra_cost = 0;
19581 }
19582 return true;
19583 }
19584
19585 /* Now check if we can do it in a few steps. */
19586 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19587 altivec_p);
19588 }
19589
19590 /* Inform reload about cases where moving X with a mode MODE to a register in
19591 RCLASS requires an extra scratch or immediate register. Return the class
19592 needed for the immediate register.
19593
19594 For VSX and Altivec, we may need a register to convert sp+offset into
19595 reg+sp.
19596
19597 For misaligned 64-bit gpr loads and stores we need a register to
19598 convert an offset address to indirect. */
19599
19600 static reg_class_t
19601 rs6000_secondary_reload (bool in_p,
19602 rtx x,
19603 reg_class_t rclass_i,
19604 machine_mode mode,
19605 secondary_reload_info *sri)
19606 {
19607 enum reg_class rclass = (enum reg_class) rclass_i;
19608 reg_class_t ret = ALL_REGS;
19609 enum insn_code icode;
19610 bool default_p = false;
19611 bool done_p = false;
19612
19613 /* Allow subreg of memory before/during reload. */
19614 bool memory_p = (MEM_P (x)
19615 || (!reload_completed && SUBREG_P (x)
19616 && MEM_P (SUBREG_REG (x))));
19617
19618 sri->icode = CODE_FOR_nothing;
19619 sri->t_icode = CODE_FOR_nothing;
19620 sri->extra_cost = 0;
19621 icode = ((in_p)
19622 ? reg_addr[mode].reload_load
19623 : reg_addr[mode].reload_store);
19624
19625 if (REG_P (x) || register_operand (x, mode))
19626 {
19627 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19628 bool altivec_p = (rclass == ALTIVEC_REGS);
19629 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19630
19631 if (!in_p)
19632 std::swap (to_type, from_type);
19633
19634 /* Can we do a direct move of some sort? */
19635 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19636 altivec_p))
19637 {
19638 icode = (enum insn_code)sri->icode;
19639 default_p = false;
19640 done_p = true;
19641 ret = NO_REGS;
19642 }
19643 }
19644
19645 /* Make sure 0.0 is not reloaded or forced into memory. */
19646 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19647 {
19648 ret = NO_REGS;
19649 default_p = false;
19650 done_p = true;
19651 }
19652
19653 /* If this is a scalar floating point value and we want to load it into the
19654 traditional Altivec registers, do it via a move via a traditional floating
19655 point register, unless we have D-form addressing. Also make sure that
19656 non-zero constants use a FPR. */
19657 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19658 && !mode_supports_vmx_dform (mode)
19659 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19660 && (memory_p || CONST_DOUBLE_P (x)))
19661 {
19662 ret = FLOAT_REGS;
19663 default_p = false;
19664 done_p = true;
19665 }
19666
19667 /* Handle reload of load/stores if we have reload helper functions. */
19668 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19669 {
19670 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19671 mode);
19672
19673 if (extra_cost >= 0)
19674 {
19675 done_p = true;
19676 ret = NO_REGS;
19677 if (extra_cost > 0)
19678 {
19679 sri->extra_cost = extra_cost;
19680 sri->icode = icode;
19681 }
19682 }
19683 }
19684
19685 /* Handle unaligned loads and stores of integer registers. */
19686 if (!done_p && TARGET_POWERPC64
19687 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19688 && memory_p
19689 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19690 {
19691 rtx addr = XEXP (x, 0);
19692 rtx off = address_offset (addr);
19693
19694 if (off != NULL_RTX)
19695 {
19696 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19697 unsigned HOST_WIDE_INT offset = INTVAL (off);
19698
19699 /* We need a secondary reload when our legitimate_address_p
19700 says the address is good (as otherwise the entire address
19701 will be reloaded), and the offset is not a multiple of
19702 four or we have an address wrap. Address wrap will only
19703 occur for LO_SUMs since legitimate_offset_address_p
19704 rejects addresses for 16-byte mems that will wrap. */
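	  /* For example (illustrative): on a 64-bit target, a DImode load
	     from r3+6 is accepted by legitimate_address_p, but 6 is not a
	     multiple of 4 as the DS-form ld instruction requires, so
	     CODE_FOR_reload_di_load is used below with a scratch register
	     to rebuild the address.  */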
19705 if (GET_CODE (addr) == LO_SUM
19706 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19707 && ((offset & 3) != 0
19708 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19709 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19710 && (offset & 3) != 0))
19711 {
19712 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19713 if (in_p)
19714 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19715 : CODE_FOR_reload_di_load);
19716 else
19717 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19718 : CODE_FOR_reload_di_store);
19719 sri->extra_cost = 2;
19720 ret = NO_REGS;
19721 done_p = true;
19722 }
19723 else
19724 default_p = true;
19725 }
19726 else
19727 default_p = true;
19728 }
19729
19730 if (!done_p && !TARGET_POWERPC64
19731 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19732 && memory_p
19733 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19734 {
19735 rtx addr = XEXP (x, 0);
19736 rtx off = address_offset (addr);
19737
19738 if (off != NULL_RTX)
19739 {
19740 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19741 unsigned HOST_WIDE_INT offset = INTVAL (off);
19742
19743 /* We need a secondary reload when our legitimate_address_p
19744 says the address is good (as otherwise the entire address
19745 will be reloaded), and we have a wrap.
19746
19747 legitimate_lo_sum_address_p allows LO_SUM addresses to
19748 have any offset so test for wrap in the low 16 bits.
19749
19750 legitimate_offset_address_p checks for the range
19751 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19752 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19753 [0x7ff4,0x7fff] respectively, so test for the
19754 intersection of these ranges, [0x7ffc,0x7fff] and
19755 [0x7ff4,0x7ff7] respectively.
19756
19757 Note that the address we see here may have been
19758 manipulated by legitimize_reload_address. */
19759 if (GET_CODE (addr) == LO_SUM
19760 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19761 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19762 {
19763 if (in_p)
19764 sri->icode = CODE_FOR_reload_si_load;
19765 else
19766 sri->icode = CODE_FOR_reload_si_store;
19767 sri->extra_cost = 2;
19768 ret = NO_REGS;
19769 done_p = true;
19770 }
19771 else
19772 default_p = true;
19773 }
19774 else
19775 default_p = true;
19776 }
19777
19778 if (!done_p)
19779 default_p = true;
19780
19781 if (default_p)
19782 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19783
19784 gcc_assert (ret != ALL_REGS);
19785
19786 if (TARGET_DEBUG_ADDR)
19787 {
19788 fprintf (stderr,
19789 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19790 "mode = %s",
19791 reg_class_names[ret],
19792 in_p ? "true" : "false",
19793 reg_class_names[rclass],
19794 GET_MODE_NAME (mode));
19795
19796 if (reload_completed)
19797 fputs (", after reload", stderr);
19798
19799 if (!done_p)
19800 fputs (", done_p not set", stderr);
19801
19802 if (default_p)
19803 fputs (", default secondary reload", stderr);
19804
19805 if (sri->icode != CODE_FOR_nothing)
19806 fprintf (stderr, ", reload func = %s, extra cost = %d",
19807 insn_data[sri->icode].name, sri->extra_cost);
19808
19809 else if (sri->extra_cost > 0)
19810 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19811
19812 fputs ("\n", stderr);
19813 debug_rtx (x);
19814 }
19815
19816 return ret;
19817 }
19818
19819 /* Better tracing for rs6000_secondary_reload_inner. */
19820
19821 static void
19822 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19823 bool store_p)
19824 {
19825 rtx set, clobber;
19826
19827 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19828
19829 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19830 store_p ? "store" : "load");
19831
19832 if (store_p)
19833 set = gen_rtx_SET (mem, reg);
19834 else
19835 set = gen_rtx_SET (reg, mem);
19836
19837 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19838 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19839 }
19840
19841 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19842 ATTRIBUTE_NORETURN;
19843
19844 static void
19845 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19846 bool store_p)
19847 {
19848 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19849 gcc_unreachable ();
19850 }
19851
19852 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19853 reload helper functions. These were identified in
19854 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19855 reload, it calls the insns:
19856 reload_<RELOAD:mode>_<P:mptrsize>_store
19857 reload_<RELOAD:mode>_<P:mptrsize>_load
19858
19859 which in turn calls this function, to do whatever is necessary to create
19860 valid addresses. */
19861
19862 void
19863 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19864 {
19865 int regno = true_regnum (reg);
19866 machine_mode mode = GET_MODE (reg);
19867 addr_mask_type addr_mask;
19868 rtx addr;
19869 rtx new_addr;
19870 rtx op_reg, op0, op1;
19871 rtx and_op;
19872 rtx cc_clobber;
19873 rtvec rv;
19874
19875 if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
19876 || !base_reg_operand (scratch, GET_MODE (scratch)))
19877 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19878
19879 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19880 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19881
19882 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19883 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19884
19885 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19886 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19887
19888 else
19889 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19890
19891 /* Make sure the mode is valid in this register class. */
19892 if ((addr_mask & RELOAD_REG_VALID) == 0)
19893 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19894
19895 if (TARGET_DEBUG_ADDR)
19896 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19897
19898 new_addr = addr = XEXP (mem, 0);
19899 switch (GET_CODE (addr))
19900 {
19901 /* Does the register class support auto update forms for this mode? If
19902 not, do the update now. We don't need a scratch register, since the
19903 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19904 case PRE_INC:
19905 case PRE_DEC:
19906 op_reg = XEXP (addr, 0);
19907 if (!base_reg_operand (op_reg, Pmode))
19908 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19909
19910 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19911 {
19912 int delta = GET_MODE_SIZE (mode);
19913 if (GET_CODE (addr) == PRE_DEC)
19914 delta = -delta;
19915 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19916 new_addr = op_reg;
19917 }
19918 break;
19919
19920 case PRE_MODIFY:
19921 op0 = XEXP (addr, 0);
19922 op1 = XEXP (addr, 1);
19923 if (!base_reg_operand (op0, Pmode)
19924 || GET_CODE (op1) != PLUS
19925 || !rtx_equal_p (op0, XEXP (op1, 0)))
19926 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19927
19928 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19929 {
19930 emit_insn (gen_rtx_SET (op0, op1));
19931 new_addr = reg;
19932 }
19933 break;
19934
19935 /* Do we need to simulate AND -16 to clear the bottom address bits used
19936 in VMX load/stores? */
19937 case AND:
19938 op0 = XEXP (addr, 0);
19939 op1 = XEXP (addr, 1);
19940 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19941 {
19942 if (REG_P (op0) || SUBREG_P (op0))
19943 op_reg = op0;
19944
19945 else if (GET_CODE (op1) == PLUS)
19946 {
19947 emit_insn (gen_rtx_SET (scratch, op1));
19948 op_reg = scratch;
19949 }
19950
19951 else
19952 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19953
19954 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19955 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19956 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19957 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19958 new_addr = scratch;
19959 }
19960 break;
19961
19962 /* If this is an indirect address, make sure it is a base register. */
19963 case REG:
19964 case SUBREG:
19965 if (!base_reg_operand (addr, GET_MODE (addr)))
19966 {
19967 emit_insn (gen_rtx_SET (scratch, addr));
19968 new_addr = scratch;
19969 }
19970 break;
19971
19972 /* If this is an indexed address, make sure the register class can handle
19973 indexed addresses for this mode. */
19974 case PLUS:
19975 op0 = XEXP (addr, 0);
19976 op1 = XEXP (addr, 1);
19977 if (!base_reg_operand (op0, Pmode))
19978 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19979
19980 else if (int_reg_operand (op1, Pmode))
19981 {
19982 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19983 {
19984 emit_insn (gen_rtx_SET (scratch, addr));
19985 new_addr = scratch;
19986 }
19987 }
19988
19989 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19990 {
19991 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19992 || !quad_address_p (addr, mode, false))
19993 {
19994 emit_insn (gen_rtx_SET (scratch, addr));
19995 new_addr = scratch;
19996 }
19997 }
19998
19999 /* Make sure the register class can handle offset addresses. */
20000 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20001 {
20002 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20003 {
20004 emit_insn (gen_rtx_SET (scratch, addr));
20005 new_addr = scratch;
20006 }
20007 }
20008
20009 else
20010 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20011
20012 break;
20013
20014 case LO_SUM:
20015 op0 = XEXP (addr, 0);
20016 op1 = XEXP (addr, 1);
20017 if (!base_reg_operand (op0, Pmode))
20018 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20019
20020 else if (int_reg_operand (op1, Pmode))
20021 {
20022 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20023 {
20024 emit_insn (gen_rtx_SET (scratch, addr));
20025 new_addr = scratch;
20026 }
20027 }
20028
20029 /* Quad offsets are restricted and can't handle normal addresses. */
20030 else if (mode_supports_dq_form (mode))
20031 {
20032 emit_insn (gen_rtx_SET (scratch, addr));
20033 new_addr = scratch;
20034 }
20035
20036 /* Make sure the register class can handle offset addresses. */
20037 else if (legitimate_lo_sum_address_p (mode, addr, false))
20038 {
20039 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20040 {
20041 emit_insn (gen_rtx_SET (scratch, addr));
20042 new_addr = scratch;
20043 }
20044 }
20045
20046 else
20047 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20048
20049 break;
20050
20051 case SYMBOL_REF:
20052 case CONST:
20053 case LABEL_REF:
20054 rs6000_emit_move (scratch, addr, Pmode);
20055 new_addr = scratch;
20056 break;
20057
20058 default:
20059 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20060 }
20061
20062 /* Adjust the address if it changed. */
20063 if (addr != new_addr)
20064 {
20065 mem = replace_equiv_address_nv (mem, new_addr);
20066 if (TARGET_DEBUG_ADDR)
20067 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20068 }
20069
20070 /* Now create the move. */
20071 if (store_p)
20072 emit_insn (gen_rtx_SET (mem, reg));
20073 else
20074 emit_insn (gen_rtx_SET (reg, mem));
20075
20076 return;
20077 }
20078
20079 /* Convert reloads involving 64-bit gprs and misaligned offset
20080 addressing, or multiple 32-bit gprs and offsets that are too large,
20081 to use indirect addressing. */
20082
20083 void
20084 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20085 {
20086 int regno = true_regnum (reg);
20087 enum reg_class rclass;
20088 rtx addr;
20089 rtx scratch_or_premodify = scratch;
20090
20091 if (TARGET_DEBUG_ADDR)
20092 {
20093 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20094 store_p ? "store" : "load");
20095 fprintf (stderr, "reg:\n");
20096 debug_rtx (reg);
20097 fprintf (stderr, "mem:\n");
20098 debug_rtx (mem);
20099 fprintf (stderr, "scratch:\n");
20100 debug_rtx (scratch);
20101 }
20102
20103 gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
20104 gcc_assert (MEM_P (mem));
20105 rclass = REGNO_REG_CLASS (regno);
20106 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20107 addr = XEXP (mem, 0);
20108
20109 if (GET_CODE (addr) == PRE_MODIFY)
20110 {
20111 gcc_assert (REG_P (XEXP (addr, 0))
20112 && GET_CODE (XEXP (addr, 1)) == PLUS
20113 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20114 scratch_or_premodify = XEXP (addr, 0);
20115 addr = XEXP (addr, 1);
20116 }
20117 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20118
20119 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20120
20121 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20122
20123 /* Now create the move. */
20124 if (store_p)
20125 emit_insn (gen_rtx_SET (mem, reg));
20126 else
20127 emit_insn (gen_rtx_SET (reg, mem));
20128
20129 return;
20130 }
20131
20132 /* Given an rtx X being reloaded into a reg required to be
20133 in class CLASS, return the class of reg to actually use.
20134 In general this is just CLASS; but on some machines
20135 in some cases it is preferable to use a more restrictive class.
20136
20137 On the RS/6000, we have to return NO_REGS when we want to reload a
20138 floating-point CONST_DOUBLE to force it to be copied to memory.
20139
20140 We also don't want to reload integer values into floating-point
20141 registers if we can at all help it. In fact, this can
20142 cause reload to die, if it tries to generate a reload of CTR
20143 into a FP register and discovers it doesn't have the memory location
20144 required.
20145
20146 ??? Would it be a good idea to have reload do the converse, that is
20147 try to reload floating modes into FP registers if possible?
20148 */
20149
20150 static enum reg_class
20151 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20152 {
20153 machine_mode mode = GET_MODE (x);
20154 bool is_constant = CONSTANT_P (x);
20155
20156 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20157 reload class for it. */
20158 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20159 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20160 return NO_REGS;
20161
20162 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20163 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20164 return NO_REGS;
20165
20166 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20167 the reloading of address expressions using PLUS into floating point
20168 registers. */
20169 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20170 {
20171 if (is_constant)
20172 {
20173 /* Zero is always allowed in all VSX registers. */
20174 if (x == CONST0_RTX (mode))
20175 return rclass;
20176
20177 /* If this is a vector constant that can be formed with a few Altivec
20178 instructions, we want altivec registers. */
20179 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20180 return ALTIVEC_REGS;
20181
20182 /* If this is an integer constant that can easily be loaded into
20183 vector registers, allow it. */
20184 if (CONST_INT_P (x))
20185 {
20186 HOST_WIDE_INT value = INTVAL (x);
20187
20188 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20189 2.06 can generate it in the Altivec registers with
20190 VSPLTI<x>. */
20191 if (value == -1)
20192 {
20193 if (TARGET_P8_VECTOR)
20194 return rclass;
20195 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20196 return ALTIVEC_REGS;
20197 else
20198 return NO_REGS;
20199 }
20200
20201 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20202 a sign extend in the Altivec registers. */
20203 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20204 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20205 return ALTIVEC_REGS;
20206 }
20207
20208 /* Force constant to memory. */
20209 return NO_REGS;
20210 }
20211
20212 /* D-form addressing can easily reload the value. */
20213 if (mode_supports_vmx_dform (mode)
20214 || mode_supports_dq_form (mode))
20215 return rclass;
20216
20217 /* If this is a scalar floating point value and we don't have D-form
20218 addressing, prefer the traditional floating point registers so that we
20219 can use D-form (register+offset) addressing. */
20220 if (rclass == VSX_REGS
20221 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20222 return FLOAT_REGS;
20223
20224 /* Prefer the Altivec registers if Altivec is handling the vector
20225 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20226 loads. */
20227 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20228 || mode == V1TImode)
20229 return ALTIVEC_REGS;
20230
20231 return rclass;
20232 }
20233
20234 if (is_constant || GET_CODE (x) == PLUS)
20235 {
20236 if (reg_class_subset_p (GENERAL_REGS, rclass))
20237 return GENERAL_REGS;
20238 if (reg_class_subset_p (BASE_REGS, rclass))
20239 return BASE_REGS;
20240 return NO_REGS;
20241 }
20242
20243 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == GEN_OR_FLOAT_REGS)
20244 return GENERAL_REGS;
20245
20246 return rclass;
20247 }
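
/* For example (illustrative): with -mcpu=power9, reloading the DImode
   constant 5 into a VSX register prefers ALTIVEC_REGS, where xxspltib
   plus a sign extension can materialize it; the constant -1 keeps the
   full class, since ISA 2.07's xxlorc works in any VSX register; and
   a constant such as 100000 returns NO_REGS, forcing it to memory. */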
20248
20249 /* Debug version of rs6000_preferred_reload_class. */
20250 static enum reg_class
20251 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20252 {
20253 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20254
20255 fprintf (stderr,
20256 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20257 "mode = %s, x:\n",
20258 reg_class_names[ret], reg_class_names[rclass],
20259 GET_MODE_NAME (GET_MODE (x)));
20260 debug_rtx (x);
20261
20262 return ret;
20263 }
20264
20265 /* If we are copying between FP or AltiVec registers and anything else, we need
20266 a memory location. The exception is when we are targeting ppc64 and the
20267 move to/from fpr to gpr instructions are available. Also, under VSX, you
20268 can copy vector registers from the FP register set to the Altivec register
20269 set and vice versa. */
20270
20271 static bool
20272 rs6000_secondary_memory_needed (machine_mode mode,
20273 reg_class_t from_class,
20274 reg_class_t to_class)
20275 {
20276 enum rs6000_reg_type from_type, to_type;
20277 bool altivec_p = ((from_class == ALTIVEC_REGS)
20278 || (to_class == ALTIVEC_REGS));
20279
20280 /* If a simple/direct move is available, we don't need secondary memory. */
20281 from_type = reg_class_to_reg_type[(int)from_class];
20282 to_type = reg_class_to_reg_type[(int)to_class];
20283
20284 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20285 (secondary_reload_info *)0, altivec_p))
20286 return false;
20287
20288 /* If we have a floating point or vector register class, we need to use
20289 memory to transfer the data. */
20290 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20291 return true;
20292
20293 return false;
20294 }
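
/* For example (a sketch): copying DImode between a GPR and an FPR on
   power6 has no direct move, so this returns true and reload goes
   through a stack slot; on power8 the direct-move check above sees
   mtvsrd/mfvsrd available and the copy stays in registers. */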
20295
20296 /* Debug version of rs6000_secondary_memory_needed. */
20297 static bool
20298 rs6000_debug_secondary_memory_needed (machine_mode mode,
20299 reg_class_t from_class,
20300 reg_class_t to_class)
20301 {
20302 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20303
20304 fprintf (stderr,
20305 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20306 "to_class = %s, mode = %s\n",
20307 ret ? "true" : "false",
20308 reg_class_names[from_class],
20309 reg_class_names[to_class],
20310 GET_MODE_NAME (mode));
20311
20312 return ret;
20313 }
20314
20315 /* Return the register class of a scratch register needed to copy IN into
20316 or out of a register in RCLASS in MODE. If it can be done directly,
20317 NO_REGS is returned. */
20318
20319 static enum reg_class
20320 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20321 rtx in)
20322 {
20323 int regno;
20324
20325 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20326 #if TARGET_MACHO
20327 && MACHOPIC_INDIRECT
20328 #endif
20329 ))
20330 {
20331 /* We cannot copy a symbolic operand directly into anything
20332 other than BASE_REGS for TARGET_ELF. So indicate that a
20333 register from BASE_REGS is needed as an intermediate
20334 register.
20335
20336 On Darwin, pic addresses require a load from memory, which
20337 needs a base register. */
20338 if (rclass != BASE_REGS
20339 && (SYMBOL_REF_P (in)
20340 || GET_CODE (in) == HIGH
20341 || GET_CODE (in) == LABEL_REF
20342 || GET_CODE (in) == CONST))
20343 return BASE_REGS;
20344 }
20345
20346 if (REG_P (in))
20347 {
20348 regno = REGNO (in);
20349 if (!HARD_REGISTER_NUM_P (regno))
20350 {
20351 regno = true_regnum (in);
20352 if (!HARD_REGISTER_NUM_P (regno))
20353 regno = -1;
20354 }
20355 }
20356 else if (SUBREG_P (in))
20357 {
20358 regno = true_regnum (in);
20359 if (!HARD_REGISTER_NUM_P (regno))
20360 regno = -1;
20361 }
20362 else
20363 regno = -1;
20364
20365 /* If we have VSX register moves, prefer moving scalar values between
20366 Altivec registers and GPR by going via an FPR (and then via memory)
20367 instead of reloading the secondary memory address for Altivec moves. */
20368 if (TARGET_VSX
20369 && GET_MODE_SIZE (mode) < 16
20370 && !mode_supports_vmx_dform (mode)
20371 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20372 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20373 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20374 && (regno >= 0 && INT_REGNO_P (regno)))))
20375 return FLOAT_REGS;
20376
20377 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20378 into anything. */
20379 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20380 || (regno >= 0 && INT_REGNO_P (regno)))
20381 return NO_REGS;
20382
20383 /* Constants, memory, and VSX registers can go into VSX registers (both the
20384 traditional floating point and the altivec registers). */
20385 if (rclass == VSX_REGS
20386 && (regno == -1 || VSX_REGNO_P (regno)))
20387 return NO_REGS;
20388
20389 /* Constants, memory, and FP registers can go into FP registers. */
20390 if ((regno == -1 || FP_REGNO_P (regno))
20391 && (rclass == FLOAT_REGS || rclass == GEN_OR_FLOAT_REGS))
20392 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20393
20394 /* Memory, and AltiVec registers can go into AltiVec registers. */
20395 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20396 && rclass == ALTIVEC_REGS)
20397 return NO_REGS;
20398
20399 /* We can copy among the CR registers. */
20400 if ((rclass == CR_REGS || rclass == CR0_REGS)
20401 && regno >= 0 && CR_REGNO_P (regno))
20402 return NO_REGS;
20403
20404 /* Otherwise, we need GENERAL_REGS. */
20405 return GENERAL_REGS;
20406 }
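
/* For example: on ELF, (symbol_ref "x") cannot be copied straight
   into a floating-point register, so the code above requests a
   BASE_REGS intermediate; once the address is in a GPR, it can be
   moved on to its final class in a separate step. */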
20407
20408 /* Debug version of rs6000_secondary_reload_class. */
20409 static enum reg_class
20410 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20411 machine_mode mode, rtx in)
20412 {
20413 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20414 fprintf (stderr,
20415 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20416 "mode = %s, input rtx:\n",
20417 reg_class_names[ret], reg_class_names[rclass],
20418 GET_MODE_NAME (mode));
20419 debug_rtx (in);
20420
20421 return ret;
20422 }
20423
20424 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20425
20426 static bool
20427 rs6000_can_change_mode_class (machine_mode from,
20428 machine_mode to,
20429 reg_class_t rclass)
20430 {
20431 unsigned from_size = GET_MODE_SIZE (from);
20432 unsigned to_size = GET_MODE_SIZE (to);
20433
20434 if (from_size != to_size)
20435 {
20436 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20437
20438 if (reg_classes_intersect_p (xclass, rclass))
20439 {
20440 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20441 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20442 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20443 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20444
20445 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20446 single register under VSX because the scalar part of the register
20447 is in the upper 64-bits, and not the lower 64-bits. Types like
20448 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20449 IEEE floating point can't overlap, and neither can small
20450 values. */
20451
20452 if (to_float128_vector_p && from_float128_vector_p)
20453 return true;
20454
20455 else if (to_float128_vector_p || from_float128_vector_p)
20456 return false;
20457
20458 /* TDmode in floating-mode registers must always go into a register
20459 pair with the most significant word in the even-numbered register
20460 to match ISA requirements. In little-endian mode, this does not
20461 match subreg numbering, so we cannot allow subregs. */
20462 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20463 return false;
20464
20465 if (from_size < 8 || to_size < 8)
20466 return false;
20467
20468 if (from_size == 8 && (8 * to_nregs) != to_size)
20469 return false;
20470
20471 if (to_size == 8 && (8 * from_nregs) != from_size)
20472 return false;
20473
20474 return true;
20475 }
20476 else
20477 return true;
20478 }
20479
20480 /* Since the VSX register set includes traditional floating point registers
20481 and altivec registers, just check for the size being different instead of
20482 trying to check whether the modes are vector modes. Otherwise it won't
20483 allow say DF and DI to change classes. For types like TFmode and TDmode
20484 that take 2 64-bit registers, rather than a single 128-bit register, don't
20485 allow subregs of those types to other 128 bit types. */
20486 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20487 {
20488 unsigned num_regs = (from_size + 15) / 16;
20489 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20490 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20491 return false;
20492
20493 return (from_size == 8 || from_size == 16);
20494 }
20495
20496 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20497 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20498 return false;
20499
20500 return true;
20501 }
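
/* For example: a DImode subreg of a KFmode value in a VSX register is
   rejected above, because the 64-bit scalar lives in the upper half
   of the 128-bit register and the subreg would name the wrong half;
   DFmode <-> DImode subregs (both 8 bytes) remain allowed. */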
20502
20503 /* Debug version of rs6000_can_change_mode_class. */
20504 static bool
20505 rs6000_debug_can_change_mode_class (machine_mode from,
20506 machine_mode to,
20507 reg_class_t rclass)
20508 {
20509 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20510
20511 fprintf (stderr,
20512 "rs6000_can_change_mode_class, return %s, from = %s, "
20513 "to = %s, rclass = %s\n",
20514 ret ? "true" : "false",
20515 GET_MODE_NAME (from), GET_MODE_NAME (to),
20516 reg_class_names[rclass]);
20517
20518 return ret;
20519 }
20520 \f
20521 /* Return a string to do a move operation of 128 bits of data. */
20522
20523 const char *
20524 rs6000_output_move_128bit (rtx operands[])
20525 {
20526 rtx dest = operands[0];
20527 rtx src = operands[1];
20528 machine_mode mode = GET_MODE (dest);
20529 int dest_regno;
20530 int src_regno;
20531 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20532 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20533
20534 if (REG_P (dest))
20535 {
20536 dest_regno = REGNO (dest);
20537 dest_gpr_p = INT_REGNO_P (dest_regno);
20538 dest_fp_p = FP_REGNO_P (dest_regno);
20539 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20540 dest_vsx_p = dest_fp_p | dest_vmx_p;
20541 }
20542 else
20543 {
20544 dest_regno = -1;
20545 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20546 }
20547
20548 if (REG_P (src))
20549 {
20550 src_regno = REGNO (src);
20551 src_gpr_p = INT_REGNO_P (src_regno);
20552 src_fp_p = FP_REGNO_P (src_regno);
20553 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20554 src_vsx_p = src_fp_p | src_vmx_p;
20555 }
20556 else
20557 {
20558 src_regno = -1;
20559 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20560 }
20561
20562 /* Register moves. */
20563 if (dest_regno >= 0 && src_regno >= 0)
20564 {
20565 if (dest_gpr_p)
20566 {
20567 if (src_gpr_p)
20568 return "#";
20569
20570 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20571 return (WORDS_BIG_ENDIAN
20572 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20573 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20574
20575 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20576 return "#";
20577 }
20578
20579 else if (TARGET_VSX && dest_vsx_p)
20580 {
20581 if (src_vsx_p)
20582 return "xxlor %x0,%x1,%x1";
20583
20584 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20585 return (WORDS_BIG_ENDIAN
20586 ? "mtvsrdd %x0,%1,%L1"
20587 : "mtvsrdd %x0,%L1,%1");
20588
20589 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20590 return "#";
20591 }
20592
20593 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20594 return "vor %0,%1,%1";
20595
20596 else if (dest_fp_p && src_fp_p)
20597 return "#";
20598 }
20599
20600 /* Loads. */
20601 else if (dest_regno >= 0 && MEM_P (src))
20602 {
20603 if (dest_gpr_p)
20604 {
20605 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20606 return "lq %0,%1";
20607 else
20608 return "#";
20609 }
20610
20611 else if (TARGET_ALTIVEC && dest_vmx_p
20612 && altivec_indexed_or_indirect_operand (src, mode))
20613 return "lvx %0,%y1";
20614
20615 else if (TARGET_VSX && dest_vsx_p)
20616 {
20617 if (mode_supports_dq_form (mode)
20618 && quad_address_p (XEXP (src, 0), mode, true))
20619 return "lxv %x0,%1";
20620
20621 else if (TARGET_P9_VECTOR)
20622 return "lxvx %x0,%y1";
20623
20624 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20625 return "lxvw4x %x0,%y1";
20626
20627 else
20628 return "lxvd2x %x0,%y1";
20629 }
20630
20631 else if (TARGET_ALTIVEC && dest_vmx_p)
20632 return "lvx %0,%y1";
20633
20634 else if (dest_fp_p)
20635 return "#";
20636 }
20637
20638 /* Stores. */
20639 else if (src_regno >= 0 && MEM_P (dest))
20640 {
20641 if (src_gpr_p)
20642 {
20643 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20644 return "stq %1,%0";
20645 else
20646 return "#";
20647 }
20648
20649 else if (TARGET_ALTIVEC && src_vmx_p
20650 && altivec_indexed_or_indirect_operand (dest, mode))
20651 return "stvx %1,%y0";
20652
20653 else if (TARGET_VSX && src_vsx_p)
20654 {
20655 if (mode_supports_dq_form (mode)
20656 && quad_address_p (XEXP (dest, 0), mode, true))
20657 return "stxv %x1,%0";
20658
20659 else if (TARGET_P9_VECTOR)
20660 return "stxvx %x1,%y0";
20661
20662 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20663 return "stxvw4x %x1,%y0";
20664
20665 else
20666 return "stxvd2x %x1,%y0";
20667 }
20668
20669 else if (TARGET_ALTIVEC && src_vmx_p)
20670 return "stvx %1,%y0";
20671
20672 else if (src_fp_p)
20673 return "#";
20674 }
20675
20676 /* Constants. */
20677 else if (dest_regno >= 0
20678 && (CONST_INT_P (src)
20679 || CONST_WIDE_INT_P (src)
20680 || CONST_DOUBLE_P (src)
20681 || GET_CODE (src) == CONST_VECTOR))
20682 {
20683 if (dest_gpr_p)
20684 return "#";
20685
20686 else if ((dest_vmx_p && TARGET_ALTIVEC)
20687 || (dest_vsx_p && TARGET_VSX))
20688 return output_vec_const_move (operands);
20689 }
20690
20691 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20692 }
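
/* For example (a sketch; operand numbers are arbitrary): a V4SImode
   load into a VSX register yields "lxvw4x %x0,%y1" on power7,
   "lxvx %x0,%y1" on power9, or "lxv %x0,%1" when the address is a
   valid DQ-form offset; a GPR-to-GPR TImode move returns "#" and is
   split into word moves later (see rs6000_split_128bit_ok_p below). */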
20693
20694 /* Validate a 128-bit move. */
20695 bool
20696 rs6000_move_128bit_ok_p (rtx operands[])
20697 {
20698 machine_mode mode = GET_MODE (operands[0]);
20699 return (gpc_reg_operand (operands[0], mode)
20700 || gpc_reg_operand (operands[1], mode));
20701 }
20702
20703 /* Return true if a 128-bit move needs to be split. */
20704 bool
20705 rs6000_split_128bit_ok_p (rtx operands[])
20706 {
20707 if (!reload_completed)
20708 return false;
20709
20710 if (!gpr_or_gpr_p (operands[0], operands[1]))
20711 return false;
20712
20713 if (quad_load_store_p (operands[0], operands[1]))
20714 return false;
20715
20716 return true;
20717 }
20718
20719 \f
20720 /* Given a comparison operation, return the bit number in CCR to test. We
20721 know this is a valid comparison.
20722
20723 SCC_P is 1 if this is for an scc. That means that %D will have been
20724 used instead of %C, so the bits will be in different places.
20725
20726 Return -1 if OP isn't a valid comparison for some reason. */
20727
20728 int
20729 ccr_bit (rtx op, int scc_p)
20730 {
20731 enum rtx_code code = GET_CODE (op);
20732 machine_mode cc_mode;
20733 int cc_regnum;
20734 int base_bit;
20735 rtx reg;
20736
20737 if (!COMPARISON_P (op))
20738 return -1;
20739
20740 reg = XEXP (op, 0);
20741
20742 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20743 return -1;
20744
20745 cc_mode = GET_MODE (reg);
20746 cc_regnum = REGNO (reg);
20747 base_bit = 4 * (cc_regnum - CR0_REGNO);
20748
20749 validate_condition_mode (code, cc_mode);
20750
20751 /* When generating a sCOND operation, only positive conditions are
20752 allowed. */
20753 if (scc_p)
20754 switch (code)
20755 {
20756 case EQ:
20757 case GT:
20758 case LT:
20759 case UNORDERED:
20760 case GTU:
20761 case LTU:
20762 break;
20763 default:
20764 return -1;
20765 }
20766
20767 switch (code)
20768 {
20769 case NE:
20770 return scc_p ? base_bit + 3 : base_bit + 2;
20771 case EQ:
20772 return base_bit + 2;
20773 case GT: case GTU: case UNLE:
20774 return base_bit + 1;
20775 case LT: case LTU: case UNGE:
20776 return base_bit;
20777 case ORDERED: case UNORDERED:
20778 return base_bit + 3;
20779
20780 case GE: case GEU:
20781 /* If scc, we will have done a cror to put the bit in the
20782 unordered position. So test that bit. For integer, this is ! LT
20783 unless this is an scc insn. */
20784 return scc_p ? base_bit + 3 : base_bit;
20785
20786 case LE: case LEU:
20787 return scc_p ? base_bit + 3 : base_bit + 1;
20788
20789 default:
20790 return -1;
20791 }
20792 }
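
/* Layout recap for the code above: condition register field crN holds
   four bits at position 4*N, ordered LT, GT, EQ, SO. So for an EQ
   test of cr2, ccr_bit returns 4*2 + 2 = 10; for an scc GE it returns
   the SO position (4*N + 3), because a preceding cror has copied the
   result there. */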
20793 \f
20794 /* Return the GOT register. */
20795
20796 rtx
20797 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20798 {
20799 /* The second flow pass currently (June 1999) can't update
20800 regs_ever_live without disturbing other parts of the compiler, so
20801 update it here to make the prolog/epilogue code happy. */
20802 if (!can_create_pseudo_p ()
20803 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20804 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20805
20806 crtl->uses_pic_offset_table = 1;
20807
20808 return pic_offset_table_rtx;
20809 }
20810 \f
20811 static rs6000_stack_t stack_info;
20812
20813 /* Function to init struct machine_function.
20814 This will be called, via a pointer variable,
20815 from push_function_context. */
20816
20817 static struct machine_function *
20818 rs6000_init_machine_status (void)
20819 {
20820 stack_info.reload_completed = 0;
20821 return ggc_cleared_alloc<machine_function> ();
20822 }
20823 \f
20824 #define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
20825
20826 /* Write out a function code label. */
20827
20828 void
20829 rs6000_output_function_entry (FILE *file, const char *fname)
20830 {
20831 if (fname[0] != '.')
20832 {
20833 switch (DEFAULT_ABI)
20834 {
20835 default:
20836 gcc_unreachable ();
20837
20838 case ABI_AIX:
20839 if (DOT_SYMBOLS)
20840 putc ('.', file);
20841 else
20842 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20843 break;
20844
20845 case ABI_ELFv2:
20846 case ABI_V4:
20847 case ABI_DARWIN:
20848 break;
20849 }
20850 }
20851
20852 RS6000_OUTPUT_BASENAME (file, fname);
20853 }
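
/* For example: under ABI_AIX with dot-symbols, "foo" is emitted as
   ".foo" (the code entry point, as distinct from the "foo" function
   descriptor); without dot-symbols an internal-label prefix gives
   "L.foo"; ELFv2, V4 and Darwin emit plain "foo". */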
20854
20855 /* Print an operand. Recognize special options, documented below. */
20856
20857 #if TARGET_ELF
20858 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20859 only introduced by the linker, when applying the sda21
20860 relocation. */
20861 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20862 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20863 #else
20864 #define SMALL_DATA_RELOC "sda21"
20865 #define SMALL_DATA_REG 0
20866 #endif
20867
20868 void
20869 print_operand (FILE *file, rtx x, int code)
20870 {
20871 int i;
20872 unsigned HOST_WIDE_INT uval;
20873
20874 switch (code)
20875 {
20876 /* %a is output_address. */
20877
20878 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20879 output_operand. */
20880
20881 case 'D':
20882 /* Like 'J' but get to the GT bit only. */
20883 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20884 {
20885 output_operand_lossage ("invalid %%D value");
20886 return;
20887 }
20888
20889 /* Bit 1 is GT bit. */
20890 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20891
20892 /* Add one for shift count in rlinm for scc. */
20893 fprintf (file, "%d", i + 1);
20894 return;
20895
20896 case 'e':
20897 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20898 if (! INT_P (x))
20899 {
20900 output_operand_lossage ("invalid %%e value");
20901 return;
20902 }
20903
20904 uval = INTVAL (x);
20905 if ((uval & 0xffff) == 0 && uval != 0)
20906 putc ('s', file);
20907 return;
20908
20909 case 'E':
20910 /* X is a CR register. Print the number of the EQ bit of the CR. */
20911 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20912 output_operand_lossage ("invalid %%E value");
20913 else
20914 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20915 return;
20916
20917 case 'f':
20918 /* X is a CR register. Print the shift count needed to move it
20919 to the high-order four bits. */
20920 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20921 output_operand_lossage ("invalid %%f value");
20922 else
20923 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20924 return;
20925
20926 case 'F':
20927 /* Similar, but print the count for the rotate in the opposite
20928 direction. */
20929 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20930 output_operand_lossage ("invalid %%F value");
20931 else
20932 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20933 return;
20934
20935 case 'G':
20936 /* X is a constant integer. If it is negative, print "m",
20937 otherwise print "z". This is to make an aze or ame insn. */
20938 if (!CONST_INT_P (x))
20939 output_operand_lossage ("invalid %%G value");
20940 else if (INTVAL (x) >= 0)
20941 putc ('z', file);
20942 else
20943 putc ('m', file);
20944 return;
20945
20946 case 'h':
20947 /* If constant, output low-order five bits. Otherwise, write
20948 normally. */
20949 if (INT_P (x))
20950 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20951 else
20952 print_operand (file, x, 0);
20953 return;
20954
20955 case 'H':
20956 /* If constant, output low-order six bits. Otherwise, write
20957 normally. */
20958 if (INT_P (x))
20959 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20960 else
20961 print_operand (file, x, 0);
20962 return;
20963
20964 case 'I':
20965 /* Print `i' if this is a constant, else nothing. */
20966 if (INT_P (x))
20967 putc ('i', file);
20968 return;
20969
20970 case 'j':
20971 /* Write the bit number in CCR for jump. */
20972 i = ccr_bit (x, 0);
20973 if (i == -1)
20974 output_operand_lossage ("invalid %%j code");
20975 else
20976 fprintf (file, "%d", i);
20977 return;
20978
20979 case 'J':
20980 /* Similar, but add one for shift count in rlinm for scc and pass
20981 scc flag to `ccr_bit'. */
20982 i = ccr_bit (x, 1);
20983 if (i == -1)
20984 output_operand_lossage ("invalid %%J code");
20985 else
20986 /* If we want bit 31, write a shift count of zero, not 32. */
20987 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20988 return;
20989
20990 case 'k':
20991 /* X must be a constant. Write the 1's complement of the
20992 constant. */
20993 if (! INT_P (x))
20994 output_operand_lossage ("invalid %%k value");
20995 else
20996 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20997 return;
20998
20999 case 'K':
21000 /* X must be a symbolic constant on ELF. Write an
21001 expression suitable for an 'addi' that adds in the low 16
21002 bits of the MEM. */
21003 if (GET_CODE (x) == CONST)
21004 {
21005 if (GET_CODE (XEXP (x, 0)) != PLUS
21006 || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
21007 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21008 || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
21009 output_operand_lossage ("invalid %%K value");
21010 }
21011 print_operand_address (file, x);
21012 fputs ("@l", file);
21013 return;
21014
21015 /* %l is output_asm_label. */
21016
21017 case 'L':
21018 /* Write second word of DImode or DFmode reference. Works on register
21019 or non-indexed memory only. */
21020 if (REG_P (x))
21021 fputs (reg_names[REGNO (x) + 1], file);
21022 else if (MEM_P (x))
21023 {
21024 machine_mode mode = GET_MODE (x);
21025 /* Handle possible auto-increment. Since it is pre-increment and
21026 we have already done it, we can just use an offset of word. */
21027 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21028 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21029 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21030 UNITS_PER_WORD));
21031 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21032 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21033 UNITS_PER_WORD));
21034 else
21035 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21036 UNITS_PER_WORD),
21037 0));
21038
21039 if (small_data_operand (x, GET_MODE (x)))
21040 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21041 reg_names[SMALL_DATA_REG]);
21042 }
21043 return;
21044
21045 case 'N': /* Unused */
21046 /* Write the number of elements in the vector times 4. */
21047 if (GET_CODE (x) != PARALLEL)
21048 output_operand_lossage ("invalid %%N value");
21049 else
21050 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21051 return;
21052
21053 case 'O': /* Unused */
21054 /* Similar, but subtract 1 first. */
21055 if (GET_CODE (x) != PARALLEL)
21056 output_operand_lossage ("invalid %%O value");
21057 else
21058 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21059 return;
21060
21061 case 'p':
21062 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21063 if (! INT_P (x)
21064 || INTVAL (x) < 0
21065 || (i = exact_log2 (INTVAL (x))) < 0)
21066 output_operand_lossage ("invalid %%p value");
21067 else
21068 fprintf (file, "%d", i);
21069 return;
21070
21071 case 'P':
21072 /* The operand must be an indirect memory reference. The result
21073 is the register name. */
21074 if (!MEM_P (x) || !REG_P (XEXP (x, 0))
21075 || REGNO (XEXP (x, 0)) >= 32)
21076 output_operand_lossage ("invalid %%P value");
21077 else
21078 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21079 return;
21080
21081 case 'q':
21082 /* This outputs the logical code corresponding to a boolean
21083 expression. The expression may have one or both operands
21084 negated (if one, only the first one). For condition register
21085 logical operations, it will also treat the negated
21086 CR codes as NOTs, but not handle NOTs of them. */
21087 {
21088 const char *const *t = 0;
21089 const char *s;
21090 enum rtx_code code = GET_CODE (x);
21091 static const char * const tbl[3][3] = {
21092 { "and", "andc", "nor" },
21093 { "or", "orc", "nand" },
21094 { "xor", "eqv", "xor" } };
21095
21096 if (code == AND)
21097 t = tbl[0];
21098 else if (code == IOR)
21099 t = tbl[1];
21100 else if (code == XOR)
21101 t = tbl[2];
21102 else
21103 output_operand_lossage ("invalid %%q value");
21104
21105 if (GET_CODE (XEXP (x, 0)) != NOT)
21106 s = t[0];
21107 else
21108 {
21109 if (GET_CODE (XEXP (x, 1)) == NOT)
21110 s = t[2];
21111 else
21112 s = t[1];
21113 }
21114
21115 fputs (s, file);
21116 }
21117 return;
21118
21119 case 'Q':
21120 if (! TARGET_MFCRF)
21121 return;
21122 fputc (',', file);
21123 /* FALLTHRU */
21124
21125 case 'R':
21126 /* X is a CR register. Print the mask for `mtcrf'. */
21127 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21128 output_operand_lossage ("invalid %%R value");
21129 else
21130 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21131 return;
21132
21133 case 's':
21134 /* Low 5 bits of 32 - value */
21135 if (! INT_P (x))
21136 output_operand_lossage ("invalid %%s value");
21137 else
21138 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21139 return;
21140
21141 case 't':
21142 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21143 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21144 {
21145 output_operand_lossage ("invalid %%t value");
21146 return;
21147 }
21148
21149 /* Bit 3 is OV bit. */
21150 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21151
21152 /* If we want bit 31, write a shift count of zero, not 32. */
21153 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21154 return;
21155
21156 case 'T':
21157 /* Print the symbolic name of a branch target register. */
21158 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21159 x = XVECEXP (x, 0, 0);
21160 if (!REG_P (x) || (REGNO (x) != LR_REGNO
21161 && REGNO (x) != CTR_REGNO))
21162 output_operand_lossage ("invalid %%T value");
21163 else if (REGNO (x) == LR_REGNO)
21164 fputs ("lr", file);
21165 else
21166 fputs ("ctr", file);
21167 return;
21168
21169 case 'u':
21170 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21171 for use in unsigned operand. */
21172 if (! INT_P (x))
21173 {
21174 output_operand_lossage ("invalid %%u value");
21175 return;
21176 }
21177
21178 uval = INTVAL (x);
21179 if ((uval & 0xffff) == 0)
21180 uval >>= 16;
21181
21182 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21183 return;
21184
21185 case 'v':
21186 /* High-order 16 bits of constant for use in signed operand. */
21187 if (! INT_P (x))
21188 output_operand_lossage ("invalid %%v value");
21189 else
21190 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21191 (INTVAL (x) >> 16) & 0xffff);
21192 return;
21193
21194 case 'U':
21195 /* Print `u' if this has an auto-increment or auto-decrement. */
21196 if (MEM_P (x)
21197 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21198 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21199 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21200 putc ('u', file);
21201 return;
21202
21203 case 'V':
21204 /* Print the trap code for this operand. */
21205 switch (GET_CODE (x))
21206 {
21207 case EQ:
21208 fputs ("eq", file); /* 4 */
21209 break;
21210 case NE:
21211 fputs ("ne", file); /* 24 */
21212 break;
21213 case LT:
21214 fputs ("lt", file); /* 16 */
21215 break;
21216 case LE:
21217 fputs ("le", file); /* 20 */
21218 break;
21219 case GT:
21220 fputs ("gt", file); /* 8 */
21221 break;
21222 case GE:
21223 fputs ("ge", file); /* 12 */
21224 break;
21225 case LTU:
21226 fputs ("llt", file); /* 2 */
21227 break;
21228 case LEU:
21229 fputs ("lle", file); /* 6 */
21230 break;
21231 case GTU:
21232 fputs ("lgt", file); /* 1 */
21233 break;
21234 case GEU:
21235 fputs ("lge", file); /* 5 */
21236 break;
21237 default:
21238 output_operand_lossage ("invalid %%V value");
21239 }
21240 break;
21241
21242 case 'w':
21243 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21244 normally. */
21245 if (INT_P (x))
21246 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21247 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21248 else
21249 print_operand (file, x, 0);
21250 return;
21251
21252 case 'x':
21253 /* X is a FPR or Altivec register used in a VSX context. */
21254 if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
21255 output_operand_lossage ("invalid %%x value");
21256 else
21257 {
21258 int reg = REGNO (x);
21259 int vsx_reg = (FP_REGNO_P (reg)
21260 ? reg - 32
21261 : reg - FIRST_ALTIVEC_REGNO + 32);
21262
21263 #ifdef TARGET_REGNAMES
21264 if (TARGET_REGNAMES)
21265 fprintf (file, "%%vs%d", vsx_reg);
21266 else
21267 #endif
21268 fprintf (file, "%d", vsx_reg);
21269 }
21270 return;
21271
21272 case 'X':
21273 if (MEM_P (x)
21274 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21275 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21276 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21277 putc ('x', file);
21278 return;
21279
21280 case 'Y':
21281 /* Like 'L', for third word of TImode/PTImode. */
21282 if (REG_P (x))
21283 fputs (reg_names[REGNO (x) + 2], file);
21284 else if (MEM_P (x))
21285 {
21286 machine_mode mode = GET_MODE (x);
21287 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21288 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21289 output_address (mode, plus_constant (Pmode,
21290 XEXP (XEXP (x, 0), 0), 8));
21291 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21292 output_address (mode, plus_constant (Pmode,
21293 XEXP (XEXP (x, 0), 0), 8));
21294 else
21295 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21296 if (small_data_operand (x, GET_MODE (x)))
21297 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21298 reg_names[SMALL_DATA_REG]);
21299 }
21300 return;
21301
21302 case 'z':
21303 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21304 x = XVECEXP (x, 0, 1);
21305 /* X is a SYMBOL_REF. Write out the name preceded by a
21306 period and without any trailing data in brackets. Used for function
21307 names. If we are configured for System V (or the embedded ABI) on
21308 the PowerPC, do not emit the period, since those systems do not use
21309 TOCs and the like. */
21310 if (!SYMBOL_REF_P (x))
21311 {
21312 output_operand_lossage ("invalid %%z value");
21313 return;
21314 }
21315
21316 /* For macho, check to see if we need a stub. */
21317 if (TARGET_MACHO)
21318 {
21319 const char *name = XSTR (x, 0);
21320 #if TARGET_MACHO
21321 if (darwin_emit_branch_islands
21322 && MACHOPIC_INDIRECT
21323 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21324 name = machopic_indirection_name (x, /*stub_p=*/true);
21325 #endif
21326 assemble_name (file, name);
21327 }
21328 else if (!DOT_SYMBOLS)
21329 assemble_name (file, XSTR (x, 0));
21330 else
21331 rs6000_output_function_entry (file, XSTR (x, 0));
21332 return;
21333
21334 case 'Z':
21335 /* Like 'L', for last word of TImode/PTImode. */
21336 if (REG_P (x))
21337 fputs (reg_names[REGNO (x) + 3], file);
21338 else if (MEM_P (x))
21339 {
21340 machine_mode mode = GET_MODE (x);
21341 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21342 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21343 output_address (mode, plus_constant (Pmode,
21344 XEXP (XEXP (x, 0), 0), 12));
21345 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21346 output_address (mode, plus_constant (Pmode,
21347 XEXP (XEXP (x, 0), 0), 12));
21348 else
21349 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21350 if (small_data_operand (x, GET_MODE (x)))
21351 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21352 reg_names[SMALL_DATA_REG]);
21353 }
21354 return;
21355
21356 /* Print AltiVec memory operand. */
21357 case 'y':
21358 {
21359 rtx tmp;
21360
21361 gcc_assert (MEM_P (x));
21362
21363 tmp = XEXP (x, 0);
21364
21365 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21366 && GET_CODE (tmp) == AND
21367 && CONST_INT_P (XEXP (tmp, 1))
21368 && INTVAL (XEXP (tmp, 1)) == -16)
21369 tmp = XEXP (tmp, 0);
21370 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21371 && GET_CODE (tmp) == PRE_MODIFY)
21372 tmp = XEXP (tmp, 1);
21373 if (REG_P (tmp))
21374 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21375 else
21376 {
21377 if (GET_CODE (tmp) != PLUS
21378 || !REG_P (XEXP (tmp, 0))
21379 || !REG_P (XEXP (tmp, 1)))
21380 {
21381 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21382 break;
21383 }
21384
21385 if (REGNO (XEXP (tmp, 0)) == 0)
21386 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21387 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21388 else
21389 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21390 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21391 }
21392 break;
21393 }
21394
21395 case 0:
21396 if (REG_P (x))
21397 fprintf (file, "%s", reg_names[REGNO (x)]);
21398 else if (MEM_P (x))
21399 {
21400 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21401 know the width from the mode. */
21402 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21403 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21404 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21405 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21406 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21407 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21408 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21409 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21410 else
21411 output_address (GET_MODE (x), XEXP (x, 0));
21412 }
21413 else if (toc_relative_expr_p (x, false,
21414 &tocrel_base_oac, &tocrel_offset_oac))
21415 /* This hack along with a corresponding hack in
21416 rs6000_output_addr_const_extra arranges to output addends
21417 where the assembler expects to find them. eg.
21418 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21419 without this hack would be output as "x@toc+4". We
21420 want "x+4@toc". */
21421 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21422 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21423 output_addr_const (file, XVECEXP (x, 0, 0));
21424 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21425 output_addr_const (file, XVECEXP (x, 0, 1));
21426 else
21427 output_addr_const (file, x);
21428 return;
21429
21430 case '&':
21431 if (const char *name = get_some_local_dynamic_name ())
21432 assemble_name (file, name);
21433 else
21434 output_operand_lossage ("'%%&' used without any "
21435 "local dynamic TLS references");
21436 return;
21437
21438 default:
21439 output_operand_lossage ("invalid %%xn code");
21440 }
21441 }
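
/* A few illustrative expansions of the codes above (sketches only):
   %L of a DImode value in r4 on a 32-bit target prints "5" (the
   second word); %w of 0x18000 prints "-32768" (low 16 bits, signed);
   %x of FPR n prints n and of AltiVec register n prints n + 32, the
   unified VSX numbering; %U of a PRE_INC memory operand appends "u"
   to select an update-form instruction. */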
21442 \f
21443 /* Print the address of an operand. */
21444
21445 void
21446 print_operand_address (FILE *file, rtx x)
21447 {
21448 if (REG_P (x))
21449 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21450 else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
21451 || GET_CODE (x) == LABEL_REF)
21452 {
21453 output_addr_const (file, x);
21454 if (small_data_operand (x, GET_MODE (x)))
21455 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21456 reg_names[SMALL_DATA_REG]);
21457 else
21458 gcc_assert (!TARGET_TOC);
21459 }
21460 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21461 && REG_P (XEXP (x, 1)))
21462 {
21463 if (REGNO (XEXP (x, 0)) == 0)
21464 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21465 reg_names[ REGNO (XEXP (x, 0)) ]);
21466 else
21467 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21468 reg_names[ REGNO (XEXP (x, 1)) ]);
21469 }
21470 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21471 && CONST_INT_P (XEXP (x, 1)))
21472 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21473 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21474 #if TARGET_MACHO
21475 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21476 && CONSTANT_P (XEXP (x, 1)))
21477 {
21478 fprintf (file, "lo16(");
21479 output_addr_const (file, XEXP (x, 1));
21480 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21481 }
21482 #endif
21483 #if TARGET_ELF
21484 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21485 && CONSTANT_P (XEXP (x, 1)))
21486 {
21487 output_addr_const (file, XEXP (x, 1));
21488 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21489 }
21490 #endif
21491 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21492 {
21493 /* This hack along with a corresponding hack in
21494 rs6000_output_addr_const_extra arranges to output addends
21495 where the assembler expects to find them. eg.
21496 (lo_sum (reg 9)
21497 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21498 without this hack would be output as "x@toc+8@l(9)". We
21499 want "x+8@toc@l(9)". */
21500 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21501 if (GET_CODE (x) == LO_SUM)
21502 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21503 else
21504 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21505 }
21506 else
21507 output_addr_const (file, x);
21508 }
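
/* For example: (reg 9) prints as "0(9)"; (plus (reg 9) 16) prints as
   "16(9)"; the indexed form (plus (reg 0) (reg 10)) prints as "10,0",
   swapped because r0 in the first slot of an indexed address is read
   as the literal value 0. */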
21509 \f
21510 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21511
21512 static bool
21513 rs6000_output_addr_const_extra (FILE *file, rtx x)
21514 {
21515 if (GET_CODE (x) == UNSPEC)
21516 switch (XINT (x, 1))
21517 {
21518 case UNSPEC_TOCREL:
21519 gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
21520 && REG_P (XVECEXP (x, 0, 1))
21521 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21522 output_addr_const (file, XVECEXP (x, 0, 0));
21523 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21524 {
21525 if (INTVAL (tocrel_offset_oac) >= 0)
21526 fprintf (file, "+");
21527 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21528 }
21529 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21530 {
21531 putc ('-', file);
21532 assemble_name (file, toc_label_name);
21533 need_toc_init = 1;
21534 }
21535 else if (TARGET_ELF)
21536 fputs ("@toc", file);
21537 return true;
21538
21539 #if TARGET_MACHO
21540 case UNSPEC_MACHOPIC_OFFSET:
21541 output_addr_const (file, XVECEXP (x, 0, 0));
21542 putc ('-', file);
21543 machopic_output_function_base_name (file);
21544 return true;
21545 #endif
21546 }
21547 return false;
21548 }
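
/* For example: (unspec [(symbol_ref "x") (reg 2)] UNSPEC_TOCREL) with
   a tocrel offset of 4 prints as "x+4@toc" on 64-bit ELF; in the
   !TARGET_AIX and minimal-TOC cases above the output is instead
   "x+4-" followed by the internal TOC base label. */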
21549 \f
21550 /* Target hook for assembling integer objects. The PowerPC version has
21551 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21552 is defined. It also needs to handle DI-mode objects on 64-bit
21553 targets. */
21554
21555 static bool
21556 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21557 {
21558 #ifdef RELOCATABLE_NEEDS_FIXUP
21559 /* Special handling for SI values. */
21560 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21561 {
21562 static int recurse = 0;
21563
21564 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21565 the .fixup section. Since the TOC section is already relocated, we
21566 don't need to mark it here. We used to skip the text section, but it
21567 should never be valid for relocated addresses to be placed in the text
21568 section. */
21569 if (DEFAULT_ABI == ABI_V4
21570 && (TARGET_RELOCATABLE || flag_pic > 1)
21571 && in_section != toc_section
21572 && !recurse
21573 && !CONST_SCALAR_INT_P (x)
21574 && CONSTANT_P (x))
21575 {
21576 char buf[256];
21577
21578 recurse = 1;
21579 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21580 fixuplabelno++;
21581 ASM_OUTPUT_LABEL (asm_out_file, buf);
21582 fprintf (asm_out_file, "\t.long\t(");
21583 output_addr_const (asm_out_file, x);
21584 fprintf (asm_out_file, ")@fixup\n");
21585 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21586 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21587 fprintf (asm_out_file, "\t.long\t");
21588 assemble_name (asm_out_file, buf);
21589 fprintf (asm_out_file, "\n\t.previous\n");
21590 recurse = 0;
21591 return true;
21592 }
21593 /* Remove initial .'s to turn a -mcall-aixdesc function
21594 address into the address of the descriptor, not the function
21595 itself. */
21596 else if (SYMBOL_REF_P (x)
21597 && XSTR (x, 0)[0] == '.'
21598 && DEFAULT_ABI == ABI_AIX)
21599 {
21600 const char *name = XSTR (x, 0);
21601 while (*name == '.')
21602 name++;
21603
21604 fprintf (asm_out_file, "\t.long\t%s\n", name);
21605 return true;
21606 }
21607 }
21608 #endif /* RELOCATABLE_NEEDS_FIXUP */
21609 return default_assemble_integer (x, size, aligned_p);
21610 }
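
/* For example (a sketch; the label name is illustrative): with
   -mrelocatable, assembling the address of "sym" as data emits roughly

	.LCP1:
		.long (sym)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCP1
		.previous

   so that startup code can relocate the word at .LCP1 at load time. */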
21611
21612 /* Return a template string for assembly to emit when making an
21613 external call. FUNOP is the call mem argument operand number. */
21614
21615 static const char *
21616 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21617 {
21618 /* -Wformat-overflow workaround, without which gcc thinks that %u
21619 might produce 10 digits. */
21620 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21621
21622 char arg[12];
21623 arg[0] = 0;
21624 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21625 {
21626 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21627 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21628 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21629 sprintf (arg, "(%%&@tlsld)");
21630 else
21631 gcc_unreachable ();
21632 }
21633
21634 /* The magic 32768 offset here corresponds to the offset of
21635 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21636 char z[11];
21637 sprintf (z, "%%z%u%s", funop,
21638 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21639 ? "+32768" : ""));
21640
21641 static char str[32]; /* 2 spare */
21642 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21643 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21644 sibcall ? "" : "\n\tnop");
21645 else if (DEFAULT_ABI == ABI_V4)
21646 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21647 flag_pic ? "@plt" : "");
21648 #if TARGET_MACHO
21649 /* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case. */
21650 else if (DEFAULT_ABI == ABI_DARWIN)
21651 {
21652 /* The cookie is in operand func+2. */
21653 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21654 int cookie = INTVAL (operands[funop + 2]);
21655 if (cookie & CALL_LONG)
21656 {
21657 tree funname = get_identifier (XSTR (operands[funop], 0));
21658 tree labelname = get_prev_label (funname);
21659 gcc_checking_assert (labelname && !sibcall);
21660
21661 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21662 instruction will reach 'foo', otherwise link as 'bl L42'".
21663 "L42" should be a 'branch island', that will do a far jump to
21664 'foo'. Branch islands are generated in
21665 macho_branch_islands(). */
21666 sprintf (str, "jbsr %%z%u,%.10s", funop,
21667 IDENTIFIER_POINTER (labelname));
21668 }
21669 else
21670 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21671 after the call. */
21672 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21673 }
21674 #endif
21675 else
21676 gcc_unreachable ();
21677 return str;
21678 }
21679
21680 const char *
21681 rs6000_call_template (rtx *operands, unsigned int funop)
21682 {
21683 return rs6000_call_template_1 (operands, funop, false);
21684 }
21685
21686 const char *
21687 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21688 {
21689 return rs6000_call_template_1 (operands, funop, true);
21690 }
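
/* For example: a direct call to "foo" is emitted as "bl foo" plus a
   "nop" on AIX/ELFv2 (the nop is the TOC-restore slot the linker may
   patch), as "bl foo@plt" for V4 PIC, and as "bl foo+32768@plt" for
   V4 secure PLT with -fPIC, using the .got2 offset described above. */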
21691
21692 /* As above, for indirect calls. */
21693
21694 static const char *
21695 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21696 bool sibcall)
21697 {
21698 /* -Wformat-overflow workaround, without which gcc thinks that %u
21699 might produce 10 digits. Note that -Wformat-overflow will not
21700 currently warn here for str[], so do not rely on a warning to
21701 ensure str[] is correctly sized. */
21702 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21703
21704 /* Currently, funop is either 0 or 1. The maximum string is always
21705 a !speculate 64-bit __tls_get_addr call.
21706
21707 ABI_AIX:
21708 . 9 ld 2,%3\n\t
21709 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21710 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21711 . 9 crset 2\n\t
21712 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21713 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21714 . 10 beq%T1l-\n\t
21715 . 10 ld 2,%4(1)
21716 .---
21717 .151
21718
21719 ABI_ELFv2:
21720 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21721 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21722 . 9 crset 2\n\t
21723 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21724 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21725 . 10 beq%T1l-\n\t
21726 . 10 ld 2,%3(1)
21727 .---
21728 .142
21729
21730 ABI_V4:
21731 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21732 . 35 .reloc .,R_PPC64_PLTSEQ,%z1+32768\n\t
21733 . 9 crset 2\n\t
21734 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21735 . 36 .reloc .,R_PPC64_PLTCALL,%z1+32768\n\t
21736 . 8 beq%T1l-
21737 .---
21738 .141 */
21739 static char str[160]; /* 8 spare */
21740 char *s = str;
21741 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21742
21743 if (DEFAULT_ABI == ABI_AIX)
21744 s += sprintf (s,
21745 "l%s 2,%%%u\n\t",
21746 ptrload, funop + 2);
21747
21748 /* We don't need the extra code to stop indirect call speculation if
21749 calling via LR. */
21750 bool speculate = (TARGET_MACHO
21751 || rs6000_speculate_indirect_jumps
21752 || (REG_P (operands[funop])
21753 && REGNO (operands[funop]) == LR_REGNO));
21754
21755 if (TARGET_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21756 {
21757 const char *rel64 = TARGET_64BIT ? "64" : "";
21758 char tls[29];
21759 tls[0] = 0;
21760 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21761 {
21762 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21763 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21764 rel64, funop + 1);
21765 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21766 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21767 rel64);
21768 else
21769 gcc_unreachable ();
21770 }
21771
21772 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21773 && flag_pic == 2 ? "+32768" : "");
21774 if (!speculate)
21775 {
21776 s += sprintf (s,
21777 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21778 tls, rel64, funop, addend);
21779 s += sprintf (s, "crset 2\n\t");
21780 }
21781 s += sprintf (s,
21782 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21783 tls, rel64, funop, addend);
21784 }
21785 else if (!speculate)
21786 s += sprintf (s, "crset 2\n\t");
21787
21788 if (DEFAULT_ABI == ABI_AIX)
21789 {
21790 if (speculate)
21791 sprintf (s,
21792 "b%%T%ul\n\t"
21793 "l%s 2,%%%u(1)",
21794 funop, ptrload, funop + 3);
21795 else
21796 sprintf (s,
21797 "beq%%T%ul-\n\t"
21798 "l%s 2,%%%u(1)",
21799 funop, ptrload, funop + 3);
21800 }
21801 else if (DEFAULT_ABI == ABI_ELFv2)
21802 {
21803 if (speculate)
21804 sprintf (s,
21805 "b%%T%ul\n\t"
21806 "l%s 2,%%%u(1)",
21807 funop, ptrload, funop + 2);
21808 else
21809 sprintf (s,
21810 "beq%%T%ul-\n\t"
21811 "l%s 2,%%%u(1)",
21812 funop, ptrload, funop + 2);
21813 }
21814 else
21815 {
21816 if (speculate)
21817 sprintf (s,
21818 "b%%T%u%s",
21819 funop, sibcall ? "" : "l");
21820 else
21821 sprintf (s,
21822 "beq%%T%u%s-%s",
21823 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21824 }
21825 return str;
21826 }
21827
21828 const char *
21829 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21830 {
21831 return rs6000_indirect_call_template_1 (operands, funop, false);
21832 }
21833
21834 const char *
21835 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21836 {
21837 return rs6000_indirect_call_template_1 (operands, funop, true);
21838 }
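
/* For example (ELFv2 with -mno-speculate-indirect-jumps): an indirect
   call through CTR comes out as

	crset 2
	beqctrl-
	ld 2,24(1)

   a branch that cannot be taken speculatively, followed by the TOC
   restore from the ABI-defined save slot; with speculation allowed it
   is simply "bctrl" plus the restore. */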
21839
21840 #if HAVE_AS_PLTSEQ
21841 /* Output indirect call insns.
21842 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
21843 const char *
21844 rs6000_pltseq_template (rtx *operands, int which)
21845 {
21846 const char *rel64 = TARGET_64BIT ? "64" : "";
21847 char tls[28];
21848 tls[0] = 0;
21849 if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
21850 {
21851 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21852 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21853 rel64);
21854 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21855 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21856 rel64);
21857 else
21858 gcc_unreachable ();
21859 }
21860
21861 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21862 static char str[96]; /* 15 spare */
21863 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
21864 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21865 && flag_pic == 2 ? "+32768" : "");
21866 switch (which)
21867 {
21868 case 0:
21869 sprintf (str,
21870 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21871 "st%s",
21872 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21873 break;
21874 case 1:
21875 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21876 sprintf (str,
21877 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21878 "lis %%0,0",
21879 tls, off, rel64);
21880 else
21881 sprintf (str,
21882 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21883 "addis %%0,%%1,0",
21884 tls, off, rel64, addend);
21885 break;
21886 case 2:
21887 sprintf (str,
21888 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21889 "l%s %%0,0(%%1)",
21890 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21891 TARGET_64BIT ? "d" : "wz");
21892 break;
21893 case 3:
21894 sprintf (str,
21895 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21896 "mtctr %%1",
21897 tls, rel64, addend);
21898 break;
21899 default:
21900 gcc_unreachable ();
21901 }
21902 return str;
21903 }
21904 #endif
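
/* For example (a sketch, assuming powerpc64le/ELFv2): for a call to
   "foo", the four stages above annotate an inline PLT sequence as

	.reloc .,R_PPC64_PLTSEQ,foo
	std 2,24(1)
	.reloc .,R_PPC64_PLT16_HA,foo
	addis 9,2,0
	.reloc .,R_PPC64_PLT16_LO_DS,foo
	ld 9,0(9)
	.reloc .,R_PPC64_PLTSEQ,foo
	mtctr 9

   letting the linker rewrite the whole sequence once "foo" resolves;
   r9 here stands for whatever scratch register was allocated. */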
21905
21906 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21907 /* Emit an assembler directive to set symbol visibility for DECL to
21908 VISIBILITY_TYPE. */
21909
21910 static void
21911 rs6000_assemble_visibility (tree decl, int vis)
21912 {
21913 if (TARGET_XCOFF)
21914 return;
21915
21916 /* Functions need to have their entry point symbol visibility set as
21917 well as their descriptor symbol visibility. */
21918 if (DEFAULT_ABI == ABI_AIX
21919 && DOT_SYMBOLS
21920 && TREE_CODE (decl) == FUNCTION_DECL)
21921 {
21922 static const char * const visibility_types[] = {
21923 NULL, "protected", "hidden", "internal"
21924 };
21925
21926 const char *name, *type;
21927
21928 name = ((* targetm.strip_name_encoding)
21929 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21930 type = visibility_types[vis];
21931
21932 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21933 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21934 }
21935 else
21936 default_assemble_visibility (decl, vis);
21937 }
21938 #endif
21939 \f
21940 enum rtx_code
21941 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21942 {
21943 /* Reversal of FP compares takes care -- an ordered compare
21944 becomes an unordered compare and vice versa. */
21945 if (mode == CCFPmode
21946 && (!flag_finite_math_only
21947 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21948 || code == UNEQ || code == LTGT))
21949 return reverse_condition_maybe_unordered (code);
21950 else
21951 return reverse_condition (code);
21952 }
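
/* For example: reversing (lt x y) in CCFPmode yields UNGE rather than
   GE, so a NaN operand still sends the branch the right way; under
   -ffinite-math-only the plain reversal to GE is safe and is used
   instead. */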
21953
21954 /* Generate a compare for CODE. Return a brand-new rtx that
21955 represents the result of the compare. */
21956
21957 static rtx
21958 rs6000_generate_compare (rtx cmp, machine_mode mode)
21959 {
21960 machine_mode comp_mode;
21961 rtx compare_result;
21962 enum rtx_code code = GET_CODE (cmp);
21963 rtx op0 = XEXP (cmp, 0);
21964 rtx op1 = XEXP (cmp, 1);
21965
21966 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21967 comp_mode = CCmode;
21968 else if (FLOAT_MODE_P (mode))
21969 comp_mode = CCFPmode;
21970 else if (code == GTU || code == LTU
21971 || code == GEU || code == LEU)
21972 comp_mode = CCUNSmode;
21973 else if ((code == EQ || code == NE)
21974 && unsigned_reg_p (op0)
21975 && (unsigned_reg_p (op1)
21976 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21977 /* These are unsigned values, perhaps there will be a later
21978 ordering compare that can be shared with this one. */
21979 comp_mode = CCUNSmode;
21980 else
21981 comp_mode = CCmode;
21982
21983 /* If we have an unsigned compare, make sure we don't have a signed value as
21984 an immediate. */
21985 if (comp_mode == CCUNSmode && CONST_INT_P (op1)
21986 && INTVAL (op1) < 0)
21987 {
21988 op0 = copy_rtx_if_shared (op0);
21989 op1 = force_reg (GET_MODE (op0), op1);
21990 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21991 }
21992
21993 /* First, the compare. */
21994 compare_result = gen_reg_rtx (comp_mode);
21995
21996 /* IEEE 128-bit support in VSX registers when we do not have hardware
21997 support. */
21998 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21999 {
22000 rtx libfunc = NULL_RTX;
22001 bool check_nan = false;
22002 rtx dest;
22003
22004 switch (code)
22005 {
22006 case EQ:
22007 case NE:
22008 libfunc = optab_libfunc (eq_optab, mode);
22009 break;
22010
22011 case GT:
22012 case GE:
22013 libfunc = optab_libfunc (ge_optab, mode);
22014 break;
22015
22016 case LT:
22017 case LE:
22018 libfunc = optab_libfunc (le_optab, mode);
22019 break;
22020
22021 case UNORDERED:
22022 case ORDERED:
22023 libfunc = optab_libfunc (unord_optab, mode);
22024 code = (code == UNORDERED) ? NE : EQ;
22025 break;
22026
22027 case UNGE:
22028 case UNGT:
22029 check_nan = true;
22030 libfunc = optab_libfunc (ge_optab, mode);
22031 code = (code == UNGE) ? GE : GT;
22032 break;
22033
22034 case UNLE:
22035 case UNLT:
22036 check_nan = true;
22037 libfunc = optab_libfunc (le_optab, mode);
22038 code = (code == UNLE) ? LE : LT;
22039 break;
22040
22041 case UNEQ:
22042 case LTGT:
22043 check_nan = true;
22044 libfunc = optab_libfunc (eq_optab, mode);
22045 code = (code == UNEQ) ? EQ : NE;
22046 break;
22047
22048 default:
22049 gcc_unreachable ();
22050 }
22051
22052 gcc_assert (libfunc);
22053
22054 if (!check_nan)
22055 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22056 SImode, op0, mode, op1, mode);
22057
22058 /* The library signals an exception for signalling NaNs, so we need to
22059 handle isgreater, etc. by first checking isordered. */
22060 else
22061 {
22062 rtx ne_rtx, normal_dest, unord_dest;
22063 rtx unord_func = optab_libfunc (unord_optab, mode);
22064 rtx join_label = gen_label_rtx ();
22065 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
22066 rtx unord_cmp = gen_reg_rtx (comp_mode);
22067
22068
22069 /* Test for either value being a NaN. */
22070 gcc_assert (unord_func);
22071 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
22072 SImode, op0, mode, op1, mode);
22073
22074 /* Set value (1) if either value is a NaN, and jump to the join
22075 label. */
22076 dest = gen_reg_rtx (SImode);
22077 emit_move_insn (dest, const1_rtx);
22078 emit_insn (gen_rtx_SET (unord_cmp,
22079 gen_rtx_COMPARE (comp_mode, unord_dest,
22080 const0_rtx)));
22081
22082 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
22083 emit_jump_insn (gen_rtx_SET (pc_rtx,
22084 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
22085 join_ref,
22086 pc_rtx)));
22087
22088 /* Do the normal comparison, knowing that the values are not
22089 NaNs. */
22090 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22091 SImode, op0, mode, op1, mode);
22092
22093 emit_insn (gen_cstoresi4 (dest,
22094 gen_rtx_fmt_ee (code, SImode, normal_dest,
22095 const0_rtx),
22096 normal_dest, const0_rtx));
22097
22098 /* Join NaN and non-NaN paths. Compare dest against 0. */
22099 emit_label (join_label);
22100 code = NE;
22101 }
22102
22103 emit_insn (gen_rtx_SET (compare_result,
22104 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
22105 }
22106
22107 else
22108 {
22109 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22110 CLOBBERs to match cmptf_internal2 pattern. */
22111 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22112 && FLOAT128_IBM_P (GET_MODE (op0))
22113 && TARGET_HARD_FLOAT)
22114 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22115 gen_rtvec (10,
22116 gen_rtx_SET (compare_result,
22117 gen_rtx_COMPARE (comp_mode, op0, op1)),
22118 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22119 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22120 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22121 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22122 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22123 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22124 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22125 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22126 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22127 else if (GET_CODE (op1) == UNSPEC
22128 && XINT (op1, 1) == UNSPEC_SP_TEST)
22129 {
22130 rtx op1b = XVECEXP (op1, 0, 0);
22131 comp_mode = CCEQmode;
22132 compare_result = gen_reg_rtx (CCEQmode);
22133 if (TARGET_64BIT)
22134 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22135 else
22136 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22137 }
22138 else
22139 emit_insn (gen_rtx_SET (compare_result,
22140 gen_rtx_COMPARE (comp_mode, op0, op1)));
22141 }
22142
22143 /* Some kinds of FP comparisons need an OR operation;
22144 under flag_finite_math_only we don't bother. */
22145 if (FLOAT_MODE_P (mode)
22146 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22147 && !flag_finite_math_only
22148 && (code == LE || code == GE
22149 || code == UNEQ || code == LTGT
22150 || code == UNGT || code == UNLT))
22151 {
22152 enum rtx_code or1, or2;
22153 rtx or1_rtx, or2_rtx, compare2_rtx;
22154 rtx or_result = gen_reg_rtx (CCEQmode);
22155
22156 switch (code)
22157 {
22158 case LE: or1 = LT; or2 = EQ; break;
22159 case GE: or1 = GT; or2 = EQ; break;
22160 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22161 case LTGT: or1 = LT; or2 = GT; break;
22162 case UNGT: or1 = UNORDERED; or2 = GT; break;
22163 case UNLT: or1 = UNORDERED; or2 = LT; break;
22164 default: gcc_unreachable ();
22165 }
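      /* As an illustration of this decomposition: a NaN-honoring `a <= b'
	 in CCFPmode is rebuilt as (LT | EQ), which typically ends up as a
	 cror of the two condition-register bits followed by a branch on
	 the combined bit (a sketch; the exact CR field and bit numbers
	 depend on register allocation).  */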
22166 validate_condition_mode (or1, comp_mode);
22167 validate_condition_mode (or2, comp_mode);
22168 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22169 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22170 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22171 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22172 const_true_rtx);
22173 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22174
22175 compare_result = or_result;
22176 code = EQ;
22177 }
22178
22179 validate_condition_mode (code, GET_MODE (compare_result));
22180
22181 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
22182 }
22183
22184 \f
22185 /* Return the diagnostic message string if the binary operation OP is
22186 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22187
22188 static const char*
22189 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22190 const_tree type1,
22191 const_tree type2)
22192 {
22193 machine_mode mode1 = TYPE_MODE (type1);
22194 machine_mode mode2 = TYPE_MODE (type2);
22195
22196 /* For complex modes, use the inner type. */
22197 if (COMPLEX_MODE_P (mode1))
22198 mode1 = GET_MODE_INNER (mode1);
22199
22200 if (COMPLEX_MODE_P (mode2))
22201 mode2 = GET_MODE_INNER (mode2);
22202
22203 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22204 double to intermix unless -mfloat128-convert. */
22205 if (mode1 == mode2)
22206 return NULL;
22207
22208 if (!TARGET_FLOAT128_CVT)
22209 {
22210 if ((mode1 == KFmode && mode2 == IFmode)
22211 || (mode1 == IFmode && mode2 == KFmode))
22212 return N_("__float128 and __ibm128 cannot be used in the same "
22213 "expression");
22214
22215 if (TARGET_IEEEQUAD
22216 && ((mode1 == IFmode && mode2 == TFmode)
22217 || (mode1 == TFmode && mode2 == IFmode)))
22218 return N_("__ibm128 and long double cannot be used in the same "
22219 "expression");
22220
22221 if (!TARGET_IEEEQUAD
22222 && ((mode1 == KFmode && mode2 == TFmode)
22223 || (mode1 == TFmode && mode2 == KFmode)))
22224 return N_("__float128 and long double cannot be used in the same "
22225 "expression");
22226 }
22227
22228 return NULL;
22229 }
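/* Illustrative user code that is expected to trip the first message above
   when -mfloat128-convert is not in effect (not part of the compiler):

	__float128 f;
	__ibm128 g;
	... f + g ...	// error: __float128 and __ibm128 cannot be
			// used in the same expression
   */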
22230
22231 \f
22232 /* Expand floating point conversion to/from __float128 and __ibm128. */
22233
22234 void
22235 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22236 {
22237 machine_mode dest_mode = GET_MODE (dest);
22238 machine_mode src_mode = GET_MODE (src);
22239 convert_optab cvt = unknown_optab;
22240 bool do_move = false;
22241 rtx libfunc = NULL_RTX;
22242 rtx dest2;
22243 typedef rtx (*rtx_2func_t) (rtx, rtx);
22244 rtx_2func_t hw_convert = (rtx_2func_t)0;
22245 size_t kf_or_tf;
22246
22247 struct hw_conv_t {
22248 rtx_2func_t from_df;
22249 rtx_2func_t from_sf;
22250 rtx_2func_t from_si_sign;
22251 rtx_2func_t from_si_uns;
22252 rtx_2func_t from_di_sign;
22253 rtx_2func_t from_di_uns;
22254 rtx_2func_t to_df;
22255 rtx_2func_t to_sf;
22256 rtx_2func_t to_si_sign;
22257 rtx_2func_t to_si_uns;
22258 rtx_2func_t to_di_sign;
22259 rtx_2func_t to_di_uns;
22260 } hw_conversions[2] = {
22261 /* conversions to/from KFmode */
22262 {
22263 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22264 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22265 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22266 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22267 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22268 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22269 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22270 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22271 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22272 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22273 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22274 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22275 },
22276
22277 /* conversions to/from TFmode */
22278 {
22279 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22280 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22281 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22282 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22283 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22284 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22285 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22286 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22287 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22288 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22289 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22290 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22291 },
22292 };
22293
22294 if (dest_mode == src_mode)
22295 gcc_unreachable ();
22296
22297 /* Eliminate memory operations. */
22298 if (MEM_P (src))
22299 src = force_reg (src_mode, src);
22300
22301 if (MEM_P (dest))
22302 {
22303 rtx tmp = gen_reg_rtx (dest_mode);
22304 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22305 rs6000_emit_move (dest, tmp, dest_mode);
22306 return;
22307 }
22308
22309 /* Convert to IEEE 128-bit floating point. */
22310 if (FLOAT128_IEEE_P (dest_mode))
22311 {
22312 if (dest_mode == KFmode)
22313 kf_or_tf = 0;
22314 else if (dest_mode == TFmode)
22315 kf_or_tf = 1;
22316 else
22317 gcc_unreachable ();
22318
22319 switch (src_mode)
22320 {
22321 case E_DFmode:
22322 cvt = sext_optab;
22323 hw_convert = hw_conversions[kf_or_tf].from_df;
22324 break;
22325
22326 case E_SFmode:
22327 cvt = sext_optab;
22328 hw_convert = hw_conversions[kf_or_tf].from_sf;
22329 break;
22330
22331 case E_KFmode:
22332 case E_IFmode:
22333 case E_TFmode:
22334 if (FLOAT128_IBM_P (src_mode))
22335 cvt = sext_optab;
22336 else
22337 do_move = true;
22338 break;
22339
22340 case E_SImode:
22341 if (unsigned_p)
22342 {
22343 cvt = ufloat_optab;
22344 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22345 }
22346 else
22347 {
22348 cvt = sfloat_optab;
22349 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22350 }
22351 break;
22352
22353 case E_DImode:
22354 if (unsigned_p)
22355 {
22356 cvt = ufloat_optab;
22357 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22358 }
22359 else
22360 {
22361 cvt = sfloat_optab;
22362 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22363 }
22364 break;
22365
22366 default:
22367 gcc_unreachable ();
22368 }
22369 }
22370
22371 /* Convert from IEEE 128-bit floating point. */
22372 else if (FLOAT128_IEEE_P (src_mode))
22373 {
22374 if (src_mode == KFmode)
22375 kf_or_tf = 0;
22376 else if (src_mode == TFmode)
22377 kf_or_tf = 1;
22378 else
22379 gcc_unreachable ();
22380
22381 switch (dest_mode)
22382 {
22383 case E_DFmode:
22384 cvt = trunc_optab;
22385 hw_convert = hw_conversions[kf_or_tf].to_df;
22386 break;
22387
22388 case E_SFmode:
22389 cvt = trunc_optab;
22390 hw_convert = hw_conversions[kf_or_tf].to_sf;
22391 break;
22392
22393 case E_KFmode:
22394 case E_IFmode:
22395 case E_TFmode:
22396 if (FLOAT128_IBM_P (dest_mode))
22397 cvt = trunc_optab;
22398 else
22399 do_move = true;
22400 break;
22401
22402 case E_SImode:
22403 if (unsigned_p)
22404 {
22405 cvt = ufix_optab;
22406 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22407 }
22408 else
22409 {
22410 cvt = sfix_optab;
22411 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22412 }
22413 break;
22414
22415 case E_DImode:
22416 if (unsigned_p)
22417 {
22418 cvt = ufix_optab;
22419 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22420 }
22421 else
22422 {
22423 cvt = sfix_optab;
22424 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22425 }
22426 break;
22427
22428 default:
22429 gcc_unreachable ();
22430 }
22431 }
22432
22433 /* Both IBM format. */
22434 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22435 do_move = true;
22436
22437 else
22438 gcc_unreachable ();
22439
22440 /* Handle conversion between TFmode/KFmode/IFmode. */
22441 if (do_move)
22442 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22443
22444 /* Handle conversion if we have hardware support. */
22445 else if (TARGET_FLOAT128_HW && hw_convert)
22446 emit_insn ((hw_convert) (dest, src));
22447
22448 /* Call an external function to do the conversion. */
22449 else if (cvt != unknown_optab)
22450 {
22451 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22452 gcc_assert (libfunc != NULL_RTX);
22453
22454 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22455 src, src_mode);
22456
22457 gcc_assert (dest2 != NULL_RTX);
22458 if (!rtx_equal_p (dest, dest2))
22459 emit_move_insn (dest, dest2);
22460 }
22461
22462 else
22463 gcc_unreachable ();
22464
22465 return;
22466 }
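/* Without hardware support the optab lookup above resolves to the libgcc
   soft-fp __float128 routines; for instance a DFmode -> KFmode widening is
   expected to call something like __extenddfkf2 (names shown for
   illustration; the exact set depends on the libgcc configuration).  */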
22467
22468 \f
22469 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22470 can be used as that dest register. Return the dest register. */
22471
22472 rtx
22473 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22474 {
22475 if (op2 == const0_rtx)
22476 return op1;
22477
22478 if (GET_CODE (scratch) == SCRATCH)
22479 scratch = gen_reg_rtx (mode);
22480
22481 if (logical_operand (op2, mode))
22482 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22483 else
22484 emit_insn (gen_rtx_SET (scratch,
22485 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22486
22487 return scratch;
22488 }
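/* E.g. for `x == 5', 5 satisfies logical_operand, so this emits
   `xori scratch,x,5' and the caller tests SCRATCH against zero; a constant
   outside the logical range instead goes through the PLUS of the negated
   value, e.g. an addi of -OP2 (illustrative instruction choices).  */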
22489
22490 void
22491 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22492 {
22493 rtx condition_rtx;
22494 machine_mode op_mode;
22495 enum rtx_code cond_code;
22496 rtx result = operands[0];
22497
22498 condition_rtx = rs6000_generate_compare (operands[1], mode);
22499 cond_code = GET_CODE (condition_rtx);
22500
22501 if (cond_code == NE
22502 || cond_code == GE || cond_code == LE
22503 || cond_code == GEU || cond_code == LEU
22504 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22505 {
22506 rtx not_result = gen_reg_rtx (CCEQmode);
22507 rtx not_op, rev_cond_rtx;
22508 machine_mode cc_mode;
22509
22510 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22511
22512 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22513 SImode, XEXP (condition_rtx, 0), const0_rtx);
22514 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22515 emit_insn (gen_rtx_SET (not_result, not_op));
22516 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22517 }
22518
22519 op_mode = GET_MODE (XEXP (operands[1], 0));
22520 if (op_mode == VOIDmode)
22521 op_mode = GET_MODE (XEXP (operands[1], 1));
22522
22523 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22524 {
22525 PUT_MODE (condition_rtx, DImode);
22526 convert_move (result, condition_rtx, 0);
22527 }
22528 else
22529 {
22530 PUT_MODE (condition_rtx, SImode);
22531 emit_insn (gen_rtx_SET (result, condition_rtx));
22532 }
22533 }
22534
22535 /* Emit a conditional branch: OPERANDS[0] is the comparison to test and OPERANDS[3] is the label to branch to. */
22536
22537 void
22538 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22539 {
22540 rtx condition_rtx, loc_ref;
22541
22542 condition_rtx = rs6000_generate_compare (operands[0], mode);
22543 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22544 emit_jump_insn (gen_rtx_SET (pc_rtx,
22545 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22546 loc_ref, pc_rtx)));
22547 }
22548
22549 /* Return the string to output a conditional branch to LABEL, which is
22550 the operand template of the label, or NULL if the branch is really a
22551 conditional return.
22552
22553 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22554 condition code register and its mode specifies what kind of
22555 comparison we made.
22556
22557 REVERSED is nonzero if we should reverse the sense of the comparison.
22558
22559 INSN is the insn. */
22560
22561 char *
22562 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22563 {
22564 static char string[64];
22565 enum rtx_code code = GET_CODE (op);
22566 rtx cc_reg = XEXP (op, 0);
22567 machine_mode mode = GET_MODE (cc_reg);
22568 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22569 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22570 int really_reversed = reversed ^ need_longbranch;
22571 char *s = string;
22572 const char *ccode;
22573 const char *pred;
22574 rtx note;
22575
22576 validate_condition_mode (code, mode);
22577
22578 /* Work out which way this really branches. We could always use
22579 reverse_condition_maybe_unordered here, but this way
22580 makes the resulting assembler clearer. */
22581 if (really_reversed)
22582 {
22583 /* Reversal of FP compares needs care -- an ordered compare
22584 becomes an unordered compare and vice versa. */
22585 if (mode == CCFPmode)
22586 code = reverse_condition_maybe_unordered (code);
22587 else
22588 code = reverse_condition (code);
22589 }
22590
22591 switch (code)
22592 {
22593 /* Not all of these are actually distinct opcodes, but
22594 we distinguish them for clarity of the resulting assembler. */
22595 case NE: case LTGT:
22596 ccode = "ne"; break;
22597 case EQ: case UNEQ:
22598 ccode = "eq"; break;
22599 case GE: case GEU:
22600 ccode = "ge"; break;
22601 case GT: case GTU: case UNGT:
22602 ccode = "gt"; break;
22603 case LE: case LEU:
22604 ccode = "le"; break;
22605 case LT: case LTU: case UNLT:
22606 ccode = "lt"; break;
22607 case UNORDERED: ccode = "un"; break;
22608 case ORDERED: ccode = "nu"; break;
22609 case UNGE: ccode = "nl"; break;
22610 case UNLE: ccode = "ng"; break;
22611 default:
22612 gcc_unreachable ();
22613 }
22614
22615 /* Maybe we have a guess as to how likely the branch is. */
22616 pred = "";
22617 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22618 if (note != NULL_RTX)
22619 {
22620 /* PROB is the difference from 50%. */
22621 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22622 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22623
22624 /* Only hint for highly probable/improbable branches on newer cpus when
22625 we have real profile data, as static prediction overrides processor
22626 dynamic prediction. For older cpus we may as well always hint, but
22627 assume not taken for branches that are very close to 50% as a
22628 mispredicted taken branch is more expensive than a
22629 mispredicted not-taken branch. */
22630 if (rs6000_always_hint
22631 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22632 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22633 && br_prob_note_reliable_p (note)))
22634 {
22635 if (abs (prob) > REG_BR_PROB_BASE / 20
22636 && ((prob > 0) ^ need_longbranch))
22637 pred = "+";
22638 else
22639 pred = "-";
22640 }
22641 }
22642
22643 if (label == NULL)
22644 s += sprintf (s, "b%slr%s ", ccode, pred);
22645 else
22646 s += sprintf (s, "b%s%s ", ccode, pred);
22647
22648 /* We need to escape any '%' characters in the reg_names string.
22649 Assume they'd only be the first character.... */
22650 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22651 *s++ = '%';
22652 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22653
22654 if (label != NULL)
22655 {
22656 /* If the branch distance was too far, we may have to use an
22657 unconditional branch to go the distance. */
22658 if (need_longbranch)
22659 s += sprintf (s, ",$+8\n\tb %s", label);
22660 else
22661 s += sprintf (s, ",%s", label);
22662 }
22663
22664 return string;
22665 }
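/* Sample results, based on the sprintf formats above (sketches): a branch
   hinted taken might be printed as "beq+ 0,.L5", a conditional return as
   "beqlr 0", and when the label is out of reach of a 16-bit displacement
   the reversed long form "bne 0,$+8\n\tb .L5" is used instead.  */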
22666
22667 /* Return insn for VSX or Altivec comparisons. */
22668
22669 static rtx
22670 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22671 {
22672 rtx mask;
22673 machine_mode mode = GET_MODE (op0);
22674
22675 switch (code)
22676 {
22677 default:
22678 break;
22679
22680 case GE:
22681 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22682 return NULL_RTX;
22683 /* FALLTHRU */
22684
22685 case EQ:
22686 case GT:
22687 case GTU:
22688 case ORDERED:
22689 case UNORDERED:
22690 case UNEQ:
22691 case LTGT:
22692 mask = gen_reg_rtx (mode);
22693 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22694 return mask;
22695 }
22696
22697 return NULL_RTX;
22698 }
22699
22700 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22701 DMODE is the expected destination mode. This is a recursive function. */
22702
22703 static rtx
22704 rs6000_emit_vector_compare (enum rtx_code rcode,
22705 rtx op0, rtx op1,
22706 machine_mode dmode)
22707 {
22708 rtx mask;
22709 bool swap_operands = false;
22710 bool try_again = false;
22711
22712 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22713 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22714
22715 /* See if the comparison works as is. */
22716 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22717 if (mask)
22718 return mask;
22719
22720 switch (rcode)
22721 {
22722 case LT:
22723 rcode = GT;
22724 swap_operands = true;
22725 try_again = true;
22726 break;
22727 case LTU:
22728 rcode = GTU;
22729 swap_operands = true;
22730 try_again = true;
22731 break;
22732 case NE:
22733 case UNLE:
22734 case UNLT:
22735 case UNGE:
22736 case UNGT:
22737 /* Invert condition and try again.
22738 e.g., A != B becomes ~(A==B). */
22739 {
22740 enum rtx_code rev_code;
22741 enum insn_code nor_code;
22742 rtx mask2;
22743
22744 rev_code = reverse_condition_maybe_unordered (rcode);
22745 if (rev_code == UNKNOWN)
22746 return NULL_RTX;
22747
22748 nor_code = optab_handler (one_cmpl_optab, dmode);
22749 if (nor_code == CODE_FOR_nothing)
22750 return NULL_RTX;
22751
22752 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22753 if (!mask2)
22754 return NULL_RTX;
22755
22756 mask = gen_reg_rtx (dmode);
22757 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22758 return mask;
22759 }
22760 break;
22761 case GE:
22762 case GEU:
22763 case LE:
22764 case LEU:
22765 /* Try GT/GTU/LT/LTU OR EQ.  */
22766 {
22767 rtx c_rtx, eq_rtx;
22768 enum insn_code ior_code;
22769 enum rtx_code new_code;
22770
22771 switch (rcode)
22772 {
22773 case GE:
22774 new_code = GT;
22775 break;
22776
22777 case GEU:
22778 new_code = GTU;
22779 break;
22780
22781 case LE:
22782 new_code = LT;
22783 break;
22784
22785 case LEU:
22786 new_code = LTU;
22787 break;
22788
22789 default:
22790 gcc_unreachable ();
22791 }
22792
22793 ior_code = optab_handler (ior_optab, dmode);
22794 if (ior_code == CODE_FOR_nothing)
22795 return NULL_RTX;
22796
22797 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22798 if (!c_rtx)
22799 return NULL_RTX;
22800
22801 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22802 if (!eq_rtx)
22803 return NULL_RTX;
22804
22805 mask = gen_reg_rtx (dmode);
22806 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22807 return mask;
22808 }
22809 break;
22810 default:
22811 return NULL_RTX;
22812 }
22813
22814 if (try_again)
22815 {
22816 if (swap_operands)
22817 std::swap (op0, op1);
22818
22819 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22820 if (mask)
22821 return mask;
22822 }
22823
22824 /* You only get two chances. */
22825 return NULL_RTX;
22826 }
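/* Example of the GE fallback above for signed V4SI, where there is no
   direct "greater or equal" comparison: `a >= b' is synthesized roughly as

	vcmpgtsw  t1,a,b
	vcmpequw  t2,a,b
	vor       mask,t1,t2

   (a sketch; the actual insns are chosen through the optabs).  */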
22827
22828 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22829 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22830 operands for the relation operation COND. */
22831
22832 int
22833 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22834 rtx cond, rtx cc_op0, rtx cc_op1)
22835 {
22836 machine_mode dest_mode = GET_MODE (dest);
22837 machine_mode mask_mode = GET_MODE (cc_op0);
22838 enum rtx_code rcode = GET_CODE (cond);
22839 machine_mode cc_mode = CCmode;
22840 rtx mask;
22841 rtx cond2;
22842 bool invert_move = false;
22843
22844 if (VECTOR_UNIT_NONE_P (dest_mode))
22845 return 0;
22846
22847 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22848 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22849
22850 switch (rcode)
22851 {
22852 /* Invert the condition or swap operands where we can; otherwise fall back
22853 to doing the operation as specified, using a NOR to invert the test. */
22854 case NE:
22855 case UNLE:
22856 case UNLT:
22857 case UNGE:
22858 case UNGT:
22859 /* Invert condition and try again.
22860 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22861 invert_move = true;
22862 rcode = reverse_condition_maybe_unordered (rcode);
22863 if (rcode == UNKNOWN)
22864 return 0;
22865 break;
22866
22867 case GE:
22868 case LE:
22869 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22870 {
22871 /* Invert condition to avoid compound test. */
22872 invert_move = true;
22873 rcode = reverse_condition (rcode);
22874 }
22875 break;
22876
22877 case GTU:
22878 case GEU:
22879 case LTU:
22880 case LEU:
22881 /* Mark unsigned tests with CCUNSmode. */
22882 cc_mode = CCUNSmode;
22883
22884 /* Invert condition to avoid compound test if necessary. */
22885 if (rcode == GEU || rcode == LEU)
22886 {
22887 invert_move = true;
22888 rcode = reverse_condition (rcode);
22889 }
22890 break;
22891
22892 default:
22893 break;
22894 }
22895
22896 /* Get the vector mask for the given relational operations. */
22897 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22898
22899 if (!mask)
22900 return 0;
22901
22902 if (invert_move)
22903 std::swap (op_true, op_false);
22904
22905 /* Optimize cases where OP_TRUE/OP_FALSE are constant vectors: the compare mask itself is already -1/0 per element. */
22906 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22907 && (GET_CODE (op_true) == CONST_VECTOR
22908 || GET_CODE (op_false) == CONST_VECTOR))
22909 {
22910 rtx constant_0 = CONST0_RTX (dest_mode);
22911 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22912
22913 if (op_true == constant_m1 && op_false == constant_0)
22914 {
22915 emit_move_insn (dest, mask);
22916 return 1;
22917 }
22918
22919 else if (op_true == constant_0 && op_false == constant_m1)
22920 {
22921 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22922 return 1;
22923 }
22924
22925 /* If we can't use the vector comparison directly, perhaps we can use
22926 the mask for the true or false fields, instead of loading up a
22927 constant. */
22928 if (op_true == constant_m1)
22929 op_true = mask;
22930
22931 if (op_false == constant_0)
22932 op_false = mask;
22933 }
22934
22935 if (!REG_P (op_true) && !SUBREG_P (op_true))
22936 op_true = force_reg (dest_mode, op_true);
22937
22938 if (!REG_P (op_false) && !SUBREG_P (op_false))
22939 op_false = force_reg (dest_mode, op_false);
22940
22941 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22942 CONST0_RTX (dest_mode));
22943 emit_insn (gen_rtx_SET (dest,
22944 gen_rtx_IF_THEN_ELSE (dest_mode,
22945 cond2,
22946 op_true,
22947 op_false)));
22948 return 1;
22949 }
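/* The if_then_else above is intended to match a vector-select pattern; for
   V4SI a plausible result is

	vcmpgtsw  mask,a,b
	xxsel     dest,f,t,mask		(or vsel on plain AltiVec)

   i.e. elements of T where the mask is all-ones, F where it is zero
   (illustrative; the exact instruction depends on the unit and mode).  */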
22950
22951 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22952 for SF/DF scalars. Move TRUE_COND to DEST if the comparison OP on the
22953 operands of the last comparison is nonzero/true, FALSE_COND if it is
22954 zero/false. Return 0 if the hardware has no such operation. */
22955
22956 static int
22957 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22958 {
22959 enum rtx_code code = GET_CODE (op);
22960 rtx op0 = XEXP (op, 0);
22961 rtx op1 = XEXP (op, 1);
22962 machine_mode compare_mode = GET_MODE (op0);
22963 machine_mode result_mode = GET_MODE (dest);
22964 bool max_p = false;
22965
22966 if (result_mode != compare_mode)
22967 return 0;
22968
22969 if (code == GE || code == GT)
22970 max_p = true;
22971 else if (code == LE || code == LT)
22972 max_p = false;
22973 else
22974 return 0;
22975
22976 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22977 ;
22978
22979 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22980 max_p = !max_p;
22981
22982 else
22983 return 0;
22984
22985 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22986 return 1;
22987 }
22988
22989 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22990 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if the
22991 comparison OP on the operands of the last comparison is nonzero/true,
22992 FALSE_COND if it is zero/false. Return 0 if the hardware has no such operation. */
22993
22994 static int
22995 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22996 {
22997 enum rtx_code code = GET_CODE (op);
22998 rtx op0 = XEXP (op, 0);
22999 rtx op1 = XEXP (op, 1);
23000 machine_mode result_mode = GET_MODE (dest);
23001 rtx compare_rtx;
23002 rtx cmove_rtx;
23003 rtx clobber_rtx;
23004
23005 if (!can_create_pseudo_p ())
23006 return 0;
23007
23008 switch (code)
23009 {
23010 case EQ:
23011 case GE:
23012 case GT:
23013 break;
23014
23015 case NE:
23016 case LT:
23017 case LE:
23018 code = swap_condition (code);
23019 std::swap (op0, op1);
23020 break;
23021
23022 default:
23023 return 0;
23024 }
23025
23026 /* Generate: [(parallel [(set (dest)
23027 (if_then_else (op (cmp1) (cmp2))
23028 (true)
23029 (false)))
23030 (clobber (scratch))])]. */
23031
23032 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
23033 cmove_rtx = gen_rtx_SET (dest,
23034 gen_rtx_IF_THEN_ELSE (result_mode,
23035 compare_rtx,
23036 true_cond,
23037 false_cond));
23038
23039 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
23040 emit_insn (gen_rtx_PARALLEL (VOIDmode,
23041 gen_rtvec (2, cmove_rtx, clobber_rtx)));
23042
23043 return 1;
23044 }
23045
23046 /* Emit a conditional move: move TRUE_COND to DEST if the comparison OP
23047 on the operands of the last comparison is nonzero/true, FALSE_COND if it
23048 is zero/false. Return 0 if the hardware has no such operation. */
23049
23050 int
23051 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23052 {
23053 enum rtx_code code = GET_CODE (op);
23054 rtx op0 = XEXP (op, 0);
23055 rtx op1 = XEXP (op, 1);
23056 machine_mode compare_mode = GET_MODE (op0);
23057 machine_mode result_mode = GET_MODE (dest);
23058 rtx temp;
23059 bool is_against_zero;
23060
23061 /* These modes should always match. */
23062 if (GET_MODE (op1) != compare_mode
23063 /* In the isel case however, we can use a compare immediate, so
23064 op1 may be a small constant. */
23065 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
23066 return 0;
23067 if (GET_MODE (true_cond) != result_mode)
23068 return 0;
23069 if (GET_MODE (false_cond) != result_mode)
23070 return 0;
23071
23072 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
23073 if (TARGET_P9_MINMAX
23074 && (compare_mode == SFmode || compare_mode == DFmode)
23075 && (result_mode == SFmode || result_mode == DFmode))
23076 {
23077 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
23078 return 1;
23079
23080 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
23081 return 1;
23082 }
23083
23084 /* Don't allow using floating point comparisons for integer results for
23085 now. */
23086 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
23087 return 0;
23088
23089 /* First, work out if the hardware can do this at all, or
23090 if it's too slow.... */
23091 if (!FLOAT_MODE_P (compare_mode))
23092 {
23093 if (TARGET_ISEL)
23094 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
23095 return 0;
23096 }
23097
23098 is_against_zero = op1 == CONST0_RTX (compare_mode);
23099
23100 /* A floating-point subtract might overflow, underflow, or produce
23101 an inexact result, thus changing the floating-point flags, so it
23102 can't be generated if we care about that. It's safe if one side
23103 of the construct is zero, since then no subtract will be
23104 generated. */
23105 if (SCALAR_FLOAT_MODE_P (compare_mode)
23106 && flag_trapping_math && ! is_against_zero)
23107 return 0;
23108
23109 /* Eliminate half of the comparisons by switching operands; this
23110 makes the remaining code simpler. */
23111 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23112 || code == LTGT || code == LT || code == UNLE)
23113 {
23114 code = reverse_condition_maybe_unordered (code);
23115 temp = true_cond;
23116 true_cond = false_cond;
23117 false_cond = temp;
23118 }
23119
23120 /* UNEQ and LTGT take four instructions for a comparison with zero;
23121 it'll probably be faster to use a branch here too. */
23122 if (code == UNEQ && HONOR_NANS (compare_mode))
23123 return 0;
23124
23125 /* We're going to try to implement comparisons by performing
23126 a subtract, then comparing against zero. Unfortunately,
23127 Inf - Inf is NaN which is not zero, and so if we don't
23128 know that the operand is finite and the comparison
23129 would treat EQ differently from UNORDERED, we can't do it. */
23130 if (HONOR_INFINITIES (compare_mode)
23131 && code != GT && code != UNGE
23132 && (!CONST_DOUBLE_P (op1)
23133 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23134 /* Constructs of the form (a OP b ? a : b) are safe. */
23135 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23136 || (! rtx_equal_p (op0, true_cond)
23137 && ! rtx_equal_p (op1, true_cond))))
23138 return 0;
23139
23140 /* At this point we know we can use fsel. */
23141
23142 /* Reduce the comparison to a comparison against zero. */
23143 if (! is_against_zero)
23144 {
23145 temp = gen_reg_rtx (compare_mode);
23146 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23147 op0 = temp;
23148 op1 = CONST0_RTX (compare_mode);
23149 }
23150
23151 /* If we don't care about NaNs we can reduce some of the comparisons
23152 down to faster ones. */
23153 if (! HONOR_NANS (compare_mode))
23154 switch (code)
23155 {
23156 case GT:
23157 code = LE;
23158 temp = true_cond;
23159 true_cond = false_cond;
23160 false_cond = temp;
23161 break;
23162 case UNGE:
23163 code = GE;
23164 break;
23165 case UNEQ:
23166 code = EQ;
23167 break;
23168 default:
23169 break;
23170 }
23171
23172 /* Now, reduce everything down to a GE. */
23173 switch (code)
23174 {
23175 case GE:
23176 break;
23177
23178 case LE:
23179 temp = gen_reg_rtx (compare_mode);
23180 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23181 op0 = temp;
23182 break;
23183
23184 case ORDERED:
23185 temp = gen_reg_rtx (compare_mode);
23186 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23187 op0 = temp;
23188 break;
23189
23190 case EQ:
23191 temp = gen_reg_rtx (compare_mode);
23192 emit_insn (gen_rtx_SET (temp,
23193 gen_rtx_NEG (compare_mode,
23194 gen_rtx_ABS (compare_mode, op0))));
23195 op0 = temp;
23196 break;
23197
23198 case UNGE:
23199 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23200 temp = gen_reg_rtx (result_mode);
23201 emit_insn (gen_rtx_SET (temp,
23202 gen_rtx_IF_THEN_ELSE (result_mode,
23203 gen_rtx_GE (VOIDmode,
23204 op0, op1),
23205 true_cond, false_cond)));
23206 false_cond = true_cond;
23207 true_cond = temp;
23208
23209 temp = gen_reg_rtx (compare_mode);
23210 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23211 op0 = temp;
23212 break;
23213
23214 case GT:
23215 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23216 temp = gen_reg_rtx (result_mode);
23217 emit_insn (gen_rtx_SET (temp,
23218 gen_rtx_IF_THEN_ELSE (result_mode,
23219 gen_rtx_GE (VOIDmode,
23220 op0, op1),
23221 true_cond, false_cond)));
23222 true_cond = false_cond;
23223 false_cond = temp;
23224
23225 temp = gen_reg_rtx (compare_mode);
23226 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23227 op0 = temp;
23228 break;
23229
23230 default:
23231 gcc_unreachable ();
23232 }
23233
23234 emit_insn (gen_rtx_SET (dest,
23235 gen_rtx_IF_THEN_ELSE (result_mode,
23236 gen_rtx_GE (VOIDmode,
23237 op0, op1),
23238 true_cond, false_cond)));
23239 return 1;
23240 }
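/* All of the reductions above funnel into a GE-against-zero if_then_else,
   which is what fsel implements: `dest = (a >= 0.0) ? t : f' is expected
   to come out as `fsel dest,a,t,f'.  E.g. LE was handled by negating OP0
   first, and EQ by comparing -fabs(a) against zero (sketch of the
   expected output).  */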
23241
23242 /* Same as above, but for ints (isel). */
23243
23244 int
23245 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23246 {
23247 rtx condition_rtx, cr;
23248 machine_mode mode = GET_MODE (dest);
23249 enum rtx_code cond_code;
23250 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23251 bool signedp;
23252
23253 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23254 return 0;
23255
23256 /* We still have to do the compare, because isel doesn't do a
23257 compare; it just looks at the CRx bits set by a previous compare
23258 instruction. */
23259 condition_rtx = rs6000_generate_compare (op, mode);
23260 cond_code = GET_CODE (condition_rtx);
23261 cr = XEXP (condition_rtx, 0);
23262 signedp = GET_MODE (cr) == CCmode;
23263
23264 isel_func = (mode == SImode
23265 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23266 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23267
23268 switch (cond_code)
23269 {
23270 case LT: case GT: case LTU: case GTU: case EQ:
23271 /* isel handles these directly. */
23272 break;
23273
23274 default:
23275 /* We need to swap the sense of the comparison. */
23276 {
23277 std::swap (false_cond, true_cond);
23278 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23279 }
23280 break;
23281 }
23282
23283 false_cond = force_reg (mode, false_cond);
23284 if (true_cond != const0_rtx)
23285 true_cond = force_reg (mode, true_cond);
23286
23287 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23288
23289 return 1;
23290 }
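/* For example, `dest = (a < b) ? x : y' becomes a compare followed by
   something like `isel dest,x,y,lt'.  The const0_rtx check above exploits
   isel's convention that a register field of 0 denotes the constant zero,
   so a zero "true" value needs no register (sketch of the expected
   sequence).  */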
23291
23292 void
23293 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23294 {
23295 machine_mode mode = GET_MODE (op0);
23296 enum rtx_code c;
23297 rtx target;
23298
23299 /* VSX/altivec have direct min/max insns. */
23300 if ((code == SMAX || code == SMIN)
23301 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23302 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23303 {
23304 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23305 return;
23306 }
23307
23308 if (code == SMAX || code == SMIN)
23309 c = GE;
23310 else
23311 c = GEU;
23312
23313 if (code == SMAX || code == UMAX)
23314 target = emit_conditional_move (dest, c, op0, op1, mode,
23315 op0, op1, mode, 0);
23316 else
23317 target = emit_conditional_move (dest, c, op0, op1, mode,
23318 op1, op0, mode, 0);
23319 gcc_assert (target);
23320 if (target != dest)
23321 emit_move_insn (dest, target);
23322 }
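/* When the mode has direct support, SMAX/SMIN map straight to instructions
   such as xsmaxdp for DFmode under VSX or vmaxsw for V4SI under AltiVec;
   otherwise the min/max is synthesized from a GE/GEU conditional move as
   above (illustrative instruction names).  */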
23323
23324 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23325 COND is true. Mark the jump as unlikely to be taken. */
23326
23327 static void
23328 emit_unlikely_jump (rtx cond, rtx label)
23329 {
23330 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23331 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23332 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23333 }
23334
23335 /* A subroutine of the atomic operation splitters. Emit a load-locked
23336 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23337 the zero_extend operation. */
23338
23339 static void
23340 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23341 {
23342 rtx (*fn) (rtx, rtx) = NULL;
23343
23344 switch (mode)
23345 {
23346 case E_QImode:
23347 fn = gen_load_lockedqi;
23348 break;
23349 case E_HImode:
23350 fn = gen_load_lockedhi;
23351 break;
23352 case E_SImode:
23353 if (GET_MODE (mem) == QImode)
23354 fn = gen_load_lockedqi_si;
23355 else if (GET_MODE (mem) == HImode)
23356 fn = gen_load_lockedhi_si;
23357 else
23358 fn = gen_load_lockedsi;
23359 break;
23360 case E_DImode:
23361 fn = gen_load_lockeddi;
23362 break;
23363 case E_TImode:
23364 fn = gen_load_lockedti;
23365 break;
23366 default:
23367 gcc_unreachable ();
23368 }
23369 emit_insn (fn (reg, mem));
23370 }
23371
23372 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23373 instruction in MODE. */
23374
23375 static void
23376 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23377 {
23378 rtx (*fn) (rtx, rtx, rtx) = NULL;
23379
23380 switch (mode)
23381 {
23382 case E_QImode:
23383 fn = gen_store_conditionalqi;
23384 break;
23385 case E_HImode:
23386 fn = gen_store_conditionalhi;
23387 break;
23388 case E_SImode:
23389 fn = gen_store_conditionalsi;
23390 break;
23391 case E_DImode:
23392 fn = gen_store_conditionaldi;
23393 break;
23394 case E_TImode:
23395 fn = gen_store_conditionalti;
23396 break;
23397 default:
23398 gcc_unreachable ();
23399 }
23400
23401 /* Emit sync before stwcx. to address PPC405 Erratum. */
23402 if (PPC405_ERRATUM77)
23403 emit_insn (gen_hwsync ());
23404
23405 emit_insn (fn (res, mem, val));
23406 }
23407
23408 /* Expand barriers before and after a load_locked/store_cond sequence. */
23409
23410 static rtx
23411 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23412 {
23413 rtx addr = XEXP (mem, 0);
23414
23415 if (!legitimate_indirect_address_p (addr, reload_completed)
23416 && !legitimate_indexed_address_p (addr, reload_completed))
23417 {
23418 addr = force_reg (Pmode, addr);
23419 mem = replace_equiv_address_nv (mem, addr);
23420 }
23421
23422 switch (model)
23423 {
23424 case MEMMODEL_RELAXED:
23425 case MEMMODEL_CONSUME:
23426 case MEMMODEL_ACQUIRE:
23427 break;
23428 case MEMMODEL_RELEASE:
23429 case MEMMODEL_ACQ_REL:
23430 emit_insn (gen_lwsync ());
23431 break;
23432 case MEMMODEL_SEQ_CST:
23433 emit_insn (gen_hwsync ());
23434 break;
23435 default:
23436 gcc_unreachable ();
23437 }
23438 return mem;
23439 }
23440
23441 static void
23442 rs6000_post_atomic_barrier (enum memmodel model)
23443 {
23444 switch (model)
23445 {
23446 case MEMMODEL_RELAXED:
23447 case MEMMODEL_CONSUME:
23448 case MEMMODEL_RELEASE:
23449 break;
23450 case MEMMODEL_ACQUIRE:
23451 case MEMMODEL_ACQ_REL:
23452 case MEMMODEL_SEQ_CST:
23453 emit_insn (gen_isync ());
23454 break;
23455 default:
23456 gcc_unreachable ();
23457 }
23458 }
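/* Summarizing the two helpers above: a SEQ_CST access gets hwsync before
   and isync after, RELEASE/ACQ_REL get lwsync before, ACQUIRE/ACQ_REL get
   isync after, and relaxed/consume accesses get no barrier on either
   side.  */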
23459
23460 /* A subroutine of the various atomic expanders. For sub-word operations,
23461 we must adjust things to operate on SImode. Given the original MEM,
23462 return a new aligned memory. Also build and return the quantities by
23463 which to shift and mask. */
23464
23465 static rtx
23466 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23467 {
23468 rtx addr, align, shift, mask, mem;
23469 HOST_WIDE_INT shift_mask;
23470 machine_mode mode = GET_MODE (orig_mem);
23471
23472 /* For smaller modes, we have to implement this via SImode. */
23473 shift_mask = (mode == QImode ? 0x18 : 0x10);
23474
23475 addr = XEXP (orig_mem, 0);
23476 addr = force_reg (GET_MODE (addr), addr);
23477
23478 /* Aligned memory containing subword. Generate a new memory. We
23479 do not want any of the existing MEM_ATTR data, as we're now
23480 accessing memory outside the original object. */
23481 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23482 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23483 mem = gen_rtx_MEM (SImode, align);
23484 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23485 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23486 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23487
23488 /* Shift amount for subword relative to aligned word. */
23489 shift = gen_reg_rtx (SImode);
23490 addr = gen_lowpart (SImode, addr);
23491 rtx tmp = gen_reg_rtx (SImode);
23492 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23493 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23494 if (BYTES_BIG_ENDIAN)
23495 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23496 shift, 1, OPTAB_LIB_WIDEN);
23497 *pshift = shift;
23498
23499 /* Mask for insertion. */
23500 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23501 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23502 *pmask = mask;
23503
23504 return mem;
23505 }
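/* Worked example (little-endian, HImode): for a halfword at address
   0x1006, the containing word is at 0x1004, shift = (0x1006 << 3) & 0x10
   = 16, and mask = 0xffff << 16, i.e. the halfword occupies bits 16..31
   of the SImode word that the load-locked/store-conditional loop operates
   on.  */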
23506
23507 /* A subroutine of the various atomic expanders. For sub-word operands,
23508 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23509
23510 static rtx
23511 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23512 {
23513 rtx x;
23514
23515 x = gen_reg_rtx (SImode);
23516 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23517 gen_rtx_NOT (SImode, mask),
23518 oldval)));
23519
23520 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23521
23522 return x;
23523 }
23524
23525 /* A subroutine of the various atomic expanders. For sub-word operands,
23526 extract WIDE to NARROW via SHIFT. */
23527
23528 static void
23529 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23530 {
23531 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23532 wide, 1, OPTAB_LIB_WIDEN);
23533 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23534 }
23535
23536 /* Expand an atomic compare and swap operation. */
23537
23538 void
23539 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23540 {
23541 rtx boolval, retval, mem, oldval, newval, cond;
23542 rtx label1, label2, x, mask, shift;
23543 machine_mode mode, orig_mode;
23544 enum memmodel mod_s, mod_f;
23545 bool is_weak;
23546
23547 boolval = operands[0];
23548 retval = operands[1];
23549 mem = operands[2];
23550 oldval = operands[3];
23551 newval = operands[4];
23552 is_weak = (INTVAL (operands[5]) != 0);
23553 mod_s = memmodel_base (INTVAL (operands[6]));
23554 mod_f = memmodel_base (INTVAL (operands[7]));
23555 orig_mode = mode = GET_MODE (mem);
23556
23557 mask = shift = NULL_RTX;
23558 if (mode == QImode || mode == HImode)
23559 {
23560 /* Before power8, we didn't have access to lbarx/lharx, so we generate a
23561 lwarx plus shift/mask operations. With power8, we do the
23562 comparison in SImode, but the store is still done in QI/HImode. */
23563 oldval = convert_modes (SImode, mode, oldval, 1);
23564
23565 if (!TARGET_SYNC_HI_QI)
23566 {
23567 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23568
23569 /* Shift and mask OLDVAL into position within the word. */
23570 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23571 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23572
23573 /* Shift and mask NEWVAL into position within the word. */
23574 newval = convert_modes (SImode, mode, newval, 1);
23575 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23576 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23577 }
23578
23579 /* Prepare to adjust the return value. */
23580 retval = gen_reg_rtx (SImode);
23581 mode = SImode;
23582 }
23583 else if (reg_overlap_mentioned_p (retval, oldval))
23584 oldval = copy_to_reg (oldval);
23585
23586 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23587 oldval = copy_to_mode_reg (mode, oldval);
23588
23589 if (reg_overlap_mentioned_p (retval, newval))
23590 newval = copy_to_reg (newval);
23591
23592 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23593
23594 label1 = NULL_RTX;
23595 if (!is_weak)
23596 {
23597 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23598 emit_label (XEXP (label1, 0));
23599 }
23600 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23601
23602 emit_load_locked (mode, retval, mem);
23603
23604 x = retval;
23605 if (mask)
23606 x = expand_simple_binop (SImode, AND, retval, mask,
23607 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23608
23609 cond = gen_reg_rtx (CCmode);
23610 /* If we have TImode, synthesize a comparison. */
23611 if (mode != TImode)
23612 x = gen_rtx_COMPARE (CCmode, x, oldval);
23613 else
23614 {
23615 rtx xor1_result = gen_reg_rtx (DImode);
23616 rtx xor2_result = gen_reg_rtx (DImode);
23617 rtx or_result = gen_reg_rtx (DImode);
23618 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23619 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23620 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23621 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23622
23623 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23624 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23625 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23626 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23627 }
23628
23629 emit_insn (gen_rtx_SET (cond, x));
23630
23631 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23632 emit_unlikely_jump (x, label2);
23633
23634 x = newval;
23635 if (mask)
23636 x = rs6000_mask_atomic_subword (retval, newval, mask);
23637
23638 emit_store_conditional (orig_mode, cond, mem, x);
23639
23640 if (!is_weak)
23641 {
23642 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23643 emit_unlikely_jump (x, label1);
23644 }
23645
23646 if (!is_mm_relaxed (mod_f))
23647 emit_label (XEXP (label2, 0));
23648
23649 rs6000_post_atomic_barrier (mod_s);
23650
23651 if (is_mm_relaxed (mod_f))
23652 emit_label (XEXP (label2, 0));
23653
23654 if (shift)
23655 rs6000_finish_atomic_subword (operands[1], retval, shift);
23656 else if (mode != GET_MODE (operands[1]))
23657 convert_move (operands[1], retval, 1);
23658
23659 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23660 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23661 emit_insn (gen_rtx_SET (boolval, x));
23662 }
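/* For a word-sized strong compare-and-swap the code built above is the
   classic larx/stcx. loop, roughly (a sketch; barriers vary with the
   memory model):

	.L1:	lwarx   r9,0,r3		# load-reserve current value
		cmpw    cr0,r9,r4	# compare against expected
		bne-    cr0,.L2		# mismatch: fail
		stwcx.  r5,0,r3		# attempt to store desired value
		bne-    cr0,.L1		# lost reservation: retry
	.L2:
   */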
23663
23664 /* Expand an atomic exchange operation. */
23665
23666 void
23667 rs6000_expand_atomic_exchange (rtx operands[])
23668 {
23669 rtx retval, mem, val, cond;
23670 machine_mode mode;
23671 enum memmodel model;
23672 rtx label, x, mask, shift;
23673
23674 retval = operands[0];
23675 mem = operands[1];
23676 val = operands[2];
23677 model = memmodel_base (INTVAL (operands[3]));
23678 mode = GET_MODE (mem);
23679
23680 mask = shift = NULL_RTX;
23681 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23682 {
23683 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23684
23685 /* Shift and mask VAL into position within the word. */
23686 val = convert_modes (SImode, mode, val, 1);
23687 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23688 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23689
23690 /* Prepare to adjust the return value. */
23691 retval = gen_reg_rtx (SImode);
23692 mode = SImode;
23693 }
23694
23695 mem = rs6000_pre_atomic_barrier (mem, model);
23696
23697 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23698 emit_label (XEXP (label, 0));
23699
23700 emit_load_locked (mode, retval, mem);
23701
23702 x = val;
23703 if (mask)
23704 x = rs6000_mask_atomic_subword (retval, val, mask);
23705
23706 cond = gen_reg_rtx (CCmode);
23707 emit_store_conditional (mode, cond, mem, x);
23708
23709 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23710 emit_unlikely_jump (x, label);
23711
23712 rs6000_post_atomic_barrier (model);
23713
23714 if (shift)
23715 rs6000_finish_atomic_subword (operands[0], retval, shift);
23716 }
23717
23718 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23719 to perform. MEM is the memory on which to operate. VAL is the second
23720 operand of the binary operator. BEFORE and AFTER are optional locations to
23721 return the value of MEM either before of after the operation. MODEL_RTX
23722 is a CONST_INT containing the memory model to use. */
23723
23724 void
23725 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23726 rtx orig_before, rtx orig_after, rtx model_rtx)
23727 {
23728 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23729 machine_mode mode = GET_MODE (mem);
23730 machine_mode store_mode = mode;
23731 rtx label, x, cond, mask, shift;
23732 rtx before = orig_before, after = orig_after;
23733
23734 mask = shift = NULL_RTX;
23735 /* On power8, we want to use SImode for the operation. On previous systems,
23736 operate on the containing SImode word and shift/mask to get the proper
23737 byte or halfword. */
23738 if (mode == QImode || mode == HImode)
23739 {
23740 if (TARGET_SYNC_HI_QI)
23741 {
23742 val = convert_modes (SImode, mode, val, 1);
23743
23744 /* Prepare to adjust the return value. */
23745 before = gen_reg_rtx (SImode);
23746 if (after)
23747 after = gen_reg_rtx (SImode);
23748 mode = SImode;
23749 }
23750 else
23751 {
23752 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23753
23754 /* Shift and mask VAL into position within the word. */
23755 val = convert_modes (SImode, mode, val, 1);
23756 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23757 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23758
23759 switch (code)
23760 {
23761 case IOR:
23762 case XOR:
23763 /* We've already zero-extended VAL. That is sufficient to
23764 make certain that it does not affect other bits. */
23765 mask = NULL;
23766 break;
23767
23768 case AND:
23769 /* If we make certain that all of the other bits in VAL are
23770 set, that will be sufficient to not affect other bits. */
23771 x = gen_rtx_NOT (SImode, mask);
23772 x = gen_rtx_IOR (SImode, x, val);
23773 emit_insn (gen_rtx_SET (val, x));
23774 mask = NULL;
23775 break;
23776
23777 case NOT:
23778 case PLUS:
23779 case MINUS:
23780 /* These will all affect bits outside the field and need
23781 adjustment via MASK within the loop. */
23782 break;
23783
23784 default:
23785 gcc_unreachable ();
23786 }
23787
23788 /* Prepare to adjust the return value. */
23789 before = gen_reg_rtx (SImode);
23790 if (after)
23791 after = gen_reg_rtx (SImode);
23792 store_mode = mode = SImode;
23793 }
23794 }
23795
23796 mem = rs6000_pre_atomic_barrier (mem, model);
23797
23798 label = gen_label_rtx ();
23799 emit_label (label);
23800 label = gen_rtx_LABEL_REF (VOIDmode, label);
23801
23802 if (before == NULL_RTX)
23803 before = gen_reg_rtx (mode);
23804
23805 emit_load_locked (mode, before, mem);
23806
23807 if (code == NOT)
23808 {
23809 x = expand_simple_binop (mode, AND, before, val,
23810 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23811 after = expand_simple_unop (mode, NOT, x, after, 1);
23812 }
23813 else
23814 {
23815 after = expand_simple_binop (mode, code, before, val,
23816 after, 1, OPTAB_LIB_WIDEN);
23817 }
23818
23819 x = after;
23820 if (mask)
23821 {
23822 x = expand_simple_binop (SImode, AND, after, mask,
23823 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23824 x = rs6000_mask_atomic_subword (before, x, mask);
23825 }
23826 else if (store_mode != mode)
23827 x = convert_modes (store_mode, mode, x, 1);
23828
23829 cond = gen_reg_rtx (CCmode);
23830 emit_store_conditional (store_mode, cond, mem, x);
23831
23832 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23833 emit_unlikely_jump (x, label);
23834
23835 rs6000_post_atomic_barrier (model);
23836
23837 if (shift)
23838 {
23839 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23840 then do the calculations in a SImode register. */
23841 if (orig_before)
23842 rs6000_finish_atomic_subword (orig_before, before, shift);
23843 if (orig_after)
23844 rs6000_finish_atomic_subword (orig_after, after, shift);
23845 }
23846 else if (store_mode != mode)
23847 {
23848 /* QImode/HImode on machines with lbarx/lharx where we do the native
23849 operation and then do the calculations in a SImode register. */
23850 if (orig_before)
23851 convert_move (orig_before, before, 1);
23852 if (orig_after)
23853 convert_move (orig_after, after, 1);
23854 }
23855 else if (orig_after && after != orig_after)
23856 emit_move_insn (orig_after, after);
23857 }
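/* E.g. a SImode __atomic_fetch_add expands along these lines (sketch):

	.L1:	lwarx   r9,0,r3
		add     r10,r9,r4
		stwcx.  r10,0,r3
		bne-    cr0,.L1

   with BEFORE = r9 and AFTER = r10, plus the barriers implied by
   MODEL_RTX around the loop.  */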
23858
23859 /* Emit instructions to move SRC to DST. Called by splitters for
23860 multi-register moves. It will emit at most one instruction for
23861 each register that is accessed; that is, it won't emit li/lis pairs
23862 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23863 register. */
23864
23865 void
23866 rs6000_split_multireg_move (rtx dst, rtx src)
23867 {
23868 /* The register number of the first register being moved. */
23869 int reg;
23870 /* The mode that is to be moved. */
23871 machine_mode mode;
23872 /* The mode that the move is being done in, and its size. */
23873 machine_mode reg_mode;
23874 int reg_mode_size;
23875 /* The number of registers that will be moved. */
23876 int nregs;
23877
23878 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23879 mode = GET_MODE (dst);
23880 nregs = hard_regno_nregs (reg, mode);
23881 if (FP_REGNO_P (reg))
23882 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23883 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23884 else if (ALTIVEC_REGNO_P (reg))
23885 reg_mode = V16QImode;
23886 else
23887 reg_mode = word_mode;
23888 reg_mode_size = GET_MODE_SIZE (reg_mode);
23889
23890 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23891
23892 /* TDmode residing in FP registers is special, since the ISA requires that
23893 the lower-numbered word of a register pair is always the most significant
23894 word, even in little-endian mode. This does not match the usual subreg
23895	 semantics, so we cannot use simplify_gen_subreg in those cases.  Access
23896 the appropriate constituent registers "by hand" in little-endian mode.
23897
23898 Note we do not need to check for destructive overlap here since TDmode
23899 can only reside in even/odd register pairs. */
23900 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23901 {
23902 rtx p_src, p_dst;
23903 int i;
23904
23905 for (i = 0; i < nregs; i++)
23906 {
23907 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23908 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23909 else
23910 p_src = simplify_gen_subreg (reg_mode, src, mode,
23911 i * reg_mode_size);
23912
23913 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23914 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23915 else
23916 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23917 i * reg_mode_size);
23918
23919 emit_insn (gen_rtx_SET (p_dst, p_src));
23920 }
23921
23922 return;
23923 }
23924
23925 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23926 {
23927 /* Move register range backwards, if we might have destructive
23928 overlap. */
23929 int i;
23930 for (i = nregs - 1; i >= 0; i--)
23931 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23932 i * reg_mode_size),
23933 simplify_gen_subreg (reg_mode, src, mode,
23934 i * reg_mode_size)));
23935 }
23936 else
23937 {
23938 int i;
23939 int j = -1;
23940 bool used_update = false;
23941 rtx restore_basereg = NULL_RTX;
23942
23943 if (MEM_P (src) && INT_REGNO_P (reg))
23944 {
23945 rtx breg;
23946
23947 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23948 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23949 {
23950 rtx delta_rtx;
23951 breg = XEXP (XEXP (src, 0), 0);
23952 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23953 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23954 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23955 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23956 src = replace_equiv_address (src, breg);
23957 }
23958 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23959 {
23960 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23961 {
23962 rtx basereg = XEXP (XEXP (src, 0), 0);
23963 if (TARGET_UPDATE)
23964 {
23965 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23966 emit_insn (gen_rtx_SET (ndst,
23967 gen_rtx_MEM (reg_mode,
23968 XEXP (src, 0))));
23969 used_update = true;
23970 }
23971 else
23972 emit_insn (gen_rtx_SET (basereg,
23973 XEXP (XEXP (src, 0), 1)));
23974 src = replace_equiv_address (src, basereg);
23975 }
23976 else
23977 {
23978 rtx basereg = gen_rtx_REG (Pmode, reg);
23979 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23980 src = replace_equiv_address (src, basereg);
23981 }
23982 }
23983
23984 breg = XEXP (src, 0);
23985 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23986 breg = XEXP (breg, 0);
23987
23988 /* If the base register we are using to address memory is
23989 also a destination reg, then change that register last. */
23990 if (REG_P (breg)
23991 && REGNO (breg) >= REGNO (dst)
23992 && REGNO (breg) < REGNO (dst) + nregs)
23993 j = REGNO (breg) - REGNO (dst);
23994 }
23995 else if (MEM_P (dst) && INT_REGNO_P (reg))
23996 {
23997 rtx breg;
23998
23999 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
24000 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
24001 {
24002 rtx delta_rtx;
24003 breg = XEXP (XEXP (dst, 0), 0);
24004 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
24005 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
24006 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
24007
24008 /* We have to update the breg before doing the store.
24009 Use store with update, if available. */
24010
24011 if (TARGET_UPDATE)
24012 {
24013 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24014 emit_insn (TARGET_32BIT
24015 ? (TARGET_POWERPC64
24016 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
24017 : gen_movsi_si_update (breg, breg, delta_rtx, nsrc))
24018 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
24019 used_update = true;
24020 }
24021 else
24022 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24023 dst = replace_equiv_address (dst, breg);
24024 }
24025 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
24026 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
24027 {
24028 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
24029 {
24030 rtx basereg = XEXP (XEXP (dst, 0), 0);
24031 if (TARGET_UPDATE)
24032 {
24033 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24034 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
24035 XEXP (dst, 0)),
24036 nsrc));
24037 used_update = true;
24038 }
24039 else
24040 emit_insn (gen_rtx_SET (basereg,
24041 XEXP (XEXP (dst, 0), 1)));
24042 dst = replace_equiv_address (dst, basereg);
24043 }
24044 else
24045 {
24046 rtx basereg = XEXP (XEXP (dst, 0), 0);
24047 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24048 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24049 && REG_P (basereg)
24050 && REG_P (offsetreg)
24051 && REGNO (basereg) != REGNO (offsetreg));
24052 if (REGNO (basereg) == 0)
24053 {
24054 rtx tmp = offsetreg;
24055 offsetreg = basereg;
24056 basereg = tmp;
24057 }
24058 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24059 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24060 dst = replace_equiv_address (dst, basereg);
24061 }
24062 }
24063 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24064 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
24065 }
24066
24067 for (i = 0; i < nregs; i++)
24068 {
24069 /* Calculate index to next subword. */
24070 ++j;
24071 if (j == nregs)
24072 j = 0;
24073
24074	 /* If the compiler already emitted the move of the first word by
24075	 store with update, no need to do anything.  */
24076 if (j == 0 && used_update)
24077 continue;
24078
24079 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24080 j * reg_mode_size),
24081 simplify_gen_subreg (reg_mode, src, mode,
24082 j * reg_mode_size)));
24083 }
24084 if (restore_basereg != NULL_RTX)
24085 emit_insn (restore_basereg);
24086 }
24087 }
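/* Editorial sketch with hypothetical register numbers: for a TDmode
   value in the FPR pair f10/f11 being copied to f12/f13 in
   little-endian mode, the special-case loop above computes
   REGNO + nregs - 1 - i, so iteration i == 0 emits f13 <- f11 and
   i == 1 emits f12 <- f10, keeping the most significant word in the
   lower-numbered register of each pair as the ISA requires.  */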
24088
24089 \f
24090 /* This page contains routines that are used to determine what the
24091 function prologue and epilogue code will do and write them out. */
24092
24093 /* Determine whether REG is really used, i.e. whether it needs to be saved.  */
24094
24095 static bool
24096 save_reg_p (int reg)
24097 {
24098 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24099 {
24100 /* When calling eh_return, we must return true for all the cases
24101 where conditional_register_usage marks the PIC offset reg
24102 call used or fixed. */
24103 if (crtl->calls_eh_return
24104 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24105 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24106 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24107 return true;
24108
24109 /* We need to mark the PIC offset register live for the same
24110 conditions as it is set up in rs6000_emit_prologue, or
24111 otherwise it won't be saved before we clobber it. */
24112 if (TARGET_TOC && TARGET_MINIMAL_TOC
24113 && !constant_pool_empty_p ())
24114 return true;
24115
24116 if (DEFAULT_ABI == ABI_V4
24117 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
24118 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
24119 return true;
24120
24121 if (DEFAULT_ABI == ABI_DARWIN
24122 && flag_pic && crtl->uses_pic_offset_table)
24123 return true;
24124 }
24125
24126 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24127 }
24128
24129 /* Return the first fixed-point register that is required to be
24130 saved. 32 if none. */
24131
24132 int
24133 first_reg_to_save (void)
24134 {
24135 int first_reg;
24136
24137 /* Find lowest numbered live register. */
24138 for (first_reg = 13; first_reg <= 31; first_reg++)
24139 if (save_reg_p (first_reg))
24140 break;
24141
24142 return first_reg;
24143 }
24144
24145 /* Similar, for FP regs. */
24146
24147 int
24148 first_fp_reg_to_save (void)
24149 {
24150 int first_reg;
24151
24152 /* Find lowest numbered live register. */
24153 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24154 if (save_reg_p (first_reg))
24155 break;
24156
24157 return first_reg;
24158 }
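/* Editorial example: if the only live call-saved FPR is f30, the loop
   above returns 30 + 32 == 62 (FPRs occupy hard register numbers
   32..63), and rs6000_stack_info below then sizes the FPR save area
   as 8 * (64 - 62) == 16 bytes.  */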
24159
24160 /* Similar, for AltiVec regs. */
24161
24162 static int
24163 first_altivec_reg_to_save (void)
24164 {
24165 int i;
24166
24167 /* Stack frame remains as is unless we are in AltiVec ABI. */
24168 if (! TARGET_ALTIVEC_ABI)
24169 return LAST_ALTIVEC_REGNO + 1;
24170
24171 /* On Darwin, the unwind routines are compiled without
24172 TARGET_ALTIVEC, and use save_world to save/restore the
24173 altivec registers when necessary. */
24174 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24175 && ! TARGET_ALTIVEC)
24176 return FIRST_ALTIVEC_REGNO + 20;
24177
24178 /* Find lowest numbered live register. */
24179 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24180 if (save_reg_p (i))
24181 break;
24182
24183 return i;
24184 }
24185
24186 /* Return a 32-bit mask of the AltiVec registers we need to set in
24187	 VRSAVE.  Bit n of the return value is 1 if Vn is live; bit 0 is the
24188	 MSB of the 32-bit word.  */
24189
24190 static unsigned int
24191 compute_vrsave_mask (void)
24192 {
24193 unsigned int i, mask = 0;
24194
24195 /* On Darwin, the unwind routines are compiled without
24196 TARGET_ALTIVEC, and use save_world to save/restore the
24197 call-saved altivec registers when necessary. */
24198 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24199 && ! TARGET_ALTIVEC)
24200 mask |= 0xFFF;
24201
24202 /* First, find out if we use _any_ altivec registers. */
24203 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24204 if (df_regs_ever_live_p (i))
24205 mask |= ALTIVEC_REG_BIT (i);
24206
24207 if (mask == 0)
24208 return mask;
24209
24210 /* Next, remove the argument registers from the set. These must
24211 be in the VRSAVE mask set by the caller, so we don't need to add
24212 them in again. More importantly, the mask we compute here is
24213 used to generate CLOBBERs in the set_vrsave insn, and we do not
24214 wish the argument registers to die. */
24215 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24216 mask &= ~ALTIVEC_REG_BIT (i);
24217
24218 /* Similarly, remove the return value from the set. */
24219 {
24220 bool yes = false;
24221 diddle_return_value (is_altivec_return_reg, &yes);
24222 if (yes)
24223 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24224 }
24225
24226 return mask;
24227 }
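/* Editorial example of the encoding described above: V20 maps to bit
   1 << (31 - 20) == 0x800 and V21 to 1 << (31 - 21) == 0x400, so a
   function whose only live AltiVec registers are V20 and V21 gets a
   mask of 0xC00.  The 0xFFF used for the Darwin save_world case is
   this same encoding applied to all of V20..V31.  */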
24228
24229 /* For a very restricted set of circumstances, we can cut down the
24230 size of prologues/epilogues by calling our own save/restore-the-world
24231 routines. */
24232
24233 static void
24234 compute_save_world_info (rs6000_stack_t *info)
24235 {
24236 info->world_save_p = 1;
24237 info->world_save_p
24238 = (WORLD_SAVE_P (info)
24239 && DEFAULT_ABI == ABI_DARWIN
24240 && !cfun->has_nonlocal_label
24241 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24242 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24243 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24244 && info->cr_save_p);
24245
24246 /* This will not work in conjunction with sibcalls. Make sure there
24247 are none. (This check is expensive, but seldom executed.) */
24248 if (WORLD_SAVE_P (info))
24249 {
24250 rtx_insn *insn;
24251 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24252 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24253 {
24254 info->world_save_p = 0;
24255 break;
24256 }
24257 }
24258
24259 if (WORLD_SAVE_P (info))
24260 {
24261 /* Even if we're not touching VRsave, make sure there's room on the
24262 stack for it, if it looks like we're calling SAVE_WORLD, which
24263 will attempt to save it. */
24264 info->vrsave_size = 4;
24265
24266 /* If we are going to save the world, we need to save the link register too. */
24267 info->lr_save_p = 1;
24268
24269 /* "Save" the VRsave register too if we're saving the world. */
24270 if (info->vrsave_mask == 0)
24271 info->vrsave_mask = compute_vrsave_mask ();
24272
24273 /* Because the Darwin register save/restore routines only handle
24274 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24275 check. */
24276 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24277 && (info->first_altivec_reg_save
24278 >= FIRST_SAVED_ALTIVEC_REGNO));
24279 }
24280
24281 return;
24282 }
24283
24284
24285 static void
24286 is_altivec_return_reg (rtx reg, void *xyes)
24287 {
24288 bool *yes = (bool *) xyes;
24289 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24290 *yes = true;
24291 }
24292
24293 \f
24294 /* Return whether REG is a global user reg or has been specified by
24295 -ffixed-REG. We should not restore these, and so cannot use
24296 lmw or out-of-line restore functions if there are any. We also
24297 can't save them (well, emit frame notes for them), because frame
24298 unwinding during exception handling will restore saved registers. */
24299
24300 static bool
24301 fixed_reg_p (int reg)
24302 {
24303 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24304 backend sets it, overriding anything the user might have given. */
24305 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24306 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24307 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24308 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24309 return false;
24310
24311 return fixed_regs[reg];
24312 }
24313
24314 /* Determine the strategy for saving/restoring registers.  */
24315
24316 enum {
24317 SAVE_MULTIPLE = 0x1,
24318 SAVE_INLINE_GPRS = 0x2,
24319 SAVE_INLINE_FPRS = 0x4,
24320 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24321 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24322 SAVE_INLINE_VRS = 0x20,
24323 REST_MULTIPLE = 0x100,
24324 REST_INLINE_GPRS = 0x200,
24325 REST_INLINE_FPRS = 0x400,
24326 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24327 REST_INLINE_VRS = 0x1000
24328 };
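/* Editorial usage sketch (hypothetical STRATEGY value): the flags above
   are combined into one bitmask and tested in pairs, e.g.

       int strategy = SAVE_INLINE_GPRS | SAVE_MULTIPLE | REST_INLINE_GPRS;
       bool inline_saves_without_stmw
	 = ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE))
	    == SAVE_INLINE_GPRS);

   which is false here because SAVE_MULTIPLE is also set.  In the code
   below, SAVE_MULTIPLE and REST_MULTIPLE are only ever set together
   with the corresponding *_INLINE_GPRS flag.  */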
24329
24330 static int
24331 rs6000_savres_strategy (rs6000_stack_t *info,
24332 bool using_static_chain_p)
24333 {
24334 int strategy = 0;
24335
24336 /* Select between in-line and out-of-line save and restore of regs.
24337 First, all the obvious cases where we don't use out-of-line. */
24338 if (crtl->calls_eh_return
24339 || cfun->machine->ra_need_lr)
24340 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24341 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24342 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24343
24344 if (info->first_gp_reg_save == 32)
24345 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24346
24347 if (info->first_fp_reg_save == 64)
24348 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24349
24350 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24351 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24352
24353 /* Define cutoff for using out-of-line functions to save registers. */
24354 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24355 {
24356 if (!optimize_size)
24357 {
24358 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24359 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24360 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24361 }
24362 else
24363 {
24364 /* Prefer out-of-line restore if it will exit. */
24365 if (info->first_fp_reg_save > 61)
24366 strategy |= SAVE_INLINE_FPRS;
24367 if (info->first_gp_reg_save > 29)
24368 {
24369 if (info->first_fp_reg_save == 64)
24370 strategy |= SAVE_INLINE_GPRS;
24371 else
24372 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24373 }
24374 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24375 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24376 }
24377 }
24378 else if (DEFAULT_ABI == ABI_DARWIN)
24379 {
24380 if (info->first_fp_reg_save > 60)
24381 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24382 if (info->first_gp_reg_save > 29)
24383 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24384 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24385 }
24386 else
24387 {
24388 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24389 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24390 || info->first_fp_reg_save > 61)
24391 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24392 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24393 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24394 }
24395
24396 /* Don't bother to try to save things out-of-line if r11 is occupied
24397 by the static chain. It would require too much fiddling and the
24398	 static chain is rarely used anyway.  FPRs are saved w.r.t. the stack
24399 pointer on Darwin, and AIX uses r1 or r12. */
24400 if (using_static_chain_p
24401 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24402 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24403 | SAVE_INLINE_GPRS
24404 | SAVE_INLINE_VRS);
24405
24406 /* Don't ever restore fixed regs. That means we can't use the
24407 out-of-line register restore functions if a fixed reg is in the
24408 range of regs restored. */
24409 if (!(strategy & REST_INLINE_FPRS))
24410 for (int i = info->first_fp_reg_save; i < 64; i++)
24411 if (fixed_regs[i])
24412 {
24413 strategy |= REST_INLINE_FPRS;
24414 break;
24415 }
24416
24417 /* We can only use the out-of-line routines to restore fprs if we've
24418 saved all the registers from first_fp_reg_save in the prologue.
24419 Otherwise, we risk loading garbage. Of course, if we have saved
24420 out-of-line then we know we haven't skipped any fprs. */
24421 if ((strategy & SAVE_INLINE_FPRS)
24422 && !(strategy & REST_INLINE_FPRS))
24423 for (int i = info->first_fp_reg_save; i < 64; i++)
24424 if (!save_reg_p (i))
24425 {
24426 strategy |= REST_INLINE_FPRS;
24427 break;
24428 }
24429
24430 /* Similarly, for altivec regs. */
24431 if (!(strategy & REST_INLINE_VRS))
24432 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24433 if (fixed_regs[i])
24434 {
24435 strategy |= REST_INLINE_VRS;
24436 break;
24437 }
24438
24439 if ((strategy & SAVE_INLINE_VRS)
24440 && !(strategy & REST_INLINE_VRS))
24441 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24442 if (!save_reg_p (i))
24443 {
24444 strategy |= REST_INLINE_VRS;
24445 break;
24446 }
24447
24448 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24449 saved is an out-of-line save or restore. Set up the value for
24450 the next test (excluding out-of-line gprs). */
24451 bool lr_save_p = (info->lr_save_p
24452 || !(strategy & SAVE_INLINE_FPRS)
24453 || !(strategy & SAVE_INLINE_VRS)
24454 || !(strategy & REST_INLINE_FPRS)
24455 || !(strategy & REST_INLINE_VRS));
24456
24457 if (TARGET_MULTIPLE
24458 && !TARGET_POWERPC64
24459 && info->first_gp_reg_save < 31
24460 && !(flag_shrink_wrap
24461 && flag_shrink_wrap_separate
24462 && optimize_function_for_speed_p (cfun)))
24463 {
24464 int count = 0;
24465 for (int i = info->first_gp_reg_save; i < 32; i++)
24466 if (save_reg_p (i))
24467 count++;
24468
24469 if (count <= 1)
24470 /* Don't use store multiple if only one reg needs to be
24471 saved. This can occur for example when the ABI_V4 pic reg
24472 (r30) needs to be saved to make calls, but r31 is not
24473 used. */
24474 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24475 else
24476 {
24477 /* Prefer store multiple for saves over out-of-line
24478 routines, since the store-multiple instruction will
24479 always be smaller. */
24480 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24481
24482 /* The situation is more complicated with load multiple.
24483 We'd prefer to use the out-of-line routines for restores,
24484 since the "exit" out-of-line routines can handle the
24485	 restore of LR and the frame teardown.  However, it doesn't
24486 make sense to use the out-of-line routine if that is the
24487 only reason we'd need to save LR, and we can't use the
24488 "exit" out-of-line gpr restore if we have saved some
24489	 fprs; in those cases it is advantageous to use load
24490 multiple when available. */
24491 if (info->first_fp_reg_save != 64 || !lr_save_p)
24492 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24493 }
24494 }
24495
24496 /* Using the "exit" out-of-line routine does not improve code size
24497 if it would require LR to be saved and we are only saving one
24498 or two gprs.  */
24499 else if (!lr_save_p && info->first_gp_reg_save > 29)
24500 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24501
24502 /* Don't ever restore fixed regs. */
24503 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24504 for (int i = info->first_gp_reg_save; i < 32; i++)
24505 if (fixed_reg_p (i))
24506 {
24507 strategy |= REST_INLINE_GPRS;
24508 strategy &= ~REST_MULTIPLE;
24509 break;
24510 }
24511
24512 /* We can only use load multiple or the out-of-line routines to
24513 restore gprs if we've saved all the registers from
24514 first_gp_reg_save. Otherwise, we risk loading garbage.
24515 Of course, if we have saved out-of-line or used stmw then we know
24516 we haven't skipped any gprs. */
24517 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24518 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24519 for (int i = info->first_gp_reg_save; i < 32; i++)
24520 if (!save_reg_p (i))
24521 {
24522 strategy |= REST_INLINE_GPRS;
24523 strategy &= ~REST_MULTIPLE;
24524 break;
24525 }
24526
24527 if (TARGET_ELF && TARGET_64BIT)
24528 {
24529 if (!(strategy & SAVE_INLINE_FPRS))
24530 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24531 else if (!(strategy & SAVE_INLINE_GPRS)
24532 && info->first_fp_reg_save == 64)
24533 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24534 }
24535 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24536 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24537
24538 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24539 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24540
24541 return strategy;
24542 }
24543
24544 /* Calculate the stack information for the current function. This is
24545 complicated by having two separate calling sequences, the AIX calling
24546 sequence and the V.4 calling sequence.
24547
24548 AIX (and Darwin/Mac OS X) stack frames look like:
24549 32-bit 64-bit
24550 SP----> +---------------------------------------+
24551 | back chain to caller | 0 0
24552 +---------------------------------------+
24553 | saved CR | 4 8 (8-11)
24554 +---------------------------------------+
24555 | saved LR | 8 16
24556 +---------------------------------------+
24557 | reserved for compilers | 12 24
24558 +---------------------------------------+
24559 | reserved for binders | 16 32
24560 +---------------------------------------+
24561 | saved TOC pointer | 20 40
24562 +---------------------------------------+
24563 | Parameter save area (+padding*) (P) | 24 48
24564 +---------------------------------------+
24565 | Alloca space (A) | 24+P etc.
24566 +---------------------------------------+
24567 | Local variable space (L) | 24+P+A
24568 +---------------------------------------+
24569 | Float/int conversion temporary (X) | 24+P+A+L
24570 +---------------------------------------+
24571 | Save area for AltiVec registers (W) | 24+P+A+L+X
24572 +---------------------------------------+
24573 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24574 +---------------------------------------+
24575 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24576 +---------------------------------------+
24577	 | Save area for GP registers (G)	 | 24+P+A+L+X+W+Y+Z
24578	 +---------------------------------------+
24579	 | Save area for FP registers (F)	 | 24+P+A+L+X+W+Y+Z+G
24580 +---------------------------------------+
24581 old SP->| back chain to caller's caller |
24582 +---------------------------------------+
24583
24584 * If the alloca area is present, the parameter save area is
24585	 padded so that the alloca area starts 16-byte aligned.
24586
24587 The required alignment for AIX configurations is two words (i.e., 8
24588 or 16 bytes).
24589
24590 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24591
24592 SP----> +---------------------------------------+
24593 | Back chain to caller | 0
24594 +---------------------------------------+
24595 | Save area for CR | 8
24596 +---------------------------------------+
24597 | Saved LR | 16
24598 +---------------------------------------+
24599 | Saved TOC pointer | 24
24600 +---------------------------------------+
24601 | Parameter save area (+padding*) (P) | 32
24602 +---------------------------------------+
24603 | Alloca space (A) | 32+P
24604 +---------------------------------------+
24605 | Local variable space (L) | 32+P+A
24606 +---------------------------------------+
24607 | Save area for AltiVec registers (W) | 32+P+A+L
24608 +---------------------------------------+
24609 | AltiVec alignment padding (Y) | 32+P+A+L+W
24610 +---------------------------------------+
24611 | Save area for GP registers (G) | 32+P+A+L+W+Y
24612 +---------------------------------------+
24613 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24614 +---------------------------------------+
24615 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24616 +---------------------------------------+
24617
24618 * If the alloca area is present, the parameter save area is
24619	 padded so that the alloca area starts 16-byte aligned.
24620
24621 V.4 stack frames look like:
24622
24623 SP----> +---------------------------------------+
24624 | back chain to caller | 0
24625 +---------------------------------------+
24626 | caller's saved LR | 4
24627 +---------------------------------------+
24628 | Parameter save area (+padding*) (P) | 8
24629 +---------------------------------------+
24630 | Alloca space (A) | 8+P
24631 +---------------------------------------+
24632 | Varargs save area (V) | 8+P+A
24633 +---------------------------------------+
24634 | Local variable space (L) | 8+P+A+V
24635 +---------------------------------------+
24636 | Float/int conversion temporary (X) | 8+P+A+V+L
24637 +---------------------------------------+
24638 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24639 +---------------------------------------+
24640 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24641 +---------------------------------------+
24642 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24643 +---------------------------------------+
24644 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24645 +---------------------------------------+
24646 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24647 +---------------------------------------+
24648 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24649 +---------------------------------------+
24650 old SP->| back chain to caller's caller |
24651 +---------------------------------------+
24652
24653 * If the alloca area is present and the required alignment is
24654 16 bytes, the parameter save area is padded so that the
24655 alloca area starts 16-byte aligned.
24656
24657 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24658 given. (But note below and in sysv4.h that we require only 8 and
24659	 may round up the size of our stack frame anyway.  The historical
24660 reason is early versions of powerpc-linux which didn't properly
24661 align the stack at program startup. A happy side-effect is that
24662 -mno-eabi libraries can be used with -meabi programs.)
24663
24664 The EABI configuration defaults to the V.4 layout. However,
24665 the stack alignment requirements may differ. If -mno-eabi is not
24666 given, the required stack alignment is 8 bytes; if -mno-eabi is
24667 given, the required alignment is 16 bytes. (But see V.4 comment
24668 above.) */
24669
24670 #ifndef ABI_STACK_BOUNDARY
24671 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24672 #endif
24673
24674 static rs6000_stack_t *
24675 rs6000_stack_info (void)
24676 {
24677 /* We should never be called for thunks, we are not set up for that. */
24678 gcc_assert (!cfun->is_thunk);
24679
24680 rs6000_stack_t *info = &stack_info;
24681 int reg_size = TARGET_32BIT ? 4 : 8;
24682 int ehrd_size;
24683 int ehcr_size;
24684 int save_align;
24685 int first_gp;
24686 HOST_WIDE_INT non_fixed_size;
24687 bool using_static_chain_p;
24688
24689 if (reload_completed && info->reload_completed)
24690 return info;
24691
24692 memset (info, 0, sizeof (*info));
24693 info->reload_completed = reload_completed;
24694
24695 /* Select which calling sequence. */
24696 info->abi = DEFAULT_ABI;
24697
24698 /* Calculate which registers need to be saved & save area size. */
24699 info->first_gp_reg_save = first_reg_to_save ();
24700 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24701 even if it currently looks like we won't. Reload may need it to
24702 get at a constant; if so, it will have already created a constant
24703 pool entry for it. */
24704 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24705 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24706 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24707 && crtl->uses_const_pool
24708 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24709 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24710 else
24711 first_gp = info->first_gp_reg_save;
24712
24713 info->gp_size = reg_size * (32 - first_gp);
24714
24715 info->first_fp_reg_save = first_fp_reg_to_save ();
24716 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24717
24718 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24719 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24720 - info->first_altivec_reg_save);
24721
24722 /* Does this function call anything? */
24723 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24724
24725 /* Determine if we need to save the condition code registers. */
24726 if (save_reg_p (CR2_REGNO)
24727 || save_reg_p (CR3_REGNO)
24728 || save_reg_p (CR4_REGNO))
24729 {
24730 info->cr_save_p = 1;
24731 if (DEFAULT_ABI == ABI_V4)
24732 info->cr_size = reg_size;
24733 }
24734
24735 /* If the current function calls __builtin_eh_return, then we need
24736 to allocate stack space for registers that will hold data for
24737 the exception handler. */
24738 if (crtl->calls_eh_return)
24739 {
24740 unsigned int i;
24741 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24742 continue;
24743
24744 ehrd_size = i * UNITS_PER_WORD;
24745 }
24746 else
24747 ehrd_size = 0;
24748
24749 /* In the ELFv2 ABI, we also need to allocate space for separate
24750 CR field save areas if the function calls __builtin_eh_return. */
24751 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24752 {
24753 /* This hard-codes that we have three call-saved CR fields. */
24754 ehcr_size = 3 * reg_size;
24755 /* We do *not* use the regular CR save mechanism. */
24756 info->cr_save_p = 0;
24757 }
24758 else
24759 ehcr_size = 0;
24760
24761 /* Determine various sizes. */
24762 info->reg_size = reg_size;
24763 info->fixed_size = RS6000_SAVE_AREA;
24764 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24765 if (cfun->calls_alloca)
24766 info->parm_size =
24767 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24768 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24769 else
24770 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24771 TARGET_ALTIVEC ? 16 : 8);
24772 if (FRAME_GROWS_DOWNWARD)
24773 info->vars_size
24774 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24775 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24776 - (info->fixed_size + info->vars_size + info->parm_size);
24777
24778 if (TARGET_ALTIVEC_ABI)
24779 info->vrsave_mask = compute_vrsave_mask ();
24780
24781 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24782 info->vrsave_size = 4;
24783
24784 compute_save_world_info (info);
24785
24786 /* Calculate the offsets. */
24787 switch (DEFAULT_ABI)
24788 {
24789 case ABI_NONE:
24790 default:
24791 gcc_unreachable ();
24792
24793 case ABI_AIX:
24794 case ABI_ELFv2:
24795 case ABI_DARWIN:
24796 info->fp_save_offset = -info->fp_size;
24797 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24798
24799 if (TARGET_ALTIVEC_ABI)
24800 {
24801 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24802
24803 /* Align stack so vector save area is on a quadword boundary.
24804 The padding goes above the vectors. */
24805 if (info->altivec_size != 0)
24806 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24807
24808 info->altivec_save_offset = info->vrsave_save_offset
24809 - info->altivec_padding_size
24810 - info->altivec_size;
24811 gcc_assert (info->altivec_size == 0
24812 || info->altivec_save_offset % 16 == 0);
24813
24814 /* Adjust for AltiVec case. */
24815 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24816 }
24817 else
24818 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24819
24820 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24821 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24822 info->lr_save_offset = 2*reg_size;
24823 break;
24824
24825 case ABI_V4:
24826 info->fp_save_offset = -info->fp_size;
24827 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24828 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24829
24830 if (TARGET_ALTIVEC_ABI)
24831 {
24832 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24833
24834 /* Align stack so vector save area is on a quadword boundary. */
24835 if (info->altivec_size != 0)
24836 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24837
24838 info->altivec_save_offset = info->vrsave_save_offset
24839 - info->altivec_padding_size
24840 - info->altivec_size;
24841
24842 /* Adjust for AltiVec case. */
24843 info->ehrd_offset = info->altivec_save_offset;
24844 }
24845 else
24846 info->ehrd_offset = info->cr_save_offset;
24847
24848 info->ehrd_offset -= ehrd_size;
24849 info->lr_save_offset = reg_size;
24850 }
24851
24852 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24853 info->save_size = RS6000_ALIGN (info->fp_size
24854 + info->gp_size
24855 + info->altivec_size
24856 + info->altivec_padding_size
24857 + ehrd_size
24858 + ehcr_size
24859 + info->cr_size
24860 + info->vrsave_size,
24861 save_align);
24862
24863 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24864
24865 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24866 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24867
24868 /* Determine if we need to save the link register. */
24869 if (info->calls_p
24870 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24871 && crtl->profile
24872 && !TARGET_PROFILE_KERNEL)
24873 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24874 #ifdef TARGET_RELOCATABLE
24875 || (DEFAULT_ABI == ABI_V4
24876 && (TARGET_RELOCATABLE || flag_pic > 1)
24877 && !constant_pool_empty_p ())
24878 #endif
24879 || rs6000_ra_ever_killed ())
24880 info->lr_save_p = 1;
24881
24882 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24883 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24884 && call_used_regs[STATIC_CHAIN_REGNUM]);
24885 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24886
24887 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24888 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24889 || !(info->savres_strategy & SAVE_INLINE_VRS)
24890 || !(info->savres_strategy & REST_INLINE_GPRS)
24891 || !(info->savres_strategy & REST_INLINE_FPRS)
24892 || !(info->savres_strategy & REST_INLINE_VRS))
24893 info->lr_save_p = 1;
24894
24895 if (info->lr_save_p)
24896 df_set_regs_ever_live (LR_REGNO, true);
24897
24898 /* Determine if we need to allocate any stack frame:
24899
24900 For AIX we need to push the stack if a frame pointer is needed
24901 (because the stack might be dynamically adjusted), if we are
24902 debugging, if we make calls, or if the sum of fp_save, gp_save,
24903 and local variables are more than the space needed to save all
24904 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24905 + 18*8 = 288 (GPR13 reserved).
24906
24907 For V.4 we don't have the stack cushion that AIX uses, but assume
24908 that the debugger can handle stackless frames. */
24909
24910 if (info->calls_p)
24911 info->push_p = 1;
24912
24913 else if (DEFAULT_ABI == ABI_V4)
24914 info->push_p = non_fixed_size != 0;
24915
24916 else if (frame_pointer_needed)
24917 info->push_p = 1;
24918
24919 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24920 info->push_p = 1;
24921
24922 else
24923 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24924
24925 return info;
24926 }
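/* Editorial note on the arithmetic above: RS6000_ALIGN (n, a) rounds N
   up to a multiple of A, so e.g. RS6000_ALIGN (40, 16) == 48.  A V.4
   frame whose raw save/var/parm total is 40 bytes is therefore pushed
   as a 48-byte frame when ABI_STACK_BOUNDARY / BITS_PER_UNIT is 16.  */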
24927
24928 static void
24929 debug_stack_info (rs6000_stack_t *info)
24930 {
24931 const char *abi_string;
24932
24933 if (! info)
24934 info = rs6000_stack_info ();
24935
24936 fprintf (stderr, "\nStack information for function %s:\n",
24937 ((current_function_decl && DECL_NAME (current_function_decl))
24938 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24939 : "<unknown>"));
24940
24941 switch (info->abi)
24942 {
24943 default: abi_string = "Unknown"; break;
24944 case ABI_NONE: abi_string = "NONE"; break;
24945 case ABI_AIX: abi_string = "AIX"; break;
24946 case ABI_ELFv2: abi_string = "ELFv2"; break;
24947 case ABI_DARWIN: abi_string = "Darwin"; break;
24948 case ABI_V4: abi_string = "V.4"; break;
24949 }
24950
24951 fprintf (stderr, "\tABI = %5s\n", abi_string);
24952
24953 if (TARGET_ALTIVEC_ABI)
24954 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24955
24956 if (info->first_gp_reg_save != 32)
24957 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24958
24959 if (info->first_fp_reg_save != 64)
24960 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24961
24962 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24963 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24964 info->first_altivec_reg_save);
24965
24966 if (info->lr_save_p)
24967 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24968
24969 if (info->cr_save_p)
24970 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24971
24972 if (info->vrsave_mask)
24973 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24974
24975 if (info->push_p)
24976 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24977
24978 if (info->calls_p)
24979 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24980
24981 if (info->gp_size)
24982 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24983
24984 if (info->fp_size)
24985 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24986
24987 if (info->altivec_size)
24988 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24989 info->altivec_save_offset);
24990
24991 if (info->vrsave_size)
24992 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24993 info->vrsave_save_offset);
24994
24995 if (info->lr_save_p)
24996 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24997
24998 if (info->cr_save_p)
24999 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
25000
25001 if (info->varargs_save_offset)
25002 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
25003
25004 if (info->total_size)
25005 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25006 info->total_size);
25007
25008 if (info->vars_size)
25009 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25010 info->vars_size);
25011
25012 if (info->parm_size)
25013 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
25014
25015 if (info->fixed_size)
25016 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
25017
25018 if (info->gp_size)
25019 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
25020
25021 if (info->fp_size)
25022 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
25023
25024 if (info->altivec_size)
25025 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
25026
25027 if (info->vrsave_size)
25028 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
25029
25030 if (info->altivec_padding_size)
25031 fprintf (stderr, "\taltivec_padding_size= %5d\n",
25032 info->altivec_padding_size);
25033
25034 if (info->cr_size)
25035 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25036
25037 if (info->save_size)
25038 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25039
25040 if (info->reg_size != 4)
25041 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25042
25043 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25044
25045 fprintf (stderr, "\n");
25046 }
25047
25048 rtx
25049 rs6000_return_addr (int count, rtx frame)
25050 {
25051 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
25052 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
25053 if (count != 0
25054 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25055 {
25056 cfun->machine->ra_needs_full_frame = 1;
25057
25058 if (count == 0)
25059 /* FRAME is set to frame_pointer_rtx by the generic code, but that
25060 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
25061 frame = stack_pointer_rtx;
25062 rtx prev_frame_addr = memory_address (Pmode, frame);
25063 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
25064 rtx lr_save_off = plus_constant (Pmode,
25065 prev_frame, RETURN_ADDRESS_OFFSET);
25066 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
25067 return gen_rtx_MEM (Pmode, lr_save_addr);
25068 }
25069
25070 cfun->machine->ra_need_lr = 1;
25071 return get_hard_reg_initial_val (Pmode, LR_REGNO);
25072 }
25073
25074 /* Say whether a function is a candidate for sibcall handling or not. */
25075
25076 static bool
25077 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25078 {
25079 tree fntype;
25080
25081 /* The sibcall epilogue may clobber the static chain register.
25082 ??? We could work harder and avoid that, but it's probably
25083 not worth the hassle in practice. */
25084 if (CALL_EXPR_STATIC_CHAIN (exp))
25085 return false;
25086
25087 if (decl)
25088 fntype = TREE_TYPE (decl);
25089 else
25090 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25091
25092 /* We can't do it if the called function has more vector parameters
25093 than the current function; there's nowhere to put the VRsave code. */
25094 if (TARGET_ALTIVEC_ABI
25095 && TARGET_ALTIVEC_VRSAVE
25096 && !(decl && decl == current_function_decl))
25097 {
25098 function_args_iterator args_iter;
25099 tree type;
25100 int nvreg = 0;
25101
25102 /* Functions with vector parameters are required to have a
25103 prototype, so the argument type info must be available
25104 here. */
25105	 FOREACH_FUNCTION_ARGS (fntype, type, args_iter)
25106 if (TREE_CODE (type) == VECTOR_TYPE
25107 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25108 nvreg++;
25109
25110	 FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
25111 if (TREE_CODE (type) == VECTOR_TYPE
25112 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25113 nvreg--;
25114
25115 if (nvreg > 0)
25116 return false;
25117 }
25118
25119 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25120 functions, because the callee may have a different TOC pointer to
25121 the caller and there's no way to ensure we restore the TOC when
25122 we return. With the secure-plt SYSV ABI we can't make non-local
25123 calls when -fpic/PIC because the plt call stubs use r30. */
25124 if (DEFAULT_ABI == ABI_DARWIN
25125 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25126 && decl
25127 && !DECL_EXTERNAL (decl)
25128 && !DECL_WEAK (decl)
25129 && (*targetm.binds_local_p) (decl))
25130 || (DEFAULT_ABI == ABI_V4
25131 && (!TARGET_SECURE_PLT
25132 || !flag_pic
25133 || (decl
25134 && (*targetm.binds_local_p) (decl)))))
25135 {
25136 tree attr_list = TYPE_ATTRIBUTES (fntype);
25137
25138 if (!lookup_attribute ("longcall", attr_list)
25139 || lookup_attribute ("shortcall", attr_list))
25140 return true;
25141 }
25142
25143 return false;
25144 }
25145
25146 static int
25147 rs6000_ra_ever_killed (void)
25148 {
25149 rtx_insn *top;
25150 rtx reg;
25151 rtx_insn *insn;
25152
25153 if (cfun->is_thunk)
25154 return 0;
25155
25156 if (cfun->machine->lr_save_state)
25157 return cfun->machine->lr_save_state - 1;
25158
25159 /* regs_ever_live has LR marked as used if any sibcalls are present,
25160 but this should not force saving and restoring in the
25161 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25162 clobbers LR, so that is inappropriate. */
25163
25164 /* Also, the prologue can generate a store into LR that
25165 doesn't really count, like this:
25166
25167 move LR->R0
25168 bcl to set PIC register
25169 move LR->R31
25170 move R0->LR
25171
25172 When we're called from the epilogue, we need to avoid counting
25173 this as a store. */
25174
25175 push_topmost_sequence ();
25176 top = get_insns ();
25177 pop_topmost_sequence ();
25178 reg = gen_rtx_REG (Pmode, LR_REGNO);
25179
25180 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25181 {
25182 if (INSN_P (insn))
25183 {
25184 if (CALL_P (insn))
25185 {
25186 if (!SIBLING_CALL_P (insn))
25187 return 1;
25188 }
25189 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25190 return 1;
25191 else if (set_of (reg, insn) != NULL_RTX
25192 && !prologue_epilogue_contains (insn))
25193 return 1;
25194 }
25195 }
25196 return 0;
25197 }
25198 \f
25199 /* Emit instructions needed to load the TOC register.
25200	 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
25201	 and there is a constant pool, or for SVR4 -fpic.  */
25202
25203 void
25204 rs6000_emit_load_toc_table (int fromprolog)
25205 {
25206 rtx dest;
25207 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25208
25209 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25210 {
25211 char buf[30];
25212 rtx lab, tmp1, tmp2, got;
25213
25214 lab = gen_label_rtx ();
25215 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25216 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25217 if (flag_pic == 2)
25218 {
25219 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25220 need_toc_init = 1;
25221 }
25222 else
25223 got = rs6000_got_sym ();
25224 tmp1 = tmp2 = dest;
25225 if (!fromprolog)
25226 {
25227 tmp1 = gen_reg_rtx (Pmode);
25228 tmp2 = gen_reg_rtx (Pmode);
25229 }
25230 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25231 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25232 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25233 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25234 }
25235 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25236 {
25237 emit_insn (gen_load_toc_v4_pic_si ());
25238 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25239 }
25240 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25241 {
25242 char buf[30];
25243 rtx temp0 = (fromprolog
25244 ? gen_rtx_REG (Pmode, 0)
25245 : gen_reg_rtx (Pmode));
25246
25247 if (fromprolog)
25248 {
25249 rtx symF, symL;
25250
25251 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25252 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25253
25254 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25255 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25256
25257 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25258 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25259 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25260 }
25261 else
25262 {
25263 rtx tocsym, lab;
25264
25265 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25266 need_toc_init = 1;
25267 lab = gen_label_rtx ();
25268 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25269 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25270 if (TARGET_LINK_STACK)
25271 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25272 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25273 }
25274 emit_insn (gen_addsi3 (dest, temp0, dest));
25275 }
25276 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25277 {
25278 /* This is for AIX code running in non-PIC ELF32. */
25279 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25280
25281 need_toc_init = 1;
25282 emit_insn (gen_elf_high (dest, realsym));
25283 emit_insn (gen_elf_low (dest, dest, realsym));
25284 }
25285 else
25286 {
25287 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25288
25289 if (TARGET_32BIT)
25290 emit_insn (gen_load_toc_aix_si (dest));
25291 else
25292 emit_insn (gen_load_toc_aix_di (dest));
25293 }
25294 }
25295
25296 /* Emit instructions to restore the link register after determining where
25297 its value has been stored. */
25298
25299 void
25300 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25301 {
25302 rs6000_stack_t *info = rs6000_stack_info ();
25303 rtx operands[2];
25304
25305 operands[0] = source;
25306 operands[1] = scratch;
25307
25308 if (info->lr_save_p)
25309 {
25310 rtx frame_rtx = stack_pointer_rtx;
25311 HOST_WIDE_INT sp_offset = 0;
25312 rtx tmp;
25313
25314 if (frame_pointer_needed
25315 || cfun->calls_alloca
25316 || info->total_size > 32767)
25317 {
25318 tmp = gen_frame_mem (Pmode, frame_rtx);
25319 emit_move_insn (operands[1], tmp);
25320 frame_rtx = operands[1];
25321 }
25322 else if (info->push_p)
25323 sp_offset = info->total_size;
25324
25325 tmp = plus_constant (Pmode, frame_rtx,
25326 info->lr_save_offset + sp_offset);
25327 tmp = gen_frame_mem (Pmode, tmp);
25328 emit_move_insn (tmp, operands[0]);
25329 }
25330 else
25331 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25332
25333 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25334 state of lr_save_p so any change from here on would be a bug. In
25335 particular, stop rs6000_ra_ever_killed from considering the SET
25336 of lr we may have added just above. */
25337 cfun->machine->lr_save_state = info->lr_save_p + 1;
25338 }
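/* Editorial note: lr_save_state uses a shifted encoding so that zero
   can mean "undecided": 0 = unknown, 1 = LR not saved (lr_save_p + 1
   with lr_save_p == 0), 2 = LR saved.  rs6000_ra_ever_killed above
   decodes it by returning lr_save_state - 1 whenever it is nonzero.  */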
25339
25340 static GTY(()) alias_set_type set = -1;
25341
25342 alias_set_type
25343 get_TOC_alias_set (void)
25344 {
25345 if (set == -1)
25346 set = new_alias_set ();
25347 return set;
25348 }
25349
25350 /* This returns nonzero if the current function uses the TOC. This is
25351 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25352 is generated by the ABI_V4 load_toc_* patterns.
25353 Return 2 instead of 1 if the load_toc_* pattern is in the function
25354 partition that doesn't start the function. */
25355 #if TARGET_ELF
25356 static int
25357 uses_TOC (void)
25358 {
25359 rtx_insn *insn;
25360 int ret = 1;
25361
25362 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25363 {
25364 if (INSN_P (insn))
25365 {
25366 rtx pat = PATTERN (insn);
25367 int i;
25368
25369 if (GET_CODE (pat) == PARALLEL)
25370 for (i = 0; i < XVECLEN (pat, 0); i++)
25371 {
25372 rtx sub = XVECEXP (pat, 0, i);
25373 if (GET_CODE (sub) == USE)
25374 {
25375 sub = XEXP (sub, 0);
25376 if (GET_CODE (sub) == UNSPEC
25377 && XINT (sub, 1) == UNSPEC_TOC)
25378 return ret;
25379 }
25380 }
25381 }
25382 else if (crtl->has_bb_partition
25383 && NOTE_P (insn)
25384 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25385 ret = 2;
25386 }
25387 return 0;
25388 }
25389 #endif
25390
25391 rtx
25392 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25393 {
25394 rtx tocrel, tocreg, hi;
25395
25396 if (TARGET_DEBUG_ADDR)
25397 {
25398 if (SYMBOL_REF_P (symbol))
25399 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25400 XSTR (symbol, 0));
25401 else
25402 {
25403 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25404 GET_RTX_NAME (GET_CODE (symbol)));
25405 debug_rtx (symbol);
25406 }
25407 }
25408
25409 if (!can_create_pseudo_p ())
25410 df_set_regs_ever_live (TOC_REGISTER, true);
25411
25412 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25413 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25414 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25415 return tocrel;
25416
25417 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25418 if (largetoc_reg != NULL)
25419 {
25420 emit_move_insn (largetoc_reg, hi);
25421 hi = largetoc_reg;
25422 }
25423 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25424 }
25425
25426 /* Issue assembly directives that create a reference to the given DWARF
25427 FRAME_TABLE_LABEL from the current function section. */
25428 void
25429 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25430 {
25431 fprintf (asm_out_file, "\t.ref %s\n",
25432 (* targetm.strip_name_encoding) (frame_table_label));
25433 }
25434 \f
25435 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25436 and the change to the stack pointer. */
25437
25438 static void
25439 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25440 {
25441 rtvec p;
25442 int i;
25443 rtx regs[3];
25444
25445 i = 0;
25446 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25447 if (hard_frame_needed)
25448 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25449 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25450 || (hard_frame_needed
25451 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25452 regs[i++] = fp;
25453
25454 p = rtvec_alloc (i);
25455 while (--i >= 0)
25456 {
25457 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25458 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25459 }
25460
25461 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25462 }
25463
25464 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25465 and set the appropriate attributes for the generated insn. Return the
25466 first insn which adjusts the stack pointer or the last insn before
25467 the stack adjustment loop.
25468
25469 SIZE_INT is used to create the CFI note for the allocation.
25470
25471	 The local SIZE_RTX holds the adjustment as an rtx; note that since
25472	 stacks grow to lower addresses its runtime value is -SIZE_INT.
25473
25474 ORIG_SP contains the backchain value that must be stored at *sp. */
25475
25476 static rtx_insn *
25477 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25478 {
25479 rtx_insn *insn;
25480
25481 rtx size_rtx = GEN_INT (-size_int);
25482 if (size_int > 32767)
25483 {
25484 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25485 /* Need a note here so that try_split doesn't get confused. */
25486 if (get_last_insn () == NULL_RTX)
25487 emit_note (NOTE_INSN_DELETED);
25488 insn = emit_move_insn (tmp_reg, size_rtx);
25489 try_split (PATTERN (insn), insn, 0);
25490 size_rtx = tmp_reg;
25491 }
25492
25493 if (TARGET_32BIT)
25494 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25495 stack_pointer_rtx,
25496 size_rtx,
25497 orig_sp));
25498 else
25499 insn = emit_insn (gen_movdi_update_stack (stack_pointer_rtx,
25500 stack_pointer_rtx,
25501 size_rtx,
25502 orig_sp));
25503 rtx par = PATTERN (insn);
25504 gcc_assert (GET_CODE (par) == PARALLEL);
25505 rtx set = XVECEXP (par, 0, 0);
25506 gcc_assert (GET_CODE (set) == SET);
25507 rtx mem = SET_DEST (set);
25508 gcc_assert (MEM_P (mem));
25509 MEM_NOTRAP_P (mem) = 1;
25510 set_mem_alias_set (mem, get_frame_alias_set ());
25511
25512 RTX_FRAME_RELATED_P (insn) = 1;
25513 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25514 gen_rtx_SET (stack_pointer_rtx,
25515 gen_rtx_PLUS (Pmode,
25516 stack_pointer_rtx,
25517 GEN_INT (-size_int))));
25518
25519 /* Emit a blockage to ensure the allocation/probing insns are
25520 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25521 note for similar reasons. */
25522 if (flag_stack_clash_protection)
25523 {
25524 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25525 emit_insn (gen_blockage ());
25526 }
25527
25528 return insn;
25529 }
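/* Editorial note: for a frame size that fits in a 16-bit displacement,
   the insn emitted above is a single store-with-update such as
   "stdu r1,-SIZE(r1)" on 64-bit ("stwu" on 32-bit): it stores the back
   chain at the new top of stack and decrements r1 in one instruction,
   which is what makes it suitable as a stack-clash probe.  */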
25530
25531 static HOST_WIDE_INT
25532 get_stack_clash_protection_probe_interval (void)
25533 {
25534 return (HOST_WIDE_INT_1U
25535 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25536 }
25537
25538 static HOST_WIDE_INT
25539 get_stack_clash_protection_guard_size (void)
25540 {
25541 return (HOST_WIDE_INT_1U
25542 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25543 }
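/* Editorial example: these params are log2 values, so a
   PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL of 12 yields a probe
   every HOST_WIDE_INT_1U << 12 == 4096 bytes, i.e. one typical page.  */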
25544
25545 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25546 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25547
25548 COPY_REG, if non-null, should contain a copy of the original
25549 stack pointer at exit from this function.
25550
25551	 This is subtly different from the Ada probing in that it tries hard to
25552 prevent attacks that jump the stack guard. Thus it is never allowed to
25553 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25554 space without a suitable probe. */
25555 static rtx_insn *
25556 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25557 rtx copy_reg)
25558 {
25559 rtx orig_sp = copy_reg;
25560
25561 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25562
25563 /* Round the size down to a multiple of PROBE_INTERVAL. */
25564 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25565
25566 /* If explicitly requested,
25567 or the rounded size is not the same as the original size,
25568 or the rounded size is greater than the probe interval,
25569 then we will need a copy of the original stack pointer.  */
25570 if (rounded_size != orig_size
25571 || rounded_size > probe_interval
25572 || copy_reg)
25573 {
25574 /* If the caller did not request a copy of the incoming stack
25575 pointer, then we use r0 to hold the copy. */
25576 if (!copy_reg)
25577 orig_sp = gen_rtx_REG (Pmode, 0);
25578 emit_move_insn (orig_sp, stack_pointer_rtx);
25579 }
25580
25581 /* There are three cases here.
25582
25583 The first is a single probe, which is the most common and the most
25584 efficiently implemented, as it does not need a copy of the original
25585 stack pointer if there are no residuals.
25586
25587 The second is unrolled allocation/probes, which we use if there are
25588 just a few of them.  It needs to save the original stack pointer into a
25589 temporary for use as a source register in the allocation/probe.
25590
25591 The last is a loop.  This is the least common case and the least efficient.  */
25592 rtx_insn *retval = NULL;
25593 if (rounded_size == probe_interval)
25594 {
25595 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25596
25597 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25598 }
25599 else if (rounded_size <= 8 * probe_interval)
25600 {
25601 /* The ABI requires using the store-with-update insns to allocate
25602 space and store the backchain into the stack.
25603
25604 So we save the current stack pointer into a temporary, then
25605 emit the store-with-update insns to store the saved stack pointer
25606 into the right location in each new page. */
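/* For example (illustrative, assuming a 4096 byte interval on a
   64-bit target), two intervals unroll to roughly

	mr   r0,r1		# ORIG_SP, saved earlier
	stdu r0,-4096(r1)
	stdu r0,-4096(r1)

   so each new page is written, and therefore probed.  */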
25607 for (int i = 0; i < rounded_size; i += probe_interval)
25608 {
25609 rtx_insn *insn
25610 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25611
25612 /* Save the first stack adjustment in RETVAL. */
25613 if (i == 0)
25614 retval = insn;
25615 }
25616
25617 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25618 }
25619 else
25620 {
25621 /* Compute the ending address. */
25622 rtx end_addr
25623 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25624 rtx rs = GEN_INT (-rounded_size);
25625 rtx_insn *insn;
25626 if (add_operand (rs, Pmode))
25627 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25628 else
25629 {
25630 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25631 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25632 stack_pointer_rtx));
25633 /* Describe the effect of INSN to the CFI engine. */
25634 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25635 gen_rtx_SET (end_addr,
25636 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25637 rs)));
25638 }
25639 RTX_FRAME_RELATED_P (insn) = 1;
25640
25641 /* Emit the loop. */
25642 if (TARGET_64BIT)
25643 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25644 stack_pointer_rtx, orig_sp,
25645 end_addr));
25646 else
25647 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25648 stack_pointer_rtx, orig_sp,
25649 end_addr));
25650 RTX_FRAME_RELATED_P (retval) = 1;
25651 /* Describe the effect of INSN to the CFI engine. */
25652 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25653 gen_rtx_SET (stack_pointer_rtx, end_addr));
25654
25655 /* Emit a blockage to ensure the allocation/probing insns are
25656 not optimized, combined, removed, etc. Other cases handle this
25657 within their call to rs6000_emit_allocate_stack_1. */
25658 emit_insn (gen_blockage ());
25659
25660 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25661 }
25662
25663 if (orig_size != rounded_size)
25664 {
25665 /* Allocate (and implicitly probe) any residual space. */
25666 HOST_WIDE_INT residual = orig_size - rounded_size;
25667
25668 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25669
25670 /* If the residual was the only allocation, then we can return the
25671 allocating insn. */
25672 if (!retval)
25673 retval = insn;
25674 }
25675
25676 return retval;
25677 }
25678
25679 /* Emit the correct code for allocating stack space, as insns.
25680 If COPY_REG, make sure a copy of the old frame is left there.
25681 The generated code may use hard register 0 as a temporary. */
25682
25683 static rtx_insn *
25684 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25685 {
25686 rtx_insn *insn;
25687 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25688 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25689 rtx todec = gen_int_mode (-size, Pmode);
25690
25691 if (INTVAL (todec) != -size)
25692 {
25693 warning (0, "stack frame too large");
25694 emit_insn (gen_trap ());
25695 return 0;
25696 }
25697
25698 if (crtl->limit_stack)
25699 {
25700 if (REG_P (stack_limit_rtx)
25701 && REGNO (stack_limit_rtx) > 1
25702 && REGNO (stack_limit_rtx) <= 31)
25703 {
25704 rtx_insn *insn
25705 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25706 gcc_assert (insn);
25707 emit_insn (insn);
25708 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25709 }
25710 else if (SYMBOL_REF_P (stack_limit_rtx)
25711 && TARGET_32BIT
25712 && DEFAULT_ABI == ABI_V4
25713 && !flag_pic)
25714 {
25715 rtx toload = gen_rtx_CONST (VOIDmode,
25716 gen_rtx_PLUS (Pmode,
25717 stack_limit_rtx,
25718 GEN_INT (size)));
25719
25720 emit_insn (gen_elf_high (tmp_reg, toload));
25721 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25722 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25723 const0_rtx));
25724 }
25725 else
25726 warning (0, "stack limit expression is not supported");
25727 }
25728
25729 if (flag_stack_clash_protection)
25730 {
25731 if (size < get_stack_clash_protection_guard_size ())
25732 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25733 else
25734 {
25735 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25736 copy_reg);
25737
25738 /* If we asked for a copy with an offset, then we still need to add
25739 in the offset.  */
25740 if (copy_reg && copy_off)
25741 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25742 return insn;
25743 }
25744 }
25745
25746 if (copy_reg)
25747 {
25748 if (copy_off != 0)
25749 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25750 else
25751 emit_move_insn (copy_reg, stack_reg);
25752 }
25753
25754 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25755 it now and set the alias set/attributes. The above gen_*_update
25756 calls will generate a PARALLEL with the MEM set being the first
25757 operation. */
25758 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25759 return insn;
25760 }
25761
25762 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25763
25764 #if PROBE_INTERVAL > 32768
25765 #error Cannot use indexed addressing mode for stack probing
25766 #endif
25767
25768 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25769 inclusive. These are offsets from the current stack pointer. */
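/* A worked example with assumed values: for FIRST of 16384, SIZE of
   12288 and a 4096 byte PROBE_INTERVAL, this path probes the words at
   sp-20480, sp-24576 and finally sp-28672.  */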
25770
25771 static void
25772 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25773 {
25774 /* See if we have a constant small number of probes to generate. If so,
25775 that's the easy case. */
25776 if (first + size <= 32768)
25777 {
25778 HOST_WIDE_INT i;
25779
25780 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25781 it exceeds SIZE. If only one probe is needed, this will not
25782 generate any code. Then probe at FIRST + SIZE. */
25783 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25784 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25785 -(first + i)));
25786
25787 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25788 -(first + size)));
25789 }
25790
25791 /* Otherwise, do the same as above, but in a loop. Note that we must be
25792 extra careful with variables wrapping around because we might be at
25793 the very top (or the very bottom) of the address space and we have
25794 to be able to handle this case properly; in particular, we use an
25795 equality test for the loop condition. */
25796 else
25797 {
25798 HOST_WIDE_INT rounded_size;
25799 rtx r12 = gen_rtx_REG (Pmode, 12);
25800 rtx r0 = gen_rtx_REG (Pmode, 0);
25801
25802 /* Sanity check for the addressing mode we're going to use. */
25803 gcc_assert (first <= 32768);
25804
25805 /* Step 1: round SIZE to the previous multiple of the interval. */
25806
25807 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25808
25809
25810 /* Step 2: compute initial and final value of the loop counter. */
25811
25812 /* TEST_ADDR = SP + FIRST. */
25813 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25814 -first)));
25815
25816 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25817 if (rounded_size > 32768)
25818 {
25819 emit_move_insn (r0, GEN_INT (-rounded_size));
25820 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25821 }
25822 else
25823 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25824 -rounded_size)));
25825
25826
25827 /* Step 3: the loop
25828
25829 do
25830 {
25831 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25832 probe at TEST_ADDR
25833 }
25834 while (TEST_ADDR != LAST_ADDR)
25835
25836 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25837 until it is equal to ROUNDED_SIZE. */
25838
25839 if (TARGET_64BIT)
25840 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25841 else
25842 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25843
25844
25845 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25846 that SIZE is equal to ROUNDED_SIZE. */
25847
25848 if (size != rounded_size)
25849 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25850 }
25851 }
25852
25853 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25854 addresses, not offsets. */
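/* With the r12/r0 register choices made by rs6000_emit_probe_stack_range
   and a 4096 byte PROBE_INTERVAL, the loop emitted below reads (32-bit
   shown; 64-bit uses cmpd):

	.LPSRL0:
		addi 12,12,-4096
		stw 0,0(12)
		cmpw 0,12,0
		bne 0,.LPSRL0
   */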
25855
25856 static const char *
25857 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25858 {
25859 static int labelno = 0;
25860 char loop_lab[32];
25861 rtx xops[2];
25862
25863 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25864
25865 /* Loop. */
25866 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25867
25868 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25869 xops[0] = reg1;
25870 xops[1] = GEN_INT (-PROBE_INTERVAL);
25871 output_asm_insn ("addi %0,%0,%1", xops);
25872
25873 /* Probe at TEST_ADDR. */
25874 xops[1] = gen_rtx_REG (Pmode, 0);
25875 output_asm_insn ("stw %1,0(%0)", xops);
25876
25877 /* Test if TEST_ADDR == LAST_ADDR. */
25878 xops[1] = reg2;
25879 if (TARGET_64BIT)
25880 output_asm_insn ("cmpd 0,%0,%1", xops);
25881 else
25882 output_asm_insn ("cmpw 0,%0,%1", xops);
25883
25884 /* Branch. */
25885 fputs ("\tbne 0,", asm_out_file);
25886 assemble_name_raw (asm_out_file, loop_lab);
25887 fputc ('\n', asm_out_file);
25888
25889 return "";
25890 }
25891
25892 /* This function is called when rs6000_frame_related is processing
25893 SETs within a PARALLEL, and returns whether the REGNO save ought to
25894 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25895 for out-of-line register save functions, store multiple, and the
25896 Darwin world_save. They may contain registers that don't really
25897 need saving. */
25898
25899 static bool
25900 interesting_frame_related_regno (unsigned int regno)
25901 {
25902 /* What appear to be saves of r0 are actually saving LR.  It doesn't
25903 make sense to substitute the regno here to test save_reg_p (LR_REGNO).
25904 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25905 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25906 as frame related. */
25907 if (regno == 0)
25908 return true;
25909 /* If we see CR2 then we are here on a Darwin world save. Saves of
25910 CR2 signify the whole CR is being saved. This is a long-standing
25911 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25912 that CR needs to be saved. */
25913 if (regno == CR2_REGNO)
25914 return true;
25915 /* Omit frame info for any user-defined global regs. If frame info
25916 is supplied for them, frame unwinding will restore a user reg.
25917 Also omit frame info for any reg we don't need to save, as that
25918 bloats frame info and can cause problems with shrink wrapping.
25919 Since global regs won't be seen as needing to be saved, both of
25920 these conditions are covered by save_reg_p. */
25921 return save_reg_p (regno);
25922 }
25923
25924 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25925 addresses, not offsets.
25926
25927 REG2 contains the backchain that must be stored into *sp at each allocation.
25928
25929 This is subtly different from the Ada probing above in that it tries hard
25930 to prevent attacks that jump the stack guard. Thus, it is never allowed
25931 to allocate more than PROBE_INTERVAL bytes of stack space without a
25932 suitable probe. */
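/* Illustrative output, assuming REG1 is r1, REG2 (the backchain) is
   r0, REG3 is r12 and a 4096 byte probe interval on a 64-bit target:

	.LPSRL0:
		stdu 0,-4096(1)
		cmpd 0,1,12
		bne 0,.LPSRL0
   */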
25933
25934 static const char *
25935 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25936 {
25937 static int labelno = 0;
25938 char loop_lab[32];
25939 rtx xops[3];
25940
25941 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25942
25943 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25944
25945 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25946
25947 /* This allocates and probes. */
25948 xops[0] = reg1;
25949 xops[1] = reg2;
25950 xops[2] = GEN_INT (-probe_interval);
25951 if (TARGET_64BIT)
25952 output_asm_insn ("stdu %1,%2(%0)", xops);
25953 else
25954 output_asm_insn ("stwu %1,%2(%0)", xops);
25955
25956 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25957 xops[0] = reg1;
25958 xops[1] = reg3;
25959 if (TARGET_64BIT)
25960 output_asm_insn ("cmpd 0,%0,%1", xops);
25961 else
25962 output_asm_insn ("cmpw 0,%0,%1", xops);
25963
25964 fputs ("\tbne 0,", asm_out_file);
25965 assemble_name_raw (asm_out_file, loop_lab);
25966 fputc ('\n', asm_out_file);
25967
25968 return "";
25969 }
25970
25971 /* Wrapper around the output_probe_stack_range routines. */
25972 const char *
25973 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25974 {
25975 if (flag_stack_clash_protection)
25976 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25977 else
25978 return output_probe_stack_range_1 (reg1, reg3);
25979 }
25980
25981 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25982 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25983 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25984 deduce these equivalences by itself so it wasn't necessary to hold
25985 its hand so much. Don't be tempted to always supply d2_f_d_e with
25986 the actual cfa register, i.e. r31 when we are using a hard frame
25987 pointer. That fails when saving regs off r1, and sched moves the
25988 r31 setup past the reg saves. */
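/* An illustrative example (values assumed): if REG is r11 and VAL is
   144, the note attached below is PATTERN (INSN) with every occurrence
   of r11 replaced by (plus:P (reg 1) 144), which lets dwarf2cfi locate
   the save slots relative to the stack pointer.  */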
25989
25990 static rtx_insn *
25991 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25992 rtx reg2, rtx repl2)
25993 {
25994 rtx repl;
25995
25996 if (REGNO (reg) == STACK_POINTER_REGNUM)
25997 {
25998 gcc_checking_assert (val == 0);
25999 repl = NULL_RTX;
26000 }
26001 else
26002 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26003 GEN_INT (val));
26004
26005 rtx pat = PATTERN (insn);
26006 if (!repl && !reg2)
26007 {
26008 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
26009 if (GET_CODE (pat) == PARALLEL)
26010 for (int i = 0; i < XVECLEN (pat, 0); i++)
26011 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26012 {
26013 rtx set = XVECEXP (pat, 0, i);
26014
26015 if (!REG_P (SET_SRC (set))
26016 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26017 RTX_FRAME_RELATED_P (set) = 1;
26018 }
26019 RTX_FRAME_RELATED_P (insn) = 1;
26020 return insn;
26021 }
26022
26023 /* We expect that 'pat' is either a SET or a PARALLEL containing
26024 SETs (and possibly other stuff). In a PARALLEL, all the SETs
26025 are important so they all have to be marked RTX_FRAME_RELATED_P.
26026 Call simplify_replace_rtx on the SETs rather than the whole insn
26027 so as to leave the other stuff alone (for example USE of r12). */
26028
26029 set_used_flags (pat);
26030 if (GET_CODE (pat) == SET)
26031 {
26032 if (repl)
26033 pat = simplify_replace_rtx (pat, reg, repl);
26034 if (reg2)
26035 pat = simplify_replace_rtx (pat, reg2, repl2);
26036 }
26037 else if (GET_CODE (pat) == PARALLEL)
26038 {
26039 pat = shallow_copy_rtx (pat);
26040 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
26041
26042 for (int i = 0; i < XVECLEN (pat, 0); i++)
26043 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26044 {
26045 rtx set = XVECEXP (pat, 0, i);
26046
26047 if (repl)
26048 set = simplify_replace_rtx (set, reg, repl);
26049 if (reg2)
26050 set = simplify_replace_rtx (set, reg2, repl2);
26051 XVECEXP (pat, 0, i) = set;
26052
26053 if (!REG_P (SET_SRC (set))
26054 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26055 RTX_FRAME_RELATED_P (set) = 1;
26056 }
26057 }
26058 else
26059 gcc_unreachable ();
26060
26061 RTX_FRAME_RELATED_P (insn) = 1;
26062 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
26063
26064 return insn;
26065 }
26066
26067 /* Returns an insn that has a vrsave set operation with the
26068 appropriate CLOBBERs. */
26069
26070 static rtx
26071 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
26072 {
26073 int nclobs, i;
26074 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
26075 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26076
26077 clobs[0]
26078 = gen_rtx_SET (vrsave,
26079 gen_rtx_UNSPEC_VOLATILE (SImode,
26080 gen_rtvec (2, reg, vrsave),
26081 UNSPECV_SET_VRSAVE));
26082
26083 nclobs = 1;
26084
26085 /* We need to clobber the registers in the mask so the scheduler
26086 does not move sets to VRSAVE before sets of AltiVec registers.
26087
26088 However, if the function receives nonlocal gotos, reload will set
26089 all call saved registers live. We will end up with:
26090
26091 (set (reg 999) (mem))
26092 (parallel [ (set (reg vrsave) (unspec blah))
26093 (clobber (reg 999))])
26094
26095 The clobber will cause the store into reg 999 to be dead, and
26096 flow will attempt to delete an epilogue insn. In this case, we
26097 need an unspec use/set of the register. */
26098
26099 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
26100 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
26101 {
26102 if (!epiloguep || call_used_regs [i])
26103 clobs[nclobs++] = gen_hard_reg_clobber (V4SImode, i);
26104 else
26105 {
26106 rtx reg = gen_rtx_REG (V4SImode, i);
26107
26108 clobs[nclobs++]
26109 = gen_rtx_SET (reg,
26110 gen_rtx_UNSPEC (V4SImode,
26111 gen_rtvec (1, reg), 27));
26112 }
26113 }
26114
26115 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
26116
26117 for (i = 0; i < nclobs; ++i)
26118 XVECEXP (insn, 0, i) = clobs[i];
26119
26120 return insn;
26121 }
26122
26123 static rtx
26124 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
26125 {
26126 rtx addr, mem;
26127
26128 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
26129 mem = gen_frame_mem (GET_MODE (reg), addr);
26130 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
26131 }
26132
26133 static rtx
26134 gen_frame_load (rtx reg, rtx frame_reg, int offset)
26135 {
26136 return gen_frame_set (reg, frame_reg, offset, false);
26137 }
26138
26139 static rtx
26140 gen_frame_store (rtx reg, rtx frame_reg, int offset)
26141 {
26142 return gen_frame_set (reg, frame_reg, offset, true);
26143 }
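/* For example, gen_frame_store (gen_rtx_REG (DImode, 31),
   stack_pointer_rtx, 8) yields (set (mem:DI (plus r1 8)) (reg:DI 31));
   gen_frame_load simply swaps the two operands.  */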
26144
26145 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
26146 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
26147
26148 static rtx_insn *
26149 emit_frame_save (rtx frame_reg, machine_mode mode,
26150 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26151 {
26152 rtx reg;
26153
26154 /* Some cases that need register indexed addressing. */
26155 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26156 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26157
26158 reg = gen_rtx_REG (mode, regno);
26159 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26160 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26161 NULL_RTX, NULL_RTX);
26162 }
26163
26164 /* Emit an offset memory reference suitable for a frame store.  */
26166
26167 static rtx
26168 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26169 {
26170 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26171 }
26172
26173 #ifndef TARGET_FIX_AND_CONTINUE
26174 #define TARGET_FIX_AND_CONTINUE 0
26175 #endif
26176
26177 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
26178 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26179 #define LAST_SAVRES_REGISTER 31
26180 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26181
26182 enum {
26183 SAVRES_LR = 0x1,
26184 SAVRES_SAVE = 0x2,
26185 SAVRES_REG = 0x0c,
26186 SAVRES_GPR = 0,
26187 SAVRES_FPR = 4,
26188 SAVRES_VR = 8
26189 };
26190
26191 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26192
26193 /* Temporary holding space for an out-of-line register save/restore
26194 routine name. */
26195 static char savres_routine_name[30];
26196
26197 /* Return the name for an out-of-line register save/restore routine.
26198 SEL encodes save vs. restore, the register class, and LR handling.  */
26199
26200 static char *
26201 rs6000_savres_routine_name (int regno, int sel)
26202 {
26203 const char *prefix = "";
26204 const char *suffix = "";
26205
26206 /* Different targets are supposed to define
26207 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26208 routine name could be defined with:
26209
26210 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26211
26212 This is a nice idea in theory, but in reality, things are
26213 complicated in several ways:
26214
26215 - ELF targets have save/restore routines for GPRs.
26216
26217 - PPC64 ELF targets have routines for save/restore of GPRs that
26218 differ in what they do with the link register, so having a set
26219 prefix doesn't work. (We only use one of the save routines at
26220 the moment, though.)
26221
26222 - PPC32 ELF targets have "exit" versions of the restore routines
26223 that restore the link register and can save some extra space.
26224 These require an extra suffix. (There are also "tail" versions
26225 of the restore routines and "GOT" versions of the save routines,
26226 but we don't generate those at present. Same problems apply,
26227 though.)
26228
26229 We deal with all this by synthesizing our own prefix/suffix and
26230 using that for the simple sprintf call shown above. */
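/* For instance, the code below synthesizes "_savegpr_29" for a 32-bit
   SVR4 save of GPRs 29..31, "_restgpr_29_x" for the exit variant of
   the corresponding restore, and "_savegpr0_29" for the 64-bit/ELFv2
   save that also stores LR.  */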
26231 if (DEFAULT_ABI == ABI_V4)
26232 {
26233 if (TARGET_64BIT)
26234 goto aix_names;
26235
26236 if ((sel & SAVRES_REG) == SAVRES_GPR)
26237 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26238 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26239 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26240 else if ((sel & SAVRES_REG) == SAVRES_VR)
26241 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26242 else
26243 abort ();
26244
26245 if ((sel & SAVRES_LR))
26246 suffix = "_x";
26247 }
26248 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26249 {
26250 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26251 /* No out-of-line save/restore routines for GPRs on AIX. */
26252 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26253 #endif
26254
26255 aix_names:
26256 if ((sel & SAVRES_REG) == SAVRES_GPR)
26257 prefix = ((sel & SAVRES_SAVE)
26258 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26259 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26260 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26261 {
26262 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26263 if ((sel & SAVRES_LR))
26264 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26265 else
26266 #endif
26267 {
26268 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26269 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26270 }
26271 }
26272 else if ((sel & SAVRES_REG) == SAVRES_VR)
26273 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26274 else
26275 abort ();
26276 }
26277
26278 if (DEFAULT_ABI == ABI_DARWIN)
26279 {
26280 /* The Darwin approach is (slightly) different, in order to be
26281 compatible with code generated by the system toolchain. There is a
26282 single symbol for the start of the save sequence, and the code here
26283 embeds an offset into that code on the basis of the first register
26284 to be saved. */
26285 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26286 if ((sel & SAVRES_REG) == SAVRES_GPR)
26287 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26288 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26289 (regno - 13) * 4, prefix, regno);
26290 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26291 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26292 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26293 else if ((sel & SAVRES_REG) == SAVRES_VR)
26294 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26295 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26296 else
26297 abort ();
26298 }
26299 else
26300 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26301
26302 return savres_routine_name;
26303 }
26304
26305 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26306 SEL encodes save vs. restore, the register class, and LR handling.  */
26307
26308 static rtx
26309 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26310 {
26311 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26312 ? info->first_gp_reg_save
26313 : (sel & SAVRES_REG) == SAVRES_FPR
26314 ? info->first_fp_reg_save - 32
26315 : (sel & SAVRES_REG) == SAVRES_VR
26316 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26317 : -1);
26318 rtx sym;
26319 int select = sel;
26320
26321 /* Don't generate bogus routine names. */
26322 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26323 && regno <= LAST_SAVRES_REGISTER
26324 && select >= 0 && select <= 12);
26325
26326 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26327
26328 if (sym == NULL)
26329 {
26330 char *name;
26331
26332 name = rs6000_savres_routine_name (regno, sel);
26333
26334 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26335 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26336 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26337 }
26338
26339 return sym;
26340 }
26341
26342 /* Emit a sequence of insns, including a stack tie if needed, for
26343 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26344 reset the stack pointer, but move the base of the frame into
26345 reg UPDT_REGNO for use by out-of-line register restore routines. */
26346
26347 static rtx
26348 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26349 unsigned updt_regno)
26350 {
26351 /* If there is nothing to do, don't do anything. */
26352 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26353 return NULL_RTX;
26354
26355 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26356
26357 /* This blockage is needed so that sched doesn't decide to move
26358 the sp change before the register restores. */
26359 if (DEFAULT_ABI == ABI_V4)
26360 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26361 GEN_INT (frame_off)));
26362
26363 /* If we are restoring registers out-of-line, we will be using the
26364 "exit" variants of the restore routines, which will reset the
26365 stack for us. But we do need to point updt_reg into the
26366 right place for those routines. */
26367 if (frame_off != 0)
26368 return emit_insn (gen_add3_insn (updt_reg_rtx,
26369 frame_reg_rtx, GEN_INT (frame_off)));
26370 else
26371 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26372
26373 return NULL_RTX;
26374 }
26375
26376 /* Return the register number used as a pointer by out-of-line
26377 save/restore functions. */
26378
26379 static inline unsigned
26380 ptr_regno_for_savres (int sel)
26381 {
26382 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26383 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26384 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26385 }
26386
26387 /* Construct a parallel rtx describing the effect of a call to an
26388 out-of-line register save/restore routine, and emit the insn
26389 or jump_insn as appropriate. */
26390
26391 static rtx_insn *
26392 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26393 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26394 machine_mode reg_mode, int sel)
26395 {
26396 int i;
26397 int offset, start_reg, end_reg, n_regs, use_reg;
26398 int reg_size = GET_MODE_SIZE (reg_mode);
26399 rtx sym;
26400 rtvec p;
26401 rtx par;
26402 rtx_insn *insn;
26403
26404 offset = 0;
26405 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26406 ? info->first_gp_reg_save
26407 : (sel & SAVRES_REG) == SAVRES_FPR
26408 ? info->first_fp_reg_save
26409 : (sel & SAVRES_REG) == SAVRES_VR
26410 ? info->first_altivec_reg_save
26411 : -1);
26412 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26413 ? 32
26414 : (sel & SAVRES_REG) == SAVRES_FPR
26415 ? 64
26416 : (sel & SAVRES_REG) == SAVRES_VR
26417 ? LAST_ALTIVEC_REGNO + 1
26418 : -1);
26419 n_regs = end_reg - start_reg;
26420 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26421 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26422 + n_regs);
26423
26424 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26425 RTVEC_ELT (p, offset++) = ret_rtx;
26426
26427 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
26428
26429 sym = rs6000_savres_routine_sym (info, sel);
26430 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26431
26432 use_reg = ptr_regno_for_savres (sel);
26433 if ((sel & SAVRES_REG) == SAVRES_VR)
26434 {
26435 /* Vector regs are saved/restored using [reg+reg] addressing. */
26436 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, use_reg);
26437 RTVEC_ELT (p, offset++)
26438 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26439 }
26440 else
26441 RTVEC_ELT (p, offset++)
26442 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26443
26444 for (i = 0; i < end_reg - start_reg; i++)
26445 RTVEC_ELT (p, i + offset)
26446 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26447 frame_reg_rtx, save_area_offset + reg_size * i,
26448 (sel & SAVRES_SAVE) != 0);
26449
26450 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26451 RTVEC_ELT (p, i + offset)
26452 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26453
26454 par = gen_rtx_PARALLEL (VOIDmode, p);
26455
26456 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26457 {
26458 insn = emit_jump_insn (par);
26459 JUMP_LABEL (insn) = ret_rtx;
26460 }
26461 else
26462 insn = emit_insn (par);
26463 return insn;
26464 }
26465
26466 /* Emit prologue code to store CR fields that need to be saved into REG. This
26467 function should only be called when moving the non-volatile CRs to REG, it
26468 is not a general purpose routine to move the entire set of CRs to REG.
26469 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26470 volatile CRs. */
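/* A sketch of the single-field case handled below: if only CR2 needs
   saving, the field mask is 1 << (7 - 2) == 0x20, so on ELFv2 we can
   emit a single "mfcrf reg,0x20" instead of reading the entire
   condition register.  */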
26471
26472 static void
26473 rs6000_emit_prologue_move_from_cr (rtx reg)
26474 {
26475 /* Only the ELFv2 ABI allows storing only selected fields. */
26476 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26477 {
26478 int i, cr_reg[8], count = 0;
26479
26480 /* Collect CR fields that must be saved. */
26481 for (i = 0; i < 8; i++)
26482 if (save_reg_p (CR0_REGNO + i))
26483 cr_reg[count++] = i;
26484
26485 /* If it's just a single one, use mfcrf. */
26486 if (count == 1)
26487 {
26488 rtvec p = rtvec_alloc (1);
26489 rtvec r = rtvec_alloc (2);
26490 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26491 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26492 RTVEC_ELT (p, 0)
26493 = gen_rtx_SET (reg,
26494 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26495
26496 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26497 return;
26498 }
26499
26500 /* ??? It might be better to handle count == 2 / 3 cases here
26501 as well, using logical operations to combine the values. */
26502 }
26503
26504 emit_insn (gen_prologue_movesi_from_cr (reg));
26505 }
26506
26507 /* Return whether the split-stack arg pointer (r12) is used. */
26508
26509 static bool
26510 split_stack_arg_pointer_used_p (void)
26511 {
26512 /* If the pseudo holding the arg pointer is no longer a pseudo,
26513 then the arg pointer is used. */
26514 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26515 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26516 || HARD_REGISTER_P (cfun->machine->split_stack_arg_pointer)))
26517 return true;
26518
26519 /* Unfortunately we also need to do some code scanning, since
26520 r12 may have been substituted for the pseudo. */
26521 rtx_insn *insn;
26522 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26523 FOR_BB_INSNS (bb, insn)
26524 if (NONDEBUG_INSN_P (insn))
26525 {
26526 /* A call destroys r12. */
26527 if (CALL_P (insn))
26528 return false;
26529
26530 df_ref use;
26531 FOR_EACH_INSN_USE (use, insn)
26532 {
26533 rtx x = DF_REF_REG (use);
26534 if (REG_P (x) && REGNO (x) == 12)
26535 return true;
26536 }
26537 df_ref def;
26538 FOR_EACH_INSN_DEF (def, insn)
26539 {
26540 rtx x = DF_REF_REG (def);
26541 if (REG_P (x) && REGNO (x) == 12)
26542 return false;
26543 }
26544 }
26545 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26546 }
26547
26548 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26549
26550 static bool
26551 rs6000_global_entry_point_needed_p (void)
26552 {
26553 /* Only needed for the ELFv2 ABI. */
26554 if (DEFAULT_ABI != ABI_ELFv2)
26555 return false;
26556
26557 /* With -msingle-pic-base, we assume the whole program shares the same
26558 TOC, so no global entry point prologues are needed anywhere. */
26559 if (TARGET_SINGLE_PIC_BASE)
26560 return false;
26561
26562 /* Ensure we have a global entry point for thunks. ??? We could
26563 avoid that if the target routine doesn't need a global entry point,
26564 but we do not know whether this is the case at this point. */
26565 if (cfun->is_thunk)
26566 return true;
26567
26568 /* For regular functions, rs6000_emit_prologue sets this flag if the
26569 routine ever uses the TOC pointer. */
26570 return cfun->machine->r2_setup_needed;
26571 }
26572
26573 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26574 static sbitmap
26575 rs6000_get_separate_components (void)
26576 {
26577 rs6000_stack_t *info = rs6000_stack_info ();
26578
26579 if (WORLD_SAVE_P (info))
26580 return NULL;
26581
26582 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26583 && !(info->savres_strategy & REST_MULTIPLE));
26584
26585 /* Component 0 is the save/restore of LR (done via GPR0).
26586 Component 2 is the save of the TOC (GPR2).
26587 Components 13..31 are the save/restore of GPR13..GPR31.
26588 Components 46..63 are the save/restore of FPR14..FPR31. */
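/* So, for example, a function that separately wraps the saves of LR,
   r30, r31 and f31 ends up with bits 0, 30, 31 and 63 set here.  */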
26589
26590 cfun->machine->n_components = 64;
26591
26592 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26593 bitmap_clear (components);
26594
26595 int reg_size = TARGET_32BIT ? 4 : 8;
26596 int fp_reg_size = 8;
26597
26598 /* The GPRs we need saved to the frame. */
26599 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26600 && (info->savres_strategy & REST_INLINE_GPRS))
26601 {
26602 int offset = info->gp_save_offset;
26603 if (info->push_p)
26604 offset += info->total_size;
26605
26606 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26607 {
26608 if (IN_RANGE (offset, -0x8000, 0x7fff)
26609 && save_reg_p (regno))
26610 bitmap_set_bit (components, regno);
26611
26612 offset += reg_size;
26613 }
26614 }
26615
26616 /* Don't mess with the hard frame pointer. */
26617 if (frame_pointer_needed)
26618 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26619
26620 /* Don't mess with the fixed TOC register. */
26621 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26622 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26623 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26624 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26625
26626 /* The FPRs we need saved to the frame. */
26627 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26628 && (info->savres_strategy & REST_INLINE_FPRS))
26629 {
26630 int offset = info->fp_save_offset;
26631 if (info->push_p)
26632 offset += info->total_size;
26633
26634 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26635 {
26636 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26637 bitmap_set_bit (components, regno);
26638
26639 offset += fp_reg_size;
26640 }
26641 }
26642
26643 /* Optimize LR save and restore if we can. This is component 0. Any
26644 out-of-line register save/restore routines need LR. */
26645 if (info->lr_save_p
26646 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26647 && (info->savres_strategy & SAVE_INLINE_GPRS)
26648 && (info->savres_strategy & REST_INLINE_GPRS)
26649 && (info->savres_strategy & SAVE_INLINE_FPRS)
26650 && (info->savres_strategy & REST_INLINE_FPRS)
26651 && (info->savres_strategy & SAVE_INLINE_VRS)
26652 && (info->savres_strategy & REST_INLINE_VRS))
26653 {
26654 int offset = info->lr_save_offset;
26655 if (info->push_p)
26656 offset += info->total_size;
26657 if (IN_RANGE (offset, -0x8000, 0x7fff))
26658 bitmap_set_bit (components, 0);
26659 }
26660
26661 /* Optimize saving the TOC. This is component 2. */
26662 if (cfun->machine->save_toc_in_prologue)
26663 bitmap_set_bit (components, 2);
26664
26665 return components;
26666 }
26667
26668 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26669 static sbitmap
26670 rs6000_components_for_bb (basic_block bb)
26671 {
26672 rs6000_stack_t *info = rs6000_stack_info ();
26673
26674 bitmap in = DF_LIVE_IN (bb);
26675 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26676 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26677
26678 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26679 bitmap_clear (components);
26680
26681 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26682
26683 /* GPRs. */
26684 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26685 if (bitmap_bit_p (in, regno)
26686 || bitmap_bit_p (gen, regno)
26687 || bitmap_bit_p (kill, regno))
26688 bitmap_set_bit (components, regno);
26689
26690 /* FPRs. */
26691 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26692 if (bitmap_bit_p (in, regno)
26693 || bitmap_bit_p (gen, regno)
26694 || bitmap_bit_p (kill, regno))
26695 bitmap_set_bit (components, regno);
26696
26697 /* The link register. */
26698 if (bitmap_bit_p (in, LR_REGNO)
26699 || bitmap_bit_p (gen, LR_REGNO)
26700 || bitmap_bit_p (kill, LR_REGNO))
26701 bitmap_set_bit (components, 0);
26702
26703 /* The TOC save. */
26704 if (bitmap_bit_p (in, TOC_REGNUM)
26705 || bitmap_bit_p (gen, TOC_REGNUM)
26706 || bitmap_bit_p (kill, TOC_REGNUM))
26707 bitmap_set_bit (components, 2);
26708
26709 return components;
26710 }
26711
26712 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26713 static void
26714 rs6000_disqualify_components (sbitmap components, edge e,
26715 sbitmap edge_components, bool /*is_prologue*/)
26716 {
26717 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26718 live where we want to place that code. */
26719 if (bitmap_bit_p (edge_components, 0)
26720 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26721 {
26722 if (dump_file)
26723 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26724 "on entry to bb %d\n", e->dest->index);
26725 bitmap_clear_bit (components, 0);
26726 }
26727 }
26728
26729 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26730 static void
26731 rs6000_emit_prologue_components (sbitmap components)
26732 {
26733 rs6000_stack_t *info = rs6000_stack_info ();
26734 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26735 ? HARD_FRAME_POINTER_REGNUM
26736 : STACK_POINTER_REGNUM);
26737
26738 machine_mode reg_mode = Pmode;
26739 int reg_size = TARGET_32BIT ? 4 : 8;
26740 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26741 int fp_reg_size = 8;
26742
26743 /* Prologue for LR. */
26744 if (bitmap_bit_p (components, 0))
26745 {
26746 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26747 rtx reg = gen_rtx_REG (reg_mode, 0);
26748 rtx_insn *insn = emit_move_insn (reg, lr);
26749 RTX_FRAME_RELATED_P (insn) = 1;
26750 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26751
26752 int offset = info->lr_save_offset;
26753 if (info->push_p)
26754 offset += info->total_size;
26755
26756 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26757 RTX_FRAME_RELATED_P (insn) = 1;
26758 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26759 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26760 }
26761
26762 /* Prologue for TOC. */
26763 if (bitmap_bit_p (components, 2))
26764 {
26765 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26766 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26767 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26768 }
26769
26770 /* Prologue for the GPRs. */
26771 int offset = info->gp_save_offset;
26772 if (info->push_p)
26773 offset += info->total_size;
26774
26775 for (int i = info->first_gp_reg_save; i < 32; i++)
26776 {
26777 if (bitmap_bit_p (components, i))
26778 {
26779 rtx reg = gen_rtx_REG (reg_mode, i);
26780 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26781 RTX_FRAME_RELATED_P (insn) = 1;
26782 rtx set = copy_rtx (single_set (insn));
26783 add_reg_note (insn, REG_CFA_OFFSET, set);
26784 }
26785
26786 offset += reg_size;
26787 }
26788
26789 /* Prologue for the FPRs. */
26790 offset = info->fp_save_offset;
26791 if (info->push_p)
26792 offset += info->total_size;
26793
26794 for (int i = info->first_fp_reg_save; i < 64; i++)
26795 {
26796 if (bitmap_bit_p (components, i))
26797 {
26798 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26799 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26800 RTX_FRAME_RELATED_P (insn) = 1;
26801 rtx set = copy_rtx (single_set (insn));
26802 add_reg_note (insn, REG_CFA_OFFSET, set);
26803 }
26804
26805 offset += fp_reg_size;
26806 }
26807 }
26808
26809 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26810 static void
26811 rs6000_emit_epilogue_components (sbitmap components)
26812 {
26813 rs6000_stack_t *info = rs6000_stack_info ();
26814 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26815 ? HARD_FRAME_POINTER_REGNUM
26816 : STACK_POINTER_REGNUM);
26817
26818 machine_mode reg_mode = Pmode;
26819 int reg_size = TARGET_32BIT ? 4 : 8;
26820
26821 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26822 int fp_reg_size = 8;
26823
26824 /* Epilogue for the FPRs. */
26825 int offset = info->fp_save_offset;
26826 if (info->push_p)
26827 offset += info->total_size;
26828
26829 for (int i = info->first_fp_reg_save; i < 64; i++)
26830 {
26831 if (bitmap_bit_p (components, i))
26832 {
26833 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26834 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26835 RTX_FRAME_RELATED_P (insn) = 1;
26836 add_reg_note (insn, REG_CFA_RESTORE, reg);
26837 }
26838
26839 offset += fp_reg_size;
26840 }
26841
26842 /* Epilogue for the GPRs. */
26843 offset = info->gp_save_offset;
26844 if (info->push_p)
26845 offset += info->total_size;
26846
26847 for (int i = info->first_gp_reg_save; i < 32; i++)
26848 {
26849 if (bitmap_bit_p (components, i))
26850 {
26851 rtx reg = gen_rtx_REG (reg_mode, i);
26852 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26853 RTX_FRAME_RELATED_P (insn) = 1;
26854 add_reg_note (insn, REG_CFA_RESTORE, reg);
26855 }
26856
26857 offset += reg_size;
26858 }
26859
26860 /* Epilogue for LR. */
26861 if (bitmap_bit_p (components, 0))
26862 {
26863 int offset = info->lr_save_offset;
26864 if (info->push_p)
26865 offset += info->total_size;
26866
26867 rtx reg = gen_rtx_REG (reg_mode, 0);
26868 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26869
26870 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26871 insn = emit_move_insn (lr, reg);
26872 RTX_FRAME_RELATED_P (insn) = 1;
26873 add_reg_note (insn, REG_CFA_RESTORE, lr);
26874 }
26875 }
26876
26877 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26878 static void
26879 rs6000_set_handled_components (sbitmap components)
26880 {
26881 rs6000_stack_t *info = rs6000_stack_info ();
26882
26883 for (int i = info->first_gp_reg_save; i < 32; i++)
26884 if (bitmap_bit_p (components, i))
26885 cfun->machine->gpr_is_wrapped_separately[i] = true;
26886
26887 for (int i = info->first_fp_reg_save; i < 64; i++)
26888 if (bitmap_bit_p (components, i))
26889 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26890
26891 if (bitmap_bit_p (components, 0))
26892 cfun->machine->lr_is_wrapped_separately = true;
26893
26894 if (bitmap_bit_p (components, 2))
26895 cfun->machine->toc_is_wrapped_separately = true;
26896 }
26897
26898 /* VRSAVE is a bit vector representing which AltiVec registers
26899 are used. The OS uses this to determine which vector
26900 registers to save on a context switch. We need to save
26901 VRSAVE on the stack frame, add whatever AltiVec registers we
26902 used in this function, and do the corresponding magic in the
26903 epilogue. */
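/* The emitted prologue sequence is roughly (register and mask values
   illustrative only; VRSAVE is SPR 256):

	mfspr r11,256		# get the current mask
	stw   r11,offset(r1)	# save it in the frame
	oris  r11,r11,0xc000	# add the AltiVec regs we use
	mtspr 256,r11
   */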
26904 static void
26905 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26906 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26907 {
26908 /* Get VRSAVE into a GPR. */
26909 rtx reg = gen_rtx_REG (SImode, save_regno);
26910 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26911 if (TARGET_MACHO)
26912 emit_insn (gen_get_vrsave_internal (reg));
26913 else
26914 emit_insn (gen_rtx_SET (reg, vrsave));
26915
26916 /* Save VRSAVE. */
26917 int offset = info->vrsave_save_offset + frame_off;
26918 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26919
26920 /* Include the registers in the mask. */
26921 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26922
26923 emit_insn (generate_set_vrsave (reg, info, 0));
26924 }
26925
26926 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26927 called, it left the arg pointer to the old stack in r29. Otherwise, the
26928 arg pointer is the top of the current frame. */
26929 static void
26930 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26931 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26932 {
26933 cfun->machine->split_stack_argp_used = true;
26934
26935 if (sp_adjust)
26936 {
26937 rtx r12 = gen_rtx_REG (Pmode, 12);
26938 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26939 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26940 emit_insn_before (set_r12, sp_adjust);
26941 }
26942 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26943 {
26944 rtx r12 = gen_rtx_REG (Pmode, 12);
26945 if (frame_off == 0)
26946 emit_move_insn (r12, frame_reg_rtx);
26947 else
26948 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26949 }
26950
26951 if (info->push_p)
26952 {
26953 rtx r12 = gen_rtx_REG (Pmode, 12);
26954 rtx r29 = gen_rtx_REG (Pmode, 29);
26955 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26956 rtx not_more = gen_label_rtx ();
26957 rtx jump;
26958
26959 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26960 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26961 gen_rtx_LABEL_REF (VOIDmode, not_more),
26962 pc_rtx);
26963 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26964 JUMP_LABEL (jump) = not_more;
26965 LABEL_NUSES (not_more) += 1;
26966 emit_move_insn (r12, r29);
26967 emit_label (not_more);
26968 }
26969 }
26970
26971 /* Emit function prologue as insns. */
26972
26973 void
26974 rs6000_emit_prologue (void)
26975 {
26976 rs6000_stack_t *info = rs6000_stack_info ();
26977 machine_mode reg_mode = Pmode;
26978 int reg_size = TARGET_32BIT ? 4 : 8;
26979 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26980 int fp_reg_size = 8;
26981 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26982 rtx frame_reg_rtx = sp_reg_rtx;
26983 unsigned int cr_save_regno;
26984 rtx cr_save_rtx = NULL_RTX;
26985 rtx_insn *insn;
26986 int strategy;
26987 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26988 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26989 && call_used_regs[STATIC_CHAIN_REGNUM]);
26990 int using_split_stack = (flag_split_stack
26991 && (lookup_attribute ("no_split_stack",
26992 DECL_ATTRIBUTES (cfun->decl))
26993 == NULL));
26994
26995 /* Offset to top of frame for frame_reg and sp respectively. */
26996 HOST_WIDE_INT frame_off = 0;
26997 HOST_WIDE_INT sp_off = 0;
26998 /* sp_adjust is the stack adjusting instruction, tracked so that the
26999 insn setting up the split-stack arg pointer can be emitted just
27000 prior to it, when r12 is not used here for other purposes. */
27001 rtx_insn *sp_adjust = 0;
27002
27003 #if CHECKING_P
27004 /* Track and check usage of r0, r11, r12. */
27005 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
27006 #define START_USE(R) do \
27007 { \
27008 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27009 reg_inuse |= 1 << (R); \
27010 } while (0)
27011 #define END_USE(R) do \
27012 { \
27013 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
27014 reg_inuse &= ~(1 << (R)); \
27015 } while (0)
27016 #define NOT_INUSE(R) do \
27017 { \
27018 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27019 } while (0)
27020 #else
27021 #define START_USE(R) do {} while (0)
27022 #define END_USE(R) do {} while (0)
27023 #define NOT_INUSE(R) do {} while (0)
27024 #endif
27025
27026 if (DEFAULT_ABI == ABI_ELFv2
27027 && !TARGET_SINGLE_PIC_BASE)
27028 {
27029 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
27030
27031 /* With -mminimal-toc we may generate an extra use of r2 below. */
27032 if (TARGET_TOC && TARGET_MINIMAL_TOC
27033 && !constant_pool_empty_p ())
27034 cfun->machine->r2_setup_needed = true;
27035 }
27036
27037
27038 if (flag_stack_usage_info)
27039 current_function_static_stack_size = info->total_size;
27040
27041 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
27042 {
27043 HOST_WIDE_INT size = info->total_size;
27044
27045 if (crtl->is_leaf && !cfun->calls_alloca)
27046 {
27047 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
27048 rs6000_emit_probe_stack_range (get_stack_check_protect (),
27049 size - get_stack_check_protect ());
27050 }
27051 else if (size > 0)
27052 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
27053 }
27054
27055 if (TARGET_FIX_AND_CONTINUE)
27056 {
27057 /* gdb on darwin arranges to forward a function from the old
27058 address by modifying the first 5 instructions of the function
27059 to branch to the overriding function. This is necessary to
27060 permit function pointers that point to the old function to
27061 actually forward to the new function. */
27062 emit_insn (gen_nop ());
27063 emit_insn (gen_nop ());
27064 emit_insn (gen_nop ());
27065 emit_insn (gen_nop ());
27066 emit_insn (gen_nop ());
27067 }
27068
27069 /* Handle world saves specially here. */
27070 if (WORLD_SAVE_P (info))
27071 {
27072 int i, j, sz;
27073 rtx treg;
27074 rtvec p;
27075 rtx reg0;
27076
27077 /* save_world expects lr in r0. */
27078 reg0 = gen_rtx_REG (Pmode, 0);
27079 if (info->lr_save_p)
27080 {
27081 insn = emit_move_insn (reg0,
27082 gen_rtx_REG (Pmode, LR_REGNO));
27083 RTX_FRAME_RELATED_P (insn) = 1;
27084 }
27085
27086 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
27087 assumptions about the offsets of various bits of the stack
27088 frame. */
27089 gcc_assert (info->gp_save_offset == -220
27090 && info->fp_save_offset == -144
27091 && info->lr_save_offset == 8
27092 && info->cr_save_offset == 4
27093 && info->push_p
27094 && info->lr_save_p
27095 && (!crtl->calls_eh_return
27096 || info->ehrd_offset == -432)
27097 && info->vrsave_save_offset == -224
27098 && info->altivec_save_offset == -416);
27099
27100 treg = gen_rtx_REG (SImode, 11);
27101 emit_move_insn (treg, GEN_INT (-info->total_size));
27102
27103 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
27104 in R11. It also clobbers R12, so beware! */
27105
27106 /* Preserve CR2 for save_world prologues.  */
27107 sz = 5;
27108 sz += 32 - info->first_gp_reg_save;
27109 sz += 64 - info->first_fp_reg_save;
27110 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
27111 p = rtvec_alloc (sz);
27112 j = 0;
27113 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, LR_REGNO);
27114 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
27115 gen_rtx_SYMBOL_REF (Pmode,
27116 "*save_world"));
27117 /* We do floats first so that the instruction pattern matches
27118 properly. */
27119 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
27120 RTVEC_ELT (p, j++)
27121 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27122 info->first_fp_reg_save + i),
27123 frame_reg_rtx,
27124 info->fp_save_offset + frame_off + 8 * i);
27125 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27126 RTVEC_ELT (p, j++)
27127 = gen_frame_store (gen_rtx_REG (V4SImode,
27128 info->first_altivec_reg_save + i),
27129 frame_reg_rtx,
27130 info->altivec_save_offset + frame_off + 16 * i);
27131 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27132 RTVEC_ELT (p, j++)
27133 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27134 frame_reg_rtx,
27135 info->gp_save_offset + frame_off + reg_size * i);
27136
27137 /* CR register traditionally saved as CR2. */
27138 RTVEC_ELT (p, j++)
27139 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
27140 frame_reg_rtx, info->cr_save_offset + frame_off);
27141 /* Explain the use of R0.  */
27142 if (info->lr_save_p)
27143 RTVEC_ELT (p, j++)
27144 = gen_frame_store (reg0,
27145 frame_reg_rtx, info->lr_save_offset + frame_off);
27146 /* Explain what happens to the stack pointer. */
27147 {
27148 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27149 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27150 }
27151
27152 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27153 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27154 treg, GEN_INT (-info->total_size));
27155 sp_off = frame_off = info->total_size;
27156 }
27157
27158 strategy = info->savres_strategy;
27159
27160 /* For V.4, update stack before we do any saving and set back pointer. */
27161 if (! WORLD_SAVE_P (info)
27162 && info->push_p
27163 && (DEFAULT_ABI == ABI_V4
27164 || crtl->calls_eh_return))
27165 {
27166 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27167 || !(strategy & SAVE_INLINE_GPRS)
27168 || !(strategy & SAVE_INLINE_VRS));
27169 int ptr_regno = -1;
27170 rtx ptr_reg = NULL_RTX;
27171 int ptr_off = 0;
27172
27173 if (info->total_size < 32767)
27174 frame_off = info->total_size;
27175 else if (need_r11)
27176 ptr_regno = 11;
27177 else if (info->cr_save_p
27178 || info->lr_save_p
27179 || info->first_fp_reg_save < 64
27180 || info->first_gp_reg_save < 32
27181 || info->altivec_size != 0
27182 || info->vrsave_size != 0
27183 || crtl->calls_eh_return)
27184 ptr_regno = 12;
27185 else
27186 {
27187 /* The prologue won't be saving any regs so there is no need
27188 to set up a frame register to access any frame save area.
27189 We also won't be using frame_off anywhere below, but set
27190 the correct value anyway to protect against future
27191 changes to this function. */
27192 frame_off = info->total_size;
27193 }
27194 if (ptr_regno != -1)
27195 {
27196 /* Set up the frame offset to that needed by the first
27197 out-of-line save function. */
27198 START_USE (ptr_regno);
27199 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27200 frame_reg_rtx = ptr_reg;
27201 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27202 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27203 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27204 ptr_off = info->gp_save_offset + info->gp_size;
27205 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27206 ptr_off = info->altivec_save_offset + info->altivec_size;
27207 frame_off = -ptr_off;
27208 }
27209 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27210 ptr_reg, ptr_off);
27211 if (REGNO (frame_reg_rtx) == 12)
27212 sp_adjust = 0;
27213 sp_off = info->total_size;
27214 if (frame_reg_rtx != sp_reg_rtx)
27215 rs6000_emit_stack_tie (frame_reg_rtx, false);
27216 }
27217
27218 /* If we use the link register, get it into r0. */
27219 if (!WORLD_SAVE_P (info) && info->lr_save_p
27220 && !cfun->machine->lr_is_wrapped_separately)
27221 {
27222 rtx addr, reg, mem;
27223
27224 reg = gen_rtx_REG (Pmode, 0);
27225 START_USE (0);
27226 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27227 RTX_FRAME_RELATED_P (insn) = 1;
27228
27229 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27230 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27231 {
27232 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27233 GEN_INT (info->lr_save_offset + frame_off));
27234 mem = gen_rtx_MEM (Pmode, addr);
27235 /* This should not be in the rs6000_sr_alias_set, because of
27236 __builtin_return_address. */
27237
27238 insn = emit_move_insn (mem, reg);
27239 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27240 NULL_RTX, NULL_RTX);
27241 END_USE (0);
27242 }
27243 }
27244
27245 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27246 r12 will be needed by the out-of-line gpr save. */
27247 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27248 && !(strategy & (SAVE_INLINE_GPRS
27249 | SAVE_NOINLINE_GPRS_SAVES_LR))
27250 ? 11 : 12);
27251 if (!WORLD_SAVE_P (info)
27252 && info->cr_save_p
27253 && REGNO (frame_reg_rtx) != cr_save_regno
27254 && !(using_static_chain_p && cr_save_regno == 11)
27255 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27256 {
27257 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27258 START_USE (cr_save_regno);
27259 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27260 }
27261
27262 /* Do any required saving of fpr's. If only one or two to save, do
27263 it ourselves. Otherwise, call an out-of-line save function. */
27264 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27265 {
27266 int offset = info->fp_save_offset + frame_off;
27267 for (int i = info->first_fp_reg_save; i < 64; i++)
27268 {
27269 if (save_reg_p (i)
27270 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27271 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27272 sp_off - frame_off);
27273
27274 offset += fp_reg_size;
27275 }
27276 }
27277 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27278 {
27279 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27280 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27281 unsigned ptr_regno = ptr_regno_for_savres (sel);
27282 rtx ptr_reg = frame_reg_rtx;
27283
27284 if (REGNO (frame_reg_rtx) == ptr_regno)
27285 gcc_checking_assert (frame_off == 0);
27286 else
27287 {
27288 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27289 NOT_INUSE (ptr_regno);
27290 emit_insn (gen_add3_insn (ptr_reg,
27291 frame_reg_rtx, GEN_INT (frame_off)));
27292 }
27293 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27294 info->fp_save_offset,
27295 info->lr_save_offset,
27296 DFmode, sel);
27297 rs6000_frame_related (insn, ptr_reg, sp_off,
27298 NULL_RTX, NULL_RTX);
27299 if (lr)
27300 END_USE (0);
27301 }
27302
27303 /* Save GPRs. This is done as a PARALLEL if we are using
27304 the store-multiple instructions. */
27305 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27306 {
27307 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27308 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27309 unsigned ptr_regno = ptr_regno_for_savres (sel);
27310 rtx ptr_reg = frame_reg_rtx;
27311 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27312 int end_save = info->gp_save_offset + info->gp_size;
27313 int ptr_off;
27314
27315 if (ptr_regno == 12)
27316 sp_adjust = 0;
27317 if (!ptr_set_up)
27318 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27319
27320 /* Need to adjust r11 (r12) if we saved any FPRs. */
27321 if (end_save + frame_off != 0)
27322 {
27323 rtx offset = GEN_INT (end_save + frame_off);
27324
27325 if (ptr_set_up)
27326 frame_off = -end_save;
27327 else
27328 NOT_INUSE (ptr_regno);
27329 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27330 }
27331 else if (!ptr_set_up)
27332 {
27333 NOT_INUSE (ptr_regno);
27334 emit_move_insn (ptr_reg, frame_reg_rtx);
27335 }
27336 ptr_off = -end_save;
27337 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27338 info->gp_save_offset + ptr_off,
27339 info->lr_save_offset + ptr_off,
27340 reg_mode, sel);
27341 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27342 NULL_RTX, NULL_RTX);
27343 if (lr)
27344 END_USE (0);
27345 }
27346 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27347 {
27348 rtvec p;
27349 int i;
27350 p = rtvec_alloc (32 - info->first_gp_reg_save);
27351 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27352 RTVEC_ELT (p, i)
27353 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27354 frame_reg_rtx,
27355 info->gp_save_offset + frame_off + reg_size * i);
27356 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27357 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27358 NULL_RTX, NULL_RTX);
27359 }
27360 else if (!WORLD_SAVE_P (info))
27361 {
27362 int offset = info->gp_save_offset + frame_off;
27363 for (int i = info->first_gp_reg_save; i < 32; i++)
27364 {
27365 if (save_reg_p (i)
27366 && !cfun->machine->gpr_is_wrapped_separately[i])
27367 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27368 sp_off - frame_off);
27369
27370 offset += reg_size;
27371 }
27372 }
27373
27374 if (crtl->calls_eh_return)
27375 {
27376 unsigned int i;
27377 rtvec p;
27378
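/* The first loop merely counts the EH return data registers so the
rtvec can be sized; the second loop builds one frame store per
register. */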
27379 for (i = 0; ; ++i)
27380 {
27381 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27382 if (regno == INVALID_REGNUM)
27383 break;
27384 }
27385
27386 p = rtvec_alloc (i);
27387
27388 for (i = 0; ; ++i)
27389 {
27390 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27391 if (regno == INVALID_REGNUM)
27392 break;
27393
27394 rtx set
27395 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27396 sp_reg_rtx,
27397 info->ehrd_offset + sp_off + reg_size * (int) i);
27398 RTVEC_ELT (p, i) = set;
27399 RTX_FRAME_RELATED_P (set) = 1;
27400 }
27401
27402 insn = emit_insn (gen_blockage ());
27403 RTX_FRAME_RELATED_P (insn) = 1;
27404 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27405 }
27406
27407 /* In the AIX ABI we need to make sure r2 is really saved. */
27408 if (TARGET_AIX && crtl->calls_eh_return)
27409 {
27410 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27411 rtx join_insn, note;
27412 rtx_insn *save_insn;
27413 long toc_restore_insn;
27414
27415 tmp_reg = gen_rtx_REG (Pmode, 11);
27416 tmp_reg_si = gen_rtx_REG (SImode, 11);
27417 if (using_static_chain_p)
27418 {
27419 START_USE (0);
27420 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27421 }
27422 else
27423 START_USE (11);
27424 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27425 /* Peek at the instruction to which this function returns. If it's
27426 restoring r2, then we know we've already saved r2. We can't
27427 unconditionally save r2 because the value we have will already
27428 be updated if we arrived at this function via a plt call or
27429 toc adjusting stub. */
27430 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
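/* 0x80410000 is "lwz r2,0(r1)" and 0xE8410000 is "ld r2,0(r1)", so
adding RS6000_TOC_SAVE_SLOT forms the expected TOC restore
instruction "{lwz,ld} r2,RS6000_TOC_SAVE_SLOT(r1)". */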
27431 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27432 + RS6000_TOC_SAVE_SLOT);
27433 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27434 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27435 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27436 validate_condition_mode (EQ, CCUNSmode);
27437 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27438 emit_insn (gen_rtx_SET (compare_result,
27439 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27440 toc_save_done = gen_label_rtx ();
27441 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27442 gen_rtx_EQ (VOIDmode, compare_result,
27443 const0_rtx),
27444 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27445 pc_rtx);
27446 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27447 JUMP_LABEL (jump) = toc_save_done;
27448 LABEL_NUSES (toc_save_done) += 1;
27449
27450 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27451 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27452 sp_off - frame_off);
27453
27454 emit_label (toc_save_done);
27455
27456 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27457 have a CFG that has different saves along different paths.
27458 Move the note to a dummy blockage insn, which describes that
27459 R2 is unconditionally saved after the label. */
27460 /* ??? An alternate representation might be a special insn pattern
27461 containing both the branch and the store. That might give the
27462 code that minimizes the number of DW_CFA_advance opcodes greater
27463 freedom in placing the annotations. */
27464 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27465 if (note)
27466 remove_note (save_insn, note);
27467 else
27468 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27469 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27470 RTX_FRAME_RELATED_P (save_insn) = 0;
27471
27472 join_insn = emit_insn (gen_blockage ());
27473 REG_NOTES (join_insn) = note;
27474 RTX_FRAME_RELATED_P (join_insn) = 1;
27475
27476 if (using_static_chain_p)
27477 {
27478 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27479 END_USE (0);
27480 }
27481 else
27482 END_USE (11);
27483 }
27484
27485 /* Save CR if we use any that must be preserved. */
27486 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27487 {
27488 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27489 GEN_INT (info->cr_save_offset + frame_off));
27490 rtx mem = gen_frame_mem (SImode, addr);
27491
27492 /* If we didn't copy cr before, do so now using r0. */
27493 if (cr_save_rtx == NULL_RTX)
27494 {
27495 START_USE (0);
27496 cr_save_rtx = gen_rtx_REG (SImode, 0);
27497 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27498 }
27499
27500 /* Saving CR requires a two-instruction sequence: one instruction
27501 to move the CR to a general-purpose register, and a second
27502 instruction that stores the GPR to memory.
27503
27504 We do not emit any DWARF CFI records for the first of these,
27505 because we cannot properly represent the fact that CR is saved in
27506 a register. One reason is that we cannot express that multiple
27507 CR fields are saved; another reason is that on 64-bit, the size
27508 of the CR register in DWARF (4 bytes) differs from the size of
27509 a general-purpose register.
27510
27511 This means if any intervening instruction were to clobber one of
27512 the call-saved CR fields, we'd have incorrect CFI. To prevent
27513 this from happening, we mark the store to memory as a use of
27514 those CR fields, which prevents any such instruction from being
27515 scheduled in between the two instructions. */
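/* Element 0 of the PARALLEL built here is the store itself; the
remaining elements are one USE per call-saved CR field. */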
27516 rtx crsave_v[9];
27517 int n_crsave = 0;
27518 int i;
27519
27520 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27521 for (i = 0; i < 8; i++)
27522 if (save_reg_p (CR0_REGNO + i))
27523 crsave_v[n_crsave++]
27524 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27525
27526 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27527 gen_rtvec_v (n_crsave, crsave_v)));
27528 END_USE (REGNO (cr_save_rtx));
27529
27530 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27531 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27532 so we need to construct a frame expression manually. */
27533 RTX_FRAME_RELATED_P (insn) = 1;
27534
27535 /* Update address to be stack-pointer relative, like
27536 rs6000_frame_related would do. */
27537 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27538 GEN_INT (info->cr_save_offset + sp_off));
27539 mem = gen_frame_mem (SImode, addr);
27540
27541 if (DEFAULT_ABI == ABI_ELFv2)
27542 {
27543 /* In the ELFv2 ABI we generate separate CFI records for each
27544 CR field that was actually saved. They all point to the
27545 same 32-bit stack slot. */
27546 rtx crframe[8];
27547 int n_crframe = 0;
27548
27549 for (i = 0; i < 8; i++)
27550 if (save_reg_p (CR0_REGNO + i))
27551 {
27552 crframe[n_crframe]
27553 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27554
27555 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27556 n_crframe++;
27557 }
27558
27559 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27560 gen_rtx_PARALLEL (VOIDmode,
27561 gen_rtvec_v (n_crframe, crframe)));
27562 }
27563 else
27564 {
27565 /* In other ABIs, by convention, we use a single CR regnum to
27566 represent the fact that all call-saved CR fields are saved.
27567 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27568 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27569 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27570 }
27571 }
27572
27573 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27574 *separate* slots if the routine calls __builtin_eh_return, so
27575 that they can be independently restored by the unwinder. */
27576 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27577 {
27578 int i, cr_off = info->ehcr_offset;
27579 rtx crsave;
27580
27581 /* ??? We might get better performance by using multiple mfocrf
27582 instructions. */
27583 crsave = gen_rtx_REG (SImode, 0);
27584 emit_insn (gen_prologue_movesi_from_cr (crsave));
27585
27586 for (i = 0; i < 8; i++)
27587 if (!call_used_regs[CR0_REGNO + i])
27588 {
27589 rtvec p = rtvec_alloc (2);
27590 RTVEC_ELT (p, 0)
27591 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27592 RTVEC_ELT (p, 1)
27593 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27594
27595 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27596
27597 RTX_FRAME_RELATED_P (insn) = 1;
27598 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27599 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27600 sp_reg_rtx, cr_off + sp_off));
27601
27602 cr_off += reg_size;
27603 }
27604 }
27605
27606 /* If we are emitting stack probes but allocating no stack, then
27607 just note that in the dump file. */
27608 if (flag_stack_clash_protection
27609 && dump_file
27610 && !info->push_p)
27611 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27612
27613 /* Update the stack and set the back pointer, unless this is V.4,
27614 for which this was done previously. */
27615 if (!WORLD_SAVE_P (info) && info->push_p
27616 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27617 {
27618 rtx ptr_reg = NULL;
27619 int ptr_off = 0;
27620
27621 /* If saving AltiVec regs, we need to be able to address all save
27622 locations using a 16-bit offset. */
27623 if ((strategy & SAVE_INLINE_VRS) == 0
27624 || (info->altivec_size != 0
27625 && (info->altivec_save_offset + info->altivec_size - 16
27626 + info->total_size - frame_off) > 32767)
27627 || (info->vrsave_size != 0
27628 && (info->vrsave_save_offset
27629 + info->total_size - frame_off) > 32767))
27630 {
27631 int sel = SAVRES_SAVE | SAVRES_VR;
27632 unsigned ptr_regno = ptr_regno_for_savres (sel);
27633
27634 if (using_static_chain_p
27635 && ptr_regno == STATIC_CHAIN_REGNUM)
27636 ptr_regno = 12;
27637 if (REGNO (frame_reg_rtx) != ptr_regno)
27638 START_USE (ptr_regno);
27639 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27640 frame_reg_rtx = ptr_reg;
27641 ptr_off = info->altivec_save_offset + info->altivec_size;
27642 frame_off = -ptr_off;
27643 }
27644 else if (REGNO (frame_reg_rtx) == 1)
27645 frame_off = info->total_size;
27646 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27647 ptr_reg, ptr_off);
27648 if (REGNO (frame_reg_rtx) == 12)
27649 sp_adjust = 0;
27650 sp_off = info->total_size;
27651 if (frame_reg_rtx != sp_reg_rtx)
27652 rs6000_emit_stack_tie (frame_reg_rtx, false);
27653 }
27654
27655 /* Set frame pointer, if needed. */
27656 if (frame_pointer_needed)
27657 {
27658 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27659 sp_reg_rtx);
27660 RTX_FRAME_RELATED_P (insn) = 1;
27661 }
27662
27663 /* Save AltiVec registers if needed. Save here because the red zone does
27664 not always include AltiVec registers. */
27665 if (!WORLD_SAVE_P (info)
27666 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27667 {
27668 int end_save = info->altivec_save_offset + info->altivec_size;
27669 int ptr_off;
27670 /* Oddly, the vector save/restore functions point r0 at the end
27671 of the save area, then use r11 or r12 to load offsets for
27672 [reg+reg] addressing. */
27673 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27674 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27675 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27676
27677 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27678 NOT_INUSE (0);
27679 if (scratch_regno == 12)
27680 sp_adjust = 0;
27681 if (end_save + frame_off != 0)
27682 {
27683 rtx offset = GEN_INT (end_save + frame_off);
27684
27685 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27686 }
27687 else
27688 emit_move_insn (ptr_reg, frame_reg_rtx);
27689
27690 ptr_off = -end_save;
27691 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27692 info->altivec_save_offset + ptr_off,
27693 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27694 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27695 NULL_RTX, NULL_RTX);
27696 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27697 {
27698 /* The oddity mentioned above clobbered our frame reg. */
27699 emit_move_insn (frame_reg_rtx, ptr_reg);
27700 frame_off = ptr_off;
27701 }
27702 }
27703 else if (!WORLD_SAVE_P (info)
27704 && info->altivec_size != 0)
27705 {
27706 int i;
27707
27708 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27709 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27710 {
27711 rtx areg, savereg, mem;
27712 HOST_WIDE_INT offset;
27713
27714 offset = (info->altivec_save_offset + frame_off
27715 + 16 * (i - info->first_altivec_reg_save));
27716
27717 savereg = gen_rtx_REG (V4SImode, i);
27718
27719 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27720 {
27721 mem = gen_frame_mem (V4SImode,
27722 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27723 GEN_INT (offset)));
27724 insn = emit_insn (gen_rtx_SET (mem, savereg));
27725 areg = NULL_RTX;
27726 }
27727 else
27728 {
27729 NOT_INUSE (0);
27730 areg = gen_rtx_REG (Pmode, 0);
27731 emit_move_insn (areg, GEN_INT (offset));
27732
27733 /* AltiVec addressing mode is [reg+reg]. */
27734 mem = gen_frame_mem (V4SImode,
27735 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27736
27737 /* Rather than emitting a generic move, force use of the stvx
27738 instruction, which we always want on ISA 2.07 (power8) systems.
27739 In particular we don't want xxpermdi/stxvd2x for little
27740 endian. */
27741 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27742 }
27743
27744 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27745 areg, GEN_INT (offset));
27746 }
27747 }
27748
27749 /* VRSAVE is a bit vector representing which AltiVec registers
27750 are used. The OS uses this to determine which vector
27751 registers to save on a context switch. We need to save
27752 VRSAVE on the stack frame, add whatever AltiVec registers we
27753 used in this function, and do the corresponding magic in the
27754 epilogue. */
27755
27756 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27757 {
27758 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27759 be using r12 as frame_reg_rtx and r11 as the static chain
27760 pointer for nested functions. */
27761 int save_regno = 12;
27762 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27763 && !using_static_chain_p)
27764 save_regno = 11;
27765 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27766 {
27767 save_regno = 11;
27768 if (using_static_chain_p)
27769 save_regno = 0;
27770 }
27771 NOT_INUSE (save_regno);
27772
27773 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27774 }
27775
27776 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27777 if (!TARGET_SINGLE_PIC_BASE
27778 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27779 && !constant_pool_empty_p ())
27780 || (DEFAULT_ABI == ABI_V4
27781 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27782 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27783 {
27784 /* If emit_load_toc_table will use the link register, we need to save
27785 it. We use R12 for this purpose because emit_load_toc_table
27786 can use register 0. This allows us to use a plain 'blr' to return
27787 from the procedure more often. */
27788 int save_LR_around_toc_setup = (TARGET_ELF
27789 && DEFAULT_ABI == ABI_V4
27790 && flag_pic
27791 && ! info->lr_save_p
27792 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27793 if (save_LR_around_toc_setup)
27794 {
27795 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27796 rtx tmp = gen_rtx_REG (Pmode, 12);
27797
27798 sp_adjust = 0;
27799 insn = emit_move_insn (tmp, lr);
27800 RTX_FRAME_RELATED_P (insn) = 1;
27801
27802 rs6000_emit_load_toc_table (TRUE);
27803
27804 insn = emit_move_insn (lr, tmp);
27805 add_reg_note (insn, REG_CFA_RESTORE, lr);
27806 RTX_FRAME_RELATED_P (insn) = 1;
27807 }
27808 else
27809 rs6000_emit_load_toc_table (TRUE);
27810 }
27811
27812 #if TARGET_MACHO
27813 if (!TARGET_SINGLE_PIC_BASE
27814 && DEFAULT_ABI == ABI_DARWIN
27815 && flag_pic && crtl->uses_pic_offset_table)
27816 {
27817 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27818 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27819
27820 /* Save and restore LR locally around this call (in R0). */
27821 if (!info->lr_save_p)
27822 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27823
27824 emit_insn (gen_load_macho_picbase (src));
27825
27826 emit_move_insn (gen_rtx_REG (Pmode,
27827 RS6000_PIC_OFFSET_TABLE_REGNUM),
27828 lr);
27829
27830 if (!info->lr_save_p)
27831 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27832 }
27833 #endif
27834
27835 /* If we need to, save the TOC register after doing the stack setup.
27836 Do not emit eh frame info for this save. The unwinder wants info,
27837 conceptually attached to instructions in this function, about
27838 register values in the caller of this function. This R2 may have
27839 already been changed from the value in the caller.
27840 We don't attempt to write accurate DWARF EH frame info for R2
27841 because code emitted by gcc for a (non-pointer) function call
27842 doesn't save and restore R2. Instead, R2 is managed out-of-line
27843 by a linker generated plt call stub when the function resides in
27844 a shared library. This behavior is costly to describe in DWARF,
27845 both in terms of the size of DWARF info and the time taken in the
27846 unwinder to interpret it. R2 changes, apart from the
27847 calls_eh_return case earlier in this function, are handled by
27848 linux-unwind.h frob_update_context. */
27849 if (rs6000_save_toc_in_prologue_p ()
27850 && !cfun->machine->toc_is_wrapped_separately)
27851 {
27852 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27853 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27854 }
27855
27856 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27857 if (using_split_stack && split_stack_arg_pointer_used_p ())
27858 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27859 }
27860
27861 /* Output .extern statements for the save/restore routines we use. */
27862
27863 static void
27864 rs6000_output_savres_externs (FILE *file)
27865 {
27866 rs6000_stack_t *info = rs6000_stack_info ();
27867
27868 if (TARGET_DEBUG_STACK)
27869 debug_stack_info (info);
27870
27871 /* Write .extern for any function we will call to save and restore
27872 fp values. */
27873 if (info->first_fp_reg_save < 64
27874 && !TARGET_MACHO
27875 && !TARGET_ELF)
27876 {
27877 char *name;
27878 int regno = info->first_fp_reg_save - 32;
27879
27880 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27881 {
27882 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27883 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27884 name = rs6000_savres_routine_name (regno, sel);
27885 fprintf (file, "\t.extern %s\n", name);
27886 }
27887 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27888 {
27889 bool lr = (info->savres_strategy
27890 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27891 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27892 name = rs6000_savres_routine_name (regno, sel);
27893 fprintf (file, "\t.extern %s\n", name);
27894 }
27895 }
27896 }
27897
27898 /* Write function prologue. */
27899
27900 static void
27901 rs6000_output_function_prologue (FILE *file)
27902 {
27903 if (!cfun->is_thunk)
27904 rs6000_output_savres_externs (file);
27905
27906 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27907 immediately after the global entry point label. */
27908 if (rs6000_global_entry_point_needed_p ())
27909 {
27910 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27911
27912 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27913
27914 if (TARGET_CMODEL != CMODEL_LARGE)
27915 {
27916 /* In the small and medium code models, we assume the TOC is less
27917 than 2 GB away from the text section, so it can be computed via the
27918 following two-instruction sequence. */
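/* The emitted sequence looks like, e.g.:
0: addis 2,12,.TOC.-.LCF0@ha
addi 2,2,.TOC.-.LCF0@l */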
27919 char buf[256];
27920
27921 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27922 fprintf (file, "0:\taddis 2,12,.TOC.-");
27923 assemble_name (file, buf);
27924 fprintf (file, "@ha\n");
27925 fprintf (file, "\taddi 2,2,.TOC.-");
27926 assemble_name (file, buf);
27927 fprintf (file, "@l\n");
27928 }
27929 else
27930 {
27931 /* In the large code model, we allow arbitrary offsets between the
27932 TOC and the text section, so we have to load the offset from
27933 memory. The data field is emitted directly before the global
27934 entry point in rs6000_elf_declare_function_name. */
27935 char buf[256];
27936
27937 #ifdef HAVE_AS_ENTRY_MARKERS
27938 /* If supported by the linker, emit a marker relocation. If the
27939 total code size of the final executable or shared library
27940 happens to fit into 2 GB after all, the linker will replace
27941 this code sequence with the sequence for the small or medium
27942 code model. */
27943 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27944 #endif
27945 fprintf (file, "\tld 2,");
27946 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27947 assemble_name (file, buf);
27948 fprintf (file, "-");
27949 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27950 assemble_name (file, buf);
27951 fprintf (file, "(12)\n");
27952 fprintf (file, "\tadd 2,2,12\n");
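/* The emitted sequence looks like, e.g.:
ld 2,.LCL0-.LCF0(12)
add 2,2,12 */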
27953 }
27954
27955 fputs ("\t.localentry\t", file);
27956 assemble_name (file, name);
27957 fputs (",.-", file);
27958 assemble_name (file, name);
27959 fputs ("\n", file);
27960 }
27961
27962 /* Output -mprofile-kernel code. This needs to be done here instead of
27963 in output_function_profile since it must go after the ELFv2 ABI
27964 local entry point. */
27965 if (TARGET_PROFILE_KERNEL && crtl->profile)
27966 {
27967 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27968 gcc_assert (!TARGET_32BIT);
27969
27970 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27971
27972 /* In the ELFv2 ABI we have no compiler stack word. It must be
27973 the responsibility of _mcount to preserve the static chain
27974 register if required. */
27975 if (DEFAULT_ABI != ABI_ELFv2
27976 && cfun->static_chain_decl != NULL)
27977 {
27978 asm_fprintf (file, "\tstd %s,24(%s)\n",
27979 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27980 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27981 asm_fprintf (file, "\tld %s,24(%s)\n",
27982 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27983 }
27984 else
27985 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27986 }
27987
27988 rs6000_pic_labelno++;
27989 }
27990
27991 /* -mprofile-kernel code calls mcount before the function prologue,
27992 so a profiled leaf function should stay a leaf function. */
27993 static bool
27994 rs6000_keep_leaf_when_profiled ()
27995 {
27996 return TARGET_PROFILE_KERNEL;
27997 }
27998
27999 /* Non-zero if vmx regs are restored before the frame pop, zero if
28000 we restore after the pop when possible. */
28001 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
28002
28003 /* Restoring cr is a two-step process: loading a reg from the frame
28004 save, then moving the reg to cr. For ABI_V4 we must let the
28005 unwinder know that the stack location is no longer valid at or
28006 before the stack deallocation, but we can't emit a cfa_restore for
28007 cr at the stack deallocation like we do for other registers.
28008 The trouble is that it is possible for the move to cr to be
28009 scheduled after the stack deallocation. So say exactly where cr
28010 is located on each of the two insns. */
28011
28012 static rtx
28013 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
28014 {
28015 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
28016 rtx reg = gen_rtx_REG (SImode, regno);
28017 rtx_insn *insn = emit_move_insn (reg, mem);
28018
28019 if (!exit_func && DEFAULT_ABI == ABI_V4)
28020 {
28021 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28022 rtx set = gen_rtx_SET (reg, cr);
28023
28024 add_reg_note (insn, REG_CFA_REGISTER, set);
28025 RTX_FRAME_RELATED_P (insn) = 1;
28026 }
28027 return reg;
28028 }
28029
28030 /* Reload CR from REG. */
28031
28032 static void
28033 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
28034 {
28035 int count = 0;
28036 int i;
28037
28038 if (using_mfcr_multiple)
28039 {
28040 for (i = 0; i < 8; i++)
28041 if (save_reg_p (CR0_REGNO + i))
28042 count++;
28043 gcc_assert (count);
28044 }
28045
28046 if (using_mfcr_multiple && count > 1)
28047 {
28048 rtx_insn *insn;
28049 rtvec p;
28050 int ndx;
28051
28052 p = rtvec_alloc (count);
28053
28054 ndx = 0;
28055 for (i = 0; i < 8; i++)
28056 if (save_reg_p (CR0_REGNO + i))
28057 {
28058 rtvec r = rtvec_alloc (2);
28059 RTVEC_ELT (r, 0) = reg;
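/* 1 << (7 - i) is the mtcrf field mask bit for CR field i;
CR0 corresponds to the most significant mask bit (0x80). */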
28060 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
28061 RTVEC_ELT (p, ndx) =
28062 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
28063 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
28064 ndx++;
28065 }
28066 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28067 gcc_assert (ndx == count);
28068
28069 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28070 CR field separately. */
28071 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28072 {
28073 for (i = 0; i < 8; i++)
28074 if (save_reg_p (CR0_REGNO + i))
28075 add_reg_note (insn, REG_CFA_RESTORE,
28076 gen_rtx_REG (SImode, CR0_REGNO + i));
28077
28078 RTX_FRAME_RELATED_P (insn) = 1;
28079 }
28080 }
28081 else
28082 for (i = 0; i < 8; i++)
28083 if (save_reg_p (CR0_REGNO + i))
28084 {
28085 rtx insn = emit_insn (gen_movsi_to_cr_one
28086 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28087
28088 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28089 CR field separately, attached to the insn that in fact
28090 restores this particular CR field. */
28091 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28092 {
28093 add_reg_note (insn, REG_CFA_RESTORE,
28094 gen_rtx_REG (SImode, CR0_REGNO + i));
28095
28096 RTX_FRAME_RELATED_P (insn) = 1;
28097 }
28098 }
28099
28100 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
28101 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
28102 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28103 {
28104 rtx_insn *insn = get_last_insn ();
28105 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28106
28107 add_reg_note (insn, REG_CFA_RESTORE, cr);
28108 RTX_FRAME_RELATED_P (insn) = 1;
28109 }
28110 }
28111
28112 /* Like cr, the move to lr instruction can be scheduled after the
28113 stack deallocation, but unlike cr, its stack frame save is still
28114 valid. So we only need to emit the cfa_restore on the correct
28115 instruction. */
28116
28117 static void
28118 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
28119 {
28120 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
28121 rtx reg = gen_rtx_REG (Pmode, regno);
28122
28123 emit_move_insn (reg, mem);
28124 }
28125
28126 static void
28127 restore_saved_lr (int regno, bool exit_func)
28128 {
28129 rtx reg = gen_rtx_REG (Pmode, regno);
28130 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
28131 rtx_insn *insn = emit_move_insn (lr, reg);
28132
28133 if (!exit_func && flag_shrink_wrap)
28134 {
28135 add_reg_note (insn, REG_CFA_RESTORE, lr);
28136 RTX_FRAME_RELATED_P (insn) = 1;
28137 }
28138 }
28139
28140 static rtx
28141 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
28142 {
28143 if (DEFAULT_ABI == ABI_ELFv2)
28144 {
28145 int i;
28146 for (i = 0; i < 8; i++)
28147 if (save_reg_p (CR0_REGNO + i))
28148 {
28149 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28150 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28151 cfa_restores);
28152 }
28153 }
28154 else if (info->cr_save_p)
28155 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28156 gen_rtx_REG (SImode, CR2_REGNO),
28157 cfa_restores);
28158
28159 if (info->lr_save_p)
28160 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28161 gen_rtx_REG (Pmode, LR_REGNO),
28162 cfa_restores);
28163 return cfa_restores;
28164 }
28165
28166 /* Return true if OFFSET from the stack pointer can be clobbered by signals.
28167 V.4 doesn't have any stack cushion; the AIX ABIs have 220 (32-bit)
28168 or 288 (64-bit) bytes below the stack pointer not clobbered by signals. */
28169
28170 static inline bool
28171 offset_below_red_zone_p (HOST_WIDE_INT offset)
28172 {
28173 return offset < (DEFAULT_ABI == ABI_V4
28174 ? 0
28175 : TARGET_32BIT ? -220 : -288);
28176 }
28177
28178 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28179
28180 static void
28181 emit_cfa_restores (rtx cfa_restores)
28182 {
28183 rtx_insn *insn = get_last_insn ();
28184 rtx *loc = &REG_NOTES (insn);
28185
28186 while (*loc)
28187 loc = &XEXP (*loc, 1);
28188 *loc = cfa_restores;
28189 RTX_FRAME_RELATED_P (insn) = 1;
28190 }
28191
28192 /* Emit function epilogue as insns. */
28193
28194 void
28195 rs6000_emit_epilogue (int sibcall)
28196 {
28197 rs6000_stack_t *info;
28198 int restoring_GPRs_inline;
28199 int restoring_FPRs_inline;
28200 int using_load_multiple;
28201 int using_mtcr_multiple;
28202 int use_backchain_to_restore_sp;
28203 int restore_lr;
28204 int strategy;
28205 HOST_WIDE_INT frame_off = 0;
28206 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28207 rtx frame_reg_rtx = sp_reg_rtx;
28208 rtx cfa_restores = NULL_RTX;
28209 rtx insn;
28210 rtx cr_save_reg = NULL_RTX;
28211 machine_mode reg_mode = Pmode;
28212 int reg_size = TARGET_32BIT ? 4 : 8;
28213 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
28214 int fp_reg_size = 8;
28215 int i;
28216 bool exit_func;
28217 unsigned ptr_regno;
28218
28219 info = rs6000_stack_info ();
28220
28221 strategy = info->savres_strategy;
28222 using_load_multiple = strategy & REST_MULTIPLE;
28223 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28224 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28225 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
28226 || rs6000_tune == PROCESSOR_PPC603
28227 || rs6000_tune == PROCESSOR_PPC750
28228 || optimize_size);
28229 /* Restore via the backchain when we have a large frame, since this
28230 is more efficient than an addis, addi pair. The second condition
28231 here will not trigger at the moment; we don't actually need a
28232 frame pointer for alloca, but the generic parts of the compiler
28233 give us one anyway. */
28234 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28235 ? info->lr_save_offset
28236 : 0) > 32767
28237 || (cfun->calls_alloca
28238 && !frame_pointer_needed));
28239 restore_lr = (info->lr_save_p
28240 && (restoring_FPRs_inline
28241 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28242 && (restoring_GPRs_inline
28243 || info->first_fp_reg_save < 64)
28244 && !cfun->machine->lr_is_wrapped_separately);
28245
28246
28247 if (WORLD_SAVE_P (info))
28248 {
28249 int i, j;
28250 char rname[30];
28251 const char *alloc_rname;
28252 rtvec p;
28253
28254 /* eh_rest_world_r10 will return to the location saved in the LR
28255 stack slot (which is not likely to be our caller).
28256 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28257 rest_world is similar, except any R10 parameter is ignored.
28258 The exception-handling stuff that was here in 2.95 is no
28259 longer necessary. */
28260
28261 p = rtvec_alloc (9
28262 + 32 - info->first_gp_reg_save
28263 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28264 + 63 + 1 - info->first_fp_reg_save);
28265
28266 strcpy (rname, ((crtl->calls_eh_return) ?
28267 "*eh_rest_world_r10" : "*rest_world"));
28268 alloc_rname = ggc_strdup (rname);
28269
28270 j = 0;
28271 RTVEC_ELT (p, j++) = ret_rtx;
28272 RTVEC_ELT (p, j++)
28273 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28274 /* The instruction pattern requires a clobber here;
28275 it is shared with the restVEC helper. */
28276 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 11);
28277
28278 {
28279 /* CR register traditionally saved as CR2. */
28280 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28281 RTVEC_ELT (p, j++)
28282 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28283 if (flag_shrink_wrap)
28284 {
28285 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28286 gen_rtx_REG (Pmode, LR_REGNO),
28287 cfa_restores);
28288 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28289 }
28290 }
28291
28292 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28293 {
28294 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28295 RTVEC_ELT (p, j++)
28296 = gen_frame_load (reg,
28297 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28298 if (flag_shrink_wrap
28299 && save_reg_p (info->first_gp_reg_save + i))
28300 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28301 }
28302 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28303 {
28304 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28305 RTVEC_ELT (p, j++)
28306 = gen_frame_load (reg,
28307 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28308 if (flag_shrink_wrap
28309 && save_reg_p (info->first_altivec_reg_save + i))
28310 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28311 }
28312 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28313 {
28314 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
28315 info->first_fp_reg_save + i);
28316 RTVEC_ELT (p, j++)
28317 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28318 if (flag_shrink_wrap
28319 && save_reg_p (info->first_fp_reg_save + i))
28320 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28321 }
28322 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 0);
28323 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 12);
28324 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 7);
28325 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 8);
28326 RTVEC_ELT (p, j++)
28327 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28328 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28329
28330 if (flag_shrink_wrap)
28331 {
28332 REG_NOTES (insn) = cfa_restores;
28333 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28334 RTX_FRAME_RELATED_P (insn) = 1;
28335 }
28336 return;
28337 }
28338
28339 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28340 if (info->push_p)
28341 frame_off = info->total_size;
28342
28343 /* Restore AltiVec registers if we must do so before adjusting the
28344 stack. */
28345 if (info->altivec_size != 0
28346 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28347 || (DEFAULT_ABI != ABI_V4
28348 && offset_below_red_zone_p (info->altivec_save_offset))))
28349 {
28350 int i;
28351 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28352
28353 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28354 if (use_backchain_to_restore_sp)
28355 {
28356 int frame_regno = 11;
28357
28358 if ((strategy & REST_INLINE_VRS) == 0)
28359 {
28360 /* Of r11 and r12, select the one not clobbered by an
28361 out-of-line restore function for the frame register. */
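/* 11 + 12 - scratch_regno yields whichever of r11 and r12 the
out-of-line restore function does not clobber. */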
28362 frame_regno = 11 + 12 - scratch_regno;
28363 }
28364 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28365 emit_move_insn (frame_reg_rtx,
28366 gen_rtx_MEM (Pmode, sp_reg_rtx));
28367 frame_off = 0;
28368 }
28369 else if (frame_pointer_needed)
28370 frame_reg_rtx = hard_frame_pointer_rtx;
28371
28372 if ((strategy & REST_INLINE_VRS) == 0)
28373 {
28374 int end_save = info->altivec_save_offset + info->altivec_size;
28375 int ptr_off;
28376 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28377 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28378
28379 if (end_save + frame_off != 0)
28380 {
28381 rtx offset = GEN_INT (end_save + frame_off);
28382
28383 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28384 }
28385 else
28386 emit_move_insn (ptr_reg, frame_reg_rtx);
28387
28388 ptr_off = -end_save;
28389 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28390 info->altivec_save_offset + ptr_off,
28391 0, V4SImode, SAVRES_VR);
28392 }
28393 else
28394 {
28395 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28396 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28397 {
28398 rtx addr, areg, mem, insn;
28399 rtx reg = gen_rtx_REG (V4SImode, i);
28400 HOST_WIDE_INT offset
28401 = (info->altivec_save_offset + frame_off
28402 + 16 * (i - info->first_altivec_reg_save));
28403
28404 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28405 {
28406 mem = gen_frame_mem (V4SImode,
28407 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28408 GEN_INT (offset)));
28409 insn = gen_rtx_SET (reg, mem);
28410 }
28411 else
28412 {
28413 areg = gen_rtx_REG (Pmode, 0);
28414 emit_move_insn (areg, GEN_INT (offset));
28415
28416 /* AltiVec addressing mode is [reg+reg]. */
28417 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28418 mem = gen_frame_mem (V4SImode, addr);
28419
28420 /* Rather than emitting a generic move, force use of the
28421 lvx instruction, which we always want. In particular we
28422 don't want lxvd2x/xxpermdi for little endian. */
28423 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28424 }
28425
28426 (void) emit_insn (insn);
28427 }
28428 }
28429
28430 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28431 if (((strategy & REST_INLINE_VRS) == 0
28432 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28433 && (flag_shrink_wrap
28434 || (offset_below_red_zone_p
28435 (info->altivec_save_offset
28436 + 16 * (i - info->first_altivec_reg_save))))
28437 && save_reg_p (i))
28438 {
28439 rtx reg = gen_rtx_REG (V4SImode, i);
28440 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28441 }
28442 }
28443
28444 /* Restore VRSAVE if we must do so before adjusting the stack. */
28445 if (info->vrsave_size != 0
28446 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28447 || (DEFAULT_ABI != ABI_V4
28448 && offset_below_red_zone_p (info->vrsave_save_offset))))
28449 {
28450 rtx reg;
28451
28452 if (frame_reg_rtx == sp_reg_rtx)
28453 {
28454 if (use_backchain_to_restore_sp)
28455 {
28456 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28457 emit_move_insn (frame_reg_rtx,
28458 gen_rtx_MEM (Pmode, sp_reg_rtx));
28459 frame_off = 0;
28460 }
28461 else if (frame_pointer_needed)
28462 frame_reg_rtx = hard_frame_pointer_rtx;
28463 }
28464
28465 reg = gen_rtx_REG (SImode, 12);
28466 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28467 info->vrsave_save_offset + frame_off));
28468
28469 emit_insn (generate_set_vrsave (reg, info, 1));
28470 }
28471
28472 insn = NULL_RTX;
28473 /* If we have a large stack frame, restore the old stack pointer
28474 using the backchain. */
28475 if (use_backchain_to_restore_sp)
28476 {
28477 if (frame_reg_rtx == sp_reg_rtx)
28478 {
28479 /* Under V.4, don't reset the stack pointer until after we're done
28480 loading the saved registers. */
28481 if (DEFAULT_ABI == ABI_V4)
28482 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28483
28484 insn = emit_move_insn (frame_reg_rtx,
28485 gen_rtx_MEM (Pmode, sp_reg_rtx));
28486 frame_off = 0;
28487 }
28488 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28489 && DEFAULT_ABI == ABI_V4)
28490 /* frame_reg_rtx has been set up by the altivec restore. */
28491 ;
28492 else
28493 {
28494 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28495 frame_reg_rtx = sp_reg_rtx;
28496 }
28497 }
28498 /* If we have a frame pointer, we can restore the old stack pointer
28499 from it. */
28500 else if (frame_pointer_needed)
28501 {
28502 frame_reg_rtx = sp_reg_rtx;
28503 if (DEFAULT_ABI == ABI_V4)
28504 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28505 /* Prevent reordering memory accesses against stack pointer restore. */
28506 else if (cfun->calls_alloca
28507 || offset_below_red_zone_p (-info->total_size))
28508 rs6000_emit_stack_tie (frame_reg_rtx, true);
28509
28510 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28511 GEN_INT (info->total_size)));
28512 frame_off = 0;
28513 }
28514 else if (info->push_p
28515 && DEFAULT_ABI != ABI_V4
28516 && !crtl->calls_eh_return)
28517 {
28518 /* Prevent reordering memory accesses against stack pointer restore. */
28519 if (cfun->calls_alloca
28520 || offset_below_red_zone_p (-info->total_size))
28521 rs6000_emit_stack_tie (frame_reg_rtx, false);
28522 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28523 GEN_INT (info->total_size)));
28524 frame_off = 0;
28525 }
28526 if (insn && frame_reg_rtx == sp_reg_rtx)
28527 {
28528 if (cfa_restores)
28529 {
28530 REG_NOTES (insn) = cfa_restores;
28531 cfa_restores = NULL_RTX;
28532 }
28533 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28534 RTX_FRAME_RELATED_P (insn) = 1;
28535 }
28536
28537 /* Restore AltiVec registers if we have not done so already. */
28538 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28539 && info->altivec_size != 0
28540 && (DEFAULT_ABI == ABI_V4
28541 || !offset_below_red_zone_p (info->altivec_save_offset)))
28542 {
28543 int i;
28544
28545 if ((strategy & REST_INLINE_VRS) == 0)
28546 {
28547 int end_save = info->altivec_save_offset + info->altivec_size;
28548 int ptr_off;
28549 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28550 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28551 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28552
28553 if (end_save + frame_off != 0)
28554 {
28555 rtx offset = GEN_INT (end_save + frame_off);
28556
28557 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28558 }
28559 else
28560 emit_move_insn (ptr_reg, frame_reg_rtx);
28561
28562 ptr_off = -end_save;
28563 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28564 info->altivec_save_offset + ptr_off,
28565 0, V4SImode, SAVRES_VR);
28566 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28567 {
28568 /* Frame reg was clobbered by the out-of-line save. Restore it
28569 from ptr_reg, and if we are calling an out-of-line gpr or
28570 fpr restore, set up the correct pointer and offset. */
28571 unsigned newptr_regno = 1;
28572 if (!restoring_GPRs_inline)
28573 {
28574 bool lr = info->gp_save_offset + info->gp_size == 0;
28575 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28576 newptr_regno = ptr_regno_for_savres (sel);
28577 end_save = info->gp_save_offset + info->gp_size;
28578 }
28579 else if (!restoring_FPRs_inline)
28580 {
28581 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28582 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28583 newptr_regno = ptr_regno_for_savres (sel);
28584 end_save = info->fp_save_offset + info->fp_size;
28585 }
28586
28587 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28588 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28589
28590 if (end_save + ptr_off != 0)
28591 {
28592 rtx offset = GEN_INT (end_save + ptr_off);
28593
28594 frame_off = -end_save;
28595 if (TARGET_32BIT)
28596 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28597 ptr_reg, offset));
28598 else
28599 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28600 ptr_reg, offset));
28601 }
28602 else
28603 {
28604 frame_off = ptr_off;
28605 emit_move_insn (frame_reg_rtx, ptr_reg);
28606 }
28607 }
28608 }
28609 else
28610 {
28611 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28612 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28613 {
28614 rtx addr, areg, mem, insn;
28615 rtx reg = gen_rtx_REG (V4SImode, i);
28616 HOST_WIDE_INT offset
28617 = (info->altivec_save_offset + frame_off
28618 + 16 * (i - info->first_altivec_reg_save));
28619
28620 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28621 {
28622 mem = gen_frame_mem (V4SImode,
28623 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28624 GEN_INT (offset)));
28625 insn = gen_rtx_SET (reg, mem);
28626 }
28627 else
28628 {
28629 areg = gen_rtx_REG (Pmode, 0);
28630 emit_move_insn (areg, GEN_INT (offset));
28631
28632 /* AltiVec addressing mode is [reg+reg]. */
28633 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28634 mem = gen_frame_mem (V4SImode, addr);
28635
28636 /* Rather than emitting a generic move, force use of the
28637 lvx instruction, which we always want. In particular we
28638 don't want lxvd2x/xxpermdi for little endian. */
28639 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28640 }
28641
28642 (void) emit_insn (insn);
28643 }
28644 }
28645
28646 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28647 if (((strategy & REST_INLINE_VRS) == 0
28648 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28649 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28650 && save_reg_p (i))
28651 {
28652 rtx reg = gen_rtx_REG (V4SImode, i);
28653 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28654 }
28655 }
28656
28657 /* Restore VRSAVE if we have not done so already. */
28658 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28659 && info->vrsave_size != 0
28660 && (DEFAULT_ABI == ABI_V4
28661 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28662 {
28663 rtx reg;
28664
28665 reg = gen_rtx_REG (SImode, 12);
28666 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28667 info->vrsave_save_offset + frame_off));
28668
28669 emit_insn (generate_set_vrsave (reg, info, 1));
28670 }
28671
28672 /* If we exit by an out-of-line restore function on ABI_V4 then that
28673 function will deallocate the stack, so we don't need to worry
28674 about the unwinder restoring cr from an invalid stack frame
28675 location. */
28676 exit_func = (!restoring_FPRs_inline
28677 || (!restoring_GPRs_inline
28678 && info->first_fp_reg_save == 64));
28679
28680 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28681 *separate* slots if the routine calls __builtin_eh_return, so
28682 that they can be independently restored by the unwinder. */
28683 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28684 {
28685 int i, cr_off = info->ehcr_offset;
28686
28687 for (i = 0; i < 8; i++)
28688 if (!call_used_regs[CR0_REGNO + i])
28689 {
28690 rtx reg = gen_rtx_REG (SImode, 0);
28691 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28692 cr_off + frame_off));
28693
28694 insn = emit_insn (gen_movsi_to_cr_one
28695 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28696
28697 if (!exit_func && flag_shrink_wrap)
28698 {
28699 add_reg_note (insn, REG_CFA_RESTORE,
28700 gen_rtx_REG (SImode, CR0_REGNO + i));
28701
28702 RTX_FRAME_RELATED_P (insn) = 1;
28703 }
28704
28705 cr_off += reg_size;
28706 }
28707 }
28708
28709 /* Get the old lr if we saved it. If we are restoring registers
28710 out-of-line, then the out-of-line routines can do this for us. */
28711 if (restore_lr && restoring_GPRs_inline)
28712 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28713
28714 /* Get the old cr if we saved it. */
28715 if (info->cr_save_p)
28716 {
28717 unsigned cr_save_regno = 12;
28718
28719 if (!restoring_GPRs_inline)
28720 {
28721 /* Ensure we don't use the register used by the out-of-line
28722 gpr register restore below. */
28723 bool lr = info->gp_save_offset + info->gp_size == 0;
28724 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28725 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28726
28727 if (gpr_ptr_regno == 12)
28728 cr_save_regno = 11;
28729 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28730 }
28731 else if (REGNO (frame_reg_rtx) == 12)
28732 cr_save_regno = 11;
28733
28734 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28735 info->cr_save_offset + frame_off,
28736 exit_func);
28737 }
28738
28739 /* Set LR here to try to overlap restores below. */
28740 if (restore_lr && restoring_GPRs_inline)
28741 restore_saved_lr (0, exit_func);
28742
28743 /* Load exception handler data registers, if needed. */
28744 if (crtl->calls_eh_return)
28745 {
28746 unsigned int i, regno;
28747
28748 if (TARGET_AIX)
28749 {
28750 rtx reg = gen_rtx_REG (reg_mode, 2);
28751 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28752 frame_off + RS6000_TOC_SAVE_SLOT));
28753 }
28754
28755 for (i = 0; ; ++i)
28756 {
28757 rtx mem;
28758
28759 regno = EH_RETURN_DATA_REGNO (i);
28760 if (regno == INVALID_REGNUM)
28761 break;
28762
28763 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28764 info->ehrd_offset + frame_off
28765 + reg_size * (int) i);
28766
28767 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28768 }
28769 }
28770
28771 /* Restore GPRs. This is done as a PARALLEL if we are using
28772 the load-multiple instructions. */
28773 if (!restoring_GPRs_inline)
28774 {
28775 /* We are jumping to an out-of-line function. */
28776 rtx ptr_reg;
28777 int end_save = info->gp_save_offset + info->gp_size;
28778 bool can_use_exit = end_save == 0;
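/* end_save == 0 means the GPR save area ends at the frame top,
letting us use the out-of-line "exit" variant, which also restores
LR and returns for us. */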
28779 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28780 int ptr_off;
28781
28782 /* Emit stack reset code if we need it. */
28783 ptr_regno = ptr_regno_for_savres (sel);
28784 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28785 if (can_use_exit)
28786 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28787 else if (end_save + frame_off != 0)
28788 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28789 GEN_INT (end_save + frame_off)));
28790 else if (REGNO (frame_reg_rtx) != ptr_regno)
28791 emit_move_insn (ptr_reg, frame_reg_rtx);
28792 if (REGNO (frame_reg_rtx) == ptr_regno)
28793 frame_off = -end_save;
28794
28795 if (can_use_exit && info->cr_save_p)
28796 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28797
28798 ptr_off = -end_save;
28799 rs6000_emit_savres_rtx (info, ptr_reg,
28800 info->gp_save_offset + ptr_off,
28801 info->lr_save_offset + ptr_off,
28802 reg_mode, sel);
28803 }
28804 else if (using_load_multiple)
28805 {
28806 rtvec p;
28807 p = rtvec_alloc (32 - info->first_gp_reg_save);
28808 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28809 RTVEC_ELT (p, i)
28810 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28811 frame_reg_rtx,
28812 info->gp_save_offset + frame_off + reg_size * i);
28813 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28814 }
28815 else
28816 {
28817 int offset = info->gp_save_offset + frame_off;
28818 for (i = info->first_gp_reg_save; i < 32; i++)
28819 {
28820 if (save_reg_p (i)
28821 && !cfun->machine->gpr_is_wrapped_separately[i])
28822 {
28823 rtx reg = gen_rtx_REG (reg_mode, i);
28824 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28825 }
28826
28827 offset += reg_size;
28828 }
28829 }
28830
28831 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28832 {
28833 /* If the frame pointer was used then we can't delay emitting
28834 a REG_CFA_DEF_CFA note. This must happen on the insn that
28835 restores the frame pointer, r31. We may have already emitted
28836 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28837 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28838 be harmless if emitted. */
28839 if (frame_pointer_needed)
28840 {
28841 insn = get_last_insn ();
28842 add_reg_note (insn, REG_CFA_DEF_CFA,
28843 plus_constant (Pmode, frame_reg_rtx, frame_off));
28844 RTX_FRAME_RELATED_P (insn) = 1;
28845 }
28846
28847 /* Set up cfa_restores. We always need these when
28848 shrink-wrapping. If not shrink-wrapping then we only need
28849 the cfa_restore when the stack location is no longer valid.
28850 The cfa_restores must be emitted on or before the insn that
28851 invalidates the stack, and of course must not be emitted
28852 before the insn that actually does the restore. The latter
28853 is why it is a bad idea to emit the cfa_restores as a group
28854 on the last instruction here that actually does a restore:
28855 That insn may be reordered with respect to others doing
28856 restores. */
28857 if (flag_shrink_wrap
28858 && !restoring_GPRs_inline
28859 && info->first_fp_reg_save == 64)
28860 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28861
28862 for (i = info->first_gp_reg_save; i < 32; i++)
28863 if (save_reg_p (i)
28864 && !cfun->machine->gpr_is_wrapped_separately[i])
28865 {
28866 rtx reg = gen_rtx_REG (reg_mode, i);
28867 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28868 }
28869 }
28870
28871 if (!restoring_GPRs_inline
28872 && info->first_fp_reg_save == 64)
28873 {
28874 /* We are jumping to an out-of-line function. */
28875 if (cfa_restores)
28876 emit_cfa_restores (cfa_restores);
28877 return;
28878 }
28879
28880 if (restore_lr && !restoring_GPRs_inline)
28881 {
28882 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28883 restore_saved_lr (0, exit_func);
28884 }
28885
28886 /* Restore fpr's if we need to do it without calling a function. */
28887 if (restoring_FPRs_inline)
28888 {
28889 int offset = info->fp_save_offset + frame_off;
28890 for (i = info->first_fp_reg_save; i < 64; i++)
28891 {
28892 if (save_reg_p (i)
28893 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28894 {
28895 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28896 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28897 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28898 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28899 cfa_restores);
28900 }
28901
28902 offset += fp_reg_size;
28903 }
28904 }
28905
28906 /* If we saved the CR, restore it here; restore just the fields that were used. */
28907 if (info->cr_save_p)
28908 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28909
28910 /* If this is V.4, unwind the stack pointer after all of the loads
28911 have been done, or set up r11 if we are restoring fp out of line. */
28912 ptr_regno = 1;
28913 if (!restoring_FPRs_inline)
28914 {
28915 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28916 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28917 ptr_regno = ptr_regno_for_savres (sel);
28918 }
28919
28920 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28921 if (REGNO (frame_reg_rtx) == ptr_regno)
28922 frame_off = 0;
28923
28924 if (insn && restoring_FPRs_inline)
28925 {
28926 if (cfa_restores)
28927 {
28928 REG_NOTES (insn) = cfa_restores;
28929 cfa_restores = NULL_RTX;
28930 }
28931 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28932 RTX_FRAME_RELATED_P (insn) = 1;
28933 }
28934
28935 if (crtl->calls_eh_return)
28936 {
28937 rtx sa = EH_RETURN_STACKADJ_RTX;
28938 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28939 }
28940
28941 if (!sibcall && restoring_FPRs_inline)
28942 {
28943 if (cfa_restores)
28944 {
28945 /* We can't hang the cfa_restores off a simple return,
28946 since the shrink-wrap code sometimes uses an existing
28947 return. This means there might be a path from
28948 pre-prologue code to this return, and dwarf2cfi code
28949 wants the eh_frame unwinder state to be the same on
28950 all paths to any point. So we need to emit the
28951 cfa_restores before the return. For -m64 we really
28952 don't need epilogue cfa_restores at all, except for
28953 this irritating dwarf2cfi with shrink-wrap
28954 requirement; the stack red-zone means eh_frame info
28955 from the prologue telling the unwinder to restore
28956 from the stack is perfectly good right to the end of
28957 the function. */
28958 emit_insn (gen_blockage ());
28959 emit_cfa_restores (cfa_restores);
28960 cfa_restores = NULL_RTX;
28961 }
28962
28963 emit_jump_insn (targetm.gen_simple_return ());
28964 }
28965
28966 if (!sibcall && !restoring_FPRs_inline)
28967 {
28968 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28969 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28970 int elt = 0;
28971 RTVEC_ELT (p, elt++) = ret_rtx;
28972 if (lr)
28973 RTVEC_ELT (p, elt++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
28974
28975 /* We have to restore more than two FP registers, so branch to the
28976 restore function. It will return to our caller. */
28977 int i;
28978 int reg;
28979 rtx sym;
28980
28981 if (flag_shrink_wrap)
28982 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28983
28984 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28985 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28986 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
28987 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28988
28989 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28990 {
28991 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28992
28993 RTVEC_ELT (p, elt++)
28994 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28995 if (flag_shrink_wrap
28996 && save_reg_p (info->first_fp_reg_save + i))
28997 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28998 }
28999
29000 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
29001 }
29002
29003 if (cfa_restores)
29004 {
29005 if (sibcall)
29006 /* Ensure the cfa_restores are hung off an insn that won't
29007 be reordered above other restores. */
29008 emit_insn (gen_blockage ());
29009
29010 emit_cfa_restores (cfa_restores);
29011 }
29012 }
29013
29014 /* Write function epilogue. */
29015
29016 static void
29017 rs6000_output_function_epilogue (FILE *file)
29018 {
29019 #if TARGET_MACHO
29020 macho_branch_islands ();
29021
29022 {
29023 rtx_insn *insn = get_last_insn ();
29024 rtx_insn *deleted_debug_label = NULL;
29025
29026 /* Mach-O doesn't support labels at the end of objects, so if
29027 it looks like we might want one, take special action.
29028
29029 First, collect any sequence of deleted debug labels. */
29030 while (insn
29031 && NOTE_P (insn)
29032 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
29033 {
29034 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
29035 notes only, instead set their CODE_LABEL_NUMBER to -1,
29036 otherwise there would be code generation differences
29037 between -g and -g0.  */
29038 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29039 deleted_debug_label = insn;
29040 insn = PREV_INSN (insn);
29041 }
29042
29043 /* Second, if we have:
29044 label:
29045 barrier
29046 then this needs to be detected, so skip past the barrier. */
29047
29048 if (insn && BARRIER_P (insn))
29049 insn = PREV_INSN (insn);
29050
29051 /* Up to now we've only seen notes or barriers. */
29052 if (insn)
29053 {
29054 if (LABEL_P (insn)
29055 || (NOTE_P (insn)
29056 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
29057 /* Trailing label: <barrier>. */
29058 fputs ("\tnop\n", file);
29059 else
29060 {
29061 /* Lastly, see if we have a completely empty function body. */
29062 while (insn && ! INSN_P (insn))
29063 insn = PREV_INSN (insn);
29064 /* If we don't find any insns, we've got an empty function body;
29065 i.e. completely empty, without a return or branch.  This is
29066 taken as the case where a function body has been removed
29067 because it contains an inline __builtin_unreachable(). GCC
29068 states that reaching __builtin_unreachable() means UB so we're
29069 not obliged to do anything special; however, we want
29070 non-zero-sized function bodies. To meet this, and help the
29071 user out, let's trap the case. */
29072 if (insn == NULL)
29073 fputs ("\ttrap\n", file);
29074 }
29075 }
29076 else if (deleted_debug_label)
29077 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
29078 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29079 CODE_LABEL_NUMBER (insn) = -1;
29080 }
29081 #endif
29082
29083 /* Output a traceback table here. See /usr/include/sys/debug.h for info
29084 on its format.
29085
29086 We don't output a traceback table if -finhibit-size-directive was
29087 used. The documentation for -finhibit-size-directive reads
29088 ``don't output a @code{.size} assembler directive, or anything
29089 else that would cause trouble if the function is split in the
29090 middle, and the two halves are placed at locations far apart in
29091 memory.'' The traceback table has this property, since it
29092 includes the offset from the start of the function to the
29093 traceback table itself.
29094
29095 System V.4 PowerPC (and the embedded ABI derived from it) uses a
29096 different traceback table. */
29097 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29098 && ! flag_inhibit_size_directive
29099 && rs6000_traceback != traceback_none && !cfun->is_thunk)
29100 {
29101 const char *fname = NULL;
29102 const char *language_string = lang_hooks.name;
29103 int fixed_parms = 0, float_parms = 0, parm_info = 0;
29104 int i;
29105 int optional_tbtab;
29106 rs6000_stack_t *info = rs6000_stack_info ();
29107
29108 if (rs6000_traceback == traceback_full)
29109 optional_tbtab = 1;
29110 else if (rs6000_traceback == traceback_part)
29111 optional_tbtab = 0;
29112 else
29113 optional_tbtab = !optimize_size && !TARGET_ELF;
29114
29115 if (optional_tbtab)
29116 {
29117 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
29118 while (*fname == '.') /* V.4 encodes . in the name */
29119 fname++;
29120
29121 /* Need label immediately before tbtab, so we can compute
29122 its offset from the function start. */
29123 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29124 ASM_OUTPUT_LABEL (file, fname);
29125 }
29126
29127 /* The .tbtab pseudo-op can only be used for the first eight
29128 expressions, since it can't handle the possibly variable
29129 length fields that follow. However, if you omit the optional
29130 fields, the assembler outputs zeros for all optional fields
29131 anyway, giving each variable-length field its minimum length
29132 (as defined in sys/debug.h). Thus we cannot use the .tbtab
29133 pseudo-op at all. */
29134
29135 /* An all-zero word flags the start of the tbtab, for debuggers
29136 that have to find it by searching forward from the entry
29137 point or from the current pc. */
29138 fputs ("\t.long 0\n", file);
29139
29140 /* Tbtab format type. Use format type 0. */
29141 fputs ("\t.byte 0,", file);
29142
29143 /* Language type. Unfortunately, there does not seem to be any
29144 official way to discover the language being compiled, so we
29145 use language_string.
29146 C is 0. Fortran is 1. Ada is 3. C++ is 9.
29147 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29148 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
29149 numbers either, so for now use 0. */
29150 if (lang_GNU_C ()
29151 || ! strcmp (language_string, "GNU GIMPLE")
29152 || ! strcmp (language_string, "GNU Go")
29153 || ! strcmp (language_string, "GNU D")
29154 || ! strcmp (language_string, "libgccjit"))
29155 i = 0;
29156 else if (! strcmp (language_string, "GNU F77")
29157 || lang_GNU_Fortran ())
29158 i = 1;
29159 else if (! strcmp (language_string, "GNU Ada"))
29160 i = 3;
29161 else if (lang_GNU_CXX ()
29162 || ! strcmp (language_string, "GNU Objective-C++"))
29163 i = 9;
29164 else if (! strcmp (language_string, "GNU Java"))
29165 i = 13;
29166 else if (! strcmp (language_string, "GNU Objective-C"))
29167 i = 14;
29168 else
29169 gcc_unreachable ();
29170 fprintf (file, "%d,", i);
29171
29172 /* 8 single bit fields: global linkage (not set for C extern linkage,
29173 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29174 from start of procedure stored in tbtab, internal function, function
29175 has controlled storage, function has no toc, function uses fp,
29176 function logs/aborts fp operations. */
29177 /* Assume that fp operations are used if any fp reg must be saved. */
29178 fprintf (file, "%d,",
29179 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
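/* Illustrative values, not from the original source: with a full
   traceback table (optional_tbtab == 1) and at least one FP register
   saved, this emits (1 << 5) | (1 << 1), i.e. 34.  */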
29180
29181 /* 6 bitfields: function is interrupt handler, name present in
29182 proc table, function calls alloca, on condition directives
29183 (controls stack walks, 3 bits), saves condition reg, saves
29184 link reg. */
29185 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29186 set up as a frame pointer, even when there is no alloca call. */
29187 fprintf (file, "%d,",
29188 ((optional_tbtab << 6)
29189 | ((optional_tbtab & frame_pointer_needed) << 5)
29190 | (info->cr_save_p << 1)
29191 | (info->lr_save_p)));
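/* As a worked example (hypothetical function): optional_tbtab set,
   no frame pointer, CR and LR both saved gives
   (1 << 6) | (1 << 1) | 1, i.e. 67.  */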
29192
29193 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29194 (6 bits). */
29195 fprintf (file, "%d,",
29196 (info->push_p << 7) | (64 - info->first_fp_reg_save));
29197
29198 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29199 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29200
29201 if (optional_tbtab)
29202 {
29203 /* Compute the parameter info from the function decl argument
29204 list. */
29205 tree decl;
29206 int next_parm_info_bit = 31;
29207
29208 for (decl = DECL_ARGUMENTS (current_function_decl);
29209 decl; decl = DECL_CHAIN (decl))
29210 {
29211 rtx parameter = DECL_INCOMING_RTL (decl);
29212 machine_mode mode = GET_MODE (parameter);
29213
29214 if (REG_P (parameter))
29215 {
29216 if (SCALAR_FLOAT_MODE_P (mode))
29217 {
29218 int bits;
29219
29220 float_parms++;
29221
29222 switch (mode)
29223 {
29224 case E_SFmode:
29225 case E_SDmode:
29226 bits = 0x2;
29227 break;
29228
29229 case E_DFmode:
29230 case E_DDmode:
29231 case E_TFmode:
29232 case E_TDmode:
29233 case E_IFmode:
29234 case E_KFmode:
29235 bits = 0x3;
29236 break;
29237
29238 default:
29239 gcc_unreachable ();
29240 }
29241
29242 /* If only one bit will fit, don't or in this entry. */
29243 if (next_parm_info_bit > 0)
29244 parm_info |= (bits << (next_parm_info_bit - 1));
29245 next_parm_info_bit -= 2;
29246 }
29247 else
29248 {
29249 fixed_parms += ((GET_MODE_SIZE (mode)
29250 + (UNITS_PER_WORD - 1))
29251 / UNITS_PER_WORD);
29252 next_parm_info_bit -= 1;
29253 }
29254 }
29255 }
29256 }
29257
29258 /* Number of fixed point parameters. */
29259 /* This is actually the number of words of fixed point parameters; thus
29260 an 8-byte struct counts as 2, and thus the maximum value is 8.  */
29261 fprintf (file, "%d,", fixed_parms);
29262
29263 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29264 all on stack. */
29265 /* This is actually the number of fp registers that hold parameters,
29266 and thus the maximum value is 13. */
29267 /* Set parameters on stack bit if parameters are not in their original
29268 registers, regardless of whether they are on the stack? Xlc
29269 seems to set the bit when not optimizing. */
29270 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29271
29272 if (optional_tbtab)
29273 {
29274 /* Optional fields follow. Some are variable length. */
29275
29276 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29277 float, 11 double float. */
29278 /* There is an entry for each parameter in a register, in the order
29279 that they occur in the parameter list. Any intervening arguments
29280 on the stack are ignored. If the list overflows a long (max
29281 possible length 34 bits) then completely leave off all elements
29282 that don't fit. */
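/* As an illustration (not part of the original comment): a function
   taking (int, double) encodes the int as a single 0 bit at bit 31
   and the double as 11 in the next two bits, giving
   parm_info == 0x60000000.  */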
29283 /* Only emit this long if there was at least one parameter. */
29284 if (fixed_parms || float_parms)
29285 fprintf (file, "\t.long %d\n", parm_info);
29286
29287 /* Offset from start of code to tb table. */
29288 fputs ("\t.long ", file);
29289 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29290 RS6000_OUTPUT_BASENAME (file, fname);
29291 putc ('-', file);
29292 rs6000_output_function_entry (file, fname);
29293 putc ('\n', file);
29294
29295 /* Interrupt handler mask. */
29296 /* Omit this long, since we never set the interrupt handler bit
29297 above. */
29298
29299 /* Number of CTL (controlled storage) anchors. */
29300 /* Omit this long, since the has_ctl bit is never set above. */
29301
29302 /* Displacement into stack of each CTL anchor. */
29303 /* Omit this list of longs, because there are no CTL anchors. */
29304
29305 /* Length of function name. */
29306 if (*fname == '*')
29307 ++fname;
29308 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29309
29310 /* Function name. */
29311 assemble_string (fname, strlen (fname));
29312
29313 /* Register for alloca automatic storage; this is always reg 31.
29314 Only emit this if the alloca bit was set above. */
29315 if (frame_pointer_needed)
29316 fputs ("\t.byte 31\n", file);
29317
29318 fputs ("\t.align 2\n", file);
29319 }
29320 }
29321
29322 /* Arrange to define .LCTOC1 label, if not already done. */
29323 if (need_toc_init)
29324 {
29325 need_toc_init = 0;
29326 if (!toc_initialized)
29327 {
29328 switch_to_section (toc_section);
29329 switch_to_section (current_function_section ());
29330 }
29331 }
29332 }
29333
29334 /* -fsplit-stack support. */
29335
29336 /* A SYMBOL_REF for __morestack. */
29337 static GTY(()) rtx morestack_ref;
29338
29339 static rtx
29340 gen_add3_const (rtx rt, rtx ra, long c)
29341 {
29342 if (TARGET_64BIT)
29343 return gen_adddi3 (rt, ra, GEN_INT (c));
29344 else
29345 return gen_addsi3 (rt, ra, GEN_INT (c));
29346 }
29347
29348 /* Emit -fsplit-stack prologue, which goes before the regular function
29349 prologue (at local entry point in the case of ELFv2). */
29350
29351 void
29352 rs6000_expand_split_stack_prologue (void)
29353 {
29354 rs6000_stack_t *info = rs6000_stack_info ();
29355 unsigned HOST_WIDE_INT allocate;
29356 long alloc_hi, alloc_lo;
29357 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29358 rtx_insn *insn;
29359
29360 gcc_assert (flag_split_stack && reload_completed);
29361
29362 if (!info->push_p)
29363 return;
29364
29365 if (global_regs[29])
29366 {
29367 error ("%qs uses register r29", "%<-fsplit-stack%>");
29368 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29369 "conflicts with %qD", global_regs_decl[29]);
29370 }
29371
29372 allocate = info->total_size;
29373 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29374 {
29375 sorry ("stack frame larger than 2G is not supported for "
29376 "%<-fsplit-stack%>");
29377 return;
29378 }
29379 if (morestack_ref == NULL_RTX)
29380 {
29381 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29382 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29383 | SYMBOL_FLAG_FUNCTION);
29384 }
29385
29386 r0 = gen_rtx_REG (Pmode, 0);
29387 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29388 r12 = gen_rtx_REG (Pmode, 12);
29389 emit_insn (gen_load_split_stack_limit (r0));
29390 /* Always emit two insns here to calculate the requested stack pointer,
29391 so that the linker can edit them when adjusting size for calling
29392 non-split-stack code. */
29393 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29394 alloc_lo = -allocate - alloc_hi;
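/* The two halves reconstruct -allocate: alloc_hi is -allocate rounded
   to the nearest multiple of 0x10000 and alloc_lo is the signed 16-bit
   remainder, so alloc_hi + alloc_lo == -allocate always holds.  For
   example (illustrative, not from the original source), allocate ==
   0x12345 gives alloc_hi == -0x10000 and alloc_lo == -0x2345, each of
   which fits the addis/addi immediate fields.  */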
29395 if (alloc_hi != 0)
29396 {
29397 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29398 if (alloc_lo != 0)
29399 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29400 else
29401 emit_insn (gen_nop ());
29402 }
29403 else
29404 {
29405 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29406 emit_insn (gen_nop ());
29407 }
29408
29409 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29410 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29411 ok_label = gen_label_rtx ();
29412 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29413 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29414 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29415 pc_rtx);
29416 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29417 JUMP_LABEL (insn) = ok_label;
29418 /* Mark the jump as very likely to be taken. */
29419 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29420
29421 lr = gen_rtx_REG (Pmode, LR_REGNO);
29422 insn = emit_move_insn (r0, lr);
29423 RTX_FRAME_RELATED_P (insn) = 1;
29424 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29425 RTX_FRAME_RELATED_P (insn) = 1;
29426
29427 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29428 const0_rtx, const0_rtx));
29429 call_fusage = NULL_RTX;
29430 use_reg (&call_fusage, r12);
29431 /* Say the call uses r0, even though it doesn't, to stop regrename
29432 from twiddling with the insns saving lr, trashing args for cfun.
29433 The insns restoring lr are similarly protected by making
29434 split_stack_return use r0. */
29435 use_reg (&call_fusage, r0);
29436 add_function_usage_to (insn, call_fusage);
29437 /* Indicate that this function can't jump to non-local gotos. */
29438 make_reg_eh_region_note_nothrow_nononlocal (insn);
29439 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29440 insn = emit_move_insn (lr, r0);
29441 add_reg_note (insn, REG_CFA_RESTORE, lr);
29442 RTX_FRAME_RELATED_P (insn) = 1;
29443 emit_insn (gen_split_stack_return ());
29444
29445 emit_label (ok_label);
29446 LABEL_NUSES (ok_label) = 1;
29447 }
29448
29449 /* Return the internal arg pointer used for function incoming
29450 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29451 to copy it to a pseudo in order for it to be preserved over calls
29452 and suchlike. We'd really like to use a pseudo here for the
29453 internal arg pointer but data-flow analysis is not prepared to
29454 accept pseudos as live at the beginning of a function. */
29455
29456 static rtx
29457 rs6000_internal_arg_pointer (void)
29458 {
29459 if (flag_split_stack
29460 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29461 == NULL))
29463 {
29464 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29465 {
29466 rtx pat;
29467
29468 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29469 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29470
29471 /* Put the pseudo initialization right after the note at the
29472 beginning of the function. */
29473 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29474 gen_rtx_REG (Pmode, 12));
29475 push_topmost_sequence ();
29476 emit_insn_after (pat, get_insns ());
29477 pop_topmost_sequence ();
29478 }
29479 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29480 FIRST_PARM_OFFSET (current_function_decl));
29481 return copy_to_reg (ret);
29482 }
29483 return virtual_incoming_args_rtx;
29484 }
29485
29486 /* We may have to tell the dataflow pass that the split stack prologue
29487 is initializing a register. */
29488
29489 static void
29490 rs6000_live_on_entry (bitmap regs)
29491 {
29492 if (flag_split_stack)
29493 bitmap_set_bit (regs, 12);
29494 }
29495
29496 /* Emit -fsplit-stack dynamic stack allocation space check. */
29497
29498 void
29499 rs6000_split_stack_space_check (rtx size, rtx label)
29500 {
29501 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29502 rtx limit = gen_reg_rtx (Pmode);
29503 rtx requested = gen_reg_rtx (Pmode);
29504 rtx cmp = gen_reg_rtx (CCUNSmode);
29505 rtx jump;
29506
29507 emit_insn (gen_load_split_stack_limit (limit));
29508 if (CONST_INT_P (size))
29509 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29510 else
29511 {
29512 size = force_reg (Pmode, size);
29513 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29514 }
29515 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29516 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29517 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29518 gen_rtx_LABEL_REF (VOIDmode, label),
29519 pc_rtx);
29520 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29521 JUMP_LABEL (jump) = label;
29522 }
29523 \f
29524 /* A C compound statement that outputs the assembler code for a thunk
29525 function, used to implement C++ virtual function calls with
29526 multiple inheritance. The thunk acts as a wrapper around a virtual
29527 function, adjusting the implicit object parameter before handing
29528 control off to the real function.
29529
29530 First, emit code to add the integer DELTA to the location that
29531 contains the incoming first argument. Assume that this argument
29532 contains a pointer, and is the one used to pass the `this' pointer
29533 in C++. This is the incoming argument *before* the function
29534 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29535 values of all other incoming arguments.
29536
29537 After the addition, emit code to jump to FUNCTION, which is a
29538 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29539 not touch the return address. Hence returning from FUNCTION will
29540 return to whoever called the current `thunk'.
29541
29542 The effect must be as if FUNCTION had been called directly with the
29543 adjusted first argument. This macro is responsible for emitting
29544 all of the code for a thunk function; output_function_prologue()
29545 and output_function_epilogue() are not invoked.
29546
29547 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29548 been extracted from it.) It might possibly be useful on some
29549 targets, but probably not.
29550
29551 If you do not define this macro, the target-independent code in the
29552 C++ frontend will generate a less efficient heavyweight thunk that
29553 calls FUNCTION instead of jumping to it. The generic approach does
29554 not support varargs. */
29555
29556 static void
29557 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29558 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29559 tree function)
29560 {
29561 rtx this_rtx, funexp;
29562 rtx_insn *insn;
29563
29564 reload_completed = 1;
29565 epilogue_completed = 1;
29566
29567 /* Mark the end of the (empty) prologue. */
29568 emit_note (NOTE_INSN_PROLOGUE_END);
29569
29570 /* Find the "this" pointer. If the function returns a structure,
29571 the structure return pointer is in r3. */
29572 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29573 this_rtx = gen_rtx_REG (Pmode, 4);
29574 else
29575 this_rtx = gen_rtx_REG (Pmode, 3);
29576
29577 /* Apply the constant offset, if required. */
29578 if (delta)
29579 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29580
29581 /* Apply the offset from the vtable, if required. */
29582 if (vcall_offset)
29583 {
29584 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29585 rtx tmp = gen_rtx_REG (Pmode, 12);
29586
29587 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29588 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29589 {
29590 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29591 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29592 }
29593 else
29594 {
29595 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29596
29597 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29598 }
29599 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29600 }
29601
29602 /* Generate a tail call to the target function. */
29603 if (!TREE_USED (function))
29604 {
29605 assemble_external (function);
29606 TREE_USED (function) = 1;
29607 }
29608 funexp = XEXP (DECL_RTL (function), 0);
29609 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29610
29611 #if TARGET_MACHO
29612 if (MACHOPIC_INDIRECT)
29613 funexp = machopic_indirect_call_target (funexp);
29614 #endif
29615
29616 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29617 generate sibcall RTL explicitly. */
29618 insn = emit_call_insn (
29619 gen_rtx_PARALLEL (VOIDmode,
29620 gen_rtvec (3,
29621 gen_rtx_CALL (VOIDmode,
29622 funexp, const0_rtx),
29623 gen_rtx_USE (VOIDmode, const0_rtx),
29624 simple_return_rtx)));
29625 SIBLING_CALL_P (insn) = 1;
29626 emit_barrier ();
29627
29628 /* Run just enough of rest_of_compilation to get the insns emitted.
29629 There's not really enough bulk here to make other passes such as
29630 instruction scheduling worth while. Note that use_thunk calls
29631 assemble_start_function and assemble_end_function. */
29632 insn = get_insns ();
29633 shorten_branches (insn);
29634 final_start_function (insn, file, 1);
29635 final (insn, file, 1);
29636 final_end_function ();
29637
29638 reload_completed = 0;
29639 epilogue_completed = 0;
29640 }
29641 \f
29642 /* A quick summary of the various types of 'constant-pool tables'
29643 under PowerPC:
29644
29645    Target       Flags           Name             One table per
29646    AIX          (none)          AIX TOC          object file
29647    AIX          -mfull-toc      AIX TOC          object file
29648    AIX          -mminimal-toc   AIX minimal TOC  translation unit
29649    SVR4/EABI    (none)          SVR4 SDATA       object file
29650    SVR4/EABI    -fpic           SVR4 pic         object file
29651    SVR4/EABI    -fPIC           SVR4 PIC         translation unit
29652    SVR4/EABI    -mrelocatable   EABI TOC         function
29653    SVR4/EABI    -maix           AIX TOC          object file
29654    SVR4/EABI    -maix -mminimal-toc
29655                                 AIX minimal TOC  translation unit
29656
29657    Name             Reg.  Set by   entries  contains:
29658                           made by  addrs?   fp?      sum?
29659
29660    AIX TOC          2     crt0     as       Y        option   option
29661    AIX minimal TOC  30    prolog   gcc      Y        Y        option
29662    SVR4 SDATA       13    crt0     gcc      N        Y        N
29663    SVR4 pic         30    prolog   ld       Y        not yet  N
29664    SVR4 PIC         30    prolog   gcc      Y        option   option
29665    EABI TOC         30    prolog   gcc      Y        option   option
29666
29667 */
29668
29669 /* Hash functions for the hash table. */
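/* rs6000_hash_constant below folds every field of the rtx into the
   running value using the odd primes 613 and 1231 as multipliers;
   mixing through primes keeps structurally different constants from
   hashing alike (a heuristic, not a guarantee).  */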
29670
29671 static unsigned
29672 rs6000_hash_constant (rtx k)
29673 {
29674 enum rtx_code code = GET_CODE (k);
29675 machine_mode mode = GET_MODE (k);
29676 unsigned result = (code << 3) ^ mode;
29677 const char *format;
29678 int flen, fidx;
29679
29680 format = GET_RTX_FORMAT (code);
29681 flen = strlen (format);
29682 fidx = 0;
29683
29684 switch (code)
29685 {
29686 case LABEL_REF:
29687 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29688
29689 case CONST_WIDE_INT:
29690 {
29691 int i;
29692 flen = CONST_WIDE_INT_NUNITS (k);
29693 for (i = 0; i < flen; i++)
29694 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29695 return result;
29696 }
29697
29698 case CONST_DOUBLE:
29699 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29700
29701 case CODE_LABEL:
29702 fidx = 3;
29703 break;
29704
29705 default:
29706 break;
29707 }
29708
29709 for (; fidx < flen; fidx++)
29710 switch (format[fidx])
29711 {
29712 case 's':
29713 {
29714 unsigned i, len;
29715 const char *str = XSTR (k, fidx);
29716 len = strlen (str);
29717 result = result * 613 + len;
29718 for (i = 0; i < len; i++)
29719 result = result * 613 + (unsigned) str[i];
29720 break;
29721 }
29722 case 'u':
29723 case 'e':
29724 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29725 break;
29726 case 'i':
29727 case 'n':
29728 result = result * 613 + (unsigned) XINT (k, fidx);
29729 break;
29730 case 'w':
29731 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29732 result = result * 613 + (unsigned) XWINT (k, fidx);
29733 else
29734 {
29735 size_t i;
29736 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29737 result = result * 613 + (unsigned) (XWINT (k, fidx)
29738 >> CHAR_BIT * i);
29739 }
29740 break;
29741 case '0':
29742 break;
29743 default:
29744 gcc_unreachable ();
29745 }
29746
29747 return result;
29748 }
29749
29750 hashval_t
29751 toc_hasher::hash (toc_hash_struct *thc)
29752 {
29753 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29754 }
29755
29756 /* Compare H1 and H2 for equivalence. */
29757
29758 bool
29759 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29760 {
29761 rtx r1 = h1->key;
29762 rtx r2 = h2->key;
29763
29764 if (h1->key_mode != h2->key_mode)
29765 return 0;
29766
29767 return rtx_equal_p (r1, r2);
29768 }
29769
29770 /* These are the names given by the C++ front-end to vtables, and
29771 vtable-like objects. Ideally, this logic should not be here;
29772 instead, there should be some programmatic way of inquiring as
29773 to whether or not an object is a vtable. */
29774
29775 #define VTABLE_NAME_P(NAME) \
29776 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29777 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29778 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29779 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29780 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
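/* The _ZT* prefixes above are the Itanium C++ ABI manglings: _ZTV for
   vtables, _ZTT for VTT structures, _ZTI for typeinfo objects and
   _ZTC for construction vtables; _vt. is the old g++ v2 encoding.  */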
29781
29782 #ifdef NO_DOLLAR_IN_LABEL
29783 /* Return a GGC-allocated character string translating dollar signs in
29784 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
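/* For example, "foo$bar$baz" is returned as "foo_bar_baz"; a name
   containing no '$', or one starting with '$', is returned unchanged.  */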
29785
29786 const char *
29787 rs6000_xcoff_strip_dollar (const char *name)
29788 {
29789 char *strip, *p;
29790 const char *q;
29791 size_t len;
29792
29793 q = (const char *) strchr (name, '$');
29794
29795 if (q == 0 || q == name)
29796 return name;
29797
29798 len = strlen (name);
29799 strip = XALLOCAVEC (char, len + 1);
29800 strcpy (strip, name);
29801 p = strip + (q - name);
29802 while (p)
29803 {
29804 *p = '_';
29805 p = strchr (p + 1, '$');
29806 }
29807
29808 return ggc_alloc_string (strip, len);
29809 }
29810 #endif
29811
29812 void
29813 rs6000_output_symbol_ref (FILE *file, rtx x)
29814 {
29815 const char *name = XSTR (x, 0);
29816
29817 /* Currently C++ TOC references to vtables can be emitted before it
29818 is decided whether the vtable is public or private. If this is
29819 the case, then the linker will eventually complain that there is
29820 a reference to an unknown section. Thus, for vtables only,
29821 we emit the TOC reference to reference the identifier and not the
29822 symbol. */
29823 if (VTABLE_NAME_P (name))
29824 {
29825 RS6000_OUTPUT_BASENAME (file, name);
29826 }
29827 else
29828 assemble_name (file, name);
29829 }
29830
29831 /* Output a TOC entry. We derive the entry name from what is being
29832 written. */
29833
29834 void
29835 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29836 {
29837 char buf[256];
29838 const char *name = buf;
29839 rtx base = x;
29840 HOST_WIDE_INT offset = 0;
29841
29842 gcc_assert (!TARGET_NO_TOC);
29843
29844 /* When the linker won't eliminate them, don't output duplicate
29845 TOC entries (this happens on AIX if there is any kind of TOC,
29846 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29847 CODE_LABELs. */
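/* When a duplicate is found below we only emit an alias, something
   like ".set .LC5,.LC2" on ELF targets, rather than a second entry.  */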
29848 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29849 {
29850 struct toc_hash_struct *h;
29851
29852 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29853 time because GGC is not initialized at that point. */
29854 if (toc_hash_table == NULL)
29855 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29856
29857 h = ggc_alloc<toc_hash_struct> ();
29858 h->key = x;
29859 h->key_mode = mode;
29860 h->labelno = labelno;
29861
29862 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29863 if (*found == NULL)
29864 *found = h;
29865 else /* This is indeed a duplicate.
29866 Set this label equal to that label. */
29867 {
29868 fputs ("\t.set ", file);
29869 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29870 fprintf (file, "%d,", labelno);
29871 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29872 fprintf (file, "%d\n", ((*found)->labelno));
29873
29874 #ifdef HAVE_AS_TLS
29875 if (TARGET_XCOFF && SYMBOL_REF_P (x)
29876 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29877 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29878 {
29879 fputs ("\t.set ", file);
29880 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29881 fprintf (file, "%d,", labelno);
29882 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29883 fprintf (file, "%d\n", ((*found)->labelno));
29884 }
29885 #endif
29886 return;
29887 }
29888 }
29889
29890 /* If we're going to put a double constant in the TOC, make sure it's
29891 aligned properly when strict alignment is on. */
29892 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29893 && STRICT_ALIGNMENT
29894 && GET_MODE_BITSIZE (mode) >= 64
29895 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29896 ASM_OUTPUT_ALIGN (file, 3);
29898
29899 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29900
29901 /* Handle FP constants specially. Note that if we have a minimal
29902 TOC, things we put here aren't actually in the TOC, so we can allow
29903 FP constants. */
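/* A 128-bit constant produces four 32-bit words in K; on 64-bit
   targets they are emitted pairwise as two doublewords (via
   DOUBLE_INT_ASM_OP when TARGET_ELF or TARGET_MINIMAL_TOC), and on
   32-bit targets as four .long values.  */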
29904 if (CONST_DOUBLE_P (x)
29905 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29906 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29907 {
29908 long k[4];
29909
29910 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29911 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29912 else
29913 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29914
29915 if (TARGET_64BIT)
29916 {
29917 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29918 fputs (DOUBLE_INT_ASM_OP, file);
29919 else
29920 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29921 k[0] & 0xffffffff, k[1] & 0xffffffff,
29922 k[2] & 0xffffffff, k[3] & 0xffffffff);
29923 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29924 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29925 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29926 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29927 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29928 return;
29929 }
29930 else
29931 {
29932 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29933 fputs ("\t.long ", file);
29934 else
29935 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29936 k[0] & 0xffffffff, k[1] & 0xffffffff,
29937 k[2] & 0xffffffff, k[3] & 0xffffffff);
29938 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29939 k[0] & 0xffffffff, k[1] & 0xffffffff,
29940 k[2] & 0xffffffff, k[3] & 0xffffffff);
29941 return;
29942 }
29943 }
29944 else if (CONST_DOUBLE_P (x)
29945 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29946 {
29947 long k[2];
29948
29949 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29950 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29951 else
29952 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29953
29954 if (TARGET_64BIT)
29955 {
29956 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29957 fputs (DOUBLE_INT_ASM_OP, file);
29958 else
29959 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29960 k[0] & 0xffffffff, k[1] & 0xffffffff);
29961 fprintf (file, "0x%lx%08lx\n",
29962 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29963 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29964 return;
29965 }
29966 else
29967 {
29968 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29969 fputs ("\t.long ", file);
29970 else
29971 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29972 k[0] & 0xffffffff, k[1] & 0xffffffff);
29973 fprintf (file, "0x%lx,0x%lx\n",
29974 k[0] & 0xffffffff, k[1] & 0xffffffff);
29975 return;
29976 }
29977 }
29978 else if (CONST_DOUBLE_P (x)
29979 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29980 {
29981 long l;
29982
29983 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29984 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29985 else
29986 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29987
29988 if (TARGET_64BIT)
29989 {
29990 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29991 fputs (DOUBLE_INT_ASM_OP, file);
29992 else
29993 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29994 if (WORDS_BIG_ENDIAN)
29995 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29996 else
29997 fprintf (file, "0x%lx\n", l & 0xffffffff);
29998 return;
29999 }
30000 else
30001 {
30002 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30003 fputs ("\t.long ", file);
30004 else
30005 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
30006 fprintf (file, "0x%lx\n", l & 0xffffffff);
30007 return;
30008 }
30009 }
30010 else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
30011 {
30012 unsigned HOST_WIDE_INT low;
30013 HOST_WIDE_INT high;
30014
30015 low = INTVAL (x) & 0xffffffff;
30016 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
30017
30018 /* TOC entries are always Pmode-sized, so on big-endian targets
30019 smaller integer constants in the TOC need to be padded.
30020 (This is still a win over putting the constants in
30021 a separate constant pool, because then we'd have
30022 to have both a TOC entry _and_ the actual constant.)
30023
30024 For a 32-bit target, CONST_INT values are loaded and shifted
30025 entirely within `low' and can be stored in one TOC entry. */
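/* Worked example (illustrative): on a 64-bit big-endian target an
   SImode entry of 0x12345678 is shifted left 32 bits, so the
   doubleword written to the TOC is 0x1234567800000000.  */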
30026
30027 /* It would be easy to make this work, but it doesn't now. */
30028 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
30029
30030 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
30031 {
30032 low |= high << 32;
30033 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
30034 high = (HOST_WIDE_INT) low >> 32;
30035 low &= 0xffffffff;
30036 }
30037
30038 if (TARGET_64BIT)
30039 {
30040 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30041 fputs (DOUBLE_INT_ASM_OP, file);
30042 else
30043 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30044 (long) high & 0xffffffff, (long) low & 0xffffffff);
30045 fprintf (file, "0x%lx%08lx\n",
30046 (long) high & 0xffffffff, (long) low & 0xffffffff);
30047 return;
30048 }
30049 else
30050 {
30051 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
30052 {
30053 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30054 fputs ("\t.long ", file);
30055 else
30056 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30057 (long) high & 0xffffffff, (long) low & 0xffffffff);
30058 fprintf (file, "0x%lx,0x%lx\n",
30059 (long) high & 0xffffffff, (long) low & 0xffffffff);
30060 }
30061 else
30062 {
30063 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30064 fputs ("\t.long ", file);
30065 else
30066 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
30067 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
30068 }
30069 return;
30070 }
30071 }
30072
30073 if (GET_CODE (x) == CONST)
30074 {
30075 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
30076 && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
30077
30078 base = XEXP (XEXP (x, 0), 0);
30079 offset = INTVAL (XEXP (XEXP (x, 0), 1));
30080 }
30081
30082 switch (GET_CODE (base))
30083 {
30084 case SYMBOL_REF:
30085 name = XSTR (base, 0);
30086 break;
30087
30088 case LABEL_REF:
30089 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
30090 CODE_LABEL_NUMBER (XEXP (base, 0)));
30091 break;
30092
30093 case CODE_LABEL:
30094 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
30095 break;
30096
30097 default:
30098 gcc_unreachable ();
30099 }
30100
30101 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30102 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
30103 else
30104 {
30105 fputs ("\t.tc ", file);
30106 RS6000_OUTPUT_BASENAME (file, name);
30107
30108 if (offset < 0)
30109 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
30110 else if (offset)
30111 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
30112
30113 /* Mark large TOC symbols on AIX with [TE] so they are mapped
30114 after other TOC symbols, reducing overflow of small TOC access
30115 to [TC] symbols. */
30116 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
30117 ? "[TE]," : "[TC],", file);
30118 }
30119
30120 /* Currently C++ TOC references to vtables can be emitted before it
30121 is decided whether the vtable is public or private. If this is
30122 the case, then the linker will eventually complain that there is
30123 a TOC reference to an unknown section. Thus, for vtables only,
30124 we emit the TOC reference to reference the symbol and not the
30125 section. */
30126 if (VTABLE_NAME_P (name))
30127 {
30128 RS6000_OUTPUT_BASENAME (file, name);
30129 if (offset < 0)
30130 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
30131 else if (offset > 0)
30132 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
30133 }
30134 else
30135 output_addr_const (file, x);
30136
30137 #if HAVE_AS_TLS
30138 if (TARGET_XCOFF && SYMBOL_REF_P (base))
30139 {
30140 switch (SYMBOL_REF_TLS_MODEL (base))
30141 {
30142 case 0:
30143 break;
30144 case TLS_MODEL_LOCAL_EXEC:
30145 fputs ("@le", file);
30146 break;
30147 case TLS_MODEL_INITIAL_EXEC:
30148 fputs ("@ie", file);
30149 break;
30150 /* Use global-dynamic for local-dynamic. */
30151 case TLS_MODEL_GLOBAL_DYNAMIC:
30152 case TLS_MODEL_LOCAL_DYNAMIC:
30153 putc ('\n', file);
30154 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30155 fputs ("\t.tc .", file);
30156 RS6000_OUTPUT_BASENAME (file, name);
30157 fputs ("[TC],", file);
30158 output_addr_const (file, x);
30159 fputs ("@m", file);
30160 break;
30161 default:
30162 gcc_unreachable ();
30163 }
30164 }
30165 #endif
30166
30167 putc ('\n', file);
30168 }
30169 \f
30170 /* Output an assembler pseudo-op to write an ASCII string of N characters
30171 starting at P to FILE.
30172
30173 On the RS/6000, we have to do this using the .byte operation and
30174 write out special characters outside the quoted string.
30175 Also, the assembler is broken; very long strings are truncated,
30176 so we must artificially break them up early. */
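/* For example (illustrative call): output_ascii (file, "hi\n", 3)
   emits

	.byte "hi"
	.byte 10

   printable runs go inside one quoted .byte string and everything
   else is written as decimal byte values.  */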
30177
30178 void
30179 output_ascii (FILE *file, const char *p, int n)
30180 {
30181 char c;
30182 int i, count_string;
30183 const char *for_string = "\t.byte \"";
30184 const char *for_decimal = "\t.byte ";
30185 const char *to_close = NULL;
30186
30187 count_string = 0;
30188 for (i = 0; i < n; i++)
30189 {
30190 c = *p++;
30191 if (c >= ' ' && c < 0177)
30192 {
30193 if (for_string)
30194 fputs (for_string, file);
30195 putc (c, file);
30196
30197 /* Write two quotes to get one. */
30198 if (c == '"')
30199 {
30200 putc (c, file);
30201 ++count_string;
30202 }
30203
30204 for_string = NULL;
30205 for_decimal = "\"\n\t.byte ";
30206 to_close = "\"\n";
30207 ++count_string;
30208
30209 if (count_string >= 512)
30210 {
30211 fputs (to_close, file);
30212
30213 for_string = "\t.byte \"";
30214 for_decimal = "\t.byte ";
30215 to_close = NULL;
30216 count_string = 0;
30217 }
30218 }
30219 else
30220 {
30221 if (for_decimal)
30222 fputs (for_decimal, file);
30223 fprintf (file, "%d", c);
30224
30225 for_string = "\n\t.byte \"";
30226 for_decimal = ", ";
30227 to_close = "\n";
30228 count_string = 0;
30229 }
30230 }
30231
30232 /* Now close the string if we have written one. Then end the line. */
30233 if (to_close)
30234 fputs (to_close, file);
30235 }
30236 \f
30237 /* Generate a unique section name for FILENAME for a section type
30238 represented by SECTION_DESC. Output goes into BUF.
30239
30240 SECTION_DESC can be any string, as long as it is different for each
30241 possible section type.
30242
30243 We name the section in the same manner as xlc. The name begins with an
30244 underscore followed by the filename (after stripping any leading directory
30245 names) with the last period replaced by the string SECTION_DESC. If
30246 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30247 the name. */
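/* E.g. (hypothetical inputs): FILENAME "src/foo.c" with SECTION_DESC
   "bss" produces "_foobss"; non-alphanumeric characters other than the
   last period are simply dropped from the copied part of the name.  */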
30248
30249 void
30250 rs6000_gen_section_name (char **buf, const char *filename,
30251 const char *section_desc)
30252 {
30253 const char *q, *after_last_slash, *last_period = 0;
30254 char *p;
30255 int len;
30256
30257 after_last_slash = filename;
30258 for (q = filename; *q; q++)
30259 {
30260 if (*q == '/')
30261 after_last_slash = q + 1;
30262 else if (*q == '.')
30263 last_period = q;
30264 }
30265
30266 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30267 *buf = (char *) xmalloc (len);
30268
30269 p = *buf;
30270 *p++ = '_';
30271
30272 for (q = after_last_slash; *q; q++)
30273 {
30274 if (q == last_period)
30275 {
30276 strcpy (p, section_desc);
30277 p += strlen (section_desc);
30278 break;
30279 }
30280
30281 else if (ISALNUM (*q))
30282 *p++ = *q;
30283 }
30284
30285 if (last_period == 0)
30286 strcpy (p, section_desc);
30287 else
30288 *p = '\0';
30289 }
30290 \f
30291 /* Emit profile function. */
30292
30293 void
30294 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30295 {
30296 /* Non-standard profiling for kernels, which just saves LR then calls
30297 _mcount without worrying about arg saves. The idea is to change
30298 the function prologue as little as possible as it isn't easy to
30299 account for arg save/restore code added just for _mcount. */
30300 if (TARGET_PROFILE_KERNEL)
30301 return;
30302
30303 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30304 {
30305 #ifndef NO_PROFILE_COUNTERS
30306 # define NO_PROFILE_COUNTERS 0
30307 #endif
30308 if (NO_PROFILE_COUNTERS)
30309 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30310 LCT_NORMAL, VOIDmode);
30311 else
30312 {
30313 char buf[30];
30314 const char *label_name;
30315 rtx fun;
30316
30317 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30318 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30319 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30320
30321 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30322 LCT_NORMAL, VOIDmode, fun, Pmode);
30323 }
30324 }
30325 else if (DEFAULT_ABI == ABI_DARWIN)
30326 {
30327 const char *mcount_name = RS6000_MCOUNT;
30328 int caller_addr_regno = LR_REGNO;
30329
30330 /* Be conservative and always set this, at least for now. */
30331 crtl->uses_pic_offset_table = 1;
30332
30333 #if TARGET_MACHO
30334 /* For PIC code, set up a stub and collect the caller's address
30335 from r0, which is where the prologue puts it. */
30336 if (MACHOPIC_INDIRECT
30337 && crtl->uses_pic_offset_table)
30338 caller_addr_regno = 0;
30339 #endif
30340 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30341 LCT_NORMAL, VOIDmode,
30342 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30343 }
30344 }
30345
30346 /* Write function profiler code. */
30347
30348 void
30349 output_function_profiler (FILE *file, int labelno)
30350 {
30351 char buf[100];
30352
30353 switch (DEFAULT_ABI)
30354 {
30355 default:
30356 gcc_unreachable ();
30357
30358 case ABI_V4:
30359 if (!TARGET_32BIT)
30360 {
30361 warning (0, "no profiling of 64-bit code for this ABI");
30362 return;
30363 }
30364 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30365 fprintf (file, "\tmflr %s\n", reg_names[0]);
30366 if (NO_PROFILE_COUNTERS)
30367 {
30368 asm_fprintf (file, "\tstw %s,4(%s)\n",
30369 reg_names[0], reg_names[1]);
30370 }
30371 else if (TARGET_SECURE_PLT && flag_pic)
30372 {
30373 if (TARGET_LINK_STACK)
30374 {
30375 char name[32];
30376 get_ppc476_thunk_name (name);
30377 asm_fprintf (file, "\tbl %s\n", name);
30378 }
30379 else
30380 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30381 asm_fprintf (file, "\tstw %s,4(%s)\n",
30382 reg_names[0], reg_names[1]);
30383 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30384 asm_fprintf (file, "\taddis %s,%s,",
30385 reg_names[12], reg_names[12]);
30386 assemble_name (file, buf);
30387 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30388 assemble_name (file, buf);
30389 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30390 }
30391 else if (flag_pic == 1)
30392 {
30393 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30394 asm_fprintf (file, "\tstw %s,4(%s)\n",
30395 reg_names[0], reg_names[1]);
30396 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30397 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30398 assemble_name (file, buf);
30399 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30400 }
30401 else if (flag_pic > 1)
30402 {
30403 asm_fprintf (file, "\tstw %s,4(%s)\n",
30404 reg_names[0], reg_names[1]);
30405 /* Now, we need to get the address of the label. */
30406 if (TARGET_LINK_STACK)
30407 {
30408 char name[32];
30409 get_ppc476_thunk_name (name);
30410 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30411 assemble_name (file, buf);
30412 fputs ("-.\n1:", file);
30413 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30414 asm_fprintf (file, "\taddi %s,%s,4\n",
30415 reg_names[11], reg_names[11]);
30416 }
30417 else
30418 {
30419 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30420 assemble_name (file, buf);
30421 fputs ("-.\n1:", file);
30422 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30423 }
30424 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30425 reg_names[0], reg_names[11]);
30426 asm_fprintf (file, "\tadd %s,%s,%s\n",
30427 reg_names[0], reg_names[0], reg_names[11]);
30428 }
30429 else
30430 {
30431 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30432 assemble_name (file, buf);
30433 fputs ("@ha\n", file);
30434 asm_fprintf (file, "\tstw %s,4(%s)\n",
30435 reg_names[0], reg_names[1]);
30436 asm_fprintf (file, "\tla %s,", reg_names[0]);
30437 assemble_name (file, buf);
30438 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30439 }
30440
30441 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30442 fprintf (file, "\tbl %s%s\n",
30443 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30444 break;
30445
30446 case ABI_AIX:
30447 case ABI_ELFv2:
30448 case ABI_DARWIN:
30449 /* Don't do anything, done in output_profile_hook (). */
30450 break;
30451 }
30452 }
30453
30454 \f
30455
30456 /* The following variable value is the last issued insn. */
30457
30458 static rtx_insn *last_scheduled_insn;
30459
30460 /* The following variable helps to balance issuing of load and
30461 store instructions.  */
30462
30463 static int load_store_pendulum;
30464
30465 /* The following variable helps pair divide insns during scheduling. */
30466 static int divide_cnt;
30467 /* The following variable helps pair and alternate vector and vector load
30468 insns during scheduling. */
30469 static int vec_pairing;
30470
30471
30472 /* Power4 load update and store update instructions are cracked into a
30473 load or store and an integer insn which are executed in the same cycle.
30474 Branches have their own dispatch slot which does not count against the
30475 GCC issue rate, but it changes the program flow so there are no other
30476 instructions to issue in this cycle. */
30477
30478 static int
30479 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30480 {
30481 last_scheduled_insn = insn;
30482 if (GET_CODE (PATTERN (insn)) == USE
30483 || GET_CODE (PATTERN (insn)) == CLOBBER)
30484 {
30485 cached_can_issue_more = more;
30486 return cached_can_issue_more;
30487 }
30488
30489 if (insn_terminates_group_p (insn, current_group))
30490 {
30491 cached_can_issue_more = 0;
30492 return cached_can_issue_more;
30493 }
30494
30495 /* If the insn has no reservation but we still get here, return MORE unchanged.  */
30496 if (recog_memoized (insn) < 0)
30497 return more;
30498
30499 if (rs6000_sched_groups)
30500 {
30501 if (is_microcoded_insn (insn))
30502 cached_can_issue_more = 0;
30503 else if (is_cracked_insn (insn))
30504 cached_can_issue_more = more > 2 ? more - 2 : 0;
30505 else
30506 cached_can_issue_more = more - 1;
30507
30508 return cached_can_issue_more;
30509 }
30510
30511 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30512 return 0;
30513
30514 cached_can_issue_more = more - 1;
30515 return cached_can_issue_more;
30516 }
30517
30518 static int
30519 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30520 {
30521 int r = rs6000_variable_issue_1 (insn, more);
30522 if (verbose)
30523 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30524 return r;
30525 }
30526
30527 /* Adjust the cost of a scheduling dependency. Return the new cost of
30528 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30529
30530 static int
30531 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30532 unsigned int)
30533 {
30534 enum attr_type attr_type;
30535
30536 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30537 return cost;
30538
30539 switch (dep_type)
30540 {
30541 case REG_DEP_TRUE:
30542 {
30543 /* Data dependency; DEP_INSN writes a register that INSN reads
30544 some cycles later. */
30545
30546 /* Separate a load from a narrower, dependent store. */
30547 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30548 && GET_CODE (PATTERN (insn)) == SET
30549 && GET_CODE (PATTERN (dep_insn)) == SET
30550 && MEM_P (XEXP (PATTERN (insn), 1))
30551 && MEM_P (XEXP (PATTERN (dep_insn), 0))
30552 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30553 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30554 return cost + 14;
30555
30556 attr_type = get_attr_type (insn);
30557
30558 switch (attr_type)
30559 {
30560 case TYPE_JMPREG:
30561 /* Tell the first scheduling pass about the latency between
30562 a mtctr and bctr (and mtlr and br/blr). The first
30563 scheduling pass will not know about this latency since
30564 the mtctr instruction, which has the latency associated
30565 to it, will be generated by reload. */
30566 return 4;
30567 case TYPE_BRANCH:
30568 /* Leave some extra cycles between a compare and its
30569 dependent branch, to inhibit expensive mispredicts. */
30570 if ((rs6000_tune == PROCESSOR_PPC603
30571 || rs6000_tune == PROCESSOR_PPC604
30572 || rs6000_tune == PROCESSOR_PPC604e
30573 || rs6000_tune == PROCESSOR_PPC620
30574 || rs6000_tune == PROCESSOR_PPC630
30575 || rs6000_tune == PROCESSOR_PPC750
30576 || rs6000_tune == PROCESSOR_PPC7400
30577 || rs6000_tune == PROCESSOR_PPC7450
30578 || rs6000_tune == PROCESSOR_PPCE5500
30579 || rs6000_tune == PROCESSOR_PPCE6500
30580 || rs6000_tune == PROCESSOR_POWER4
30581 || rs6000_tune == PROCESSOR_POWER5
30582 || rs6000_tune == PROCESSOR_POWER7
30583 || rs6000_tune == PROCESSOR_POWER8
30584 || rs6000_tune == PROCESSOR_POWER9
30585 || rs6000_tune == PROCESSOR_CELL)
30586 && recog_memoized (dep_insn)
30587 && (INSN_CODE (dep_insn) >= 0))
30588
30589 switch (get_attr_type (dep_insn))
30590 {
30591 case TYPE_CMP:
30592 case TYPE_FPCOMPARE:
30593 case TYPE_CR_LOGICAL:
30594 return cost + 2;
30595 case TYPE_EXTS:
30596 case TYPE_MUL:
30597 if (get_attr_dot (dep_insn) == DOT_YES)
30598 return cost + 2;
30599 else
30600 break;
30601 case TYPE_SHIFT:
30602 if (get_attr_dot (dep_insn) == DOT_YES
30603 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30604 return cost + 2;
30605 else
30606 break;
30607 default:
30608 break;
30609 }
30610 break;
30611
30612 case TYPE_STORE:
30613 case TYPE_FPSTORE:
30614 if ((rs6000_tune == PROCESSOR_POWER6)
30615 && recog_memoized (dep_insn)
30616 && (INSN_CODE (dep_insn) >= 0))
30617 {
30618
30619 if (GET_CODE (PATTERN (insn)) != SET)
30620 /* If this happens, we have to extend this to schedule
30621 optimally. Return default for now. */
30622 return cost;
30623
30624 /* Adjust the cost for the case where the value written
30625 by a fixed point operation is used as the address
30626 gen value on a store. */
30627 switch (get_attr_type (dep_insn))
30628 {
30629 case TYPE_LOAD:
30630 case TYPE_CNTLZ:
30631 {
30632 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30633 return get_attr_sign_extend (dep_insn)
30634 == SIGN_EXTEND_YES ? 6 : 4;
30635 break;
30636 }
30637 case TYPE_SHIFT:
30638 {
30639 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30640 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30641 6 : 3;
30642 break;
30643 }
30644 case TYPE_INTEGER:
30645 case TYPE_ADD:
30646 case TYPE_LOGICAL:
30647 case TYPE_EXTS:
30648 case TYPE_INSERT:
30649 {
30650 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30651 return 3;
30652 break;
30653 }
30654 case TYPE_STORE:
30655 case TYPE_FPLOAD:
30656 case TYPE_FPSTORE:
30657 {
30658 if (get_attr_update (dep_insn) == UPDATE_YES
30659 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30660 return 3;
30661 break;
30662 }
30663 case TYPE_MUL:
30664 {
30665 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30666 return 17;
30667 break;
30668 }
30669 case TYPE_DIV:
30670 {
30671 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30672 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30673 break;
30674 }
30675 default:
30676 break;
30677 }
30678 }
30679 break;
30680
30681 case TYPE_LOAD:
30682 if ((rs6000_tune == PROCESSOR_POWER6)
30683 && recog_memoized (dep_insn)
30684 && (INSN_CODE (dep_insn) >= 0))
30685 {
30686
30687 /* Adjust the cost for the case where the value written
30688 by a fixed point instruction is used within the address
30689 gen portion of a subsequent load(u)(x) */
30690 switch (get_attr_type (dep_insn))
30691 {
30692 case TYPE_LOAD:
30693 case TYPE_CNTLZ:
30694 {
30695 if (set_to_load_agen (dep_insn, insn))
30696 return get_attr_sign_extend (dep_insn)
30697 == SIGN_EXTEND_YES ? 6 : 4;
30698 break;
30699 }
30700 case TYPE_SHIFT:
30701 {
30702 if (set_to_load_agen (dep_insn, insn))
30703 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30704 6 : 3;
30705 break;
30706 }
30707 case TYPE_INTEGER:
30708 case TYPE_ADD:
30709 case TYPE_LOGICAL:
30710 case TYPE_EXTS:
30711 case TYPE_INSERT:
30712 {
30713 if (set_to_load_agen (dep_insn, insn))
30714 return 3;
30715 break;
30716 }
30717 case TYPE_STORE:
30718 case TYPE_FPLOAD:
30719 case TYPE_FPSTORE:
30720 {
30721 if (get_attr_update (dep_insn) == UPDATE_YES
30722 && set_to_load_agen (dep_insn, insn))
30723 return 3;
30724 break;
30725 }
30726 case TYPE_MUL:
30727 {
30728 if (set_to_load_agen (dep_insn, insn))
30729 return 17;
30730 break;
30731 }
30732 case TYPE_DIV:
30733 {
30734 if (set_to_load_agen (dep_insn, insn))
30735 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30736 break;
30737 }
30738 default:
30739 break;
30740 }
30741 }
30742 break;
30743
30744 case TYPE_FPLOAD:
30745 if ((rs6000_tune == PROCESSOR_POWER6)
30746 && get_attr_update (insn) == UPDATE_NO
30747 && recog_memoized (dep_insn)
30748 && (INSN_CODE (dep_insn) >= 0)
30749 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30750 return 2;
30751
30752 default:
30753 break;
30754 }
30755
30756 /* Fall out to return default cost. */
30757 }
30758 break;
30759
30760 case REG_DEP_OUTPUT:
30761 /* Output dependency; DEP_INSN writes a register that INSN writes some
30762 cycles later. */
30763 if ((rs6000_tune == PROCESSOR_POWER6)
30764 && recog_memoized (dep_insn)
30765 && (INSN_CODE (dep_insn) >= 0))
30766 {
30767 attr_type = get_attr_type (insn);
30768
30769 switch (attr_type)
30770 {
30771 case TYPE_FP:
30772 case TYPE_FPSIMPLE:
30773 if (get_attr_type (dep_insn) == TYPE_FP
30774 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30775 return 1;
30776 break;
30777 case TYPE_FPLOAD:
30778 if (get_attr_update (insn) == UPDATE_NO
30779 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30780 return 2;
30781 break;
30782 default:
30783 break;
30784 }
30785 }
30786 /* Fall through, no cost for output dependency. */
30787 /* FALLTHRU */
30788
30789 case REG_DEP_ANTI:
30790 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30791 cycles later. */
30792 return 0;
30793
30794 default:
30795 gcc_unreachable ();
30796 }
30797
30798 return cost;
30799 }
30800
30801 /* Debug version of rs6000_adjust_cost. */
30802
30803 static int
30804 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30805 int cost, unsigned int dw)
30806 {
30807 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30808
30809 if (ret != cost)
30810 {
30811 const char *dep;
30812
30813 switch (dep_type)
30814 {
30815 default: dep = "unknown dependency"; break;
30816 case REG_DEP_TRUE: dep = "data dependency"; break;
30817 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30818 case REG_DEP_ANTI: dep = "anti dependency"; break;
30819 }
30820
30821 fprintf (stderr,
30822 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30823 "%s, insn:\n", ret, cost, dep);
30824
30825 debug_rtx (insn);
30826 }
30827
30828 return ret;
30829 }
30830
30831 /* Return true if INSN is microcoded, false otherwise. */
30833
30834 static bool
30835 is_microcoded_insn (rtx_insn *insn)
30836 {
30837 if (!insn || !NONDEBUG_INSN_P (insn)
30838 || GET_CODE (PATTERN (insn)) == USE
30839 || GET_CODE (PATTERN (insn)) == CLOBBER)
30840 return false;
30841
30842 if (rs6000_tune == PROCESSOR_CELL)
30843 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30844
30845 if (rs6000_sched_groups
30846 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30847 {
30848 enum attr_type type = get_attr_type (insn);
30849 if ((type == TYPE_LOAD
30850 && get_attr_update (insn) == UPDATE_YES
30851 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30852 || ((type == TYPE_LOAD || type == TYPE_STORE)
30853 && get_attr_update (insn) == UPDATE_YES
30854 && get_attr_indexed (insn) == INDEXED_YES)
30855 || type == TYPE_MFCR)
30856 return true;
30857 }
30858
30859 return false;
30860 }
30861
30862 /* The function returns true if INSN is cracked into 2 instructions
30863 by the processor (and therefore occupies 2 issue slots). */
30864
30865 static bool
30866 is_cracked_insn (rtx_insn *insn)
30867 {
30868 if (!insn || !NONDEBUG_INSN_P (insn)
30869 || GET_CODE (PATTERN (insn)) == USE
30870 || GET_CODE (PATTERN (insn)) == CLOBBER)
30871 return false;
30872
30873 if (rs6000_sched_groups
30874 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30875 {
30876 enum attr_type type = get_attr_type (insn);
30877 if ((type == TYPE_LOAD
30878 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30879 && get_attr_update (insn) == UPDATE_NO)
30880 || (type == TYPE_LOAD
30881 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30882 && get_attr_update (insn) == UPDATE_YES
30883 && get_attr_indexed (insn) == INDEXED_NO)
30884 || (type == TYPE_STORE
30885 && get_attr_update (insn) == UPDATE_YES
30886 && get_attr_indexed (insn) == INDEXED_NO)
30887 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30888 && get_attr_update (insn) == UPDATE_YES)
30889 || (type == TYPE_CR_LOGICAL
30890 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30891 || (type == TYPE_EXTS
30892 && get_attr_dot (insn) == DOT_YES)
30893 || (type == TYPE_SHIFT
30894 && get_attr_dot (insn) == DOT_YES
30895 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30896 || (type == TYPE_MUL
30897 && get_attr_dot (insn) == DOT_YES)
30898 || type == TYPE_DIV
30899 || (type == TYPE_INSERT
30900 && get_attr_size (insn) == SIZE_32))
30901 return true;
30902 }
30903
30904 return false;
30905 }
30906
30907 /* The function returns true if INSN can be issued only from
30908 the branch slot. */
30909
30910 static bool
30911 is_branch_slot_insn (rtx_insn *insn)
30912 {
30913 if (!insn || !NONDEBUG_INSN_P (insn)
30914 || GET_CODE (PATTERN (insn)) == USE
30915 || GET_CODE (PATTERN (insn)) == CLOBBER)
30916 return false;
30917
30918 if (rs6000_sched_groups)
30919 {
30920 enum attr_type type = get_attr_type (insn);
30921 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30922 return true;
30923 return false;
30924 }
30925
30926 return false;
30927 }
30928
30929 /* Return true if OUT_INSN sets a value that is
30930 used in the address generation computation of IN_INSN. */
30931 static bool
30932 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30933 {
30934 rtx out_set, in_set;
30935
30936 /* For performance reasons, only handle the simple case where
30937 both insns are a single_set. */
30938 out_set = single_set (out_insn);
30939 if (out_set)
30940 {
30941 in_set = single_set (in_insn);
30942 if (in_set)
30943 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30944 }
30945
30946 return false;
30947 }
30948
30949 /* Try to determine base/offset/size parts of the given MEM.
30950 Return true if successful, false if not all of the values
30951 could be determined.
30952
30953 This function only looks for REG or REG+CONST address forms.
30954 REG+REG address form will return false. */
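/* For example, given a MEM of size 8 whose address is
   (plus (plus (reg 9) (const_int 16)) (const_int 4)), this yields
   base (reg 9), offset 20 and size 8; a REG+REG address such as
   (plus (reg 9) (reg 10)) returns false. */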
30955
30956 static bool
30957 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30958 HOST_WIDE_INT *size)
30959 {
30960 rtx addr_rtx;
30961 if (MEM_SIZE_KNOWN_P (mem))
30962 *size = MEM_SIZE (mem);
30963 else
30964 return false;
30965
30966 addr_rtx = XEXP (mem, 0);
30967 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30968 addr_rtx = XEXP (addr_rtx, 1);
30969
30970 *offset = 0;
30971 while (GET_CODE (addr_rtx) == PLUS
30972 && CONST_INT_P (XEXP (addr_rtx, 1)))
30973 {
30974 *offset += INTVAL (XEXP (addr_rtx, 1));
30975 addr_rtx = XEXP (addr_rtx, 0);
30976 }
30977 if (!REG_P (addr_rtx))
30978 return false;
30979
30980 *base = addr_rtx;
30981 return true;
30982 }
30983
30984 /* Return true if the target storage location of MEM1 is
30985 adjacent to the target storage location of MEM2. */
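/* For example, an 8-byte access at (reg 9)+0 is adjacent to an
   8-byte access at (reg 9)+8, but not to one at (reg 9)+16. */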
30987
30988 static bool
30989 adjacent_mem_locations (rtx mem1, rtx mem2)
30990 {
30991 rtx reg1, reg2;
30992 HOST_WIDE_INT off1, size1, off2, size2;
30993
30994 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30995 && get_memref_parts (mem2, &reg2, &off2, &size2))
30996 return ((REGNO (reg1) == REGNO (reg2))
30997 && ((off1 + size1 == off2)
30998 || (off2 + size2 == off1)));
30999
31000 return false;
31001 }
31002
31003 /* This function returns true if it can be determined that the two MEM
31004 locations overlap by at least 1 byte based on base reg/offset/size. */
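/* For example, an 8-byte access at (reg 9)+4 overlaps a 4-byte access
   at (reg 9)+8, since 4 <= 8 and 4 + 8 > 8. */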
31005
31006 static bool
31007 mem_locations_overlap (rtx mem1, rtx mem2)
31008 {
31009 rtx reg1, reg2;
31010 HOST_WIDE_INT off1, size1, off2, size2;
31011
31012 if (get_memref_parts (mem1, &reg1, &off1, &size1)
31013 && get_memref_parts (mem2, &reg2, &off2, &size2))
31014 return ((REGNO (reg1) == REGNO (reg2))
31015 && (((off1 <= off2) && (off1 + size1 > off2))
31016 || ((off2 <= off1) && (off2 + size2 > off1))));
31017
31018 return false;
31019 }
31020
31021 /* Update the integer scheduling priority INSN_PRIORITY (INSN).
31022 Increase the priority to execute INSN earlier, reduce the priority
31023 to execute INSN later. */
31026
31027 static int
31028 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
31029 {
31030 rtx load_mem, str_mem;
31031 /* On machines (like the 750) which have asymmetric integer units,
31032 where one integer unit can do multiply and divides and the other
31033 can't, reduce the priority of multiply/divide so it is scheduled
31034 before other integer operations. */
31035
31036 #if 0
31037 if (! INSN_P (insn))
31038 return priority;
31039
31040 if (GET_CODE (PATTERN (insn)) == USE)
31041 return priority;
31042
31043 switch (rs6000_tune) {
31044 case PROCESSOR_PPC750:
31045 switch (get_attr_type (insn))
31046 {
31047 default:
31048 break;
31049
31050 case TYPE_MUL:
31051 case TYPE_DIV:
31052 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
31053 priority, priority);
31054 if (priority >= 0 && priority < 0x01000000)
31055 priority >>= 3;
31056 break;
31057 }
31058 }
31059 #endif
31060
31061 if (insn_must_be_first_in_group (insn)
31062 && reload_completed
31063 && current_sched_info->sched_max_insns_priority
31064 && rs6000_sched_restricted_insns_priority)
31065 {
31066
31067 /* Prioritize insns that can be dispatched only in the first
31068 dispatch slot. */
31069 if (rs6000_sched_restricted_insns_priority == 1)
31070 /* Attach highest priority to insn. This means that in
31071 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
31072 precede 'priority' (critical path) considerations. */
31073 return current_sched_info->sched_max_insns_priority;
31074 else if (rs6000_sched_restricted_insns_priority == 2)
31075 /* Increase priority of insn by a minimal amount. This means that in
31076 haifa-sched.c:ready_sort(), only 'priority' (critical path)
31077 considerations precede dispatch-slot restriction considerations. */
31078 return (priority + 1);
31079 }
31080
31081 if (rs6000_tune == PROCESSOR_POWER6
31082 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
31083 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
31084 /* Attach highest priority to insn if the scheduler has just issued two
31085 stores and this instruction is a load, or two loads and this instruction
31086 is a store. Power6 wants loads and stores scheduled alternately
31087 when possible. */
31088 return current_sched_info->sched_max_insns_priority;
31089
31090 return priority;
31091 }
31092
31093 /* Return true if the instruction is nonpipelined on the Cell. */
31094 static bool
31095 is_nonpipeline_insn (rtx_insn *insn)
31096 {
31097 enum attr_type type;
31098 if (!insn || !NONDEBUG_INSN_P (insn)
31099 || GET_CODE (PATTERN (insn)) == USE
31100 || GET_CODE (PATTERN (insn)) == CLOBBER)
31101 return false;
31102
31103 type = get_attr_type (insn);
31104 if (type == TYPE_MUL
31105 || type == TYPE_DIV
31106 || type == TYPE_SDIV
31107 || type == TYPE_DDIV
31108 || type == TYPE_SSQRT
31109 || type == TYPE_DSQRT
31110 || type == TYPE_MFCR
31111 || type == TYPE_MFCRF
31112 || type == TYPE_MFJMPR)
31113 {
31114 return true;
31115 }
31116 return false;
31117 }
31118
31119
31120 /* Return how many instructions the machine can issue per cycle. */
31121
31122 static int
31123 rs6000_issue_rate (void)
31124 {
31125 /* Unless scheduling for register pressure, use an issue rate of 1
31126 for the first scheduling pass to decrease degradation. */
31127 if (!reload_completed && !flag_sched_pressure)
31128 return 1;
31129
31130 switch (rs6000_tune) {
31131 case PROCESSOR_RS64A:
31132 case PROCESSOR_PPC601: /* ? */
31133 case PROCESSOR_PPC7450:
31134 return 3;
31135 case PROCESSOR_PPC440:
31136 case PROCESSOR_PPC603:
31137 case PROCESSOR_PPC750:
31138 case PROCESSOR_PPC7400:
31139 case PROCESSOR_PPC8540:
31140 case PROCESSOR_PPC8548:
31141 case PROCESSOR_CELL:
31142 case PROCESSOR_PPCE300C2:
31143 case PROCESSOR_PPCE300C3:
31144 case PROCESSOR_PPCE500MC:
31145 case PROCESSOR_PPCE500MC64:
31146 case PROCESSOR_PPCE5500:
31147 case PROCESSOR_PPCE6500:
31148 case PROCESSOR_TITAN:
31149 return 2;
31150 case PROCESSOR_PPC476:
31151 case PROCESSOR_PPC604:
31152 case PROCESSOR_PPC604e:
31153 case PROCESSOR_PPC620:
31154 case PROCESSOR_PPC630:
31155 return 4;
31156 case PROCESSOR_POWER4:
31157 case PROCESSOR_POWER5:
31158 case PROCESSOR_POWER6:
31159 case PROCESSOR_POWER7:
31160 return 5;
31161 case PROCESSOR_POWER8:
31162 return 7;
31163 case PROCESSOR_POWER9:
31164 return 6;
31165 default:
31166 return 1;
31167 }
31168 }
31169
31170 /* Return how many instructions to look ahead for better insn
31171 scheduling. */
31172
31173 static int
31174 rs6000_use_sched_lookahead (void)
31175 {
31176 switch (rs6000_tune)
31177 {
31178 case PROCESSOR_PPC8540:
31179 case PROCESSOR_PPC8548:
31180 return 4;
31181
31182 case PROCESSOR_CELL:
31183 return (reload_completed ? 8 : 0);
31184
31185 default:
31186 return 0;
31187 }
31188 }
31189
31190 /* We are choosing an insn from the ready queue. Return zero if INSN can be
31191 chosen. */
31192 static int
31193 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31194 {
31195 if (ready_index == 0)
31196 return 0;
31197
31198 if (rs6000_tune != PROCESSOR_CELL)
31199 return 0;
31200
31201 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31202
31203 if (!reload_completed
31204 || is_nonpipeline_insn (insn)
31205 || is_microcoded_insn (insn))
31206 return 1;
31207
31208 return 0;
31209 }
31210
31211 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31212 and return true. */
31213
31214 static bool
31215 find_mem_ref (rtx pat, rtx *mem_ref)
31216 {
31217 const char * fmt;
31218 int i, j;
31219
31220 /* stack_tie does not produce any real memory traffic. */
31221 if (tie_operand (pat, VOIDmode))
31222 return false;
31223
31224 if (MEM_P (pat))
31225 {
31226 *mem_ref = pat;
31227 return true;
31228 }
31229
31230 /* Recursively process the pattern. */
31231 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31232
31233 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31234 {
31235 if (fmt[i] == 'e')
31236 {
31237 if (find_mem_ref (XEXP (pat, i), mem_ref))
31238 return true;
31239 }
31240 else if (fmt[i] == 'E')
31241 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31242 {
31243 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31244 return true;
31245 }
31246 }
31247
31248 return false;
31249 }
31250
31251 /* Determine if PAT is a PATTERN of a load insn. */
31252
31253 static bool
31254 is_load_insn1 (rtx pat, rtx *load_mem)
31255 {
31256 if (!pat)
31257 return false;
31258
31259 if (GET_CODE (pat) == SET)
31260 return find_mem_ref (SET_SRC (pat), load_mem);
31261
31262 if (GET_CODE (pat) == PARALLEL)
31263 {
31264 int i;
31265
31266 for (i = 0; i < XVECLEN (pat, 0); i++)
31267 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31268 return true;
31269 }
31270
31271 return false;
31272 }
31273
31274 /* Determine if INSN loads from memory. */
31275
31276 static bool
31277 is_load_insn (rtx insn, rtx *load_mem)
31278 {
31279 if (!insn || !INSN_P (insn))
31280 return false;
31281
31282 if (CALL_P (insn))
31283 return false;
31284
31285 return is_load_insn1 (PATTERN (insn), load_mem);
31286 }
31287
31288 /* Determine if PAT is a PATTERN of a store insn. */
31289
31290 static bool
31291 is_store_insn1 (rtx pat, rtx *str_mem)
31292 {
31293 if (!pat)
31294 return false;
31295
31296 if (GET_CODE (pat) == SET)
31297 return find_mem_ref (SET_DEST (pat), str_mem);
31298
31299 if (GET_CODE (pat) == PARALLEL)
31300 {
31301 int i;
31302
31303 for (i = 0; i < XVECLEN (pat, 0); i++)
31304 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31305 return true;
31306 }
31307
31308 return false;
31309 }
31310
31311 /* Determine if INSN stores to memory. */
31312
31313 static bool
31314 is_store_insn (rtx insn, rtx *str_mem)
31315 {
31316 if (!insn || !INSN_P (insn))
31317 return false;
31318
31319 return is_store_insn1 (PATTERN (insn), str_mem);
31320 }
31321
31322 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31323
31324 static bool
31325 is_power9_pairable_vec_type (enum attr_type type)
31326 {
31327 switch (type)
31328 {
31329 case TYPE_VECSIMPLE:
31330 case TYPE_VECCOMPLEX:
31331 case TYPE_VECDIV:
31332 case TYPE_VECCMP:
31333 case TYPE_VECPERM:
31334 case TYPE_VECFLOAT:
31335 case TYPE_VECFDIV:
31336 case TYPE_VECDOUBLE:
31337 return true;
31338 default:
31339 break;
31340 }
31341 return false;
31342 }
31343
31344 /* Returns whether the dependence between INSN and NEXT is considered
31345 costly by the given target. */
31346
31347 static bool
31348 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31349 {
31350 rtx insn;
31351 rtx next;
31352 rtx load_mem, str_mem;
31353
31354 /* If the flag is not enabled, no dependence is considered costly;
31355 allow all dependent insns in the same group.
31356 This is the most aggressive option. */
31357 if (rs6000_sched_costly_dep == no_dep_costly)
31358 return false;
31359
31360 /* If the flag is set to 1, a dependence is always considered costly;
31361 do not allow dependent instructions in the same group.
31362 This is the most conservative option. */
31363 if (rs6000_sched_costly_dep == all_deps_costly)
31364 return true;
31365
31366 insn = DEP_PRO (dep);
31367 next = DEP_CON (dep);
31368
31369 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31370 && is_load_insn (next, &load_mem)
31371 && is_store_insn (insn, &str_mem))
31372 /* Prevent load after store in the same group. */
31373 return true;
31374
31375 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31376 && is_load_insn (next, &load_mem)
31377 && is_store_insn (insn, &str_mem)
31378 && DEP_TYPE (dep) == REG_DEP_TRUE
31379 && mem_locations_overlap(str_mem, load_mem))
31380 /* Prevent load after store in the same group if it is a true
31381 dependence. */
31382 return true;
31383
31384 /* The flag is set to X; dependences with latency >= X are considered costly,
31385 and will not be scheduled in the same group. */
31386 if (rs6000_sched_costly_dep <= max_dep_latency
31387 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31388 return true;
31389
31390 return false;
31391 }
31392
31393 /* Return the next insn after INSN that is found before TAIL is reached,
31394 skipping any "non-active" insns - insns that will not actually occupy
31395 an issue slot. Return NULL_RTX if such an insn is not found. */
31396
31397 static rtx_insn *
31398 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31399 {
31400 if (insn == NULL_RTX || insn == tail)
31401 return NULL;
31402
31403 while (1)
31404 {
31405 insn = NEXT_INSN (insn);
31406 if (insn == NULL_RTX || insn == tail)
31407 return NULL;
31408
31409 if (CALL_P (insn)
31410 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31411 || (NONJUMP_INSN_P (insn)
31412 && GET_CODE (PATTERN (insn)) != USE
31413 && GET_CODE (PATTERN (insn)) != CLOBBER
31414 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31415 break;
31416 }
31417 return insn;
31418 }
31419
31420 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31421
31422 static int
31423 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31424 {
31425 int pos;
31426 int i;
31427 rtx_insn *tmp;
31428 enum attr_type type, type2;
31429
31430 type = get_attr_type (last_scheduled_insn);
31431
31432 /* Try to issue fixed point divides back-to-back in pairs so they will be
31433 routed to separate execution units and execute in parallel. */
31434 if (type == TYPE_DIV && divide_cnt == 0)
31435 {
31436 /* First divide has been scheduled. */
31437 divide_cnt = 1;
31438
31439 /* Scan the ready list looking for another divide, if found move it
31440 to the end of the list so it is chosen next. */
31441 pos = lastpos;
31442 while (pos >= 0)
31443 {
31444 if (recog_memoized (ready[pos]) >= 0
31445 && get_attr_type (ready[pos]) == TYPE_DIV)
31446 {
31447 tmp = ready[pos];
31448 for (i = pos; i < lastpos; i++)
31449 ready[i] = ready[i + 1];
31450 ready[lastpos] = tmp;
31451 break;
31452 }
31453 pos--;
31454 }
31455 }
31456 else
31457 {
31458 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31459 divide_cnt = 0;
31460
31461 /* The best dispatch throughput for vector and vector load insns can be
31462 achieved by interleaving a vector and vector load such that they'll
31463 dispatch to the same superslice. If this pairing cannot be achieved
31464 then it is best to pair vector insns together and vector load insns
31465 together.
31466
31467 To aid in this pairing, vec_pairing maintains the current state with
31468 the following values:
31469
31470 0 : Initial state, no vecload/vector pairing has been started.
31471
31472 1 : A vecload or vector insn has been issued and a candidate for
31473 pairing has been found and moved to the end of the ready
31474 list. */
31475 if (type == TYPE_VECLOAD)
31476 {
31477 /* Issued a vecload. */
31478 if (vec_pairing == 0)
31479 {
31480 int vecload_pos = -1;
31481 /* We issued a single vecload, look for a vector insn to pair it
31482 with. If one isn't found, try to pair another vecload. */
31483 pos = lastpos;
31484 while (pos >= 0)
31485 {
31486 if (recog_memoized (ready[pos]) >= 0)
31487 {
31488 type2 = get_attr_type (ready[pos]);
31489 if (is_power9_pairable_vec_type (type2))
31490 {
31491 /* Found a vector insn to pair with, move it to the
31492 end of the ready list so it is scheduled next. */
31493 tmp = ready[pos];
31494 for (i = pos; i < lastpos; i++)
31495 ready[i] = ready[i + 1];
31496 ready[lastpos] = tmp;
31497 vec_pairing = 1;
31498 return cached_can_issue_more;
31499 }
31500 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31501 /* Remember position of first vecload seen. */
31502 vecload_pos = pos;
31503 }
31504 pos--;
31505 }
31506 if (vecload_pos >= 0)
31507 {
31508 /* Didn't find a vector to pair with but did find a vecload,
31509 move it to the end of the ready list. */
31510 tmp = ready[vecload_pos];
31511 for (i = vecload_pos; i < lastpos; i++)
31512 ready[i] = ready[i + 1];
31513 ready[lastpos] = tmp;
31514 vec_pairing = 1;
31515 return cached_can_issue_more;
31516 }
31517 }
31518 }
31519 else if (is_power9_pairable_vec_type (type))
31520 {
31521 /* Issued a vector operation. */
31522 if (vec_pairing == 0)
31523 {
31524 int vec_pos = -1;
31525 /* We issued a single vector insn, look for a vecload to pair it
31526 with. If one isn't found, try to pair another vector. */
31527 pos = lastpos;
31528 while (pos >= 0)
31529 {
31530 if (recog_memoized (ready[pos]) >= 0)
31531 {
31532 type2 = get_attr_type (ready[pos]);
31533 if (type2 == TYPE_VECLOAD)
31534 {
31535 /* Found a vecload insn to pair with, move it to the
31536 end of the ready list so it is scheduled next. */
31537 tmp = ready[pos];
31538 for (i = pos; i < lastpos; i++)
31539 ready[i] = ready[i + 1];
31540 ready[lastpos] = tmp;
31541 vec_pairing = 1;
31542 return cached_can_issue_more;
31543 }
31544 else if (is_power9_pairable_vec_type (type2)
31545 && vec_pos == -1)
31546 /* Remember position of first vector insn seen. */
31547 vec_pos = pos;
31548 }
31549 pos--;
31550 }
31551 if (vec_pos >= 0)
31552 {
31553 /* Didn't find a vecload to pair with but did find a vector
31554 insn, move it to the end of the ready list. */
31555 tmp = ready[vec_pos];
31556 for (i = vec_pos; i < lastpos; i++)
31557 ready[i] = ready[i + 1];
31558 ready[lastpos] = tmp;
31559 vec_pairing = 1;
31560 return cached_can_issue_more;
31561 }
31562 }
31563 }
31564
31565 /* We've either finished a vec/vecload pair, couldn't find an insn to
31566 continue the current pair, or the last insn had nothing to do with
31567 pairing. In any case, reset the state. */
31568 vec_pairing = 0;
31569 }
31570
31571 return cached_can_issue_more;
31572 }
31573
31574 /* We are about to begin issuing insns for this clock cycle. */
31575
31576 static int
31577 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31578 rtx_insn **ready ATTRIBUTE_UNUSED,
31579 int *pn_ready ATTRIBUTE_UNUSED,
31580 int clock_var ATTRIBUTE_UNUSED)
31581 {
31582 int n_ready = *pn_ready;
31583
31584 if (sched_verbose)
31585 fprintf (dump, "// rs6000_sched_reorder :\n");
31586
31587 /* Reorder the ready list, if the second to last ready insn
31588 is a nonpipeline insn. */
31589 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31590 {
31591 if (is_nonpipeline_insn (ready[n_ready - 1])
31592 && (recog_memoized (ready[n_ready - 2]) > 0))
31593 /* Simply swap first two insns. */
31594 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31595 }
31596
31597 if (rs6000_tune == PROCESSOR_POWER6)
31598 load_store_pendulum = 0;
31599
31600 return rs6000_issue_rate ();
31601 }
31602
31603 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31604
31605 static int
31606 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31607 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31608 {
31609 if (sched_verbose)
31610 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31611
31612 /* For Power6, we need to handle some special cases to try and keep the
31613 store queue from overflowing and triggering expensive flushes.
31614
31615 This code monitors how load and store instructions are being issued
31616 and skews the ready list one way or the other to increase the likelihood
31617 that a desired instruction is issued at the proper time.
31618
31619 A couple of things are done. First, we maintain a "load_store_pendulum"
31620 to track the current state of load/store issue.
31621
31622 - If the pendulum is at zero, then no loads or stores have been
31623 issued in the current cycle so we do nothing.
31624
31625 - If the pendulum is 1, then a single load has been issued in this
31626 cycle and we attempt to locate another load in the ready list to
31627 issue with it.
31628
31629 - If the pendulum is -2, then two stores have already been
31630 issued in this cycle, so we increase the priority of the first load
31631 in the ready list to increase its likelihood of being chosen first
31632 in the next cycle.
31633
31634 - If the pendulum is -1, then a single store has been issued in this
31635 cycle and we attempt to locate another store in the ready list to
31636 issue with it, preferring a store to an adjacent memory location to
31637 facilitate store pairing in the store queue.
31638
31639 - If the pendulum is 2, then two loads have already been
31640 issued in this cycle, so we increase the priority of the first store
31641 in the ready list to increase its likelihood of being chosen first
31642 in the next cycle.
31643
31644 - If the pendulum < -2 or > 2, then do nothing.
31645
31646 Note: This code covers the most common scenarios. There exist
31647 non-load/store instructions which make use of the LSU and which
31648 would need to be accounted for to strictly model the behavior
31649 of the machine. Those instructions are currently unaccounted
31650 for to help minimize compile time overhead of this code.
31651 */
31652 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31653 {
31654 int pos;
31655 int i;
31656 rtx_insn *tmp;
31657 rtx load_mem, str_mem;
31658
31659 if (is_store_insn (last_scheduled_insn, &str_mem))
31660 /* Issuing a store, swing the load_store_pendulum to the left */
31661 load_store_pendulum--;
31662 else if (is_load_insn (last_scheduled_insn, &load_mem))
31663 /* Issuing a load, swing the load_store_pendulum to the right */
31664 load_store_pendulum++;
31665 else
31666 return cached_can_issue_more;
31667
31668 /* If the pendulum is balanced, or there is only one instruction on
31669 the ready list, then all is well, so return. */
31670 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31671 return cached_can_issue_more;
31672
31673 if (load_store_pendulum == 1)
31674 {
31675 /* A load has been issued in this cycle. Scan the ready list
31676 for another load to issue with it */
31677 pos = *pn_ready-1;
31678
31679 while (pos >= 0)
31680 {
31681 if (is_load_insn (ready[pos], &load_mem))
31682 {
31683 /* Found a load. Move it to the head of the ready list,
31684 and adjust its priority so that it is more likely to
31685 stay there */
31686 tmp = ready[pos];
31687 for (i=pos; i<*pn_ready-1; i++)
31688 ready[i] = ready[i + 1];
31689 ready[*pn_ready-1] = tmp;
31690
31691 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31692 INSN_PRIORITY (tmp)++;
31693 break;
31694 }
31695 pos--;
31696 }
31697 }
31698 else if (load_store_pendulum == -2)
31699 {
31700 /* Two stores have been issued in this cycle. Increase the
31701 priority of the first load in the ready list to favor it for
31702 issuing in the next cycle. */
31703 pos = *pn_ready-1;
31704
31705 while (pos >= 0)
31706 {
31707 if (is_load_insn (ready[pos], &load_mem)
31708 && !sel_sched_p ()
31709 && INSN_PRIORITY_KNOWN (ready[pos]))
31710 {
31711 INSN_PRIORITY (ready[pos])++;
31712
31713 /* Adjust the pendulum to account for the fact that a load
31714 was found and increased in priority. This is to prevent
31715 increasing the priority of multiple loads */
31716 load_store_pendulum--;
31717
31718 break;
31719 }
31720 pos--;
31721 }
31722 }
31723 else if (load_store_pendulum == -1)
31724 {
31725 /* A store has been issued in this cycle. Scan the ready list for
31726 another store to issue with it, preferring a store to an adjacent
31727 memory location */
31728 int first_store_pos = -1;
31729
31730 pos = *pn_ready-1;
31731
31732 while (pos >= 0)
31733 {
31734 if (is_store_insn (ready[pos], &str_mem))
31735 {
31736 rtx str_mem2;
31737 /* Maintain the index of the first store found on the
31738 list */
31739 if (first_store_pos == -1)
31740 first_store_pos = pos;
31741
31742 if (is_store_insn (last_scheduled_insn, &str_mem2)
31743 && adjacent_mem_locations (str_mem, str_mem2))
31744 {
31745 /* Found an adjacent store. Move it to the head of the
31746 ready list, and adjust its priority so that it is
31747 more likely to stay there */
31748 tmp = ready[pos];
31749 for (i=pos; i<*pn_ready-1; i++)
31750 ready[i] = ready[i + 1];
31751 ready[*pn_ready-1] = tmp;
31752
31753 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31754 INSN_PRIORITY (tmp)++;
31755
31756 first_store_pos = -1;
31757
31758 break;
31759 }
31760 }
31761 pos--;
31762 }
31763
31764 if (first_store_pos >= 0)
31765 {
31766 /* An adjacent store wasn't found, but a non-adjacent store was,
31767 so move the non-adjacent store to the front of the ready
31768 list, and adjust its priority so that it is more likely to
31769 stay there. */
31770 tmp = ready[first_store_pos];
31771 for (i=first_store_pos; i<*pn_ready-1; i++)
31772 ready[i] = ready[i + 1];
31773 ready[*pn_ready-1] = tmp;
31774 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31775 INSN_PRIORITY (tmp)++;
31776 }
31777 }
31778 else if (load_store_pendulum == 2)
31779 {
31780 /* Two loads have been issued in this cycle. Increase the priority
31781 of the first store in the ready list to favor it for issuing in
31782 the next cycle. */
31783 pos = *pn_ready-1;
31784
31785 while (pos >= 0)
31786 {
31787 if (is_store_insn (ready[pos], &str_mem)
31788 && !sel_sched_p ()
31789 && INSN_PRIORITY_KNOWN (ready[pos]))
31790 {
31791 INSN_PRIORITY (ready[pos])++;
31792
31793 /* Adjust the pendulum to account for the fact that a store
31794 was found and increased in priority. This is to prevent
31795 increasing the priority of multiple stores */
31796 load_store_pendulum++;
31797
31798 break;
31799 }
31800 pos--;
31801 }
31802 }
31803 }
31804
31805 /* Do Power9 dependent reordering if necessary. */
31806 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31807 && recog_memoized (last_scheduled_insn) >= 0)
31808 return power9_sched_reorder2 (ready, *pn_ready - 1);
31809
31810 return cached_can_issue_more;
31811 }
31812
31813 /* Return whether the presence of INSN causes a dispatch group termination
31814 of group WHICH_GROUP.
31815
31816 If WHICH_GROUP == current_group, this function will return true if INSN
31817 causes the termination of the current group (i.e., the dispatch group to
31818 which INSN belongs). This means that INSN will be the last insn in the
31819 group it belongs to.
31820
31821 If WHICH_GROUP == previous_group, this function will return true if INSN
31822 causes the termination of the previous group (i.e., the dispatch group that
31823 precedes the group to which INSN belongs). This means that INSN will be
31824 the first insn in the group it belongs to. */
31825
31826 static bool
31827 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31828 {
31829 bool first, last;
31830
31831 if (! insn)
31832 return false;
31833
31834 first = insn_must_be_first_in_group (insn);
31835 last = insn_must_be_last_in_group (insn);
31836
31837 if (first && last)
31838 return true;
31839
31840 if (which_group == current_group)
31841 return last;
31842 else if (which_group == previous_group)
31843 return first;
31844
31845 return false;
31846 }
31847
31848
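/* Return true if INSN must be the first insn in a dispatch group on the
   current tuning target. */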
31849 static bool
31850 insn_must_be_first_in_group (rtx_insn *insn)
31851 {
31852 enum attr_type type;
31853
31854 if (!insn
31855 || NOTE_P (insn)
31856 || DEBUG_INSN_P (insn)
31857 || GET_CODE (PATTERN (insn)) == USE
31858 || GET_CODE (PATTERN (insn)) == CLOBBER)
31859 return false;
31860
31861 switch (rs6000_tune)
31862 {
31863 case PROCESSOR_POWER5:
31864 if (is_cracked_insn (insn))
31865 return true;
31866 /* FALLTHRU */
31867 case PROCESSOR_POWER4:
31868 if (is_microcoded_insn (insn))
31869 return true;
31870
31871 if (!rs6000_sched_groups)
31872 return false;
31873
31874 type = get_attr_type (insn);
31875
31876 switch (type)
31877 {
31878 case TYPE_MFCR:
31879 case TYPE_MFCRF:
31880 case TYPE_MTCR:
31881 case TYPE_CR_LOGICAL:
31882 case TYPE_MTJMPR:
31883 case TYPE_MFJMPR:
31884 case TYPE_DIV:
31885 case TYPE_LOAD_L:
31886 case TYPE_STORE_C:
31887 case TYPE_ISYNC:
31888 case TYPE_SYNC:
31889 return true;
31890 default:
31891 break;
31892 }
31893 break;
31894 case PROCESSOR_POWER6:
31895 type = get_attr_type (insn);
31896
31897 switch (type)
31898 {
31899 case TYPE_EXTS:
31900 case TYPE_CNTLZ:
31901 case TYPE_TRAP:
31902 case TYPE_MUL:
31903 case TYPE_INSERT:
31904 case TYPE_FPCOMPARE:
31905 case TYPE_MFCR:
31906 case TYPE_MTCR:
31907 case TYPE_MFJMPR:
31908 case TYPE_MTJMPR:
31909 case TYPE_ISYNC:
31910 case TYPE_SYNC:
31911 case TYPE_LOAD_L:
31912 case TYPE_STORE_C:
31913 return true;
31914 case TYPE_SHIFT:
31915 if (get_attr_dot (insn) == DOT_NO
31916 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31917 return true;
31918 else
31919 break;
31920 case TYPE_DIV:
31921 if (get_attr_size (insn) == SIZE_32)
31922 return true;
31923 else
31924 break;
31925 case TYPE_LOAD:
31926 case TYPE_STORE:
31927 case TYPE_FPLOAD:
31928 case TYPE_FPSTORE:
31929 if (get_attr_update (insn) == UPDATE_YES)
31930 return true;
31931 else
31932 break;
31933 default:
31934 break;
31935 }
31936 break;
31937 case PROCESSOR_POWER7:
31938 type = get_attr_type (insn);
31939
31940 switch (type)
31941 {
31942 case TYPE_CR_LOGICAL:
31943 case TYPE_MFCR:
31944 case TYPE_MFCRF:
31945 case TYPE_MTCR:
31946 case TYPE_DIV:
31947 case TYPE_ISYNC:
31948 case TYPE_LOAD_L:
31949 case TYPE_STORE_C:
31950 case TYPE_MFJMPR:
31951 case TYPE_MTJMPR:
31952 return true;
31953 case TYPE_MUL:
31954 case TYPE_SHIFT:
31955 case TYPE_EXTS:
31956 if (get_attr_dot (insn) == DOT_YES)
31957 return true;
31958 else
31959 break;
31960 case TYPE_LOAD:
31961 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31962 || get_attr_update (insn) == UPDATE_YES)
31963 return true;
31964 else
31965 break;
31966 case TYPE_STORE:
31967 case TYPE_FPLOAD:
31968 case TYPE_FPSTORE:
31969 if (get_attr_update (insn) == UPDATE_YES)
31970 return true;
31971 else
31972 break;
31973 default:
31974 break;
31975 }
31976 break;
31977 case PROCESSOR_POWER8:
31978 type = get_attr_type (insn);
31979
31980 switch (type)
31981 {
31982 case TYPE_CR_LOGICAL:
31983 case TYPE_MFCR:
31984 case TYPE_MFCRF:
31985 case TYPE_MTCR:
31986 case TYPE_SYNC:
31987 case TYPE_ISYNC:
31988 case TYPE_LOAD_L:
31989 case TYPE_STORE_C:
31990 case TYPE_VECSTORE:
31991 case TYPE_MFJMPR:
31992 case TYPE_MTJMPR:
31993 return true;
31994 case TYPE_SHIFT:
31995 case TYPE_EXTS:
31996 case TYPE_MUL:
31997 if (get_attr_dot (insn) == DOT_YES)
31998 return true;
31999 else
32000 break;
32001 case TYPE_LOAD:
32002 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32003 || get_attr_update (insn) == UPDATE_YES)
32004 return true;
32005 else
32006 break;
32007 case TYPE_STORE:
32008 if (get_attr_update (insn) == UPDATE_YES
32009 && get_attr_indexed (insn) == INDEXED_YES)
32010 return true;
32011 else
32012 break;
32013 default:
32014 break;
32015 }
32016 break;
32017 default:
32018 break;
32019 }
32020
32021 return false;
32022 }
32023
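/* Return true if INSN must be the last insn in a dispatch group on the
   current tuning target. */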
32024 static bool
32025 insn_must_be_last_in_group (rtx_insn *insn)
32026 {
32027 enum attr_type type;
32028
32029 if (!insn
32030 || NOTE_P (insn)
32031 || DEBUG_INSN_P (insn)
32032 || GET_CODE (PATTERN (insn)) == USE
32033 || GET_CODE (PATTERN (insn)) == CLOBBER)
32034 return false;
32035
32036 switch (rs6000_tune) {
32037 case PROCESSOR_POWER4:
32038 case PROCESSOR_POWER5:
32039 if (is_microcoded_insn (insn))
32040 return true;
32041
32042 if (is_branch_slot_insn (insn))
32043 return true;
32044
32045 break;
32046 case PROCESSOR_POWER6:
32047 type = get_attr_type (insn);
32048
32049 switch (type)
32050 {
32051 case TYPE_EXTS:
32052 case TYPE_CNTLZ:
32053 case TYPE_TRAP:
32054 case TYPE_MUL:
32055 case TYPE_FPCOMPARE:
32056 case TYPE_MFCR:
32057 case TYPE_MTCR:
32058 case TYPE_MFJMPR:
32059 case TYPE_MTJMPR:
32060 case TYPE_ISYNC:
32061 case TYPE_SYNC:
32062 case TYPE_LOAD_L:
32063 case TYPE_STORE_C:
32064 return true;
32065 case TYPE_SHIFT:
32066 if (get_attr_dot (insn) == DOT_NO
32067 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
32068 return true;
32069 else
32070 break;
32071 case TYPE_DIV:
32072 if (get_attr_size (insn) == SIZE_32)
32073 return true;
32074 else
32075 break;
32076 default:
32077 break;
32078 }
32079 break;
32080 case PROCESSOR_POWER7:
32081 type = get_attr_type (insn);
32082
32083 switch (type)
32084 {
32085 case TYPE_ISYNC:
32086 case TYPE_SYNC:
32087 case TYPE_LOAD_L:
32088 case TYPE_STORE_C:
32089 return true;
32090 case TYPE_LOAD:
32091 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32092 && get_attr_update (insn) == UPDATE_YES)
32093 return true;
32094 else
32095 break;
32096 case TYPE_STORE:
32097 if (get_attr_update (insn) == UPDATE_YES
32098 && get_attr_indexed (insn) == INDEXED_YES)
32099 return true;
32100 else
32101 break;
32102 default:
32103 break;
32104 }
32105 break;
32106 case PROCESSOR_POWER8:
32107 type = get_attr_type (insn);
32108
32109 switch (type)
32110 {
32111 case TYPE_MFCR:
32112 case TYPE_MTCR:
32113 case TYPE_ISYNC:
32114 case TYPE_SYNC:
32115 case TYPE_LOAD_L:
32116 case TYPE_STORE_C:
32117 return true;
32118 case TYPE_LOAD:
32119 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32120 && get_attr_update (insn) == UPDATE_YES)
32121 return true;
32122 else
32123 break;
32124 case TYPE_STORE:
32125 if (get_attr_update (insn) == UPDATE_YES
32126 && get_attr_indexed (insn) == INDEXED_YES)
32127 return true;
32128 else
32129 break;
32130 default:
32131 break;
32132 }
32133 break;
32134 default:
32135 break;
32136 }
32137
32138 return false;
32139 }
32140
32141 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
32142 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
32143
32144 static bool
32145 is_costly_group (rtx *group_insns, rtx next_insn)
32146 {
32147 int i;
32148 int issue_rate = rs6000_issue_rate ();
32149
32150 for (i = 0; i < issue_rate; i++)
32151 {
32152 sd_iterator_def sd_it;
32153 dep_t dep;
32154 rtx insn = group_insns[i];
32155
32156 if (!insn)
32157 continue;
32158
32159 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32160 {
32161 rtx next = DEP_CON (dep);
32162
32163 if (next == next_insn
32164 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32165 return true;
32166 }
32167 }
32168
32169 return false;
32170 }
32171
32172 /* Utility function used by redefine_groups.
32173 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32174 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32175 to keep it "far" (in a separate group) from GROUP_INSNS, using
32176 one of the following schemes, depending on the value of the flag
32177 -minsert-sched-nops = X:
32178 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32179 in order to force NEXT_INSN into a separate group.
32180 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32181 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32182 insertion (has a group just ended, how many vacant issue slots remain in the
32183 last group, and how many dispatch groups were encountered so far). */
32184
32185 static int
32186 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32187 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32188 int *group_count)
32189 {
32190 rtx nop;
32191 bool force;
32192 int issue_rate = rs6000_issue_rate ();
32193 bool end = *group_end;
32194 int i;
32195
32196 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32197 return can_issue_more;
32198
32199 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32200 return can_issue_more;
32201
32202 force = is_costly_group (group_insns, next_insn);
32203 if (!force)
32204 return can_issue_more;
32205
32206 if (sched_verbose > 6)
32207 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
32208 *group_count, can_issue_more);
32209
32210 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32211 {
32212 if (*group_end)
32213 can_issue_more = 0;
32214
32215 /* Since only a branch can be issued in the last issue_slot, it is
32216 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32217 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32218 in this case the last nop will start a new group and the branch
32219 will be forced to the new group. */
32220 if (can_issue_more && !is_branch_slot_insn (next_insn))
32221 can_issue_more--;
32222
32223 /* Do we have a special group ending nop? */
32224 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
32225 || rs6000_tune == PROCESSOR_POWER8)
32226 {
32227 nop = gen_group_ending_nop ();
32228 emit_insn_before (nop, next_insn);
32229 can_issue_more = 0;
32230 }
32231 else
32232 while (can_issue_more > 0)
32233 {
32234 nop = gen_nop ();
32235 emit_insn_before (nop, next_insn);
32236 can_issue_more--;
32237 }
32238
32239 *group_end = true;
32240 return 0;
32241 }
32242
32243 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32244 {
32245 int n_nops = rs6000_sched_insert_nops;
32246
32247 /* Nops can't be issued from the branch slot, so the effective
32248 issue_rate for nops is 'issue_rate - 1'. */
32249 if (can_issue_more == 0)
32250 can_issue_more = issue_rate;
32251 can_issue_more--;
32252 if (can_issue_more == 0)
32253 {
32254 can_issue_more = issue_rate - 1;
32255 (*group_count)++;
32256 end = true;
32257 for (i = 0; i < issue_rate; i++)
32258 {
32259 group_insns[i] = 0;
32260 }
32261 }
32262
32263 while (n_nops > 0)
32264 {
32265 nop = gen_nop ();
32266 emit_insn_before (nop, next_insn);
32267 if (can_issue_more == issue_rate - 1) /* new group begins */
32268 end = false;
32269 can_issue_more--;
32270 if (can_issue_more == 0)
32271 {
32272 can_issue_more = issue_rate - 1;
32273 (*group_count)++;
32274 end = true;
32275 for (i = 0; i < issue_rate; i++)
32276 {
32277 group_insns[i] = 0;
32278 }
32279 }
32280 n_nops--;
32281 }
32282
32283 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32284 can_issue_more++;
32285
32286 /* Is next_insn going to start a new group? */
32287 *group_end
32288 = (end
32289 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32290 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32291 || (can_issue_more < issue_rate &&
32292 insn_terminates_group_p (next_insn, previous_group)));
32293 if (*group_end && end)
32294 (*group_count)--;
32295
32296 if (sched_verbose > 6)
32297 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32298 *group_count, can_issue_more);
32299 return can_issue_more;
32300 }
32301
32302 return can_issue_more;
32303 }
32304
32305 /* This function tries to synch the dispatch groups that the compiler "sees"
32306 with the dispatch groups that the processor dispatcher is expected to
32307 form in practice. It tries to achieve this synchronization by forcing the
32308 estimated processor grouping on the compiler (as opposed to the function
32309 'pad_groups' which tries to force the scheduler's grouping on the processor).
32310
32311 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32312 examines the (estimated) dispatch groups that will be formed by the processor
32313 dispatcher. It marks these group boundaries to reflect the estimated
32314 processor grouping, overriding the grouping that the scheduler had marked.
32315 Depending on the value of the flag '-minsert-sched-nops' this function can
32316 force certain insns into separate groups or force a certain distance between
32317 them by inserting nops, for example, if there exists a "costly dependence"
32318 between the insns.
32319
32320 The function estimates the group boundaries that the processor will form as
32321 follows: It keeps track of how many vacant issue slots are available after
32322 each insn. A subsequent insn will start a new group if one of the following
32323 4 cases applies:
32324 - no more vacant issue slots remain in the current dispatch group.
32325 - only the last issue slot, which is the branch slot, is vacant, but the next
32326 insn is not a branch.
32327 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32328 which means that a cracked insn (which occupies two issue slots) can't be
32329 issued in this group.
32330 - less than 'issue_rate' slots are vacant, and the next insn always needs to
32331 start a new group. */
32332
32333 static int
32334 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32335 rtx_insn *tail)
32336 {
32337 rtx_insn *insn, *next_insn;
32338 int issue_rate;
32339 int can_issue_more;
32340 int slot, i;
32341 bool group_end;
32342 int group_count = 0;
32343 rtx *group_insns;
32344
32345 /* Initialize. */
32346 issue_rate = rs6000_issue_rate ();
32347 group_insns = XALLOCAVEC (rtx, issue_rate);
32348 for (i = 0; i < issue_rate; i++)
32349 {
32350 group_insns[i] = 0;
32351 }
32352 can_issue_more = issue_rate;
32353 slot = 0;
32354 insn = get_next_active_insn (prev_head_insn, tail);
32355 group_end = false;
32356
32357 while (insn != NULL_RTX)
32358 {
32359 slot = (issue_rate - can_issue_more);
32360 group_insns[slot] = insn;
32361 can_issue_more =
32362 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32363 if (insn_terminates_group_p (insn, current_group))
32364 can_issue_more = 0;
32365
32366 next_insn = get_next_active_insn (insn, tail);
32367 if (next_insn == NULL_RTX)
32368 return group_count + 1;
32369
32370 /* Is next_insn going to start a new group? */
32371 group_end
32372 = (can_issue_more == 0
32373 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32374 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32375 || (can_issue_more < issue_rate &&
32376 insn_terminates_group_p (next_insn, previous_group)));
32377
32378 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32379 next_insn, &group_end, can_issue_more,
32380 &group_count);
32381
32382 if (group_end)
32383 {
32384 group_count++;
32385 can_issue_more = 0;
32386 for (i = 0; i < issue_rate; i++)
32387 {
32388 group_insns[i] = 0;
32389 }
32390 }
32391
32392 if (GET_MODE (next_insn) == TImode && can_issue_more)
32393 PUT_MODE (next_insn, VOIDmode);
32394 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32395 PUT_MODE (next_insn, TImode);
32396
32397 insn = next_insn;
32398 if (can_issue_more == 0)
32399 can_issue_more = issue_rate;
32400 } /* while */
32401
32402 return group_count;
32403 }
32404
32405 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32406 dispatch group boundaries that the scheduler had marked. Pad with nops
32407 any dispatch groups which have vacant issue slots, in order to force the
32408 scheduler's grouping on the processor dispatcher. The function
32409 returns the number of dispatch groups found. */
32410
32411 static int
32412 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32413 rtx_insn *tail)
32414 {
32415 rtx_insn *insn, *next_insn;
32416 rtx nop;
32417 int issue_rate;
32418 int can_issue_more;
32419 int group_end;
32420 int group_count = 0;
32421
32422 /* Initialize issue_rate. */
32423 issue_rate = rs6000_issue_rate ();
32424 can_issue_more = issue_rate;
32425
32426 insn = get_next_active_insn (prev_head_insn, tail);
32427 next_insn = get_next_active_insn (insn, tail);
32428
32429 while (insn != NULL_RTX)
32430 {
32431 can_issue_more =
32432 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32433
32434 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32435
32436 if (next_insn == NULL_RTX)
32437 break;
32438
32439 if (group_end)
32440 {
32441 /* If the scheduler had marked group termination at this location
32442 (between insn and next_insn), and neither insn nor next_insn will
32443 force group termination, pad the group with nops to force group
32444 termination. */
32445 if (can_issue_more
32446 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32447 && !insn_terminates_group_p (insn, current_group)
32448 && !insn_terminates_group_p (next_insn, previous_group))
32449 {
32450 if (!is_branch_slot_insn (next_insn))
32451 can_issue_more--;
32452
32453 while (can_issue_more)
32454 {
32455 nop = gen_nop ();
32456 emit_insn_before (nop, next_insn);
32457 can_issue_more--;
32458 }
32459 }
32460
32461 can_issue_more = issue_rate;
32462 group_count++;
32463 }
32464
32465 insn = next_insn;
32466 next_insn = get_next_active_insn (insn, tail);
32467 }
32468
32469 return group_count;
32470 }
32471
32472 /* We're beginning a new block. Initialize data structures as necessary. */
32473
32474 static void
32475 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32476 int sched_verbose ATTRIBUTE_UNUSED,
32477 int max_ready ATTRIBUTE_UNUSED)
32478 {
32479 last_scheduled_insn = NULL;
32480 load_store_pendulum = 0;
32481 divide_cnt = 0;
32482 vec_pairing = 0;
32483 }
32484
32485 /* The following function is called at the end of scheduling BB.
32486 After reload, it inserts nops at insn group boundaries. */
32487
32488 static void
32489 rs6000_sched_finish (FILE *dump, int sched_verbose)
32490 {
32491 int n_groups;
32492
32493 if (sched_verbose)
32494 fprintf (dump, "=== Finishing schedule.\n");
32495
32496 if (reload_completed && rs6000_sched_groups)
32497 {
32498 /* Do not run the sched_finish hook when selective scheduling is enabled. */
32499 if (sel_sched_p ())
32500 return;
32501
32502 if (rs6000_sched_insert_nops == sched_finish_none)
32503 return;
32504
32505 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32506 n_groups = pad_groups (dump, sched_verbose,
32507 current_sched_info->prev_head,
32508 current_sched_info->next_tail);
32509 else
32510 n_groups = redefine_groups (dump, sched_verbose,
32511 current_sched_info->prev_head,
32512 current_sched_info->next_tail);
32513
32514 if (sched_verbose >= 6)
32515 {
32516 fprintf (dump, "ngroups = %d\n", n_groups);
32517 print_rtl (dump, current_sched_info->prev_head);
32518 fprintf (dump, "Done finish_sched\n");
32519 }
32520 }
32521 }
32522
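/* Scheduling state saved and restored across scheduling regions by the
   sched context hooks below. */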
32523 struct rs6000_sched_context
32524 {
32525 short cached_can_issue_more;
32526 rtx_insn *last_scheduled_insn;
32527 int load_store_pendulum;
32528 int divide_cnt;
32529 int vec_pairing;
32530 };
32531
32532 typedef struct rs6000_sched_context rs6000_sched_context_def;
32533 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32534
32535 /* Allocate storage for a new scheduling context. */
32536 static void *
32537 rs6000_alloc_sched_context (void)
32538 {
32539 return xmalloc (sizeof (rs6000_sched_context_def));
32540 }
32541
32542 /* If CLEAN_P is true, initialize _SC with clean data;
32543 otherwise initialize it from the global context. */
32544 static void
32545 rs6000_init_sched_context (void *_sc, bool clean_p)
32546 {
32547 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32548
32549 if (clean_p)
32550 {
32551 sc->cached_can_issue_more = 0;
32552 sc->last_scheduled_insn = NULL;
32553 sc->load_store_pendulum = 0;
32554 sc->divide_cnt = 0;
32555 sc->vec_pairing = 0;
32556 }
32557 else
32558 {
32559 sc->cached_can_issue_more = cached_can_issue_more;
32560 sc->last_scheduled_insn = last_scheduled_insn;
32561 sc->load_store_pendulum = load_store_pendulum;
32562 sc->divide_cnt = divide_cnt;
32563 sc->vec_pairing = vec_pairing;
32564 }
32565 }
32566
32567 /* Sets the global scheduling context to the one pointed to by _SC. */
32568 static void
32569 rs6000_set_sched_context (void *_sc)
32570 {
32571 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32572
32573 gcc_assert (sc != NULL);
32574
32575 cached_can_issue_more = sc->cached_can_issue_more;
32576 last_scheduled_insn = sc->last_scheduled_insn;
32577 load_store_pendulum = sc->load_store_pendulum;
32578 divide_cnt = sc->divide_cnt;
32579 vec_pairing = sc->vec_pairing;
32580 }
32581
32582 /* Free _SC. */
32583 static void
32584 rs6000_free_sched_context (void *_sc)
32585 {
32586 gcc_assert (_sc != NULL);
32587
32588 free (_sc);
32589 }
32590
32591 static bool
32592 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32593 {
32594 switch (get_attr_type (insn))
32595 {
32596 case TYPE_DIV:
32597 case TYPE_SDIV:
32598 case TYPE_DDIV:
32599 case TYPE_VECDIV:
32600 case TYPE_SSQRT:
32601 case TYPE_DSQRT:
32602 return false;
32603
32604 default:
32605 return true;
32606 }
32607 }
32608 \f
32609 /* Length in units of the trampoline for entering a nested function. */
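/* For example, under the 64-bit AIX ABI the trampoline is the 3-word
   function descriptor built in rs6000_trampoline_init below:
   3 words * 8 bytes = 24. */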
32610
32611 int
32612 rs6000_trampoline_size (void)
32613 {
32614 int ret = 0;
32615
32616 switch (DEFAULT_ABI)
32617 {
32618 default:
32619 gcc_unreachable ();
32620
32621 case ABI_AIX:
32622 ret = (TARGET_32BIT) ? 12 : 24;
32623 break;
32624
32625 case ABI_ELFv2:
32626 gcc_assert (!TARGET_32BIT);
32627 ret = 32;
32628 break;
32629
32630 case ABI_DARWIN:
32631 case ABI_V4:
32632 ret = (TARGET_32BIT) ? 40 : 48;
32633 break;
32634 }
32635
32636 return ret;
32637 }
32638
32639 /* Emit RTL insns to initialize the variable parts of a trampoline.
32640 FNADDR is an RTX for the address of the function's pure code.
32641 CXT is an RTX for the static chain value for the function. */
32642
32643 static void
32644 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32645 {
32646 int regsize = (TARGET_32BIT) ? 4 : 8;
32647 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32648 rtx ctx_reg = force_reg (Pmode, cxt);
32649 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32650
32651 switch (DEFAULT_ABI)
32652 {
32653 default:
32654 gcc_unreachable ();
32655
32656 /* Under AIX, just build the 3-word function descriptor. */
32657 case ABI_AIX:
32658 {
32659 rtx fnmem, fn_reg, toc_reg;
32660
32661 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32662 error ("you cannot take the address of a nested function if you use "
32663 "the %qs option", "-mno-pointers-to-nested-functions");
32664
32665 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32666 fn_reg = gen_reg_rtx (Pmode);
32667 toc_reg = gen_reg_rtx (Pmode);
32668
32669 /* Macro to shorten the code expansions below. */
32670 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32671
32672 m_tramp = replace_equiv_address (m_tramp, addr);
32673
32674 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32675 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32676 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32677 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32678 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32679
32680 # undef MEM_PLUS
32681 }
32682 break;
32683
32684 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32685 case ABI_ELFv2:
32686 case ABI_DARWIN:
32687 case ABI_V4:
32688 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32689 LCT_NORMAL, VOIDmode,
32690 addr, Pmode,
32691 GEN_INT (rs6000_trampoline_size ()), SImode,
32692 fnaddr, Pmode,
32693 ctx_reg, Pmode);
32694 break;
32695 }
32696 }
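
/* For the ABI_AIX case above, the initialized trampoline is itself a
   function descriptor, copied word by word from FNADDR's descriptor with
   the static chain appended:

     m_tramp + 0*regsize: function entry address (fn_reg)
     m_tramp + 1*regsize: TOC pointer (toc_reg)
     m_tramp + 2*regsize: static chain (ctx_reg) */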
32697
32698 \f
32699 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32700 identifier as an argument, so the front end shouldn't look it up. */
32701
32702 static bool
32703 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32704 {
32705 return is_attribute_p ("altivec", attr_id);
32706 }
32707
32708 /* Handle the "altivec" attribute. The attribute may have
32709 arguments as follows:
32710
32711 __attribute__((altivec(vector__)))
32712 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32713 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32714
32715 and may appear more than once (e.g., 'vector bool char') in a
32716 given declaration. */
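
/* For example, given the scalar declaration

     __attribute__((altivec(vector__))) int vi;

   the 'v' case below maps SImode to V4SI_type_node, i.e. 'vector int';
   the 'b' and 'p' cases handle the extra attribute added by the
   'vector bool' and 'vector pixel' spellings. */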
32717
32718 static tree
32719 rs6000_handle_altivec_attribute (tree *node,
32720 tree name ATTRIBUTE_UNUSED,
32721 tree args,
32722 int flags ATTRIBUTE_UNUSED,
32723 bool *no_add_attrs)
32724 {
32725 tree type = *node, result = NULL_TREE;
32726 machine_mode mode;
32727 int unsigned_p;
32728 char altivec_type
32729 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32730 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32731 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32732 : '?');
32733
32734 while (POINTER_TYPE_P (type)
32735 || TREE_CODE (type) == FUNCTION_TYPE
32736 || TREE_CODE (type) == METHOD_TYPE
32737 || TREE_CODE (type) == ARRAY_TYPE)
32738 type = TREE_TYPE (type);
32739
32740 mode = TYPE_MODE (type);
32741
32742 /* Check for invalid AltiVec type qualifiers. */
32743 if (type == long_double_type_node)
32744 error ("use of %<long double%> in AltiVec types is invalid");
32745 else if (type == boolean_type_node)
32746 error ("use of boolean types in AltiVec types is invalid");
32747 else if (TREE_CODE (type) == COMPLEX_TYPE)
32748 error ("use of %<complex%> in AltiVec types is invalid");
32749 else if (DECIMAL_FLOAT_MODE_P (mode))
32750 error ("use of decimal floating point types in AltiVec types is invalid");
32751 else if (!TARGET_VSX)
32752 {
32753 if (type == long_unsigned_type_node || type == long_integer_type_node)
32754 {
32755 if (TARGET_64BIT)
32756 error ("use of %<long%> in AltiVec types is invalid for "
32757 "64-bit code without %qs", "-mvsx");
32758 else if (rs6000_warn_altivec_long)
32759 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32760 "use %<int%>");
32761 }
32762 else if (type == long_long_unsigned_type_node
32763 || type == long_long_integer_type_node)
32764 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32765 "-mvsx");
32766 else if (type == double_type_node)
32767 error ("use of %<double%> in AltiVec types is invalid without %qs",
32768 "-mvsx");
32769 }
32770
32771 switch (altivec_type)
32772 {
32773 case 'v':
32774 unsigned_p = TYPE_UNSIGNED (type);
32775 switch (mode)
32776 {
32777 case E_TImode:
32778 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32779 break;
32780 case E_DImode:
32781 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32782 break;
32783 case E_SImode:
32784 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32785 break;
32786 case E_HImode:
32787 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32788 break;
32789 case E_QImode:
32790 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32791 break;
32792 case E_SFmode: result = V4SF_type_node; break;
32793 case E_DFmode: result = V2DF_type_node; break;
32794 /* If the user says 'vector int bool', we may be handed the 'bool'
32795 attribute _before_ the 'vector' attribute, and so select the
32796 proper type in the 'b' case below. */
32797 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32798 case E_V2DImode: case E_V2DFmode:
32799 result = type;
32800 default: break;
32801 }
32802 break;
32803 case 'b':
32804 switch (mode)
32805 {
32806 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32807 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32808 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32809 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32810 default: break;
32811 }
32812 break;
32813 case 'p':
32814 switch (mode)
32815 {
32816 case E_V8HImode: result = pixel_V8HI_type_node;
32817 default: break;
32818 }
32819 default: break;
32820 }
32821
32822 /* Propagate qualifiers attached to the element type
32823 onto the vector type. */
32824 if (result && result != type && TYPE_QUALS (type))
32825 result = build_qualified_type (result, TYPE_QUALS (type));
32826
32827 *no_add_attrs = true; /* No need to hang on to the attribute. */
32828
32829 if (result)
32830 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32831
32832 return NULL_TREE;
32833 }
32834
32835 /* AltiVec defines five built-in scalar types that serve as vector
32836 elements; we must teach the compiler how to mangle them. The 128-bit
32837 floating point mangling is target-specific as well. */
32838
32839 static const char *
32840 rs6000_mangle_type (const_tree type)
32841 {
32842 type = TYPE_MAIN_VARIANT (type);
32843
32844 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32845 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32846 return NULL;
32847
32848 if (type == bool_char_type_node) return "U6__boolc";
32849 if (type == bool_short_type_node) return "U6__bools";
32850 if (type == pixel_type_node) return "u7__pixel";
32851 if (type == bool_int_type_node) return "U6__booli";
32852 if (type == bool_long_long_type_node) return "U6__boolx";
32853
32854 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32855 return "g";
32856 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32857 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32858
32859 /* For all other types, use the default mangling. */
32860 return NULL;
32861 }
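
/* For instance, bool_int_type_node (the element type of 'vector bool
   int') mangles as "U6__booli", and with IBM extended double a 'long
   double' parameter mangles as "g", so 'void g (long double)' becomes
   _Z1gg (sketches of the encodings returned above). */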
32862
32863 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32864 struct attribute_spec.handler. */
32865
32866 static tree
32867 rs6000_handle_longcall_attribute (tree *node, tree name,
32868 tree args ATTRIBUTE_UNUSED,
32869 int flags ATTRIBUTE_UNUSED,
32870 bool *no_add_attrs)
32871 {
32872 if (TREE_CODE (*node) != FUNCTION_TYPE
32873 && TREE_CODE (*node) != FIELD_DECL
32874 && TREE_CODE (*node) != TYPE_DECL)
32875 {
32876 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32877 name);
32878 *no_add_attrs = true;
32879 }
32880
32881 return NULL_TREE;
32882 }
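
/* E.g. 'void far_fn (void) __attribute__((longcall));' attaches the
   attribute to the function type; applying it to a variable instead
   draws the warning above. */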
32883
32884 /* Set longcall attributes on all functions declared when
32885 rs6000_default_long_calls is true. */
32886 static void
32887 rs6000_set_default_type_attributes (tree type)
32888 {
32889 if (rs6000_default_long_calls
32890 && (TREE_CODE (type) == FUNCTION_TYPE
32891 || TREE_CODE (type) == METHOD_TYPE))
32892 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32893 NULL_TREE,
32894 TYPE_ATTRIBUTES (type));
32895
32896 #if TARGET_MACHO
32897 darwin_set_default_type_attributes (type);
32898 #endif
32899 }
32900
32901 /* Return a reference suitable for calling a function with the
32902 longcall attribute. */
32903
32904 static rtx
32905 rs6000_longcall_ref (rtx call_ref, rtx arg)
32906 {
32907 /* System V adds '.' to the internal name, so skip any leading dots. */
32908 const char *call_name = XSTR (call_ref, 0);
32909 if (*call_name == '.')
32910 {
32911 while (*call_name == '.')
32912 call_name++;
32913
32914 tree node = get_identifier (call_name);
32915 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32916 }
32917
32918 if (TARGET_PLTSEQ)
32919 {
32920 rtx base = const0_rtx;
32921 int regno;
32922 if (DEFAULT_ABI == ABI_ELFv2)
32923 {
32924 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32925 regno = 12;
32926 }
32927 else
32928 {
32929 if (flag_pic)
32930 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32931 regno = 11;
32932 }
32933 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32934 may be used by a function global entry point. For SysV4, r11
32935 is used by __glink_PLTresolve lazy resolver entry. */
32936 rtx reg = gen_rtx_REG (Pmode, regno);
32937 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32938 UNSPEC_PLT16_HA);
32939 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32940 UNSPEC_PLT16_LO);
32941 emit_insn (gen_rtx_SET (reg, hi));
32942 emit_insn (gen_rtx_SET (reg, lo));
32943 return reg;
32944 }
32945
32946 return force_reg (Pmode, call_ref);
32947 }
32948 \f
32949 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32950 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32951 #endif
32952
32953 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32954 struct attribute_spec.handler. */
32955 static tree
32956 rs6000_handle_struct_attribute (tree *node, tree name,
32957 tree args ATTRIBUTE_UNUSED,
32958 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32959 {
32960 tree *type = NULL;
32961 if (DECL_P (*node))
32962 {
32963 if (TREE_CODE (*node) == TYPE_DECL)
32964 type = &TREE_TYPE (*node);
32965 }
32966 else
32967 type = node;
32968
32969 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32970 || TREE_CODE (*type) == UNION_TYPE)))
32971 {
32972 warning (OPT_Wattributes, "%qE attribute ignored", name);
32973 *no_add_attrs = true;
32974 }
32975
32976 else if ((is_attribute_p ("ms_struct", name)
32977 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32978 || ((is_attribute_p ("gcc_struct", name)
32979 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32980 {
32981 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32982 name);
32983 *no_add_attrs = true;
32984 }
32985
32986 return NULL_TREE;
32987 }
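
/* E.g. 'struct __attribute__((ms_struct)) S { char c; int i; };' is
   accepted here, while combining ms_struct and gcc_struct on one type
   triggers the incompatible-attribute warning above. */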
32988
32989 static bool
32990 rs6000_ms_bitfield_layout_p (const_tree record_type)
32991 {
32992 return (TARGET_USE_MS_BITFIELD_LAYOUT
32993 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32994 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32995 }
32996 \f
32997 #ifdef USING_ELFOS_H
32998
32999 /* A get_unnamed_section callback, used for switching to toc_section. */
33000
33001 static void
33002 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33003 {
33004 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33005 && TARGET_MINIMAL_TOC)
33006 {
33007 if (!toc_initialized)
33008 {
33009 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33010 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33011 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
33012 fprintf (asm_out_file, "\t.tc ");
33013 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
33014 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33015 fprintf (asm_out_file, "\n");
33016
33017 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33018 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33019 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33020 fprintf (asm_out_file, " = .+32768\n");
33021 toc_initialized = 1;
33022 }
33023 else
33024 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33025 }
33026 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33027 {
33028 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33029 if (!toc_initialized)
33030 {
33031 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33032 toc_initialized = 1;
33033 }
33034 }
33035 else
33036 {
33037 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33038 if (!toc_initialized)
33039 {
33040 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33041 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33042 fprintf (asm_out_file, " = .+32768\n");
33043 toc_initialized = 1;
33044 }
33045 }
33046 }
33047
33048 /* Implement TARGET_ASM_INIT_SECTIONS. */
33049
33050 static void
33051 rs6000_elf_asm_init_sections (void)
33052 {
33053 toc_section
33054 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
33055
33056 sdata2_section
33057 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
33058 SDATA2_SECTION_ASM_OP);
33059 }
33060
33061 /* Implement TARGET_SELECT_RTX_SECTION. */
33062
33063 static section *
33064 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
33065 unsigned HOST_WIDE_INT align)
33066 {
33067 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33068 return toc_section;
33069 else
33070 return default_elf_select_rtx_section (mode, x, align);
33071 }
33072 \f
33073 /* For a SYMBOL_REF, set generic flags and then perform some
33074 target-specific processing.
33075
33076 When the AIX ABI is requested on a non-AIX system, replace the
33077 function name with the real name (with a leading .) rather than the
33078 function descriptor name. This saves a lot of overriding code to
33079 read the prefixes. */
33080
33081 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
33082 static void
33083 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
33084 {
33085 default_encode_section_info (decl, rtl, first);
33086
33087 if (first
33088 && TREE_CODE (decl) == FUNCTION_DECL
33089 && !TARGET_AIX
33090 && DEFAULT_ABI == ABI_AIX)
33091 {
33092 rtx sym_ref = XEXP (rtl, 0);
33093 size_t len = strlen (XSTR (sym_ref, 0));
33094 char *str = XALLOCAVEC (char, len + 2);
33095 str[0] = '.';
33096 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
33097 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
33098 }
33099 }
33100
33101 static inline bool
33102 compare_section_name (const char *section, const char *templ)
33103 {
33104 int len;
33105
33106 len = strlen (templ);
33107 return (strncmp (section, templ, len) == 0
33108 && (section[len] == 0 || section[len] == '.'));
33109 }
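
/* E.g. compare_section_name (".sdata.foo", ".sdata") and
   compare_section_name (".sdata", ".sdata") are true, but
   compare_section_name (".sdata2", ".sdata") is false, since the
   character after the prefix is neither NUL nor '.'. */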
33110
33111 bool
33112 rs6000_elf_in_small_data_p (const_tree decl)
33113 {
33114 if (rs6000_sdata == SDATA_NONE)
33115 return false;
33116
33117 /* We want to merge strings, so we never consider them small data. */
33118 if (TREE_CODE (decl) == STRING_CST)
33119 return false;
33120
33121 /* Functions are never in the small data area. */
33122 if (TREE_CODE (decl) == FUNCTION_DECL)
33123 return false;
33124
33125 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
33126 {
33127 const char *section = DECL_SECTION_NAME (decl);
33128 if (compare_section_name (section, ".sdata")
33129 || compare_section_name (section, ".sdata2")
33130 || compare_section_name (section, ".gnu.linkonce.s")
33131 || compare_section_name (section, ".sbss")
33132 || compare_section_name (section, ".sbss2")
33133 || compare_section_name (section, ".gnu.linkonce.sb")
33134 || strcmp (section, ".PPC.EMB.sdata0") == 0
33135 || strcmp (section, ".PPC.EMB.sbss0") == 0)
33136 return true;
33137 }
33138 else
33139 {
33140 /* If we are told not to put readonly data in sdata, then don't. */
33141 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
33142 && !rs6000_readonly_in_sdata)
33143 return false;
33144
33145 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33146
33147 if (size > 0
33148 && size <= g_switch_value
33149 /* If it's not public, and we're not going to reference it via the
33150 small data area anyway, there's no need to put it there. */
33151 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33152 return true;
33153 }
33154
33155 return false;
33156 }
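
/* For example, with -G 8 a file-scope 'static int x;' (4 bytes, no
   explicit section) passes the size check above and is treated as small
   data, provided the rs6000_sdata setting permits non-public decls. */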
33157
33158 #endif /* USING_ELFOS_H */
33159 \f
33160 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33161
33162 static bool
33163 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33164 {
33165 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33166 }
33167
33168 /* Do not place thread-local symbol refs in the object blocks. */
33169
33170 static bool
33171 rs6000_use_blocks_for_decl_p (const_tree decl)
33172 {
33173 return !DECL_THREAD_LOCAL_P (decl);
33174 }
33175 \f
33176 /* Return a REG that occurs in ADDR with coefficient 1.
33177 ADDR can be effectively incremented by incrementing REG.
33178
33179 r0 is special and we must not select it as an address
33180 register by this routine since our caller will try to
33181 increment the returned register via an "la" instruction. */
33182
33183 rtx
33184 find_addr_reg (rtx addr)
33185 {
33186 while (GET_CODE (addr) == PLUS)
33187 {
33188 if (REG_P (XEXP (addr, 0))
33189 && REGNO (XEXP (addr, 0)) != 0)
33190 addr = XEXP (addr, 0);
33191 else if (REG_P (XEXP (addr, 1))
33192 && REGNO (XEXP (addr, 1)) != 0)
33193 addr = XEXP (addr, 1);
33194 else if (CONSTANT_P (XEXP (addr, 0)))
33195 addr = XEXP (addr, 1);
33196 else if (CONSTANT_P (XEXP (addr, 1)))
33197 addr = XEXP (addr, 0);
33198 else
33199 gcc_unreachable ();
33200 }
33201 gcc_assert (REG_P (addr) && REGNO (addr) != 0);
33202 return addr;
33203 }
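
/* E.g. for ADDR = (plus (reg 9) (const_int 16)) this returns (reg 9);
   for (plus (plus (reg 9) (reg 10)) (const_int 8)) the constant operand
   is stripped first and (reg 9) is returned. */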
33204
33205 void
33206 rs6000_fatal_bad_address (rtx op)
33207 {
33208 fatal_insn ("bad address", op);
33209 }
33210
33211 #if TARGET_MACHO
33212
33213 typedef struct branch_island_d {
33214 tree function_name;
33215 tree label_name;
33216 int line_number;
33217 } branch_island;
33218
33219
33220 static vec<branch_island, va_gc> *branch_islands;
33221
33222 /* Remember to generate a branch island for far calls to the given
33223 function. */
33224
33225 static void
33226 add_compiler_branch_island (tree label_name, tree function_name,
33227 int line_number)
33228 {
33229 branch_island bi = {function_name, label_name, line_number};
33230 vec_safe_push (branch_islands, bi);
33231 }
33232
33233 /* Generate far-jump branch islands for everything recorded in
33234 branch_islands. Invoked immediately after the last instruction of
33235 the epilogue has been emitted; the branch islands must be appended
33236 to, and contiguous with, the function body. Mach-O stubs are
33237 generated in machopic_output_stub(). */
33238
33239 static void
33240 macho_branch_islands (void)
33241 {
33242 char tmp_buf[512];
33243
33244 while (!vec_safe_is_empty (branch_islands))
33245 {
33246 branch_island *bi = &branch_islands->last ();
33247 const char *label = IDENTIFIER_POINTER (bi->label_name);
33248 const char *name = IDENTIFIER_POINTER (bi->function_name);
33249 char name_buf[512];
33250 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33251 if (name[0] == '*' || name[0] == '&')
33252 strcpy (name_buf, name+1);
33253 else
33254 {
33255 name_buf[0] = '_';
33256 strcpy (name_buf+1, name);
33257 }
33258 strcpy (tmp_buf, "\n");
33259 strcat (tmp_buf, label);
33260 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33261 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33262 dbxout_stabd (N_SLINE, bi->line_number);
33263 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33264 if (flag_pic)
33265 {
33266 if (TARGET_LINK_STACK)
33267 {
33268 char name[32];
33269 get_ppc476_thunk_name (name);
33270 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33271 strcat (tmp_buf, name);
33272 strcat (tmp_buf, "\n");
33273 strcat (tmp_buf, label);
33274 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33275 }
33276 else
33277 {
33278 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33279 strcat (tmp_buf, label);
33280 strcat (tmp_buf, "_pic\n");
33281 strcat (tmp_buf, label);
33282 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33283 }
33284
33285 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33286 strcat (tmp_buf, name_buf);
33287 strcat (tmp_buf, " - ");
33288 strcat (tmp_buf, label);
33289 strcat (tmp_buf, "_pic)\n");
33290
33291 strcat (tmp_buf, "\tmtlr r0\n");
33292
33293 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33294 strcat (tmp_buf, name_buf);
33295 strcat (tmp_buf, " - ");
33296 strcat (tmp_buf, label);
33297 strcat (tmp_buf, "_pic)\n");
33298
33299 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33300 }
33301 else
33302 {
33303 strcat (tmp_buf, ":\nlis r12,hi16(");
33304 strcat (tmp_buf, name_buf);
33305 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33306 strcat (tmp_buf, name_buf);
33307 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33308 }
33309 output_asm_insn (tmp_buf, 0);
33310 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33311 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33312 dbxout_stabd (N_SLINE, bi->line_number);
33313 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33314 branch_islands->pop ();
33315 }
33316 }
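
/* For reference, a PIC branch island emitted by the code above (without
   TARGET_LINK_STACK) has this shape:

	<label>:
		mflr r0
		bcl 20,31,<label>_pic
	<label>_pic:
		mflr r11
		addis r11,r11,ha16(<name> - <label>_pic)
		mtlr r0
		addi r12,r11,lo16(<name> - <label>_pic)
		mtctr r12
		bctr */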
33317
33318 /* NO_PREVIOUS_DEF checks whether the given function name already has a
33319 branch island recorded for it. */
33320
33321 static int
33322 no_previous_def (tree function_name)
33323 {
33324 branch_island *bi;
33325 unsigned ix;
33326
33327 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33328 if (function_name == bi->function_name)
33329 return 0;
33330 return 1;
33331 }
33332
33333 /* GET_PREV_LABEL gets the label name from the previous definition of
33334 the function. */
33335
33336 static tree
33337 get_prev_label (tree function_name)
33338 {
33339 branch_island *bi;
33340 unsigned ix;
33341
33342 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33343 if (function_name == bi->function_name)
33344 return bi->label_name;
33345 return NULL_TREE;
33346 }
33347
33348 /* Generate PIC and indirect symbol stubs. */
33349
33350 void
33351 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33352 {
33353 unsigned int length;
33354 char *symbol_name, *lazy_ptr_name;
33355 char *local_label_0;
33356 static int label = 0;
33357
33358 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33359 symb = (*targetm.strip_name_encoding) (symb);
33360
33361
33362 length = strlen (symb);
33363 symbol_name = XALLOCAVEC (char, length + 32);
33364 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33365
33366 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33367 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33368
33369 if (flag_pic == 2)
33370 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33371 else
33372 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33373
33374 if (flag_pic == 2)
33375 {
33376 fprintf (file, "\t.align 5\n");
33377
33378 fprintf (file, "%s:\n", stub);
33379 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33380
33381 label++;
33382 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33383 sprintf (local_label_0, "\"L%011d$spb\"", label);
33384
33385 fprintf (file, "\tmflr r0\n");
33386 if (TARGET_LINK_STACK)
33387 {
33388 char name[32];
33389 get_ppc476_thunk_name (name);
33390 fprintf (file, "\tbl %s\n", name);
33391 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33392 }
33393 else
33394 {
33395 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33396 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33397 }
33398 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33399 lazy_ptr_name, local_label_0);
33400 fprintf (file, "\tmtlr r0\n");
33401 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33402 (TARGET_64BIT ? "ldu" : "lwzu"),
33403 lazy_ptr_name, local_label_0);
33404 fprintf (file, "\tmtctr r12\n");
33405 fprintf (file, "\tbctr\n");
33406 }
33407 else
33408 {
33409 fprintf (file, "\t.align 4\n");
33410
33411 fprintf (file, "%s:\n", stub);
33412 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33413
33414 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33415 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33416 (TARGET_64BIT ? "ldu" : "lwzu"),
33417 lazy_ptr_name);
33418 fprintf (file, "\tmtctr r12\n");
33419 fprintf (file, "\tbctr\n");
33420 }
33421
33422 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33423 fprintf (file, "%s:\n", lazy_ptr_name);
33424 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33425 fprintf (file, "%sdyld_stub_binding_helper\n",
33426 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33427 }
33428
33429 /* Legitimize PIC addresses. If the address is already
33430 position-independent, we return ORIG. Newly generated
33431 position-independent addresses go into a reg. This is REG if
33432 nonzero; otherwise we allocate register(s) as necessary. */
33433
33434 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
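/* I.e. X fits in a signed 16-bit immediate: X = -0x8000 gives a sum of 0
   and X = 0x7fff gives 0xffff, both < 0x10000, while X = 0x8000 gives
   exactly 0x10000 and fails. */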
33435
33436 rtx
33437 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33438 rtx reg)
33439 {
33440 rtx base, offset;
33441
33442 if (reg == NULL && !reload_completed)
33443 reg = gen_reg_rtx (Pmode);
33444
33445 if (GET_CODE (orig) == CONST)
33446 {
33447 rtx reg_temp;
33448
33449 if (GET_CODE (XEXP (orig, 0)) == PLUS
33450 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33451 return orig;
33452
33453 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33454
33455 /* Use a different reg for the intermediate value, as
33456 it will be marked UNCHANGING. */
33457 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33458 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33459 Pmode, reg_temp);
33460 offset =
33461 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33462 Pmode, reg);
33463
33464 if (CONST_INT_P (offset))
33465 {
33466 if (SMALL_INT (offset))
33467 return plus_constant (Pmode, base, INTVAL (offset));
33468 else if (!reload_completed)
33469 offset = force_reg (Pmode, offset);
33470 else
33471 {
33472 rtx mem = force_const_mem (Pmode, orig);
33473 return machopic_legitimize_pic_address (mem, Pmode, reg);
33474 }
33475 }
33476 return gen_rtx_PLUS (Pmode, base, offset);
33477 }
33478
33479 /* Fall back on generic machopic code. */
33480 return machopic_legitimize_pic_address (orig, mode, reg);
33481 }
33482
33483 /* Output a .machine directive for the Darwin assembler, and call
33484 the generic start_file routine. */
33485
33486 static void
33487 rs6000_darwin_file_start (void)
33488 {
33489 static const struct
33490 {
33491 const char *arg;
33492 const char *name;
33493 HOST_WIDE_INT if_set;
33494 } mapping[] = {
33495 { "ppc64", "ppc64", MASK_64BIT },
33496 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33497 { "power4", "ppc970", 0 },
33498 { "G5", "ppc970", 0 },
33499 { "7450", "ppc7450", 0 },
33500 { "7400", "ppc7400", MASK_ALTIVEC },
33501 { "G4", "ppc7400", 0 },
33502 { "750", "ppc750", 0 },
33503 { "740", "ppc750", 0 },
33504 { "G3", "ppc750", 0 },
33505 { "604e", "ppc604e", 0 },
33506 { "604", "ppc604", 0 },
33507 { "603e", "ppc603", 0 },
33508 { "603", "ppc603", 0 },
33509 { "601", "ppc601", 0 },
33510 { NULL, "ppc", 0 } };
33511 const char *cpu_id = "";
33512 size_t i;
33513
33514 rs6000_file_start ();
33515 darwin_file_start ();
33516
33517 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33518
33519 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33520 cpu_id = rs6000_default_cpu;
33521
33522 if (global_options_set.x_rs6000_cpu_index)
33523 cpu_id = processor_target_table[rs6000_cpu_index].name;
33524
33525 /* Look through the mapping array. Pick the first name that either
33526 matches the argument, has a bit set in IF_SET that is also set
33527 in the target flags, or has a NULL name. */
33528
33529 i = 0;
33530 while (mapping[i].arg != NULL
33531 && strcmp (mapping[i].arg, cpu_id) != 0
33532 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33533 i++;
33534
33535 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33536 }
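
/* For example, -mcpu=G4 matches the "G4" entry and emits
   ".machine ppc7400"; with no -mcpu at all but MASK_ALTIVEC set in
   rs6000_isa_flags, the "7400" entry is chosen via its IF_SET bit. */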
33537
33538 #endif /* TARGET_MACHO */
33539
33540 #if TARGET_ELF
33541 static int
33542 rs6000_elf_reloc_rw_mask (void)
33543 {
33544 if (flag_pic)
33545 return 3;
33546 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33547 return 2;
33548 else
33549 return 0;
33550 }
33551
33552 /* Record an element in the table of global constructors. SYMBOL is
33553 a SYMBOL_REF of the function to be called; PRIORITY is a number
33554 between 0 and MAX_INIT_PRIORITY.
33555
33556 This differs from default_named_section_asm_out_constructor in
33557 that we have special handling for -mrelocatable. */
33558
33559 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33560 static void
33561 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33562 {
33563 const char *section = ".ctors";
33564 char buf[18];
33565
33566 if (priority != DEFAULT_INIT_PRIORITY)
33567 {
33568 sprintf (buf, ".ctors.%.5u",
33569 /* Invert the numbering so the linker puts us in the proper
33570 order; constructors are run from right to left, and the
33571 linker sorts in increasing order. */
33572 MAX_INIT_PRIORITY - priority);
33573 section = buf;
33574 }
33575
33576 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33577 assemble_align (POINTER_SIZE);
33578
33579 if (DEFAULT_ABI == ABI_V4
33580 && (TARGET_RELOCATABLE || flag_pic > 1))
33581 {
33582 fputs ("\t.long (", asm_out_file);
33583 output_addr_const (asm_out_file, symbol);
33584 fputs (")@fixup\n", asm_out_file);
33585 }
33586 else
33587 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33588 }
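
/* E.g. a constructor with priority 101 lands in section ".ctors.65434"
   (MAX_INIT_PRIORITY 65535 minus 101), so lower-numbered priorities sort
   later and, with right-to-left execution, run earlier. */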
33589
33590 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33591 static void
33592 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33593 {
33594 const char *section = ".dtors";
33595 char buf[18];
33596
33597 if (priority != DEFAULT_INIT_PRIORITY)
33598 {
33599 sprintf (buf, ".dtors.%.5u",
33600 /* Invert the numbering so the linker puts us in the proper
33601 order; destructors are run from left to right, and the
33602 linker sorts in increasing order. */
33603 MAX_INIT_PRIORITY - priority);
33604 section = buf;
33605 }
33606
33607 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33608 assemble_align (POINTER_SIZE);
33609
33610 if (DEFAULT_ABI == ABI_V4
33611 && (TARGET_RELOCATABLE || flag_pic > 1))
33612 {
33613 fputs ("\t.long (", asm_out_file);
33614 output_addr_const (asm_out_file, symbol);
33615 fputs (")@fixup\n", asm_out_file);
33616 }
33617 else
33618 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33619 }
33620
33621 void
33622 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33623 {
33624 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33625 {
33626 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33627 ASM_OUTPUT_LABEL (file, name);
33628 fputs (DOUBLE_INT_ASM_OP, file);
33629 rs6000_output_function_entry (file, name);
33630 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33631 if (DOT_SYMBOLS)
33632 {
33633 fputs ("\t.size\t", file);
33634 assemble_name (file, name);
33635 fputs (",24\n\t.type\t.", file);
33636 assemble_name (file, name);
33637 fputs (",@function\n", file);
33638 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33639 {
33640 fputs ("\t.globl\t.", file);
33641 assemble_name (file, name);
33642 putc ('\n', file);
33643 }
33644 }
33645 else
33646 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33647 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33648 rs6000_output_function_entry (file, name);
33649 fputs (":\n", file);
33650 return;
33651 }
33652
33653 int uses_toc;
33654 if (DEFAULT_ABI == ABI_V4
33655 && (TARGET_RELOCATABLE || flag_pic > 1)
33656 && !TARGET_SECURE_PLT
33657 && (!constant_pool_empty_p () || crtl->profile)
33658 && (uses_toc = uses_TOC ()))
33659 {
33660 char buf[256];
33661
33662 if (uses_toc == 2)
33663 switch_to_other_text_partition ();
33664 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33665
33666 fprintf (file, "\t.long ");
33667 assemble_name (file, toc_label_name);
33668 need_toc_init = 1;
33669 putc ('-', file);
33670 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33671 assemble_name (file, buf);
33672 putc ('\n', file);
33673 if (uses_toc == 2)
33674 switch_to_other_text_partition ();
33675 }
33676
33677 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33678 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33679
33680 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33681 {
33682 char buf[256];
33683
33684 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33685
33686 fprintf (file, "\t.quad .TOC.-");
33687 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33688 assemble_name (file, buf);
33689 putc ('\n', file);
33690 }
33691
33692 if (DEFAULT_ABI == ABI_AIX)
33693 {
33694 const char *desc_name, *orig_name;
33695
33696 orig_name = (*targetm.strip_name_encoding) (name);
33697 desc_name = orig_name;
33698 while (*desc_name == '.')
33699 desc_name++;
33700
33701 if (TREE_PUBLIC (decl))
33702 fprintf (file, "\t.globl %s\n", desc_name);
33703
33704 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33705 fprintf (file, "%s:\n", desc_name);
33706 fprintf (file, "\t.long %s\n", orig_name);
33707 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33708 fputs ("\t.long 0\n", file);
33709 fprintf (file, "\t.previous\n");
33710 }
33711 ASM_OUTPUT_LABEL (file, name);
33712 }
33713
33714 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33715 static void
33716 rs6000_elf_file_end (void)
33717 {
33718 #ifdef HAVE_AS_GNU_ATTRIBUTE
33719 /* ??? The value emitted depends on options active at file end.
33720 Assume anyone using #pragma or attributes that might change
33721 options knows what they are doing. */
33722 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33723 && rs6000_passes_float)
33724 {
33725 int fp;
33726
33727 if (TARGET_HARD_FLOAT)
33728 fp = 1;
33729 else
33730 fp = 2;
33731 if (rs6000_passes_long_double)
33732 {
33733 if (!TARGET_LONG_DOUBLE_128)
33734 fp |= 2 * 4;
33735 else if (TARGET_IEEEQUAD)
33736 fp |= 3 * 4;
33737 else
33738 fp |= 1 * 4;
33739 }
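/* E.g. hard float with IEEE 128-bit long double gives
   fp = 1 + 3 * 4 = 13, emitted as ".gnu_attribute 4, 13". */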
33740 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33741 }
33742 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33743 {
33744 if (rs6000_passes_vector)
33745 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33746 (TARGET_ALTIVEC_ABI ? 2 : 1));
33747 if (rs6000_returns_struct)
33748 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33749 aix_struct_return ? 2 : 1);
33750 }
33751 #endif
33752 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33753 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33754 file_end_indicate_exec_stack ();
33755 #endif
33756
33757 if (flag_split_stack)
33758 file_end_indicate_split_stack ();
33759
33760 if (cpu_builtin_p)
33761 {
33762 /* We have expanded a CPU builtin, so we need to emit a reference to
33763 the special symbol that LIBC uses to declare that it supports the
33764 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. */
33765 switch_to_section (data_section);
33766 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33767 fprintf (asm_out_file, "\t%s %s\n",
33768 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33769 }
33770 }
33771 #endif
33772
33773 #if TARGET_XCOFF
33774
33775 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33776 #define HAVE_XCOFF_DWARF_EXTRAS 0
33777 #endif
33778
33779 static enum unwind_info_type
33780 rs6000_xcoff_debug_unwind_info (void)
33781 {
33782 return UI_NONE;
33783 }
33784
33785 static void
33786 rs6000_xcoff_asm_output_anchor (rtx symbol)
33787 {
33788 char buffer[100];
33789
33790 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33791 SYMBOL_REF_BLOCK_OFFSET (symbol));
33792 fprintf (asm_out_file, "%s", SET_ASM_OP);
33793 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33794 fprintf (asm_out_file, ",");
33795 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33796 fprintf (asm_out_file, "\n");
33797 }
33798
33799 static void
33800 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33801 {
33802 fputs (GLOBAL_ASM_OP, stream);
33803 RS6000_OUTPUT_BASENAME (stream, name);
33804 putc ('\n', stream);
33805 }
33806
33807 /* A get_unnamed_section callback, used for read-only sections. DIRECTIVE
33808 points to the section string variable. */
33809
33810 static void
33811 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33812 {
33813 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33814 *(const char *const *) directive,
33815 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33816 }
33817
33818 /* Likewise for read-write sections. */
33819
33820 static void
33821 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33822 {
33823 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33824 *(const char *const *) directive,
33825 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33826 }
33827
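/* Likewise for TLS sections. */
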
33828 static void
33829 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33830 {
33831 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33832 *(const char *const *) directive,
33833 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33834 }
33835
33836 /* A get_unnamed_section callback, used for switching to toc_section. */
33837
33838 static void
33839 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33840 {
33841 if (TARGET_MINIMAL_TOC)
33842 {
33843 /* toc_section is always selected at least once from
33844 rs6000_xcoff_file_start, so this is guaranteed to be
33845 defined exactly once in each file. */
33846 if (!toc_initialized)
33847 {
33848 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33849 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33850 toc_initialized = 1;
33851 }
33852 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33853 (TARGET_32BIT ? "" : ",3"));
33854 }
33855 else
33856 fputs ("\t.toc\n", asm_out_file);
33857 }
33858
33859 /* Implement TARGET_ASM_INIT_SECTIONS. */
33860
33861 static void
33862 rs6000_xcoff_asm_init_sections (void)
33863 {
33864 read_only_data_section
33865 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33866 &xcoff_read_only_section_name);
33867
33868 private_data_section
33869 = get_unnamed_section (SECTION_WRITE,
33870 rs6000_xcoff_output_readwrite_section_asm_op,
33871 &xcoff_private_data_section_name);
33872
33873 read_only_private_data_section
33874 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33875 &xcoff_private_rodata_section_name);
33876
33877 tls_data_section
33878 = get_unnamed_section (SECTION_TLS,
33879 rs6000_xcoff_output_tls_section_asm_op,
33880 &xcoff_tls_data_section_name);
33881
33882 tls_private_data_section
33883 = get_unnamed_section (SECTION_TLS,
33884 rs6000_xcoff_output_tls_section_asm_op,
33885 &xcoff_private_data_section_name);
33886
33887 toc_section
33888 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33889
33890 readonly_data_section = read_only_data_section;
33891 }
33892
33893 static int
33894 rs6000_xcoff_reloc_rw_mask (void)
33895 {
33896 return 3;
33897 }
33898
33899 static void
33900 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33901 tree decl ATTRIBUTE_UNUSED)
33902 {
33903 int smclass;
33904 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33905
33906 if (flags & SECTION_EXCLUDE)
33907 smclass = 4;
33908 else if (flags & SECTION_DEBUG)
33909 {
33910 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33911 return;
33912 }
33913 else if (flags & SECTION_CODE)
33914 smclass = 0;
33915 else if (flags & SECTION_TLS)
33916 smclass = 3;
33917 else if (flags & SECTION_WRITE)
33918 smclass = 2;
33919 else
33920 smclass = 1;
33921
33922 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33923 (flags & SECTION_CODE) ? "." : "",
33924 name, suffix[smclass], flags & SECTION_ENTSIZE);
33925 }
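
/* E.g. a code section "foo" whose log2 alignment of 2 is recorded in the
   SECTION_ENTSIZE bits comes out as "\t.csect .foo[PR],2", while a
   writable data section "bar" gets the RW mapping class instead. */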
33926
33927 #define IN_NAMED_SECTION(DECL) \
33928 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33929 && DECL_SECTION_NAME (DECL) != NULL)
33930
33931 static section *
33932 rs6000_xcoff_select_section (tree decl, int reloc,
33933 unsigned HOST_WIDE_INT align)
33934 {
33935 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33936 a named section. */
33937 if (align > BIGGEST_ALIGNMENT)
33938 {
33939 resolve_unique_section (decl, reloc, true);
33940 if (IN_NAMED_SECTION (decl))
33941 return get_named_section (decl, NULL, reloc);
33942 }
33943
33944 if (decl_readonly_section (decl, reloc))
33945 {
33946 if (TREE_PUBLIC (decl))
33947 return read_only_data_section;
33948 else
33949 return read_only_private_data_section;
33950 }
33951 else
33952 {
33953 #if HAVE_AS_TLS
33954 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33955 {
33956 if (TREE_PUBLIC (decl))
33957 return tls_data_section;
33958 else if (bss_initializer_p (decl))
33959 {
33960 /* Convert to COMMON to emit in BSS. */
33961 DECL_COMMON (decl) = 1;
33962 return tls_comm_section;
33963 }
33964 else
33965 return tls_private_data_section;
33966 }
33967 else
33968 #endif
33969 if (TREE_PUBLIC (decl))
33970 return data_section;
33971 else
33972 return private_data_section;
33973 }
33974 }
33975
33976 static void
33977 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33978 {
33979 const char *name;
33980
33981 /* Use select_section for private data and uninitialized data with
33982 alignment <= BIGGEST_ALIGNMENT. */
33983 if (!TREE_PUBLIC (decl)
33984 || DECL_COMMON (decl)
33985 || (DECL_INITIAL (decl) == NULL_TREE
33986 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33987 || DECL_INITIAL (decl) == error_mark_node
33988 || (flag_zero_initialized_in_bss
33989 && initializer_zerop (DECL_INITIAL (decl))))
33990 return;
33991
33992 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33993 name = (*targetm.strip_name_encoding) (name);
33994 set_decl_section_name (decl, name);
33995 }
33996
33997 /* Select section for constant in constant pool.
33998
33999 On RS/6000, all constants are in the private read-only data area.
34000 However, if this is being placed in the TOC it must be output as a
34001 toc entry. */
34002
34003 static section *
34004 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
34005 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
34006 {
34007 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
34008 return toc_section;
34009 else
34010 return read_only_private_data_section;
34011 }
34012
34013 /* Remove any trailing [DS] or the like from the symbol name. */
34014
34015 static const char *
34016 rs6000_xcoff_strip_name_encoding (const char *name)
34017 {
34018 size_t len;
34019 if (*name == '*')
34020 name++;
34021 len = strlen (name);
34022 if (name[len - 1] == ']')
34023 return ggc_alloc_string (name, len - 4);
34024 else
34025 return name;
34026 }
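
/* E.g. "foo[DS]" and "*foo[DS]" both strip to "foo"; the len - 4 above
   assumes a two-letter mapping class such as [DS] or [RW]. */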
34027
34028 /* Section attributes. AIX is always PIC. */
34029
34030 static unsigned int
34031 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
34032 {
34033 unsigned int align;
34034 unsigned int flags = default_section_type_flags (decl, name, reloc);
34035
34036 /* Align to at least the word size (MIN_UNITS_PER_WORD). */
34037 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
34038 align = MIN_UNITS_PER_WORD;
34039 else
34040 /* Increase alignment of large objects if not already stricter. */
34041 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
34042 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
34043 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
34044
34045 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
34046 }
34047
34048 /* Output at beginning of assembler file.
34049
34050 Initialize the section names for the RS/6000 at this point.
34051
34052 Specify filename, including full path, to assembler.
34053
34054 We want to go into the TOC section so at least one .toc will be emitted.
34055 Also, in order to output proper .bs/.es pairs, we need at least one static
34056 [RW] section emitted.
34057
34058 Finally, declare mcount when profiling to make the assembler happy. */
34059
34060 static void
34061 rs6000_xcoff_file_start (void)
34062 {
34063 rs6000_gen_section_name (&xcoff_bss_section_name,
34064 main_input_filename, ".bss_");
34065 rs6000_gen_section_name (&xcoff_private_data_section_name,
34066 main_input_filename, ".rw_");
34067 rs6000_gen_section_name (&xcoff_private_rodata_section_name,
34068 main_input_filename, ".rop_");
34069 rs6000_gen_section_name (&xcoff_read_only_section_name,
34070 main_input_filename, ".ro_");
34071 rs6000_gen_section_name (&xcoff_tls_data_section_name,
34072 main_input_filename, ".tls_");
34073 rs6000_gen_section_name (&xcoff_tbss_section_name,
34074 main_input_filename, ".tbss_[UL]");
34075
34076 fputs ("\t.file\t", asm_out_file);
34077 output_quoted_string (asm_out_file, main_input_filename);
34078 fputc ('\n', asm_out_file);
34079 if (write_symbols != NO_DEBUG)
34080 switch_to_section (private_data_section);
34081 switch_to_section (toc_section);
34082 switch_to_section (text_section);
34083 if (profile_flag)
34084 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
34085 rs6000_file_start ();
34086 }
34087
34088 /* Output at end of assembler file.
34089 On the RS/6000, referencing data should automatically pull in text. */
34090
34091 static void
34092 rs6000_xcoff_file_end (void)
34093 {
34094 switch_to_section (text_section);
34095 fputs ("_section_.text:\n", asm_out_file);
34096 switch_to_section (data_section);
34097 fputs (TARGET_32BIT
34098 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
34099 asm_out_file);
34100 }
34101
34102 struct declare_alias_data
34103 {
34104 FILE *file;
34105 bool function_descriptor;
34106 };
34107
34108 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
34109
34110 static bool
34111 rs6000_declare_alias (struct symtab_node *n, void *d)
34112 {
34113 struct declare_alias_data *data = (struct declare_alias_data *)d;
34114 /* The main symbol is output specially, because the varasm machinery does
34115 part of the job for us; we do not need to declare .globl/.lglobl and such. */
34116 if (!n->alias || n->weakref)
34117 return false;
34118
34119 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
34120 return false;
34121
34122 /* Prevent assemble_alias from trying to use .set pseudo operation
34123 that does not behave as expected by the middle-end. */
34124 TREE_ASM_WRITTEN (n->decl) = true;
34125
34126 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
34127 char *buffer = (char *) alloca (strlen (name) + 2);
34128 char *p;
34129 int dollar_inside = 0;
34130
34131 strcpy (buffer, name);
34132 p = strchr (buffer, '$');
34133 while (p) {
34134 *p = '_';
34135 dollar_inside++;
34136 p = strchr (p + 1, '$');
34137 }
34138 if (TREE_PUBLIC (n->decl))
34139 {
34140 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
34141 {
34142 if (dollar_inside) {
34143 if (data->function_descriptor)
34144 fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34145 fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34146 }
34147 if (data->function_descriptor)
34148 {
34149 fputs ("\t.globl .", data->file);
34150 RS6000_OUTPUT_BASENAME (data->file, buffer);
34151 putc ('\n', data->file);
34152 }
34153 fputs ("\t.globl ", data->file);
34154 RS6000_OUTPUT_BASENAME (data->file, buffer);
34155 putc ('\n', data->file);
34156 }
34157 #ifdef ASM_WEAKEN_DECL
34158 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34159 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34160 #endif
34161 }
34162 else
34163 {
34164 if (dollar_inside)
34165 {
34166 if (data->function_descriptor)
34167 fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34168 fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34169 }
34170 if (data->function_descriptor)
34171 {
34172 fputs ("\t.lglobl .", data->file);
34173 RS6000_OUTPUT_BASENAME (data->file, buffer);
34174 putc ('\n', data->file);
34175 }
34176 fputs ("\t.lglobl ", data->file);
34177 RS6000_OUTPUT_BASENAME (data->file, buffer);
34178 putc ('\n', data->file);
34179 }
34180 if (data->function_descriptor)
34181 fputs (".", data->file);
34182 RS6000_OUTPUT_BASENAME (data->file, buffer);
34183 fputs (":\n", data->file);
34184 return false;
34185 }
34186
34187
34188 #ifdef HAVE_GAS_HIDDEN
34189 /* Helper function to calculate visibility of a DECL
34190 and return the value as a const string. */
34191
34192 static const char *
34193 rs6000_xcoff_visibility (tree decl)
34194 {
34195 static const char * const visibility_types[] = {
34196 "", ",protected", ",hidden", ",internal"
34197 };
34198
34199 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34200 return visibility_types[vis];
34201 }
34202 #endif
34203
34204
34205 /* This macro produces the initial definition of a function name.
34206 On the RS/6000, we need to place an extra '.' in the function name and
34207 output the function descriptor.
34208 Dollar signs are converted to underscores.
34209
34210 The csect for the function will have already been created when
34211 text_section was selected. We do have to go back to that csect, however.
34212
34213 The third and fourth parameters to the .function pseudo-op (16 and 044)
34214 are placeholders which no longer have any use.
34215
34216 Because the AIX assembler's .set command has unexpected semantics, we
34217 output all aliases as alternative labels in front of the definition. */
34218
34219 void
34220 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34221 {
34222 char *buffer = (char *) alloca (strlen (name) + 1);
34223 char *p;
34224 int dollar_inside = 0;
34225 struct declare_alias_data data = {file, false};
34226
34227 strcpy (buffer, name);
34228 p = strchr (buffer, '$');
34229 while (p) {
34230 *p = '_';
34231 dollar_inside++;
34232 p = strchr (p + 1, '$');
34233 }
34234 if (TREE_PUBLIC (decl))
34235 {
34236 if (!RS6000_WEAK || !DECL_WEAK (decl))
34237 {
34238 if (dollar_inside) {
34239 fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
34240 fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
34241 }
34242 fputs ("\t.globl .", file);
34243 RS6000_OUTPUT_BASENAME (file, buffer);
34244 #ifdef HAVE_GAS_HIDDEN
34245 fputs (rs6000_xcoff_visibility (decl), file);
34246 #endif
34247 putc ('\n', file);
34248 }
34249 }
34250 else
34251 {
34252 if (dollar_inside) {
34253 fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
34254 fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
34255 }
34256 fputs ("\t.lglobl .", file);
34257 RS6000_OUTPUT_BASENAME (file, buffer);
34258 putc ('\n', file);
34259 }
34260 fputs ("\t.csect ", file);
34261 RS6000_OUTPUT_BASENAME (file, buffer);
34262 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34263 RS6000_OUTPUT_BASENAME (file, buffer);
34264 fputs (":\n", file);
34265 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34266 &data, true);
34267 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34268 RS6000_OUTPUT_BASENAME (file, buffer);
34269 fputs (", TOC[tc0], 0\n", file);
34270 in_section = NULL;
34271 switch_to_section (function_section (decl));
34272 putc ('.', file);
34273 RS6000_OUTPUT_BASENAME (file, buffer);
34274 fputs (":\n", file);
34275 data.function_descriptor = true;
34276 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34277 &data, true);
34278 if (!DECL_IGNORED_P (decl))
34279 {
34280 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34281 xcoffout_declare_function (file, decl, buffer);
34282 else if (write_symbols == DWARF2_DEBUG)
34283 {
34284 name = (*targetm.strip_name_encoding) (name);
34285 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34286 }
34287 }
34288 return;
34289 }
34290
34291
34292 /* Output assembly language to globalize a symbol from a DECL,
34293 possibly with visibility. */
34294
34295 void
34296 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34297 {
34298 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34299 fputs (GLOBAL_ASM_OP, stream);
34300 RS6000_OUTPUT_BASENAME (stream, name);
34301 #ifdef HAVE_GAS_HIDDEN
34302 fputs (rs6000_xcoff_visibility (decl), stream);
34303 #endif
34304 putc ('\n', stream);
34305 }
34306
34307 /* Output assembly language to define a symbol as COMMON from a DECL,
34308 possibly with visibility. */
34309
34310 void
34311 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34312 tree decl ATTRIBUTE_UNUSED,
34313 const char *name,
34314 unsigned HOST_WIDE_INT size,
34315 unsigned HOST_WIDE_INT align)
34316 {
34317 unsigned HOST_WIDE_INT align2 = 2;
34318
34319 if (align > 32)
34320 align2 = floor_log2 (align / BITS_PER_UNIT);
34321 else if (size > 4)
34322 align2 = 3;
34323
34324 fputs (COMMON_ASM_OP, stream);
34325 RS6000_OUTPUT_BASENAME (stream, name);
34326
34327 fprintf (stream,
34328 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34329 size, align2);
34330
34331 #ifdef HAVE_GAS_HIDDEN
34332 if (decl != NULL)
34333 fputs (rs6000_xcoff_visibility (decl), stream);
34334 #endif
34335 putc ('\n', stream);
34336 }
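
/* E.g. SIZE = 8 with ALIGN = 32 takes the 'size > 4' branch and emits
   align2 = 3, while ALIGN = 128 bits gives align2 = floor_log2 (128 / 8)
   = 4 (so roughly ".comm foo,8,3" and ".comm foo,16,4"). */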
34337
34338 /* This macro produces the initial definition of an object (variable) name.
34339 Because the AIX assembler's .set command has unexpected semantics, we
34340 output all aliases as alternative labels in front of the definition. */
34341
34342 void
34343 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34344 {
34345 struct declare_alias_data data = {file, false};
34346 RS6000_OUTPUT_BASENAME (file, name);
34347 fputs (":\n", file);
34348 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34349 &data, true);
34350 }
34351
34352 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34353
34354 void
34355 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34356 {
34357 fputs (integer_asm_op (size, FALSE), file);
34358 assemble_name (file, label);
34359 fputs ("-$", file);
34360 }
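
/* E.g. assuming ".long" is the 4-byte integer op, SIZE = 4 with LABEL
   "LFB..0" emits "\t.long\tLFB..0-$", '$' being the AIX assembler's
   spelling of the current location. */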
34361
34362 /* Output a symbol offset relative to the dbase for the current object.
34363 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34364 signed offsets.
34365
34366 __gcc_unwind_dbase is embedded in all executables/libraries through
34367 libgcc/config/rs6000/crtdbase.S. */
34368
34369 void
34370 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34371 {
34372 fputs (integer_asm_op (size, FALSE), file);
34373 assemble_name (file, label);
34374 fputs("-__gcc_unwind_dbase", file);
34375 }
34376
34377 #ifdef HAVE_AS_TLS
34378 static void
34379 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34380 {
34381 rtx symbol;
34382 int flags;
34383 const char *symname;
34384
34385 default_encode_section_info (decl, rtl, first);
34386
34387 /* Careful not to prod global register variables. */
34388 if (!MEM_P (rtl))
34389 return;
34390 symbol = XEXP (rtl, 0);
34391 if (!SYMBOL_REF_P (symbol))
34392 return;
34393
34394 flags = SYMBOL_REF_FLAGS (symbol);
34395
34396 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34397 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34398
34399 SYMBOL_REF_FLAGS (symbol) = flags;
34400
34401 /* Append mapping class to extern decls. */
34402 symname = XSTR (symbol, 0);
34403 if (decl /* sync condition with assemble_external () */
34404 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34405 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34406 || TREE_CODE (decl) == FUNCTION_DECL)
34407 && symname[strlen (symname) - 1] != ']')
34408 {
34409 char *newname = (char *) alloca (strlen (symname) + 5);
34410 strcpy (newname, symname);
34411 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34412 ? "[DS]" : "[UA]"));
34413 XSTR (symbol, 0) = ggc_strdup (newname);
34414 }
34415 }
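
/* For example, an external function "foo" with no explicit mapping class
   is renamed "foo[DS]" (its descriptor csect), and an external non-TLS
   variable "bar" becomes "bar[UA]"; symbols already ending in ']' are
   left untouched. */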
34416 #endif /* HAVE_AS_TLS */
34417 #endif /* TARGET_XCOFF */
34418
34419 void
34420 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34421 const char *name, const char *val)
34422 {
34423 fputs ("\t.weak\t", stream);
34424 RS6000_OUTPUT_BASENAME (stream, name);
34425 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34426 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34427 {
34428 if (TARGET_XCOFF)
34429 fputs ("[DS]", stream);
34430 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34431 if (TARGET_XCOFF)
34432 fputs (rs6000_xcoff_visibility (decl), stream);
34433 #endif
34434 fputs ("\n\t.weak\t.", stream);
34435 RS6000_OUTPUT_BASENAME (stream, name);
34436 }
34437 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34438 if (TARGET_XCOFF)
34439 fputs (rs6000_xcoff_visibility (decl), stream);
34440 #endif
34441 fputc ('\n', stream);
34442 if (val)
34443 {
34444 #ifdef ASM_OUTPUT_DEF
34445 ASM_OUTPUT_DEF (stream, name, val);
34446 #endif
34447 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34448 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34449 {
34450 fputs ("\t.set\t.", stream);
34451 RS6000_OUTPUT_BASENAME (stream, name);
34452 fputs (",.", stream);
34453 RS6000_OUTPUT_BASENAME (stream, val);
34454 fputc ('\n', stream);
34455 }
34456 }
34457 }
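
/* As a sketch of the output: for a weak function FOO aliased to BAR under
   ABI_AIX with dot symbols, and assuming ASM_OUTPUT_DEF emits a .set,
   the code above produces approximately

	.weak foo[DS]
	.weak .foo
	.set foo,bar
	.set .foo,.bar

   weakening and aliasing both the function descriptor and the code entry
   point. */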
34458
34459
34460 /* Return true if INSN should not be copied. */
34461
34462 static bool
34463 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34464 {
34465 return recog_memoized (insn) >= 0
34466 && get_attr_cannot_copy (insn);
34467 }
34468
34469 /* Compute a (partial) cost for rtx X. Return true if the complete
34470 cost has been computed, and false if subexpressions should be
34471 scanned. In either case, *TOTAL contains the cost result. */
34472
34473 static bool
34474 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34475 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34476 {
34477 int code = GET_CODE (x);
34478
34479 switch (code)
34480 {
34481 /* On the RS/6000, if it is valid in the insn, it is free. */
34482 case CONST_INT:
34483 if (((outer_code == SET
34484 || outer_code == PLUS
34485 || outer_code == MINUS)
34486 && (satisfies_constraint_I (x)
34487 || satisfies_constraint_L (x)))
34488 || (outer_code == AND
34489 && (satisfies_constraint_K (x)
34490 || (mode == SImode
34491 ? satisfies_constraint_L (x)
34492 : satisfies_constraint_J (x))))
34493 || ((outer_code == IOR || outer_code == XOR)
34494 && (satisfies_constraint_K (x)
34495 || (mode == SImode
34496 ? satisfies_constraint_L (x)
34497 : satisfies_constraint_J (x))))
34498 || outer_code == ASHIFT
34499 || outer_code == ASHIFTRT
34500 || outer_code == LSHIFTRT
34501 || outer_code == ROTATE
34502 || outer_code == ROTATERT
34503 || outer_code == ZERO_EXTRACT
34504 || (outer_code == MULT
34505 && satisfies_constraint_I (x))
34506 || ((outer_code == DIV || outer_code == UDIV
34507 || outer_code == MOD || outer_code == UMOD)
34508 && exact_log2 (INTVAL (x)) >= 0)
34509 || (outer_code == COMPARE
34510 && (satisfies_constraint_I (x)
34511 || satisfies_constraint_K (x)))
34512 || ((outer_code == EQ || outer_code == NE)
34513 && (satisfies_constraint_I (x)
34514 || satisfies_constraint_K (x)
34515 || (mode == SImode
34516 ? satisfies_constraint_L (x)
34517 : satisfies_constraint_J (x))))
34518 || (outer_code == GTU
34519 && satisfies_constraint_I (x))
34520 || (outer_code == LTU
34521 && satisfies_constraint_P (x)))
34522 {
34523 *total = 0;
34524 return true;
34525 }
34526 else if ((outer_code == PLUS
34527 && reg_or_add_cint_operand (x, VOIDmode))
34528 || (outer_code == MINUS
34529 && reg_or_sub_cint_operand (x, VOIDmode))
34530 || ((outer_code == SET
34531 || outer_code == IOR
34532 || outer_code == XOR)
34533 && (INTVAL (x)
34534 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34535 {
34536 *total = COSTS_N_INSNS (1);
34537 return true;
34538 }
34539 /* FALLTHRU */
34540
34541 case CONST_DOUBLE:
34542 case CONST_WIDE_INT:
34543 case CONST:
34544 case HIGH:
34545 case SYMBOL_REF:
34546 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34547 return true;
34548
34549 case MEM:
34550 /* When optimizing for size, MEM should be slightly more expensive
34551 than generating the address, e.g., (plus (reg) (const)).
34552 L1 cache latency is about two instructions. */
34553 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34554 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34555 *total += COSTS_N_INSNS (100);
34556 return true;
34557
34558 case LABEL_REF:
34559 *total = 0;
34560 return true;
34561
34562 case PLUS:
34563 case MINUS:
34564 if (FLOAT_MODE_P (mode))
34565 *total = rs6000_cost->fp;
34566 else
34567 *total = COSTS_N_INSNS (1);
34568 return false;
34569
34570 case MULT:
34571 if (CONST_INT_P (XEXP (x, 1))
34572 && satisfies_constraint_I (XEXP (x, 1)))
34573 {
34574 if (INTVAL (XEXP (x, 1)) >= -256
34575 && INTVAL (XEXP (x, 1)) <= 255)
34576 *total = rs6000_cost->mulsi_const9;
34577 else
34578 *total = rs6000_cost->mulsi_const;
34579 }
34580 else if (mode == SFmode)
34581 *total = rs6000_cost->fp;
34582 else if (FLOAT_MODE_P (mode))
34583 *total = rs6000_cost->dmul;
34584 else if (mode == DImode)
34585 *total = rs6000_cost->muldi;
34586 else
34587 *total = rs6000_cost->mulsi;
34588 return false;
34589
34590 case FMA:
34591 if (mode == SFmode)
34592 *total = rs6000_cost->fp;
34593 else
34594 *total = rs6000_cost->dmul;
34595 break;
34596
34597 case DIV:
34598 case MOD:
34599 if (FLOAT_MODE_P (mode))
34600 {
34601 *total = mode == DFmode ? rs6000_cost->ddiv
34602 : rs6000_cost->sdiv;
34603 return false;
34604 }
34605 /* FALLTHRU */
34606
34607 case UDIV:
34608 case UMOD:
34609 if (CONST_INT_P (XEXP (x, 1))
34610 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34611 {
34612 if (code == DIV || code == MOD)
34613 /* Shift, addze */
34614 *total = COSTS_N_INSNS (2);
34615 else
34616 /* Shift */
34617 *total = COSTS_N_INSNS (1);
34618 }
34619 else
34620 {
34621 if (GET_MODE (XEXP (x, 1)) == DImode)
34622 *total = rs6000_cost->divdi;
34623 else
34624 *total = rs6000_cost->divsi;
34625 }
34626 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34627 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34628 *total += COSTS_N_INSNS (2);
34629 return false;
34630
34631 case CTZ:
34632 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34633 return false;
34634
34635 case FFS:
34636 *total = COSTS_N_INSNS (4);
34637 return false;
34638
34639 case POPCOUNT:
34640 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34641 return false;
34642
34643 case PARITY:
34644 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34645 return false;
34646
34647 case NOT:
34648 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34649 *total = 0;
34650 else
34651 *total = COSTS_N_INSNS (1);
34652 return false;
34653
34654 case AND:
34655 if (CONST_INT_P (XEXP (x, 1)))
34656 {
34657 rtx left = XEXP (x, 0);
34658 rtx_code left_code = GET_CODE (left);
34659
34660 /* rotate-and-mask: 1 insn. */
34661 if ((left_code == ROTATE
34662 || left_code == ASHIFT
34663 || left_code == LSHIFTRT)
34664 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34665 {
34666 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34667 if (!CONST_INT_P (XEXP (left, 1)))
34668 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34669 *total += COSTS_N_INSNS (1);
34670 return true;
34671 }
34672
34673 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34674 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34675 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34676 || (val & 0xffff) == val
34677 || (val & 0xffff0000) == val
34678 || ((val & 0xffff) == 0 && mode == SImode))
34679 {
34680 *total = rtx_cost (left, mode, AND, 0, speed);
34681 *total += COSTS_N_INSNS (1);
34682 return true;
34683 }
34684
34685 /* 2 insns. */
34686 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34687 {
34688 *total = rtx_cost (left, mode, AND, 0, speed);
34689 *total += COSTS_N_INSNS (2);
34690 return true;
34691 }
34692 }
34693
34694 *total = COSTS_N_INSNS (1);
34695 return false;
34696
34697 case IOR:
34698 /* FIXME */
34699 *total = COSTS_N_INSNS (1);
34700 return true;
34701
34702 case CLZ:
34703 case XOR:
34704 case ZERO_EXTRACT:
34705 *total = COSTS_N_INSNS (1);
34706 return false;
34707
34708 case ASHIFT:
34709 /* The EXTSWSLI instruction combines a sign extend with a shift. Don't
34710 count the sign extend and the shift separately within the insn. */
34711 if (TARGET_EXTSWSLI && mode == DImode
34712 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34713 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34714 {
34715 *total = 0;
34716 return false;
34717 }
34718 /* fall through */
34719
34720 case ASHIFTRT:
34721 case LSHIFTRT:
34722 case ROTATE:
34723 case ROTATERT:
34724 /* Handle mul_highpart. */
34725 if (outer_code == TRUNCATE
34726 && GET_CODE (XEXP (x, 0)) == MULT)
34727 {
34728 if (mode == DImode)
34729 *total = rs6000_cost->muldi;
34730 else
34731 *total = rs6000_cost->mulsi;
34732 return true;
34733 }
34734 else if (outer_code == AND)
34735 *total = 0;
34736 else
34737 *total = COSTS_N_INSNS (1);
34738 return false;
34739
34740 case SIGN_EXTEND:
34741 case ZERO_EXTEND:
34742 if (MEM_P (XEXP (x, 0)))
34743 *total = 0;
34744 else
34745 *total = COSTS_N_INSNS (1);
34746 return false;
34747
34748 case COMPARE:
34749 case NEG:
34750 case ABS:
34751 if (!FLOAT_MODE_P (mode))
34752 {
34753 *total = COSTS_N_INSNS (1);
34754 return false;
34755 }
34756 /* FALLTHRU */
34757
34758 case FLOAT:
34759 case UNSIGNED_FLOAT:
34760 case FIX:
34761 case UNSIGNED_FIX:
34762 case FLOAT_TRUNCATE:
34763 *total = rs6000_cost->fp;
34764 return false;
34765
34766 case FLOAT_EXTEND:
34767 if (mode == DFmode)
34768 *total = rs6000_cost->sfdf_convert;
34769 else
34770 *total = rs6000_cost->fp;
34771 return false;
34772
34773 case UNSPEC:
34774 switch (XINT (x, 1))
34775 {
34776 case UNSPEC_FRSP:
34777 *total = rs6000_cost->fp;
34778 return true;
34779
34780 default:
34781 break;
34782 }
34783 break;
34784
34785 case CALL:
34786 case IF_THEN_ELSE:
34787 if (!speed)
34788 {
34789 *total = COSTS_N_INSNS (1);
34790 return true;
34791 }
34792 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34793 {
34794 *total = rs6000_cost->fp;
34795 return false;
34796 }
34797 break;
34798
34799 case NE:
34800 case EQ:
34801 case GTU:
34802 case LTU:
34803 /* Carry bit requires mode == Pmode.
34804 NEG or PLUS already counted so only add one. */
34805 if (mode == Pmode
34806 && (outer_code == NEG || outer_code == PLUS))
34807 {
34808 *total = COSTS_N_INSNS (1);
34809 return true;
34810 }
34811 /* FALLTHRU */
34812
34813 case GT:
34814 case LT:
34815 case UNORDERED:
34816 if (outer_code == SET)
34817 {
34818 if (XEXP (x, 1) == const0_rtx)
34819 {
34820 *total = COSTS_N_INSNS (2);
34821 return true;
34822 }
34823 else
34824 {
34825 *total = COSTS_N_INSNS (3);
34826 return false;
34827 }
34828 }
34829 /* CC COMPARE. */
34830 if (outer_code == COMPARE)
34831 {
34832 *total = 0;
34833 return true;
34834 }
34835 break;
34836
34837 default:
34838 break;
34839 }
34840
34841 return false;
34842 }
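
/* For example, in (plus (reg) (const_int 12)) the constant satisfies the
   "I" constraint (a signed 16-bit immediate), so the CONST_INT case above
   reports a cost of 0: a single addi covers the whole expression. A mask
   accepted by rs6000_is_valid_2insn_and (one realizable with, e.g., two
   rotate-and-mask instructions) instead costs COSTS_N_INSNS (2) on top
   of its operand. */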
34843
34844 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34845
34846 static bool
34847 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34848 int opno, int *total, bool speed)
34849 {
34850 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34851
34852 fprintf (stderr,
34853 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34854 "opno = %d, total = %d, speed = %s, x:\n",
34855 ret ? "complete" : "scan inner",
34856 GET_MODE_NAME (mode),
34857 GET_RTX_NAME (outer_code),
34858 opno,
34859 *total,
34860 speed ? "true" : "false");
34861
34862 debug_rtx (x);
34863
34864 return ret;
34865 }
34866
34867 static int
34868 rs6000_insn_cost (rtx_insn *insn, bool speed)
34869 {
34870 if (recog_memoized (insn) < 0)
34871 return 0;
34872
34873 if (!speed)
34874 return get_attr_length (insn);
34875
34876 int cost = get_attr_cost (insn);
34877 if (cost > 0)
34878 return cost;
34879
34880 int n = get_attr_length (insn) / 4;
34881 enum attr_type type = get_attr_type (insn);
34882
34883 switch (type)
34884 {
34885 case TYPE_LOAD:
34886 case TYPE_FPLOAD:
34887 case TYPE_VECLOAD:
34888 cost = COSTS_N_INSNS (n + 1);
34889 break;
34890
34891 case TYPE_MUL:
34892 switch (get_attr_size (insn))
34893 {
34894 case SIZE_8:
34895 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34896 break;
34897 case SIZE_16:
34898 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34899 break;
34900 case SIZE_32:
34901 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34902 break;
34903 case SIZE_64:
34904 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34905 break;
34906 default:
34907 gcc_unreachable ();
34908 }
34909 break;
34910 case TYPE_DIV:
34911 switch (get_attr_size (insn))
34912 {
34913 case SIZE_32:
34914 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34915 break;
34916 case SIZE_64:
34917 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34918 break;
34919 default:
34920 gcc_unreachable ();
34921 }
34922 break;
34923
34924 case TYPE_FP:
34925 cost = n * rs6000_cost->fp;
34926 break;
34927 case TYPE_DMUL:
34928 cost = n * rs6000_cost->dmul;
34929 break;
34930 case TYPE_SDIV:
34931 cost = n * rs6000_cost->sdiv;
34932 break;
34933 case TYPE_DDIV:
34934 cost = n * rs6000_cost->ddiv;
34935 break;
34936
34937 case TYPE_SYNC:
34938 case TYPE_LOAD_L:
34939 case TYPE_MFCR:
34940 case TYPE_MFCRF:
34941 cost = COSTS_N_INSNS (n + 2);
34942 break;
34943
34944 default:
34945 cost = COSTS_N_INSNS (n);
34946 }
34947
34948 return cost;
34949 }
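
/* E.g. a simple load (TYPE_LOAD, length 4, so n = 1) is costed as
   COSTS_N_INSNS (2) when optimizing for speed, one slot for the insn
   itself plus one to approximate L1 load latency, while with !speed the
   raw insn length in bytes stands in for size. */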
34950
34951 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34952
34953 static int
34954 rs6000_debug_address_cost (rtx x, machine_mode mode,
34955 addr_space_t as, bool speed)
34956 {
34957 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34958
34959 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34960 ret, speed ? "true" : "false");
34961 debug_rtx (x);
34962
34963 return ret;
34964 }
34965
34966
34967 /* A C expression returning the cost of moving data from a register of class
34968 FROM to one of class TO. */
34969
34970 static int
34971 rs6000_register_move_cost (machine_mode mode,
34972 reg_class_t from, reg_class_t to)
34973 {
34974 int ret;
34975
34976 if (TARGET_DEBUG_COST)
34977 dbg_cost_ctrl++;
34978
34979 /* Moves from/to GENERAL_REGS. */
34980 if (reg_classes_intersect_p (to, GENERAL_REGS)
34981 || reg_classes_intersect_p (from, GENERAL_REGS))
34982 {
34983 reg_class_t rclass = from;
34984
34985 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34986 rclass = to;
34987
34988 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34989 ret = (rs6000_memory_move_cost (mode, rclass, false)
34990 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34991
34992 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34993 shift. */
34994 else if (rclass == CR_REGS)
34995 ret = 4;
34996
34997 /* For those processors that have slow LR/CTR moves, make them more
34998 expensive than memory in order to bias spills to memory. */
34999 else if ((rs6000_tune == PROCESSOR_POWER6
35000 || rs6000_tune == PROCESSOR_POWER7
35001 || rs6000_tune == PROCESSOR_POWER8
35002 || rs6000_tune == PROCESSOR_POWER9)
35003 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
35004 ret = 6 * hard_regno_nregs (0, mode);
35005
35006 else
35007 /* A move will cost one instruction per GPR moved. */
35008 ret = 2 * hard_regno_nregs (0, mode);
35009 }
35010
35011 /* If we have VSX, we can easily move between FPR or Altivec registers. */
35012 else if (VECTOR_MEM_VSX_P (mode)
35013 && reg_classes_intersect_p (to, VSX_REGS)
35014 && reg_classes_intersect_p (from, VSX_REGS))
35015 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
35016
35017 /* Moving between two similar registers is just one instruction. */
35018 else if (reg_classes_intersect_p (to, from))
35019 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
35020
35021 /* Everything else has to go through GENERAL_REGS. */
35022 else
35023 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
35024 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
35025
35026 if (TARGET_DEBUG_COST)
35027 {
35028 if (dbg_cost_ctrl == 1)
35029 fprintf (stderr,
35030 "rs6000_register_move_cost: ret=%d, mode=%s, from=%s, to=%s\n",
35031 ret, GET_MODE_NAME (mode), reg_class_names[from],
35032 reg_class_names[to]);
35033 dbg_cost_ctrl--;
35034 }
35035
35036 return ret;
35037 }
35038
35039 /* A C expression returning the cost of moving data of MODE from a register to
35040 or from memory. */
35041
35042 static int
35043 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
35044 bool in ATTRIBUTE_UNUSED)
35045 {
35046 int ret;
35047
35048 if (TARGET_DEBUG_COST)
35049 dbg_cost_ctrl++;
35050
35051 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
35052 ret = 4 * hard_regno_nregs (0, mode);
35053 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
35054 || reg_classes_intersect_p (rclass, VSX_REGS)))
35055 ret = 4 * hard_regno_nregs (32, mode);
35056 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
35057 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
35058 else
35059 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
35060
35061 if (TARGET_DEBUG_COST)
35062 {
35063 if (dbg_cost_ctrl == 1)
35064 fprintf (stderr,
35065 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
35066 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
35067 dbg_cost_ctrl--;
35068 }
35069
35070 return ret;
35071 }
35072
35073 /* Return a target-specific builtin that implements the reciprocal of
35074 the function FNDECL, or NULL_TREE if none is available. */
35075
35076 static tree
35077 rs6000_builtin_reciprocal (tree fndecl)
35078 {
35079 switch (DECL_FUNCTION_CODE (fndecl))
35080 {
35081 case VSX_BUILTIN_XVSQRTDP:
35082 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
35083 return NULL_TREE;
35084
35085 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
35086
35087 case VSX_BUILTIN_XVSQRTSP:
35088 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
35089 return NULL_TREE;
35090
35091 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
35092
35093 default:
35094 return NULL_TREE;
35095 }
35096 }
35097
35098 /* Load up a constant. If the mode is a vector mode, splat the value across
35099 all of the vector elements. */
35100
35101 static rtx
35102 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
35103 {
35104 rtx reg;
35105
35106 if (mode == SFmode || mode == DFmode)
35107 {
35108 rtx d = const_double_from_real_value (dconst, mode);
35109 reg = force_reg (mode, d);
35110 }
35111 else if (mode == V4SFmode)
35112 {
35113 rtx d = const_double_from_real_value (dconst, SFmode);
35114 rtvec v = gen_rtvec (4, d, d, d, d);
35115 reg = gen_reg_rtx (mode);
35116 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35117 }
35118 else if (mode == V2DFmode)
35119 {
35120 rtx d = const_double_from_real_value (dconst, DFmode);
35121 rtvec v = gen_rtvec (2, d, d);
35122 reg = gen_reg_rtx (mode);
35123 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35124 }
35125 else
35126 gcc_unreachable ();
35127
35128 return reg;
35129 }
35130
35131 /* Generate an FMA instruction. */
35132
35133 static void
35134 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
35135 {
35136 machine_mode mode = GET_MODE (target);
35137 rtx dst;
35138
35139 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
35140 gcc_assert (dst != NULL);
35141
35142 if (dst != target)
35143 emit_move_insn (target, dst);
35144 }
35145
35146 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35147
35148 static void
35149 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35150 {
35151 machine_mode mode = GET_MODE (dst);
35152 rtx r;
35153
35154 /* This is a tad more complicated, since the fnma_optab is for
35155 a different expression: fma(-m1, m2, a), which is the same
35156 thing except in the case of signed zeros.
35157
35158 Fortunately we know that if FMA is supported, FNMSUB is
35159 also supported in the ISA. Just expand it directly. */
35160
35161 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35162
35163 r = gen_rtx_NEG (mode, a);
35164 r = gen_rtx_FMA (mode, m1, m2, r);
35165 r = gen_rtx_NEG (mode, r);
35166 emit_insn (gen_rtx_SET (dst, r));
35167 }
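
/* A signed-zero example of why fnma_optab cannot be used here: when
   m1 * m2 == a exactly, -fma (m1, m2, -a) = -(+0.0) = -0.0, whereas
   fma (-m1, m2, a) = +0.0 under round-to-nearest, so the two forms
   differ exactly in the sign of a zero result. Expanding the FMA rtl
   directly keeps the FNMSUB semantics. */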
35168
35169 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35170 add a reg_note saying that this was a division. Support both scalar and
35171 vector divide. Assumes no trapping math and finite arguments. */
35172
35173 void
35174 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35175 {
35176 machine_mode mode = GET_MODE (dst);
35177 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35178 int i;
35179
35180 /* Low precision estimates guarantee 5 bits of accuracy. High
35181 precision estimates guarantee 14 bits of accuracy. SFmode
35182 requires 23 bits of accuracy. DFmode requires 52 bits of
35183 accuracy. Each pass at least doubles the accuracy, leading
35184 to the following. */
35185 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35186 if (mode == DFmode || mode == V2DFmode)
35187 passes++;
35188
35189 enum insn_code code = optab_handler (smul_optab, mode);
35190 insn_gen_fn gen_mul = GEN_FCN (code);
35191
35192 gcc_assert (code != CODE_FOR_nothing);
35193
35194 one = rs6000_load_constant_and_splat (mode, dconst1);
35195
35196 /* x0 = 1./d estimate */
35197 x0 = gen_reg_rtx (mode);
35198 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35199 UNSPEC_FRES)));
35200
35201 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35202 if (passes > 1) {
35203
35204 /* e0 = 1. - d * x0 */
35205 e0 = gen_reg_rtx (mode);
35206 rs6000_emit_nmsub (e0, d, x0, one);
35207
35208 /* x1 = x0 + e0 * x0 */
35209 x1 = gen_reg_rtx (mode);
35210 rs6000_emit_madd (x1, e0, x0, x0);
35211
35212 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35213 ++i, xprev = xnext, eprev = enext) {
35214
35215 /* enext = eprev * eprev */
35216 enext = gen_reg_rtx (mode);
35217 emit_insn (gen_mul (enext, eprev, eprev));
35218
35219 /* xnext = xprev + enext * xprev */
35220 xnext = gen_reg_rtx (mode);
35221 rs6000_emit_madd (xnext, enext, xprev, xprev);
35222 }
35223
35224 } else
35225 xprev = x0;
35226
35227 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35228
35229 /* u = n * xprev */
35230 u = gen_reg_rtx (mode);
35231 emit_insn (gen_mul (u, n, xprev));
35232
35233 /* v = n - (d * u) */
35234 v = gen_reg_rtx (mode);
35235 rs6000_emit_nmsub (v, d, u, n);
35236
35237 /* dst = (v * xprev) + u */
35238 rs6000_emit_madd (dst, v, xprev, u);
35239
35240 if (note_p)
35241 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
35242 }
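
/* Pass-count arithmetic for the comment above: a 5-bit estimate refines
   as 5 -> 10 -> 20 -> 40 correct bits, so three passes cover SFmode's
   23 bits and four cover DFmode's 52; a 14-bit estimate
   (TARGET_RECIP_PRECISION) refines as 14 -> 28 -> 56, so one pass
   suffices for SFmode and two for DFmode. This is exactly the "passes"
   computation used here and in rs6000_emit_swsqrt below. */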
35243
35244 /* Goldschmidt's Algorithm for single/double-precision floating point
35245 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35246
35247 void
35248 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35249 {
35250 machine_mode mode = GET_MODE (src);
35251 rtx e = gen_reg_rtx (mode);
35252 rtx g = gen_reg_rtx (mode);
35253 rtx h = gen_reg_rtx (mode);
35254
35255 /* Low precision estimates guarantee 5 bits of accuracy. High
35256 precision estimates guarantee 14 bits of accuracy. SFmode
35257 requires 23 bits of accuracy. DFmode requires 52 bits of
35258 accuracy. Each pass at least doubles the accuracy, leading
35259 to the following. */
35260 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35261 if (mode == DFmode || mode == V2DFmode)
35262 passes++;
35263
35264 int i;
35265 rtx mhalf;
35266 enum insn_code code = optab_handler (smul_optab, mode);
35267 insn_gen_fn gen_mul = GEN_FCN (code);
35268
35269 gcc_assert (code != CODE_FOR_nothing);
35270
35271 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35272
35273 /* e = rsqrt estimate */
35274 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35275 UNSPEC_RSQRT)));
35276
35277 /* If (src == 0.0), filter out the infinite estimate to prevent a NaN for sqrt(0.0). */
35278 if (!recip)
35279 {
35280 rtx zero = force_reg (mode, CONST0_RTX (mode));
35281
35282 if (mode == SFmode)
35283 {
35284 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35285 e, zero, mode, 0);
35286 if (target != e)
35287 emit_move_insn (e, target);
35288 }
35289 else
35290 {
35291 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35292 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35293 }
35294 }
35295
35296 /* g = sqrt estimate. */
35297 emit_insn (gen_mul (g, e, src));
35298 /* h = 1/(2*sqrt) estimate. */
35299 emit_insn (gen_mul (h, e, mhalf));
35300
35301 if (recip)
35302 {
35303 if (passes == 1)
35304 {
35305 rtx t = gen_reg_rtx (mode);
35306 rs6000_emit_nmsub (t, g, h, mhalf);
35307 /* Apply correction directly to 1/rsqrt estimate. */
35308 rs6000_emit_madd (dst, e, t, e);
35309 }
35310 else
35311 {
35312 for (i = 0; i < passes; i++)
35313 {
35314 rtx t1 = gen_reg_rtx (mode);
35315 rtx g1 = gen_reg_rtx (mode);
35316 rtx h1 = gen_reg_rtx (mode);
35317
35318 rs6000_emit_nmsub (t1, g, h, mhalf);
35319 rs6000_emit_madd (g1, g, t1, g);
35320 rs6000_emit_madd (h1, h, t1, h);
35321
35322 g = g1;
35323 h = h1;
35324 }
35325 /* Multiply by 2 for 1/rsqrt. */
35326 emit_insn (gen_add3_insn (dst, h, h));
35327 }
35328 }
35329 else
35330 {
35331 rtx t = gen_reg_rtx (mode);
35332 rs6000_emit_nmsub (t, g, h, mhalf);
35333 rs6000_emit_madd (dst, g, t, g);
35334 }
35335
35336 return;
35337 }
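
/* The refinement loop above maintains g ~= sqrt(src) and
   h ~= 1/(2*sqrt(src)). With the residual t = 1/2 - g*h computed by the
   nmsub, each step forms g' = g + g*t and h' = h + h*t, which roughly
   squares the relative error of both estimates per pass; for rsqrt the
   final h + h doubles h back up to 1/sqrt(src). */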
35338
35339 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35340 (Power7) targets. DST is the target, and SRC is the argument operand. */
35341
35342 void
35343 rs6000_emit_popcount (rtx dst, rtx src)
35344 {
35345 machine_mode mode = GET_MODE (dst);
35346 rtx tmp1, tmp2;
35347
35348 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35349 if (TARGET_POPCNTD)
35350 {
35351 if (mode == SImode)
35352 emit_insn (gen_popcntdsi2 (dst, src));
35353 else
35354 emit_insn (gen_popcntddi2 (dst, src));
35355 return;
35356 }
35357
35358 tmp1 = gen_reg_rtx (mode);
35359
35360 if (mode == SImode)
35361 {
35362 emit_insn (gen_popcntbsi2 (tmp1, src));
35363 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35364 NULL_RTX, 0);
35365 tmp2 = force_reg (SImode, tmp2);
35366 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35367 }
35368 else
35369 {
35370 emit_insn (gen_popcntbdi2 (tmp1, src));
35371 tmp2 = expand_mult (DImode, tmp1,
35372 GEN_INT ((HOST_WIDE_INT)
35373 0x01010101 << 32 | 0x01010101),
35374 NULL_RTX, 0);
35375 tmp2 = force_reg (DImode, tmp2);
35376 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35377 }
35378 }
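
/* Worked SImode example of the popcntb-plus-multiply fallback: for
   src = 0x000f00ff, popcntb produces the per-byte counts 0x00040008.
   Multiplying by 0x01010101 sums every byte into the top byte
   (0x00040008 * 0x01010101 == 0x0c0c0808 mod 2^32), and the shift right
   by 24 extracts the total: 0x0c = 12 set bits. */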
35379
35380
35381 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35382 target, and SRC is the argument operand. */
35383
35384 void
35385 rs6000_emit_parity (rtx dst, rtx src)
35386 {
35387 machine_mode mode = GET_MODE (dst);
35388 rtx tmp;
35389
35390 tmp = gen_reg_rtx (mode);
35391
35392 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35393 if (TARGET_CMPB)
35394 {
35395 if (mode == SImode)
35396 {
35397 emit_insn (gen_popcntbsi2 (tmp, src));
35398 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35399 }
35400 else
35401 {
35402 emit_insn (gen_popcntbdi2 (tmp, src));
35403 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35404 }
35405 return;
35406 }
35407
35408 if (mode == SImode)
35409 {
35410 /* Is mult+shift >= shift+xor+shift+xor? */
35411 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35412 {
35413 rtx tmp1, tmp2, tmp3, tmp4;
35414
35415 tmp1 = gen_reg_rtx (SImode);
35416 emit_insn (gen_popcntbsi2 (tmp1, src));
35417
35418 tmp2 = gen_reg_rtx (SImode);
35419 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35420 tmp3 = gen_reg_rtx (SImode);
35421 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35422
35423 tmp4 = gen_reg_rtx (SImode);
35424 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35425 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35426 }
35427 else
35428 rs6000_emit_popcount (tmp, src);
35429 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35430 }
35431 else
35432 {
35433 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35434 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35435 {
35436 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35437
35438 tmp1 = gen_reg_rtx (DImode);
35439 emit_insn (gen_popcntbdi2 (tmp1, src));
35440
35441 tmp2 = gen_reg_rtx (DImode);
35442 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35443 tmp3 = gen_reg_rtx (DImode);
35444 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35445
35446 tmp4 = gen_reg_rtx (DImode);
35447 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35448 tmp5 = gen_reg_rtx (DImode);
35449 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35450
35451 tmp6 = gen_reg_rtx (DImode);
35452 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35453 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35454 }
35455 else
35456 rs6000_emit_popcount (tmp, src);
35457 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35458 }
35459 }
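
/* The xor folding preserves parity because only bit 0 of each byte count
   matters: bit 0 of (a ^ b) equals (a + b) mod 2. E.g. for
   src = 0x000f00ff, popcntb gives 0x00040008; folding in the upper
   halfword yields 0x0004000c, folding in the next byte leaves the low
   byte 0x0c, and bit 0 of 0x0c is 0, the parity of twelve set bits. */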
35460
35461 /* Expand an Altivec constant permutation for little endian mode.
35462 OP0 and OP1 are the input vectors and TARGET is the output vector.
35463 SEL specifies the constant permutation vector.
35464
35465 There are two issues: First, the two input operands must be
35466 swapped so that together they form a double-wide array in LE
35467 order. Second, the vperm instruction has surprising behavior
35468 in LE mode: it interprets the elements of the source vectors
35469 in BE mode ("left to right") and interprets the elements of
35470 the destination vector in LE mode ("right to left"). To
35471 correct for this, we must subtract each element of the permute
35472 control vector from 31.
35473
35474 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35475 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35476 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35477 serve as the permute control vector. Then, in BE mode,
35478
35479 vperm 9,10,11,12
35480
35481 places the desired result in vr9. However, in LE mode the
35482 vector contents will be
35483
35484 vr10 = 00000003 00000002 00000001 00000000
35485 vr11 = 00000007 00000006 00000005 00000004
35486
35487 The result of the vperm using the same permute control vector is
35488
35489 vr9 = 05000000 07000000 01000000 03000000
35490
35491 That is, the leftmost 4 bytes of vr10 are interpreted as the
35492 source for the rightmost 4 bytes of vr9, and so on.
35493
35494 If we change the permute control vector to
35495
35496 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35497
35498 and issue
35499
35500 vperm 9,11,10,12
35501
35502 we get the desired
35503
35504 vr9 = 00000006 00000004 00000002 00000000. */
35505
35506 static void
35507 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35508 const vec_perm_indices &sel)
35509 {
35510 unsigned int i;
35511 rtx perm[16];
35512 rtx constv, unspec;
35513
35514 /* Unpack and adjust the constant selector. */
35515 for (i = 0; i < 16; ++i)
35516 {
35517 unsigned int elt = 31 - (sel[i] & 31);
35518 perm[i] = GEN_INT (elt);
35519 }
35520
35521 /* Expand to a permute, swapping the inputs and using the
35522 adjusted selector. */
35523 if (!REG_P (op0))
35524 op0 = force_reg (V16QImode, op0);
35525 if (!REG_P (op1))
35526 op1 = force_reg (V16QImode, op1);
35527
35528 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35529 constv = force_reg (V16QImode, constv);
35530 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35531 UNSPEC_VPERM);
35532 if (!REG_P (target))
35533 {
35534 rtx tmp = gen_reg_rtx (V16QImode);
35535 emit_move_insn (tmp, unspec);
35536 unspec = tmp;
35537 }
35538
35539 emit_move_insn (target, unspec);
35540 }
35541
35542 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35543 permute control vector. But here it's not a constant, so we must
35544 generate a vector NAND or NOR to do the adjustment. */
35545
35546 void
35547 altivec_expand_vec_perm_le (rtx operands[4])
35548 {
35549 rtx notx, iorx, unspec;
35550 rtx target = operands[0];
35551 rtx op0 = operands[1];
35552 rtx op1 = operands[2];
35553 rtx sel = operands[3];
35554 rtx tmp = target;
35555 rtx norreg = gen_reg_rtx (V16QImode);
35556 machine_mode mode = GET_MODE (target);
35557
35558 /* Get everything in regs so the pattern matches. */
35559 if (!REG_P (op0))
35560 op0 = force_reg (mode, op0);
35561 if (!REG_P (op1))
35562 op1 = force_reg (mode, op1);
35563 if (!REG_P (sel))
35564 sel = force_reg (V16QImode, sel);
35565 if (!REG_P (target))
35566 tmp = gen_reg_rtx (mode);
35567
35568 if (TARGET_P9_VECTOR)
35569 {
35570 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35571 UNSPEC_VPERMR);
35572 }
35573 else
35574 {
35575 /* Invert the selector with a VNAND if available, else a VNOR.
35576 The VNAND is preferred for future fusion opportunities. */
35577 notx = gen_rtx_NOT (V16QImode, sel);
35578 iorx = (TARGET_P8_VECTOR
35579 ? gen_rtx_IOR (V16QImode, notx, notx)
35580 : gen_rtx_AND (V16QImode, notx, notx));
35581 emit_insn (gen_rtx_SET (norreg, iorx));
35582
35583 /* Permute with operands reversed and adjusted selector. */
35584 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35585 UNSPEC_VPERM);
35586 }
35587
35588 /* Copy into target, possibly by way of a register. */
35589 if (!REG_P (target))
35590 {
35591 emit_move_insn (tmp, unspec);
35592 unspec = tmp;
35593 }
35594
35595 emit_move_insn (target, unspec);
35596 }
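
/* The NOT computes exactly the 31-minus adjustment of the constant case:
   vperm reads only the low five bits of each selector byte, and on those
   bits ~e = 31 - e (mod 32). VNAND and VNOR with both inputs equal to
   SEL are simply the two available encodings of that NOT. */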
35597
35598 /* Expand an Altivec constant permutation. Return true if we match
35599 an efficient implementation; false to fall back to VPERM.
35600
35601 OP0 and OP1 are the input vectors and TARGET is the output vector.
35602 SEL specifies the constant permutation vector. */
35603
35604 static bool
35605 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35606 const vec_perm_indices &sel)
35607 {
35608 struct altivec_perm_insn {
35609 HOST_WIDE_INT mask;
35610 enum insn_code impl;
35611 unsigned char perm[16];
35612 };
35613 static const struct altivec_perm_insn patterns[] = {
35614 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35615 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35616 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35617 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35618 { OPTION_MASK_ALTIVEC,
35619 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35620 : CODE_FOR_altivec_vmrglb_direct),
35621 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35622 { OPTION_MASK_ALTIVEC,
35623 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35624 : CODE_FOR_altivec_vmrglh_direct),
35625 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35626 { OPTION_MASK_ALTIVEC,
35627 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35628 : CODE_FOR_altivec_vmrglw_direct),
35629 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35630 { OPTION_MASK_ALTIVEC,
35631 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35632 : CODE_FOR_altivec_vmrghb_direct),
35633 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35634 { OPTION_MASK_ALTIVEC,
35635 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35636 : CODE_FOR_altivec_vmrghh_direct),
35637 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35638 { OPTION_MASK_ALTIVEC,
35639 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35640 : CODE_FOR_altivec_vmrghw_direct),
35641 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35642 { OPTION_MASK_P8_VECTOR,
35643 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35644 : CODE_FOR_p8_vmrgow_v4sf_direct),
35645 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35646 { OPTION_MASK_P8_VECTOR,
35647 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35648 : CODE_FOR_p8_vmrgew_v4sf_direct),
35649 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35650 };
35651
35652 unsigned int i, j, elt, which;
35653 unsigned char perm[16];
35654 rtx x;
35655 bool one_vec;
35656
35657 /* Unpack the constant selector. */
35658 for (i = which = 0; i < 16; ++i)
35659 {
35660 elt = sel[i] & 31;
35661 which |= (elt < 16 ? 1 : 2);
35662 perm[i] = elt;
35663 }
35664
35665 /* Simplify the constant selector based on operands. */
35666 switch (which)
35667 {
35668 default:
35669 gcc_unreachable ();
35670
35671 case 3:
35672 one_vec = false;
35673 if (!rtx_equal_p (op0, op1))
35674 break;
35675 /* FALLTHRU */
35676
35677 case 2:
35678 for (i = 0; i < 16; ++i)
35679 perm[i] &= 15;
35680 op0 = op1;
35681 one_vec = true;
35682 break;
35683
35684 case 1:
35685 op1 = op0;
35686 one_vec = true;
35687 break;
35688 }
35689
35690 /* Look for splat patterns. */
35691 if (one_vec)
35692 {
35693 elt = perm[0];
35694
35695 for (i = 0; i < 16; ++i)
35696 if (perm[i] != elt)
35697 break;
35698 if (i == 16)
35699 {
35700 if (!BYTES_BIG_ENDIAN)
35701 elt = 15 - elt;
35702 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35703 return true;
35704 }
35705
35706 if (elt % 2 == 0)
35707 {
35708 for (i = 0; i < 16; i += 2)
35709 if (perm[i] != elt || perm[i + 1] != elt + 1)
35710 break;
35711 if (i == 16)
35712 {
35713 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35714 x = gen_reg_rtx (V8HImode);
35715 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35716 GEN_INT (field)));
35717 emit_move_insn (target, gen_lowpart (V16QImode, x));
35718 return true;
35719 }
35720 }
35721
35722 if (elt % 4 == 0)
35723 {
35724 for (i = 0; i < 16; i += 4)
35725 if (perm[i] != elt
35726 || perm[i + 1] != elt + 1
35727 || perm[i + 2] != elt + 2
35728 || perm[i + 3] != elt + 3)
35729 break;
35730 if (i == 16)
35731 {
35732 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35733 x = gen_reg_rtx (V4SImode);
35734 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35735 GEN_INT (field)));
35736 emit_move_insn (target, gen_lowpart (V16QImode, x));
35737 return true;
35738 }
35739 }
35740 }
35741
35742 /* Look for merge and pack patterns. */
35743 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35744 {
35745 bool swapped;
35746
35747 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35748 continue;
35749
35750 elt = patterns[j].perm[0];
35751 if (perm[0] == elt)
35752 swapped = false;
35753 else if (perm[0] == elt + 16)
35754 swapped = true;
35755 else
35756 continue;
35757 for (i = 1; i < 16; ++i)
35758 {
35759 elt = patterns[j].perm[i];
35760 if (swapped)
35761 elt = (elt >= 16 ? elt - 16 : elt + 16);
35762 else if (one_vec && elt >= 16)
35763 elt -= 16;
35764 if (perm[i] != elt)
35765 break;
35766 }
35767 if (i == 16)
35768 {
35769 enum insn_code icode = patterns[j].impl;
35770 machine_mode omode = insn_data[icode].operand[0].mode;
35771 machine_mode imode = insn_data[icode].operand[1].mode;
35772
35773 /* For little-endian, don't use vpkuwum and vpkuhum if the
35774 underlying vector type is not V4SI and V8HI, respectively.
35775 For example, using vpkuwum with a V8HI picks up the even
35776 halfwords (BE numbering) when the even halfwords (LE
35777 numbering) are what we need. */
35778 if (!BYTES_BIG_ENDIAN
35779 && icode == CODE_FOR_altivec_vpkuwum_direct
35780 && ((REG_P (op0)
35781 && GET_MODE (op0) != V4SImode)
35782 || (SUBREG_P (op0)
35783 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35784 continue;
35785 if (!BYTES_BIG_ENDIAN
35786 && icode == CODE_FOR_altivec_vpkuhum_direct
35787 && ((REG_P (op0)
35788 && GET_MODE (op0) != V8HImode)
35789 || (SUBREG_P (op0)
35790 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35791 continue;
35792
35793 /* For little-endian, the two input operands must be swapped
35794 (or swapped back) to ensure proper right-to-left numbering
35795 from 0 to 2N-1. */
35796 if (swapped ^ !BYTES_BIG_ENDIAN)
35797 std::swap (op0, op1);
35798 if (imode != V16QImode)
35799 {
35800 op0 = gen_lowpart (imode, op0);
35801 op1 = gen_lowpart (imode, op1);
35802 }
35803 if (omode == V16QImode)
35804 x = target;
35805 else
35806 x = gen_reg_rtx (omode);
35807 emit_insn (GEN_FCN (icode) (x, op0, op1));
35808 if (omode != V16QImode)
35809 emit_move_insn (target, gen_lowpart (V16QImode, x));
35810 return true;
35811 }
35812 }
35813
35814 if (!BYTES_BIG_ENDIAN)
35815 {
35816 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35817 return true;
35818 }
35819
35820 return false;
35821 }
35822
35823 /* Expand a VSX Permute Doubleword constant permutation.
35824 Return true if we match an efficient implementation. */
35825
35826 static bool
35827 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35828 unsigned char perm0, unsigned char perm1)
35829 {
35830 rtx x;
35831
35832 /* If both selectors come from the same operand, fold to single op. */
35833 if ((perm0 & 2) == (perm1 & 2))
35834 {
35835 if (perm0 & 2)
35836 op0 = op1;
35837 else
35838 op1 = op0;
35839 }
35840 /* If both operands are equal, fold to simpler permutation. */
35841 if (rtx_equal_p (op0, op1))
35842 {
35843 perm0 = perm0 & 1;
35844 perm1 = (perm1 & 1) + 2;
35845 }
35846 /* If the first selector comes from the second operand, swap. */
35847 else if (perm0 & 2)
35848 {
35849 if (perm1 & 2)
35850 return false;
35851 perm0 -= 2;
35852 perm1 += 2;
35853 std::swap (op0, op1);
35854 }
35855 /* If the second selector does not come from the second operand, fail. */
35856 else if ((perm1 & 2) == 0)
35857 return false;
35858
35859 /* Success! */
35860 if (target != NULL)
35861 {
35862 machine_mode vmode, dmode;
35863 rtvec v;
35864
35865 vmode = GET_MODE (target);
35866 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35867 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35868 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35869 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35870 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35871 emit_insn (gen_rtx_SET (target, x));
35872 }
35873 return true;
35874 }
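
/* For example, selecting the even doublewords { op0[0], op1[0] } of a
   V2DF pair arrives as perm0 = 0, perm1 = 2 and maps directly onto one
   xxpermdi; perm0 = 2, perm1 = 1 also succeeds, by swapping the operands
   and rewriting the selectors to 0 and 3. */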
35875
35876 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35877
35878 static bool
35879 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35880 rtx op1, const vec_perm_indices &sel)
35881 {
35882 bool testing_p = !target;
35883
35884 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35885 if (TARGET_ALTIVEC && testing_p)
35886 return true;
35887
35888 /* Check for ps_merge* or xxpermdi insns. */
35889 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35890 {
35891 if (testing_p)
35892 {
35893 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35894 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35895 }
35896 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35897 return true;
35898 }
35899
35900 if (TARGET_ALTIVEC)
35901 {
35902 /* Force the target-independent code to lower to V16QImode. */
35903 if (vmode != V16QImode)
35904 return false;
35905 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35906 return true;
35907 }
35908
35909 return false;
35910 }
35911
35912 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35913 OP0 and OP1 are the input vectors and TARGET is the output vector.
35914 PERM specifies the constant permutation vector. */
35915
35916 static void
35917 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35918 machine_mode vmode, const vec_perm_builder &perm)
35919 {
35920 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35921 if (x != target)
35922 emit_move_insn (target, x);
35923 }
35924
35925 /* Expand an extract even operation. */
35926
35927 void
35928 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35929 {
35930 machine_mode vmode = GET_MODE (target);
35931 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35932 vec_perm_builder perm (nelt, nelt, 1);
35933
35934 for (i = 0; i < nelt; i++)
35935 perm.quick_push (i * 2);
35936
35937 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35938 }
35939
35940 /* Expand a vector interleave operation. */
35941
35942 void
35943 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35944 {
35945 machine_mode vmode = GET_MODE (target);
35946 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35947 vec_perm_builder perm (nelt, nelt, 1);
35948
35949 high = (highp ? 0 : nelt / 2);
35950 for (i = 0; i < nelt / 2; i++)
35951 {
35952 perm.quick_push (i + high);
35953 perm.quick_push (i + nelt + high);
35954 }
35955
35956 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35957 }
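
/* With nelt = 4 (e.g. V4SI), highp pushes the selector { 0, 4, 1, 5 }
   and !highp pushes { 2, 6, 3, 7 }, while rs6000_expand_extract_even
   above builds { 0, 2, 4, 6 }. */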
35958
35959 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT. */
35960 void
35961 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35962 {
35963 HOST_WIDE_INT hwi_scale (scale);
35964 REAL_VALUE_TYPE r_pow;
35965 rtvec v = rtvec_alloc (2);
35966 rtx elt;
35967 rtx scale_vec = gen_reg_rtx (V2DFmode);
35968 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35969 elt = const_double_from_real_value (r_pow, DFmode);
35970 RTVEC_ELT (v, 0) = elt;
35971 RTVEC_ELT (v, 1) = elt;
35972 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35973 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35974 }
35975
35976 /* Return an RTX representing where to find the function value of a
35977 function returning MODE. */
35978 static rtx
35979 rs6000_complex_function_value (machine_mode mode)
35980 {
35981 unsigned int regno;
35982 rtx r1, r2;
35983 machine_mode inner = GET_MODE_INNER (mode);
35984 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35985
35986 if (TARGET_FLOAT128_TYPE
35987 && (mode == KCmode
35988 || (mode == TCmode && TARGET_IEEEQUAD)))
35989 regno = ALTIVEC_ARG_RETURN;
35990
35991 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35992 regno = FP_ARG_RETURN;
35993
35994 else
35995 {
35996 regno = GP_ARG_RETURN;
35997
35998 /* 32-bit is OK since it'll go in r3/r4. */
35999 if (TARGET_32BIT && inner_bytes >= 4)
36000 return gen_rtx_REG (mode, regno);
36001 }
36002
36003 if (inner_bytes >= 8)
36004 return gen_rtx_REG (mode, regno);
36005
36006 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
36007 const0_rtx);
36008 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
36009 GEN_INT (inner_bytes));
36010 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
36011 }
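
/* E.g. a complex float (SCmode) under hard float reaches this point with
   regno = FP_ARG_RETURN and inner_bytes = 4, yielding a PARALLEL with
   the real part in FP_ARG_RETURN at offset 0 and the imaginary part in
   the following register at offset 4. */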
36012
36013 /* Return an rtx describing a return value of MODE as a PARALLEL
36014 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
36015 stride REG_STRIDE. */
36016
36017 static rtx
36018 rs6000_parallel_return (machine_mode mode,
36019 int n_elts, machine_mode elt_mode,
36020 unsigned int regno, unsigned int reg_stride)
36021 {
36022 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
36023
36024 int i;
36025 for (i = 0; i < n_elts; i++)
36026 {
36027 rtx r = gen_rtx_REG (elt_mode, regno);
36028 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
36029 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
36030 regno += reg_stride;
36031 }
36032
36033 return par;
36034 }
36035
36036 /* Target hook for TARGET_FUNCTION_VALUE.
36037
36038 An integer value is in r3 and a floating-point value is in fp1,
36039 unless -msoft-float. */
36040
36041 static rtx
36042 rs6000_function_value (const_tree valtype,
36043 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
36044 bool outgoing ATTRIBUTE_UNUSED)
36045 {
36046 machine_mode mode;
36047 unsigned int regno;
36048 machine_mode elt_mode;
36049 int n_elts;
36050
36051 /* Special handling for structs in darwin64. */
36052 if (TARGET_MACHO
36053 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
36054 {
36055 CUMULATIVE_ARGS valcum;
36056 rtx valret;
36057
36058 valcum.words = 0;
36059 valcum.fregno = FP_ARG_MIN_REG;
36060 valcum.vregno = ALTIVEC_ARG_MIN_REG;
36061 /* Do a trial code generation as if this were going to be passed as
36062 an argument; if any part goes in memory, we return NULL. */
36063 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
36064 if (valret)
36065 return valret;
36066 /* Otherwise fall through to standard ABI rules. */
36067 }
36068
36069 mode = TYPE_MODE (valtype);
36070
36071 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
36072 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
36073 {
36074 int first_reg, n_regs;
36075
36076 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
36077 {
36078 /* _Decimal128 must use even/odd register pairs. */
36079 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36080 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
36081 }
36082 else
36083 {
36084 first_reg = ALTIVEC_ARG_RETURN;
36085 n_regs = 1;
36086 }
36087
36088 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
36089 }
36090
36091 /* Some return value types need to be split for -mpowerpc64 with the 32-bit ABI. */
36092 if (TARGET_32BIT && TARGET_POWERPC64)
36093 switch (mode)
36094 {
36095 default:
36096 break;
36097 case E_DImode:
36098 case E_SCmode:
36099 case E_DCmode:
36100 case E_TCmode:
36101 int count = GET_MODE_SIZE (mode) / 4;
36102 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
36103 }
36104
36105 if ((INTEGRAL_TYPE_P (valtype)
36106 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
36107 || POINTER_TYPE_P (valtype))
36108 mode = TARGET_32BIT ? SImode : DImode;
36109
36110 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36111 /* _Decimal128 must use an even/odd register pair. */
36112 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36113 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
36114 && !FLOAT128_VECTOR_P (mode))
36115 regno = FP_ARG_RETURN;
36116 else if (TREE_CODE (valtype) == COMPLEX_TYPE
36117 && targetm.calls.split_complex_arg)
36118 return rs6000_complex_function_value (mode);
36119 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36120 return register is used in both cases, and we won't see V2DImode/V2DFmode
36121 for pure altivec, combine the two cases. */
36122 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
36123 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
36124 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
36125 regno = ALTIVEC_ARG_RETURN;
36126 else
36127 regno = GP_ARG_RETURN;
36128
36129 return gen_rtx_REG (mode, regno);
36130 }
36131
36132 /* Define how to find the value returned by a library function
36133 assuming the value has mode MODE. */
36134 rtx
36135 rs6000_libcall_value (machine_mode mode)
36136 {
36137 unsigned int regno;
36138
36139 /* A long long return value needs to be split for -mpowerpc64 with the 32-bit ABI. */
36140 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
36141 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
36142
36143 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36144 /* _Decimal128 must use an even/odd register pair. */
36145 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36146 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
36147 regno = FP_ARG_RETURN;
36148 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36149 return register is used in both cases, and we won't see V2DImode/V2DFmode
36150 for pure altivec, combine the two cases. */
36151 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36152 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36153 regno = ALTIVEC_ARG_RETURN;
36154 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36155 return rs6000_complex_function_value (mode);
36156 else
36157 regno = GP_ARG_RETURN;
36158
36159 return gen_rtx_REG (mode, regno);
36160 }
36161
36162 /* Compute register pressure classes. We implement the target hook to avoid
36163 IRA picking something like GEN_OR_FLOAT_REGS as a pressure class, which can
36164 lead to incorrect estimates of the number of available registers and
36165 therefore to increased register pressure and spilling. */
36166 static int
36167 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36168 {
36169 int n;
36170
36171 n = 0;
36172 pressure_classes[n++] = GENERAL_REGS;
36173 if (TARGET_VSX)
36174 pressure_classes[n++] = VSX_REGS;
36175 else
36176 {
36177 if (TARGET_ALTIVEC)
36178 pressure_classes[n++] = ALTIVEC_REGS;
36179 if (TARGET_HARD_FLOAT)
36180 pressure_classes[n++] = FLOAT_REGS;
36181 }
36182 pressure_classes[n++] = CR_REGS;
36183 pressure_classes[n++] = SPECIAL_REGS;
36184
36185 return n;
36186 }
36187
36188 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36189 Frame pointer elimination is automatically handled.
36190
36191 For the RS/6000, if frame pointer elimination is being done, we would like
36192 to convert ap into fp, not sp.
36193
36194 We need r30 if -mminimal-toc was specified, and there are constant pool
36195 references. */
36196
36197 static bool
36198 rs6000_can_eliminate (const int from, const int to)
36199 {
36200 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36201 ? ! frame_pointer_needed
36202 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36203 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36204 || constant_pool_empty_p ()
36205 : true);
36206 }
36207
36208 /* Define the offset between two registers, FROM to be eliminated and its
36209 replacement TO, at the start of a routine. */
36210 HOST_WIDE_INT
36211 rs6000_initial_elimination_offset (int from, int to)
36212 {
36213 rs6000_stack_t *info = rs6000_stack_info ();
36214 HOST_WIDE_INT offset;
36215
36216 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36217 offset = info->push_p ? 0 : -info->total_size;
36218 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36219 {
36220 offset = info->push_p ? 0 : -info->total_size;
36221 if (FRAME_GROWS_DOWNWARD)
36222 offset += info->fixed_size + info->vars_size + info->parm_size;
36223 }
36224 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36225 offset = FRAME_GROWS_DOWNWARD
36226 ? info->fixed_size + info->vars_size + info->parm_size
36227 : 0;
36228 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36229 offset = info->total_size;
36230 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36231 offset = info->push_p ? info->total_size : 0;
36232 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36233 offset = 0;
36234 else
36235 gcc_unreachable ();
36236
36237 return offset;
36238 }
36239
36240 /* Fill in the sizes of the registers used by the unwinder. */
36241
36242 static void
36243 rs6000_init_dwarf_reg_sizes_extra (tree address)
36244 {
36245 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36246 {
36247 int i;
36248 machine_mode mode = TYPE_MODE (char_type_node);
36249 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36250 rtx mem = gen_rtx_MEM (BLKmode, addr);
36251 rtx value = gen_int_mode (16, mode);
36252
36253 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36254 The unwinder still needs to know the size of Altivec registers. */
36255
36256 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36257 {
36258 int column = DWARF_REG_TO_UNWIND_COLUMN
36259 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36260 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36261
36262 emit_move_insn (adjust_address (mem, mode, offset), value);
36263 }
36264 }
36265 }
36266
36267 /* Map internal gcc register numbers to debug format register numbers.
36268 FORMAT specifies the type of debug register number to use:
36269 0 -- debug information, except for frame-related sections
36270 1 -- DWARF .debug_frame section
36271 2 -- DWARF .eh_frame section */
36272
36273 unsigned int
36274 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36275 {
36276 /* On some platforms, we use the standard DWARF register
36277 numbering for .debug_info and .debug_frame. */
36278 if ((format == 0 && write_symbols == DWARF2_DEBUG) || format == 1)
36279 {
36280 #ifdef RS6000_USE_DWARF_NUMBERING
36281 if (regno <= 31)
36282 return regno;
36283 if (FP_REGNO_P (regno))
36284 return regno - FIRST_FPR_REGNO + 32;
36285 if (ALTIVEC_REGNO_P (regno))
36286 return regno - FIRST_ALTIVEC_REGNO + 1124;
36287 if (regno == LR_REGNO)
36288 return 108;
36289 if (regno == CTR_REGNO)
36290 return 109;
36291 if (regno == CA_REGNO)
36292 return 101; /* XER */
36293 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36294 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36295 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36296 to the DWARF reg for CR. */
36297 if (format == 1 && regno == CR2_REGNO)
36298 return 64;
36299 if (CR_REGNO_P (regno))
36300 return regno - CR0_REGNO + 86;
36301 if (regno == VRSAVE_REGNO)
36302 return 356;
36303 if (regno == VSCR_REGNO)
36304 return 67;
36305 if (regno == TFHAR_REGNO)
36306 return 228;
36307 if (regno == TFIAR_REGNO)
36308 return 229;
36309 if (regno == TEXASR_REGNO)
36310 return 230;
36311
36312 /* These have no hardware counterpart, so the numbers are somewhat
36312.1 arbitrary. */
36313 if (regno == FRAME_POINTER_REGNUM)
36314 return 111;
36315 if (regno == ARG_POINTER_REGNUM)
36316 return 67;
36317 if (regno == 64)
36318 return 100;
36319
36320 gcc_unreachable ();
36321 #endif
36322 }
36323
36324 /* For non-DWARF debug information, and also for .eh_frame, we use
36325 the GCC 7 (and before) internal register numbers. */
36327 if (regno <= 31)
36328 return regno;
36329 if (FP_REGNO_P (regno))
36330 return regno - FIRST_FPR_REGNO + 32;
36331 if (ALTIVEC_REGNO_P (regno))
36332 return regno - FIRST_ALTIVEC_REGNO + 77;
36333 if (regno == LR_REGNO)
36334 return 65;
36335 if (regno == CTR_REGNO)
36336 return 66;
36337 if (regno == CA_REGNO)
36338 return 76; /* XER */
36339 if (CR_REGNO_P (regno))
36340 return regno - CR0_REGNO + 68;
36341 if (regno == VRSAVE_REGNO)
36342 return 109;
36343 if (regno == VSCR_REGNO)
36344 return 110;
36345 if (regno == TFHAR_REGNO)
36346 return 114;
36347 if (regno == TFIAR_REGNO)
36348 return 115;
36349 if (regno == TEXASR_REGNO)
36350 return 116;
36351
36352 if (regno == FRAME_POINTER_REGNUM)
36353 return 111;
36354 if (regno == ARG_POINTER_REGNUM)
36355 return 67;
36356 if (regno == 64)
36357 return 64;
36358
36359 gcc_unreachable ();
36360 }
36361
36362 /* target hook eh_return_filter_mode */
36363 static scalar_int_mode
36364 rs6000_eh_return_filter_mode (void)
36365 {
36366 return TARGET_32BIT ? SImode : word_mode;
36367 }
36368
36369 /* Target hook for translate_mode_attribute. */
36370 static machine_mode
36371 rs6000_translate_mode_attribute (machine_mode mode)
36372 {
36373 if ((FLOAT128_IEEE_P (mode)
36374 && ieee128_float_type_node == long_double_type_node)
36375 || (FLOAT128_IBM_P (mode)
36376 && ibm128_float_type_node == long_double_type_node))
36377 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36378 return mode;
36379 }
36380
36381 /* Target hook for scalar_mode_supported_p. */
36382 static bool
36383 rs6000_scalar_mode_supported_p (scalar_mode mode)
36384 {
36385 /* -m32 does not support TImode. This is the default, from
36386 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36387 same ABI as for -m32. But default_scalar_mode_supported_p allows
36388 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36389 for -mpowerpc64. */
36390 if (TARGET_32BIT && mode == TImode)
36391 return false;
36392
36393 if (DECIMAL_FLOAT_MODE_P (mode))
36394 return default_decimal_float_supported_p ();
36395 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36396 return true;
36397 else
36398 return default_scalar_mode_supported_p (mode);
36399 }
36400
36401 /* Target hook for vector_mode_supported_p. */
36402 static bool
36403 rs6000_vector_mode_supported_p (machine_mode mode)
36404 {
36405 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36406 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36407 double-double. */
36408 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36409 return true;
36410
36411 else
36412 return false;
36413 }
36414
36415 /* Target hook for floatn_mode. */
36416 static opt_scalar_float_mode
36417 rs6000_floatn_mode (int n, bool extended)
36418 {
36419 if (extended)
36420 {
36421 switch (n)
36422 {
36423 case 32:
36424 return DFmode;
36425
36426 case 64:
36427 if (TARGET_FLOAT128_TYPE)
36428 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36429 else
36430 return opt_scalar_float_mode ();
36431
36432 case 128:
36433 return opt_scalar_float_mode ();
36434
36435 default:
36436 /* Those are the only valid _FloatNx types. */
36437 gcc_unreachable ();
36438 }
36439 }
36440 else
36441 {
36442 switch (n)
36443 {
36444 case 32:
36445 return SFmode;
36446
36447 case 64:
36448 return DFmode;
36449
36450 case 128:
36451 if (TARGET_FLOAT128_TYPE)
36452 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36453 else
36454 return opt_scalar_float_mode ();
36455
36456 default:
36457 return opt_scalar_float_mode ();
36458 }
36459 }
36460
36461 }
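/* So, for example (assuming TARGET_FLOAT128_TYPE), _Float128 (N == 128,
   !EXTENDED) resolves to KFmode while long double is IBM double-double,
   and to TFmode once -mabi=ieeelongdouble makes TFmode itself the IEEE
   128-bit format.  */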
36462
36463 /* Target hook for c_mode_for_suffix. */
36464 static machine_mode
36465 rs6000_c_mode_for_suffix (char suffix)
36466 {
36467 if (TARGET_FLOAT128_TYPE)
36468 {
36469 if (suffix == 'q' || suffix == 'Q')
36470 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36471
36472 /* At the moment, we are not defining a suffix for IBM extended double.
36473 If/when the default for -mabi=ieeelongdouble is changed, and we want
36474 to support __ibm128 constants in legacy library code, we may need to
36475 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36476 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36477 __float80 constants. */
36478 }
36479
36480 return VOIDmode;
36481 }
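/* This is what lets a user write, e.g. (sketch; requires a target with
   -mfloat128 enabled):

     __float128 third = 1.0q / 3.0q;

   with the 'q'/'Q' suffix selecting the IEEE 128-bit mode chosen
   above.  */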
36482
36483 /* Target hook for invalid_arg_for_unprototyped_fn. */
36484 static const char *
36485 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36486 {
36487 return (!rs6000_darwin64_abi
36488 && typelist == 0
36489 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36490 && (funcdecl == NULL_TREE
36491 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36492 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36493 ? N_("AltiVec argument passed to unprototyped function")
36494 : NULL;
36495 }
36496
36497 /* For TARGET_SECURE_PLT 32-bit PIC code we can avoid the PIC register
36498 setup by using the hidden function __stack_chk_fail_local instead of
36499 calling __stack_chk_fail directly. Otherwise it is better to call
36500 __stack_chk_fail directly. */
36501
36502 static tree ATTRIBUTE_UNUSED
36503 rs6000_stack_protect_fail (void)
36504 {
36505 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36506 ? default_hidden_stack_protect_fail ()
36507 : default_external_stack_protect_fail ();
36508 }
36509
36510 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36511
36512 #if TARGET_ELF
36513 static unsigned HOST_WIDE_INT
36514 rs6000_asan_shadow_offset (void)
36515 {
36516 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36517 }
36518 #endif
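/* Illustrative only: with the offsets above, libasan forms shadow
   addresses as (addr >> 3) + offset, so on a 64-bit target the byte
   shadowing address 0x10000000 lives at
   (0x10000000 >> 3) + (1ULL << 41) == 0x20002000000.  */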
36519 \f
36520 /* Mask options that we want to support inside of attribute((target)) and
36521 #pragma GCC target operations. Note, we do not include things like
36522 64/32-bit, endianness, hard/soft floating point, etc. that would have
36523 different calling sequences. */
36524
36525 struct rs6000_opt_mask {
36526 const char *name; /* option name */
36527 HOST_WIDE_INT mask; /* mask to set */
36528 bool invert; /* invert sense of mask */
36529 bool valid_target; /* option is a target option */
36530 };
36531
36532 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36533 {
36534 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36535 { "cmpb", OPTION_MASK_CMPB, false, true },
36536 { "crypto", OPTION_MASK_CRYPTO, false, true },
36537 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36538 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36539 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36540 false, true },
36541 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36542 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36543 { "fprnd", OPTION_MASK_FPRND, false, true },
36544 { "hard-dfp", OPTION_MASK_DFP, false, true },
36545 { "htm", OPTION_MASK_HTM, false, true },
36546 { "isel", OPTION_MASK_ISEL, false, true },
36547 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36548 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36549 { "modulo", OPTION_MASK_MODULO, false, true },
36550 { "mulhw", OPTION_MASK_MULHW, false, true },
36551 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36552 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36553 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36554 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36555 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36556 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36557 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36558 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36559 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36560 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36561 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36562 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36563 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36564 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36565 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36566 { "string", 0, false, true },
36567 { "update", OPTION_MASK_NO_UPDATE, true , true },
36568 { "vsx", OPTION_MASK_VSX, false, true },
36569 #ifdef OPTION_MASK_64BIT
36570 #if TARGET_AIX_OS
36571 { "aix64", OPTION_MASK_64BIT, false, false },
36572 { "aix32", OPTION_MASK_64BIT, true, false },
36573 #else
36574 { "64", OPTION_MASK_64BIT, false, false },
36575 { "32", OPTION_MASK_64BIT, true, false },
36576 #endif
36577 #endif
36578 #ifdef OPTION_MASK_EABI
36579 { "eabi", OPTION_MASK_EABI, false, false },
36580 #endif
36581 #ifdef OPTION_MASK_LITTLE_ENDIAN
36582 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36583 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36584 #endif
36585 #ifdef OPTION_MASK_RELOCATABLE
36586 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36587 #endif
36588 #ifdef OPTION_MASK_STRICT_ALIGN
36589 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36590 #endif
36591 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36592 { "string", 0, false, false },
36593 };
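/* The names above are what users write inside the target
   attribute/pragma, e.g. __attribute__((target ("no-vsx,htm"))); a
   leading "no-" is peeled off in rs6000_inner_target_options below
   before the table is searched.  */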
36594
36595 /* Builtin mask mapping for printing the flags. */
36596 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36597 {
36598 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36599 { "vsx", RS6000_BTM_VSX, false, false },
36600 { "fre", RS6000_BTM_FRE, false, false },
36601 { "fres", RS6000_BTM_FRES, false, false },
36602 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36603 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36604 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36605 { "cell", RS6000_BTM_CELL, false, false },
36606 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36607 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36608 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36609 { "crypto", RS6000_BTM_CRYPTO, false, false },
36610 { "htm", RS6000_BTM_HTM, false, false },
36611 { "hard-dfp", RS6000_BTM_DFP, false, false },
36612 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36613 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36614 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36615 { "float128", RS6000_BTM_FLOAT128, false, false },
36616 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36617 };
36618
36619 /* Option variables that we want to support inside attribute((target)) and
36620 #pragma GCC target operations. */
36621
36622 struct rs6000_opt_var {
36623 const char *name; /* option name */
36624 size_t global_offset; /* offset of the option in global_options. */
36625 size_t target_offset; /* offset of the option in target options. */
36626 };
36627
36628 static struct rs6000_opt_var const rs6000_opt_vars[] =
36629 {
36630 { "friz",
36631 offsetof (struct gcc_options, x_TARGET_FRIZ),
36632 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36633 { "avoid-indexed-addresses",
36634 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36635 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36636 { "longcall",
36637 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36638 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36639 { "optimize-swaps",
36640 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36641 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36642 { "allow-movmisalign",
36643 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36644 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36645 { "sched-groups",
36646 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36647 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36648 { "always-hint",
36649 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36650 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36651 { "align-branch-targets",
36652 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36653 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36654 { "tls-markers",
36655 offsetof (struct gcc_options, x_tls_markers),
36656 offsetof (struct cl_target_option, x_tls_markers), },
36657 { "sched-prolog",
36658 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36659 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36660 { "sched-epilog",
36661 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36662 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36663 { "speculate-indirect-jumps",
36664 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36665 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36666 };
36667
36668 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36669 parsing. Return true if there were no errors. */
36670
36671 static bool
36672 rs6000_inner_target_options (tree args, bool attr_p)
36673 {
36674 bool ret = true;
36675
36676 if (args == NULL_TREE)
36677 ;
36678
36679 else if (TREE_CODE (args) == STRING_CST)
36680 {
36681 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36682 char *q;
36683
36684 while ((q = strtok (p, ",")) != NULL)
36685 {
36686 bool error_p = false;
36687 bool not_valid_p = false;
36688 const char *cpu_opt = NULL;
36689
36690 p = NULL;
36691 if (strncmp (q, "cpu=", 4) == 0)
36692 {
36693 int cpu_index = rs6000_cpu_name_lookup (q+4);
36694 if (cpu_index >= 0)
36695 rs6000_cpu_index = cpu_index;
36696 else
36697 {
36698 error_p = true;
36699 cpu_opt = q+4;
36700 }
36701 }
36702 else if (strncmp (q, "tune=", 5) == 0)
36703 {
36704 int tune_index = rs6000_cpu_name_lookup (q+5);
36705 if (tune_index >= 0)
36706 rs6000_tune_index = tune_index;
36707 else
36708 {
36709 error_p = true;
36710 cpu_opt = q+5;
36711 }
36712 }
36713 else
36714 {
36715 size_t i;
36716 bool invert = false;
36717 char *r = q;
36718
36719 error_p = true;
36720 if (strncmp (r, "no-", 3) == 0)
36721 {
36722 invert = true;
36723 r += 3;
36724 }
36725
36726 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36727 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36728 {
36729 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36730
36731 if (!rs6000_opt_masks[i].valid_target)
36732 not_valid_p = true;
36733 else
36734 {
36735 error_p = false;
36736 rs6000_isa_flags_explicit |= mask;
36737
36738 /* VSX needs altivec, so -mvsx automagically sets
36739 altivec and disables -mavoid-indexed-addresses. */
36740 if (!invert)
36741 {
36742 if (mask == OPTION_MASK_VSX)
36743 {
36744 mask |= OPTION_MASK_ALTIVEC;
36745 TARGET_AVOID_XFORM = 0;
36746 }
36747 }
36748
36749 if (rs6000_opt_masks[i].invert)
36750 invert = !invert;
36751
36752 if (invert)
36753 rs6000_isa_flags &= ~mask;
36754 else
36755 rs6000_isa_flags |= mask;
36756 }
36757 break;
36758 }
36759
36760 if (error_p && !not_valid_p)
36761 {
36762 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36763 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36764 {
36765 size_t j = rs6000_opt_vars[i].global_offset;
36766 *((int *) ((char *)&global_options + j)) = !invert;
36767 error_p = false;
36768 not_valid_p = false;
36769 break;
36770 }
36771 }
36772 }
36773
36774 if (error_p)
36775 {
36776 const char *eprefix, *esuffix;
36777
36778 ret = false;
36779 if (attr_p)
36780 {
36781 eprefix = "__attribute__((__target__(";
36782 esuffix = ")))";
36783 }
36784 else
36785 {
36786 eprefix = "#pragma GCC target ";
36787 esuffix = "";
36788 }
36789
36790 if (cpu_opt)
36791 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36792 q, esuffix);
36793 else if (not_valid_p)
36794 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36795 else
36796 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36797 }
36798 }
36799 }
36800
36801 else if (TREE_CODE (args) == TREE_LIST)
36802 {
36803 do
36804 {
36805 tree value = TREE_VALUE (args);
36806 if (value)
36807 {
36808 bool ret2 = rs6000_inner_target_options (value, attr_p);
36809 if (!ret2)
36810 ret = false;
36811 }
36812 args = TREE_CHAIN (args);
36813 }
36814 while (args != NULL_TREE);
36815 }
36816
36817 else
36818 {
36819 error ("attribute %<target%> argument not a string");
36820 return false;
36821 }
36822
36823 return ret;
36824 }
36825
36826 /* Print out the target options as a list for -mdebug=target. */
36827
36828 static void
36829 rs6000_debug_target_options (tree args, const char *prefix)
36830 {
36831 if (args == NULL_TREE)
36832 fprintf (stderr, "%s<NULL>", prefix);
36833
36834 else if (TREE_CODE (args) == STRING_CST)
36835 {
36836 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36837 char *q;
36838
36839 while ((q = strtok (p, ",")) != NULL)
36840 {
36841 p = NULL;
36842 fprintf (stderr, "%s\"%s\"", prefix, q);
36843 prefix = ", ";
36844 }
36845 }
36846
36847 else if (TREE_CODE (args) == TREE_LIST)
36848 {
36849 do
36850 {
36851 tree value = TREE_VALUE (args);
36852 if (value)
36853 {
36854 rs6000_debug_target_options (value, prefix);
36855 prefix = ", ";
36856 }
36857 args = TREE_CHAIN (args);
36858 }
36859 while (args != NULL_TREE);
36860 }
36861
36862 else
36863 gcc_unreachable ();
36864
36865 return;
36866 }
36867
36868 \f
36869 /* Hook to validate attribute((target("..."))). */
36870
36871 static bool
36872 rs6000_valid_attribute_p (tree fndecl,
36873 tree ARG_UNUSED (name),
36874 tree args,
36875 int flags)
36876 {
36877 struct cl_target_option cur_target;
36878 bool ret;
36879 tree old_optimize;
36880 tree new_target, new_optimize;
36881 tree func_optimize;
36882
36883 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36884
36885 if (TARGET_DEBUG_TARGET)
36886 {
36887 tree tname = DECL_NAME (fndecl);
36888 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36889 if (tname)
36890 fprintf (stderr, "function: %.*s\n",
36891 (int) IDENTIFIER_LENGTH (tname),
36892 IDENTIFIER_POINTER (tname));
36893 else
36894 fprintf (stderr, "function: unknown\n");
36895
36896 fprintf (stderr, "args:");
36897 rs6000_debug_target_options (args, " ");
36898 fprintf (stderr, "\n");
36899
36900 if (flags)
36901 fprintf (stderr, "flags: 0x%x\n", flags);
36902
36903 fprintf (stderr, "--------------------\n");
36904 }
36905
36906 /* attribute((target("default"))) does nothing, beyond
36907 affecting multi-versioning. */
36908 if (TREE_VALUE (args)
36909 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36910 && TREE_CHAIN (args) == NULL_TREE
36911 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36912 return true;
36913
36914 old_optimize = build_optimization_node (&global_options);
36915 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36916
36917 /* If the function changed the optimization levels as well as setting target
36918 options, start with the optimizations specified. */
36919 if (func_optimize && func_optimize != old_optimize)
36920 cl_optimization_restore (&global_options,
36921 TREE_OPTIMIZATION (func_optimize));
36922
36923 /* The target attributes may also change some optimization flags, so update
36924 the optimization options if necessary. */
36925 cl_target_option_save (&cur_target, &global_options);
36926 rs6000_cpu_index = rs6000_tune_index = -1;
36927 ret = rs6000_inner_target_options (args, true);
36928
36929 /* Set up any additional state. */
36930 if (ret)
36931 {
36932 ret = rs6000_option_override_internal (false);
36933 new_target = build_target_option_node (&global_options);
36934 }
36935 else
36936 new_target = NULL;
36937
36938 new_optimize = build_optimization_node (&global_options);
36939
36940 if (!new_target)
36941 ret = false;
36942
36943 else if (fndecl)
36944 {
36945 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36946
36947 if (old_optimize != new_optimize)
36948 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36949 }
36950
36951 cl_target_option_restore (&global_options, &cur_target);
36952
36953 if (old_optimize != new_optimize)
36954 cl_optimization_restore (&global_options,
36955 TREE_OPTIMIZATION (old_optimize));
36956
36957 return ret;
36958 }
36959
36960 \f
36961 /* Hook to validate the current #pragma GCC target and set the state, and
36962 update the macros based on what was changed. If ARGS is NULL, then
36963 POP_TARGET is used to reset the options. */
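/* A typical use (sketch):

     #pragma GCC push_options
     #pragma GCC target ("cpu=power9,vsx")
     ... code compiled with the power9/VSX flags ...
     #pragma GCC pop_options

   The pop arrives here with ARGS == NULL and POP_TARGET set to the
   previously saved option node.  */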
36964
36965 bool
36966 rs6000_pragma_target_parse (tree args, tree pop_target)
36967 {
36968 tree prev_tree = build_target_option_node (&global_options);
36969 tree cur_tree;
36970 struct cl_target_option *prev_opt, *cur_opt;
36971 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36972 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36973
36974 if (TARGET_DEBUG_TARGET)
36975 {
36976 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36977 fprintf (stderr, "args:");
36978 rs6000_debug_target_options (args, " ");
36979 fprintf (stderr, "\n");
36980
36981 if (pop_target)
36982 {
36983 fprintf (stderr, "pop_target:\n");
36984 debug_tree (pop_target);
36985 }
36986 else
36987 fprintf (stderr, "pop_target: <NULL>\n");
36988
36989 fprintf (stderr, "--------------------\n");
36990 }
36991
36992 if (! args)
36993 {
36994 cur_tree = ((pop_target)
36995 ? pop_target
36996 : target_option_default_node);
36997 cl_target_option_restore (&global_options,
36998 TREE_TARGET_OPTION (cur_tree));
36999 }
37000 else
37001 {
37002 rs6000_cpu_index = rs6000_tune_index = -1;
37003 if (!rs6000_inner_target_options (args, false)
37004 || !rs6000_option_override_internal (false)
37005 || (cur_tree = build_target_option_node (&global_options))
37006 == NULL_TREE)
37007 {
37008 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
37009 fprintf (stderr, "invalid pragma\n");
37010
37011 return false;
37012 }
37013 }
37014
37015 target_option_current_node = cur_tree;
37016 rs6000_activate_target_options (target_option_current_node);
37017
37018 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
37019 change the macros that are defined. */
37020 if (rs6000_target_modify_macros_ptr)
37021 {
37022 prev_opt = TREE_TARGET_OPTION (prev_tree);
37023 prev_bumask = prev_opt->x_rs6000_builtin_mask;
37024 prev_flags = prev_opt->x_rs6000_isa_flags;
37025
37026 cur_opt = TREE_TARGET_OPTION (cur_tree);
37027 cur_flags = cur_opt->x_rs6000_isa_flags;
37028 cur_bumask = cur_opt->x_rs6000_builtin_mask;
37029
37030 diff_bumask = (prev_bumask ^ cur_bumask);
37031 diff_flags = (prev_flags ^ cur_flags);
37032
37033 if ((diff_flags != 0) || (diff_bumask != 0))
37034 {
37035 /* Delete old macros. */
37036 rs6000_target_modify_macros_ptr (false,
37037 prev_flags & diff_flags,
37038 prev_bumask & diff_bumask);
37039
37040 /* Define new macros. */
37041 rs6000_target_modify_macros_ptr (true,
37042 cur_flags & diff_flags,
37043 cur_bumask & diff_bumask);
37044 }
37045 }
37046
37047 return true;
37048 }
37049
37050 \f
37051 /* Remember the last target of rs6000_set_current_function. */
37052 static GTY(()) tree rs6000_previous_fndecl;
37053
37054 /* Restore target's globals from NEW_TREE and invalidate the
37055 rs6000_previous_fndecl cache. */
37056
37057 void
37058 rs6000_activate_target_options (tree new_tree)
37059 {
37060 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
37061 if (TREE_TARGET_GLOBALS (new_tree))
37062 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
37063 else if (new_tree == target_option_default_node)
37064 restore_target_globals (&default_target_globals);
37065 else
37066 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
37067 rs6000_previous_fndecl = NULL_TREE;
37068 }
37069
37070 /* Establish appropriate back-end context for processing the function
37071 FNDECL. The argument might be NULL to indicate processing at top
37072 level, outside of any function scope. */
37073 static void
37074 rs6000_set_current_function (tree fndecl)
37075 {
37076 if (TARGET_DEBUG_TARGET)
37077 {
37078 fprintf (stderr, "\n==================== rs6000_set_current_function");
37079
37080 if (fndecl)
37081 fprintf (stderr, ", fndecl %s (%p)",
37082 (DECL_NAME (fndecl)
37083 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
37084 : "<unknown>"), (void *)fndecl);
37085
37086 if (rs6000_previous_fndecl)
37087 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
37088
37089 fprintf (stderr, "\n");
37090 }
37091
37092 /* Only change the context if the function changes. This hook is called
37093 several times in the course of compiling a function, and we don't want to
37094 slow things down too much or call target_reinit when it isn't safe. */
37095 if (fndecl == rs6000_previous_fndecl)
37096 return;
37097
37098 tree old_tree;
37099 if (rs6000_previous_fndecl == NULL_TREE)
37100 old_tree = target_option_current_node;
37101 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
37102 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
37103 else
37104 old_tree = target_option_default_node;
37105
37106 tree new_tree;
37107 if (fndecl == NULL_TREE)
37108 {
37109 if (old_tree != target_option_current_node)
37110 new_tree = target_option_current_node;
37111 else
37112 new_tree = NULL_TREE;
37113 }
37114 else
37115 {
37116 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37117 if (new_tree == NULL_TREE)
37118 new_tree = target_option_default_node;
37119 }
37120
37121 if (TARGET_DEBUG_TARGET)
37122 {
37123 if (new_tree)
37124 {
37125 fprintf (stderr, "\nnew fndecl target specific options:\n");
37126 debug_tree (new_tree);
37127 }
37128
37129 if (old_tree)
37130 {
37131 fprintf (stderr, "\nold fndecl target specific options:\n");
37132 debug_tree (old_tree);
37133 }
37134
37135 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
37136 fprintf (stderr, "--------------------\n");
37137 }
37138
37139 if (new_tree && old_tree != new_tree)
37140 rs6000_activate_target_options (new_tree);
37141
37142 if (fndecl)
37143 rs6000_previous_fndecl = fndecl;
37144 }
37145
37146 \f
37147 /* Save the current options */
37148
37149 static void
37150 rs6000_function_specific_save (struct cl_target_option *ptr,
37151 struct gcc_options *opts)
37152 {
37153 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
37154 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
37155 }
37156
37157 /* Restore the current options */
37158
37159 static void
37160 rs6000_function_specific_restore (struct gcc_options *opts,
37161 struct cl_target_option *ptr)
37162
37163 {
37164 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
37165 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
37166 (void) rs6000_option_override_internal (false);
37167 }
37168
37169 /* Print the current options */
37170
37171 static void
37172 rs6000_function_specific_print (FILE *file, int indent,
37173 struct cl_target_option *ptr)
37174 {
37175 rs6000_print_isa_options (file, indent, "Isa options set",
37176 ptr->x_rs6000_isa_flags);
37177
37178 rs6000_print_isa_options (file, indent, "Isa options explicit",
37179 ptr->x_rs6000_isa_flags_explicit);
37180 }
37181
37182 /* Helper function to print the current isa or misc options on a line. */
37183
37184 static void
37185 rs6000_print_options_internal (FILE *file,
37186 int indent,
37187 const char *string,
37188 HOST_WIDE_INT flags,
37189 const char *prefix,
37190 const struct rs6000_opt_mask *opts,
37191 size_t num_elements)
37192 {
37193 size_t i;
37194 size_t start_column = 0;
37195 size_t cur_column;
37196 size_t max_column = 120;
37197 size_t prefix_len = strlen (prefix);
37198 size_t comma_len = 0;
37199 const char *comma = "";
37200
37201 if (indent)
37202 start_column += fprintf (file, "%*s", indent, "");
37203
37204 if (!flags)
37205 {
37206 fprintf (stderr, DEBUG_FMT_S, string, "<none>");
37207 return;
37208 }
37209
37210 start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);
37211
37212 /* Print the various mask options. */
37213 cur_column = start_column;
37214 for (i = 0; i < num_elements; i++)
37215 {
37216 bool invert = opts[i].invert;
37217 const char *name = opts[i].name;
37218 const char *no_str = "";
37219 HOST_WIDE_INT mask = opts[i].mask;
37220 size_t len = comma_len + prefix_len + strlen (name);
37221
37222 if (!invert)
37223 {
37224 if ((flags & mask) == 0)
37225 {
37226 no_str = "no-";
37227 len += sizeof ("no-") - 1;
37228 }
37229
37230 flags &= ~mask;
37231 }
37232
37233 else
37234 {
37235 if ((flags & mask) != 0)
37236 {
37237 no_str = "no-";
37238 len += sizeof ("no-") - 1;
37239 }
37240
37241 flags |= mask;
37242 }
37243
37244 cur_column += len;
37245 if (cur_column > max_column)
37246 {
37247 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
37248 cur_column = start_column + len;
37249 comma = "";
37250 }
37251
37252 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37253 comma = ", ";
37254 comma_len = sizeof (", ") - 1;
37255 }
37256
37257 fputs ("\n", file);
37258 }
37259
37260 /* Helper function to print the current isa options on a line. */
37261
37262 static void
37263 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37264 HOST_WIDE_INT flags)
37265 {
37266 rs6000_print_options_internal (file, indent, string, flags, "-m",
37267 &rs6000_opt_masks[0],
37268 ARRAY_SIZE (rs6000_opt_masks));
37269 }
37270
37271 static void
37272 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37273 HOST_WIDE_INT flags)
37274 {
37275 rs6000_print_options_internal (file, indent, string, flags, "",
37276 &rs6000_builtin_mask_names[0],
37277 ARRAY_SIZE (rs6000_builtin_mask_names));
37278 }
37279
37280 /* If the user used -mno-vsx, we need turn off all of the implicit ISA 2.06,
37281 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37282 -mupper-regs-df, etc.).
37283
37284 If the user used -mno-power8-vector, we need to turn off all of the implicit
37285 ISA 2.07 and 3.0 options that relate to the vector unit.
37286
37287 If the user used -mno-power9-vector, we need to turn off all of the implicit
37288 ISA 3.0 options that relate to the vector unit.
37289
37290 This function does not handle explicit options such as the user specifying
37291 -mdirect-move. These are handled in rs6000_option_override_internal, and
37292 the appropriate error is given if needed.
37293
37294 We return a mask of all of the implicit options that should not be enabled
37295 by default. */
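/* For example, compiling with -mcpu=power9 -mno-vsx reaches this
   function with OPTION_MASK_VSX explicitly cleared, so each implicitly
   enabled OTHER_VSX_VECTOR_MASKS bit is turned off here and folded
   into the returned ignore mask.  */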
37296
37297 static HOST_WIDE_INT
37298 rs6000_disable_incompatible_switches (void)
37299 {
37300 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37301 size_t i, j;
37302
37303 static const struct {
37304 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37305 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37306 const char *const name; /* name of the switch. */
37307 } flags[] = {
37308 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37309 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37310 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37311 };
37312
37313 for (i = 0; i < ARRAY_SIZE (flags); i++)
37314 {
37315 HOST_WIDE_INT no_flag = flags[i].no_flag;
37316
37317 if ((rs6000_isa_flags & no_flag) == 0
37318 && (rs6000_isa_flags_explicit & no_flag) != 0)
37319 {
37320 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37321 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37322 & rs6000_isa_flags
37323 & dep_flags);
37324
37325 if (set_flags)
37326 {
37327 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37328 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37329 {
37330 set_flags &= ~rs6000_opt_masks[j].mask;
37331 error ("%<-mno-%s%> turns off %<-m%s%>",
37332 flags[i].name,
37333 rs6000_opt_masks[j].name);
37334 }
37335
37336 gcc_assert (!set_flags);
37337 }
37338
37339 rs6000_isa_flags &= ~dep_flags;
37340 ignore_masks |= no_flag | dep_flags;
37341 }
37342 }
37343
37344 return ignore_masks;
37345 }
37346
37347 \f
37348 /* Helper function for printing the function name when debugging. */
37349
37350 static const char *
37351 get_decl_name (tree fn)
37352 {
37353 tree name;
37354
37355 if (!fn)
37356 return "<null>";
37357
37358 name = DECL_NAME (fn);
37359 if (!name)
37360 return "<no-name>";
37361
37362 return IDENTIFIER_POINTER (name);
37363 }
37364
37365 /* Return the clone id of the target we are compiling code for in a target
37366 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37367 the priority list for the target clones (ordered from lowest to
37368 highest). */
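/* E.g. (sketch) a function declared

     __attribute__((target_clones ("cpu=power9,default")))
     long f (long);

   yields a default clone with priority CLONE_DEFAULT (0) and a power9
   clone whose priority is its index in rs6000_clone_map.  */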
37369
37370 static int
37371 rs6000_clone_priority (tree fndecl)
37372 {
37373 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37374 HOST_WIDE_INT isa_masks;
37375 int ret = CLONE_DEFAULT;
37376 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37377 const char *attrs_str = NULL;
37378
37379 attrs = TREE_VALUE (TREE_VALUE (attrs));
37380 attrs_str = TREE_STRING_POINTER (attrs);
37381
37382 /* Return priority zero for default function. Return the ISA needed for the
37383 function if it is not the default. */
37384 if (strcmp (attrs_str, "default") != 0)
37385 {
37386 if (fn_opts == NULL_TREE)
37387 fn_opts = target_option_default_node;
37388
37389 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37390 isa_masks = rs6000_isa_flags;
37391 else
37392 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37393
37394 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37395 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37396 break;
37397 }
37398
37399 if (TARGET_DEBUG_TARGET)
37400 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37401 get_decl_name (fndecl), ret);
37402
37403 return ret;
37404 }
37405
37406 /* This compares the priority of target features in function DECL1 and DECL2.
37407 It returns positive value if DECL1 is higher priority, negative value if
37408 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37409 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37410
37411 static int
37412 rs6000_compare_version_priority (tree decl1, tree decl2)
37413 {
37414 int priority1 = rs6000_clone_priority (decl1);
37415 int priority2 = rs6000_clone_priority (decl2);
37416 int ret = priority1 - priority2;
37417
37418 if (TARGET_DEBUG_TARGET)
37419 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37420 get_decl_name (decl1), get_decl_name (decl2), ret);
37421
37422 return ret;
37423 }
37424
37425 /* Make a dispatcher declaration for the multi-versioned function DECL.
37426 Calls to DECL will be replaced with calls to the dispatcher
37427 by the front-end. Returns the decl of the dispatcher function. */
37428
37429 static tree
37430 rs6000_get_function_versions_dispatcher (void *decl)
37431 {
37432 tree fn = (tree) decl;
37433 struct cgraph_node *node = NULL;
37434 struct cgraph_node *default_node = NULL;
37435 struct cgraph_function_version_info *node_v = NULL;
37436 struct cgraph_function_version_info *first_v = NULL;
37437
37438 tree dispatch_decl = NULL;
37439
37440 struct cgraph_function_version_info *default_version_info = NULL;
37441 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37442
37443 if (TARGET_DEBUG_TARGET)
37444 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37445 get_decl_name (fn));
37446
37447 node = cgraph_node::get (fn);
37448 gcc_assert (node != NULL);
37449
37450 node_v = node->function_version ();
37451 gcc_assert (node_v != NULL);
37452
37453 if (node_v->dispatcher_resolver != NULL)
37454 return node_v->dispatcher_resolver;
37455
37456 /* Find the default version and make it the first node. */
37457 first_v = node_v;
37458 /* Go to the beginning of the chain. */
37459 while (first_v->prev != NULL)
37460 first_v = first_v->prev;
37461
37462 default_version_info = first_v;
37463 while (default_version_info != NULL)
37464 {
37465 const tree decl2 = default_version_info->this_node->decl;
37466 if (is_function_default_version (decl2))
37467 break;
37468 default_version_info = default_version_info->next;
37469 }
37470
37471 /* If there is no default node, just return NULL. */
37472 if (default_version_info == NULL)
37473 return NULL;
37474
37475 /* Make default info the first node. */
37476 if (first_v != default_version_info)
37477 {
37478 default_version_info->prev->next = default_version_info->next;
37479 if (default_version_info->next)
37480 default_version_info->next->prev = default_version_info->prev;
37481 first_v->prev = default_version_info;
37482 default_version_info->next = first_v;
37483 default_version_info->prev = NULL;
37484 }
37485
37486 default_node = default_version_info->this_node;
37487
37488 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37489 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37490 "target_clones attribute needs GLIBC (2.23 and newer) that "
37491 "exports hardware capability bits");
37492 #else
37493
37494 if (targetm.has_ifunc_p ())
37495 {
37496 struct cgraph_function_version_info *it_v = NULL;
37497 struct cgraph_node *dispatcher_node = NULL;
37498 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37499
37500 /* Right now, the dispatching is done via ifunc. */
37501 dispatch_decl = make_dispatcher_decl (default_node->decl);
37502
37503 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37504 gcc_assert (dispatcher_node != NULL);
37505 dispatcher_node->dispatcher_function = 1;
37506 dispatcher_version_info
37507 = dispatcher_node->insert_new_function_version ();
37508 dispatcher_version_info->next = default_version_info;
37509 dispatcher_node->definition = 1;
37510
37511 /* Set the dispatcher for all the versions. */
37512 it_v = default_version_info;
37513 while (it_v != NULL)
37514 {
37515 it_v->dispatcher_resolver = dispatch_decl;
37516 it_v = it_v->next;
37517 }
37518 }
37519 else
37520 {
37521 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37522 "multiversioning needs ifunc which is not supported "
37523 "on this target");
37524 }
37525 #endif
37526
37527 return dispatch_decl;
37528 }
37529
37530 /* Make the resolver function decl to dispatch the versions of a multi-
37531 versioned function, DEFAULT_DECL. Create an empty basic block in the
37532 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37533 function. */
37534
37535 static tree
37536 make_resolver_func (const tree default_decl,
37537 const tree dispatch_decl,
37538 basic_block *empty_bb)
37539 {
37540 /* Make the resolver function static. The resolver function returns
37541 void *. */
37542 tree decl_name = clone_function_name (default_decl, "resolver");
37543 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37544 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37545 tree decl = build_fn_decl (resolver_name, type);
37546 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37547
37548 DECL_NAME (decl) = decl_name;
37549 TREE_USED (decl) = 1;
37550 DECL_ARTIFICIAL (decl) = 1;
37551 DECL_IGNORED_P (decl) = 0;
37552 TREE_PUBLIC (decl) = 0;
37553 DECL_UNINLINABLE (decl) = 1;
37554
37555 /* Resolver is not external, body is generated. */
37556 DECL_EXTERNAL (decl) = 0;
37557 DECL_EXTERNAL (dispatch_decl) = 0;
37558
37559 DECL_CONTEXT (decl) = NULL_TREE;
37560 DECL_INITIAL (decl) = make_node (BLOCK);
37561 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37562
37563 /* Build result decl and add to function_decl. */
37564 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37565 DECL_CONTEXT (t) = decl;
37566 DECL_ARTIFICIAL (t) = 1;
37567 DECL_IGNORED_P (t) = 1;
37568 DECL_RESULT (decl) = t;
37569
37570 gimplify_function_tree (decl);
37571 push_cfun (DECL_STRUCT_FUNCTION (decl));
37572 *empty_bb = init_lowered_empty_function (decl, false,
37573 profile_count::uninitialized ());
37574
37575 cgraph_node::add_new_function (decl, true);
37576 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37577
37578 pop_cfun ();
37579
37580 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37581 DECL_ATTRIBUTES (dispatch_decl)
37582 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37583
37584 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37585
37586 return decl;
37587 }
37588
37589 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37590 return a pointer to VERSION_DECL if we are running on a machine that
37591 supports the index CLONE_ISA hardware architecture bits. This function will
37592 be called during version dispatch to decide which function version to
37593 execute. It returns the basic block at the end, to which more conditions
37594 can be added. */
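/* Each non-default condition built below is conceptually (sketch):

     if (__builtin_cpu_supports ("<clone_isa name>"))
       return (void *) &version_decl;

   with the CLONE_DEFAULT version emitted last, by its caller, as the
   unconditional fallback.  */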
37595
37596 static basic_block
37597 add_condition_to_bb (tree function_decl, tree version_decl,
37598 int clone_isa, basic_block new_bb)
37599 {
37600 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37601
37602 gcc_assert (new_bb != NULL);
37603 gimple_seq gseq = bb_seq (new_bb);
37604
37605
37606 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37607 build_fold_addr_expr (version_decl));
37608 tree result_var = create_tmp_var (ptr_type_node);
37609 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37610 gimple *return_stmt = gimple_build_return (result_var);
37611
37612 if (clone_isa == CLONE_DEFAULT)
37613 {
37614 gimple_seq_add_stmt (&gseq, convert_stmt);
37615 gimple_seq_add_stmt (&gseq, return_stmt);
37616 set_bb_seq (new_bb, gseq);
37617 gimple_set_bb (convert_stmt, new_bb);
37618 gimple_set_bb (return_stmt, new_bb);
37619 pop_cfun ();
37620 return new_bb;
37621 }
37622
37623 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37624 tree cond_var = create_tmp_var (bool_int_type_node);
37625 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37626 const char *arg_str = rs6000_clone_map[clone_isa].name;
37627 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37628 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37629 gimple_call_set_lhs (call_cond_stmt, cond_var);
37630
37631 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37632 gimple_set_bb (call_cond_stmt, new_bb);
37633 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37634
37635 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37636 NULL_TREE, NULL_TREE);
37637 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37638 gimple_set_bb (if_else_stmt, new_bb);
37639 gimple_seq_add_stmt (&gseq, if_else_stmt);
37640
37641 gimple_seq_add_stmt (&gseq, convert_stmt);
37642 gimple_seq_add_stmt (&gseq, return_stmt);
37643 set_bb_seq (new_bb, gseq);
37644
37645 basic_block bb1 = new_bb;
37646 edge e12 = split_block (bb1, if_else_stmt);
37647 basic_block bb2 = e12->dest;
37648 e12->flags &= ~EDGE_FALLTHRU;
37649 e12->flags |= EDGE_TRUE_VALUE;
37650
37651 edge e23 = split_block (bb2, return_stmt);
37652 gimple_set_bb (convert_stmt, bb2);
37653 gimple_set_bb (return_stmt, bb2);
37654
37655 basic_block bb3 = e23->dest;
37656 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37657
37658 remove_edge (e23);
37659 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37660
37661 pop_cfun ();
37662 return bb3;
37663 }
37664
37665 /* This function generates the dispatch function for multi-versioned functions.
37666 DISPATCH_DECL is the function which will contain the dispatch logic.
37667 FNDECLS are the function choices for dispatch, and is a tree chain.
37668 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37669 code is generated. */
37670
37671 static int
37672 dispatch_function_versions (tree dispatch_decl,
37673 void *fndecls_p,
37674 basic_block *empty_bb)
37675 {
37676 int ix;
37677 tree ele;
37678 vec<tree> *fndecls;
37679 tree clones[CLONE_MAX];
37680
37681 if (TARGET_DEBUG_TARGET)
37682 fputs ("dispatch_function_versions, top\n", stderr);
37683
37684 gcc_assert (dispatch_decl != NULL
37685 && fndecls_p != NULL
37686 && empty_bb != NULL);
37687
37688 /* fndecls_p is actually a vector. */
37689 fndecls = static_cast<vec<tree> *> (fndecls_p);
37690
37691 /* At least one more version other than the default. */
37692 gcc_assert (fndecls->length () >= 2);
37693
37694 /* The first version in the vector is the default decl. */
37695 memset ((void *) clones, '\0', sizeof (clones));
37696 clones[CLONE_DEFAULT] = (*fndecls)[0];
37697
37698 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37699 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37700 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37701 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37702 to insert the code here to do the call. */
37703
37704 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37705 {
37706 int priority = rs6000_clone_priority (ele);
37707 if (!clones[priority])
37708 clones[priority] = ele;
37709 }
37710
37711 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37712 if (clones[ix])
37713 {
37714 if (TARGET_DEBUG_TARGET)
37715 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37716 ix, get_decl_name (clones[ix]));
37717
37718 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37719 *empty_bb);
37720 }
37721
37722 return 0;
37723 }
37724
37725 /* Generate the dispatching code body to dispatch multi-versioned function
37726 DECL. The target hook is called to process the "target" attributes and
37727 provide the code to dispatch the right function at run-time. NODE points
37728 to the dispatcher decl whose body will be created. */
37729
37730 static tree
37731 rs6000_generate_version_dispatcher_body (void *node_p)
37732 {
37733 tree resolver;
37734 basic_block empty_bb;
37735 struct cgraph_node *node = (cgraph_node *) node_p;
37736 struct cgraph_function_version_info *ninfo = node->function_version ();
37737
37738 if (ninfo->dispatcher_resolver)
37739 return ninfo->dispatcher_resolver;
37740
37741 /* node is going to be an alias, so remove the finalized bit. */
37742 node->definition = false;
37743
37744 /* The first version in the chain corresponds to the default version. */
37745 ninfo->dispatcher_resolver = resolver
37746 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37747
37748 if (TARGET_DEBUG_TARGET)
37749 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37750 get_decl_name (resolver));
37751
37752 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37753 auto_vec<tree, 2> fn_ver_vec;
37754
37755 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37756 vinfo;
37757 vinfo = vinfo->next)
37758 {
37759 struct cgraph_node *version = vinfo->this_node;
37760 /* Check for virtual functions here again, as by this time it should
37761 have been determined if this function needs a vtable index or
37762 not. This happens for methods in derived classes that override
37763 virtual methods in base classes but are not explicitly marked as
37764 virtual. */
37765 if (DECL_VINDEX (version->decl))
37766 sorry ("Virtual function multiversioning not supported");
37767
37768 fn_ver_vec.safe_push (version->decl);
37769 }
37770
37771 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37772 cgraph_edge::rebuild_edges ();
37773 pop_cfun ();
37774 return resolver;
37775 }
37776
37777 \f
37778 /* Hook to determine if one function can safely inline another. */
37779
37780 static bool
37781 rs6000_can_inline_p (tree caller, tree callee)
37782 {
37783 bool ret = false;
37784 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37785 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37786
37787 /* If callee has no option attributes, then it is ok to inline. */
37788 if (!callee_tree)
37789 ret = true;
37790
37791 /* If caller has no option attributes, but callee does then it is not ok to
37792 inline. */
37793 else if (!caller_tree)
37794 ret = false;
37795
37796 else
37797 {
37798 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37799 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37800
37801 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37802 can inline an altivec function but a non-vsx function can't inline a
37803 vsx function. */
37804 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37805 == callee_opts->x_rs6000_isa_flags)
37806 ret = true;
37807 }
37808
37809 if (TARGET_DEBUG_TARGET)
37810 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37811 get_decl_name (caller), get_decl_name (callee),
37812 (ret ? "can" : "cannot"));
37813
37814 return ret;
37815 }
37816 \f
37817 /* Allocate a stack temp and fixup the address so it meets the particular
37818 memory requirements (either offsettable or REG+REG addressing). */
37819
37820 rtx
37821 rs6000_allocate_stack_temp (machine_mode mode,
37822 bool offsettable_p,
37823 bool reg_reg_p)
37824 {
37825 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37826 rtx addr = XEXP (stack, 0);
37827 int strict_p = reload_completed;
37828
37829 if (!legitimate_indirect_address_p (addr, strict_p))
37830 {
37831 if (offsettable_p
37832 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37833 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37834
37835 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37836 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37837 }
37838
37839 return stack;
37840 }
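/* Typical use (sketch): grab a DFmode stack slot whose address is
   guaranteed valid for reg+offset addressing:

     rtx slot = rs6000_allocate_stack_temp (DFmode, true, false);  */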
37841
37842 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37843 convert to such a form to deal with memory reference instructions
37844 like STFIWX and LDBRX that only take reg+reg addressing. */
37845
37846 rtx
37847 rs6000_force_indexed_or_indirect_mem (rtx x)
37848 {
37849 machine_mode mode = GET_MODE (x);
37850
37851 gcc_assert (MEM_P (x));
37852 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37853 {
37854 rtx addr = XEXP (x, 0);
37855 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37856 {
37857 rtx reg = XEXP (addr, 0);
37858 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37859 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37860 gcc_assert (REG_P (reg));
37861 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37862 addr = reg;
37863 }
37864 else if (GET_CODE (addr) == PRE_MODIFY)
37865 {
37866 rtx reg = XEXP (addr, 0);
37867 rtx expr = XEXP (addr, 1);
37868 gcc_assert (REG_P (reg));
37869 gcc_assert (GET_CODE (expr) == PLUS);
37870 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37871 addr = reg;
37872 }
37873
37874 x = replace_equiv_address (x, force_reg (Pmode, addr));
37875 }
37876
37877 return x;
37878 }
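/* E.g. a (mem (plus r1 16)) operand headed for STFIWX is rewritten
   here as (mem (reg tmp)) with tmp loaded from r1+16, since such
   instructions accept only reg or reg+reg addresses.  */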
37879
37880 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37881
37882 On the RS/6000, all integer constants are acceptable, most won't be valid
37883 for particular insns, though. Only easy FP constants are acceptable. */
37884
37885 static bool
37886 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37887 {
37888 if (TARGET_ELF && tls_referenced_p (x))
37889 return false;
37890
37891 if (CONST_DOUBLE_P (x))
37892 return easy_fp_constant (x, mode);
37893
37894 if (GET_CODE (x) == CONST_VECTOR)
37895 return easy_vector_constant (x, mode);
37896
37897 return true;
37898 }
37899
37900 \f
37901 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37902
37903 static bool
37904 chain_already_loaded (rtx_insn *last)
37905 {
37906 for (; last != NULL; last = PREV_INSN (last))
37907 {
37908 if (NONJUMP_INSN_P (last))
37909 {
37910 rtx patt = PATTERN (last);
37911
37912 if (GET_CODE (patt) == SET)
37913 {
37914 rtx lhs = XEXP (patt, 0);
37915
37916 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37917 return true;
37918 }
37919 }
37920 }
37921 return false;
37922 }
37923
37924 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37925
37926 void
37927 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37928 {
37929 rtx func = func_desc;
37930 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37931 rtx toc_load = NULL_RTX;
37932 rtx toc_restore = NULL_RTX;
37933 rtx func_addr;
37934 rtx abi_reg = NULL_RTX;
37935 rtx call[4];
37936 int n_call;
37937 rtx insn;
37938 bool is_pltseq_longcall;
37939
37940 if (global_tlsarg)
37941 tlsarg = global_tlsarg;
37942
37943 /* Handle longcall attributes. */
37944 is_pltseq_longcall = false;
37945 if ((INTVAL (cookie) & CALL_LONG) != 0
37946 && GET_CODE (func_desc) == SYMBOL_REF)
37947 {
37948 func = rs6000_longcall_ref (func_desc, tlsarg);
37949 if (TARGET_PLTSEQ)
37950 is_pltseq_longcall = true;
37951 }
37952
37953 /* Handle indirect calls. */
37954 if (!SYMBOL_REF_P (func)
37955 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37956 {
37957 /* Save the TOC into its reserved slot before the call,
37958 and prepare to restore it after the call. */
37959 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37960 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37961 gen_rtvec (1, stack_toc_offset),
37962 UNSPEC_TOCSLOT);
37963 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37964
37965 /* Can we optimize saving the TOC in the prologue or
37966 do we need to do it at every call? */
37967 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37968 cfun->machine->save_toc_in_prologue = true;
37969 else
37970 {
37971 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37972 rtx stack_toc_mem = gen_frame_mem (Pmode,
37973 gen_rtx_PLUS (Pmode, stack_ptr,
37974 stack_toc_offset));
37975 MEM_VOLATILE_P (stack_toc_mem) = 1;
37976 if (is_pltseq_longcall)
37977 {
37978 /* Use UNSPEC_PLTSEQ here to emit every instruction in an
37979 inline PLT call sequence with a reloc, enabling the
37980 linker to edit the sequence back to a direct call
37981 when that makes sense. */
37982 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37983 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37984 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37985 }
37986 else
37987 emit_move_insn (stack_toc_mem, toc_reg);
37988 }
37989
37990 if (DEFAULT_ABI == ABI_ELFv2)
37991 {
37992 /* A function pointer in the ELFv2 ABI is just a plain address, but
37993 the ABI requires it to be loaded into r12 before the call. */
37994 func_addr = gen_rtx_REG (Pmode, 12);
37995 if (!rtx_equal_p (func_addr, func))
37996 emit_move_insn (func_addr, func);
37997 abi_reg = func_addr;
37998 /* Indirect calls via CTR are strongly preferred over indirect
37999 calls via LR, so move the address there. Needed to mark
38000 this insn for linker plt sequence editing too. */
38001 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38002 if (is_pltseq_longcall)
38003 {
38004 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
38005 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38006 emit_insn (gen_rtx_SET (func_addr, mark_func));
38007 v = gen_rtvec (2, func_addr, func_desc);
38008 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38009 }
38010 else
38011 emit_move_insn (func_addr, abi_reg);
38012 }
38013 else
38014 {
38015 /* A function pointer under AIX is a pointer to a data area whose
38016 first word contains the actual address of the function, whose
38017 second word contains a pointer to its TOC, and whose third word
38018 contains a value to place in the static chain register (r11).
38019 Note that if we load the static chain, our "trampoline" need
38020 not have any executable code. */
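
	 /* Illustrative sketch only (not a declaration from any system
	    header): the descriptor behaves as if it were defined as

	      struct func_desc
	      {
		void *code;          -- word 0: entry point address
		void *toc;           -- word 1: TOC pointer
		void *static_chain;  -- word 2: environment pointer
	      };

	    so the code below loads word 0 directly, word 1 at offset
	    GET_MODE_SIZE (Pmode) and word 2 at twice that offset.  */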
38021
38022 /* Load up address of the actual function. */
38023 func = force_reg (Pmode, func);
38024 func_addr = gen_reg_rtx (Pmode);
38025 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
38026
38027 /* Indirect calls via CTR are strongly preferred over indirect
38028 calls via LR, so move the address there. */
38029 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
38030 emit_move_insn (ctr_reg, func_addr);
38031 func_addr = ctr_reg;
38032
38033 /* Prepare to load the TOC of the called function. Note that the
38034 TOC load must happen immediately before the actual call so
38035 that unwinding the TOC registers works correctly. See the
38036 comment in frob_update_context. */
38037 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
38038 rtx func_toc_mem = gen_rtx_MEM (Pmode,
38039 gen_rtx_PLUS (Pmode, func,
38040 func_toc_offset));
38041 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
38042
38043 /* If we have a static chain, load it up. But, if the call was
38044 originally direct, the 3rd word has not been written since no
38045 trampoline has been built, so we ought not to load it, lest we
38046 	 overwrite a static chain value.  */
38047 if (!(GET_CODE (func_desc) == SYMBOL_REF
38048 && SYMBOL_REF_FUNCTION_P (func_desc))
38049 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
38050 && !chain_already_loaded (get_current_sequence ()->next->last))
38051 {
38052 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
38053 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
38054 rtx func_sc_mem = gen_rtx_MEM (Pmode,
38055 gen_rtx_PLUS (Pmode, func,
38056 func_sc_offset));
38057 emit_move_insn (sc_reg, func_sc_mem);
38058 abi_reg = sc_reg;
38059 }
38060 }
38061 }
38062 else
38063 {
38064 /* Direct calls use the TOC: for local calls, the callee will
38065 assume the TOC register is set; for non-local calls, the
38066 PLT stub needs the TOC register. */
38067 abi_reg = toc_reg;
38068 func_addr = func;
38069 }
38070
38071 /* Create the call. */
38072 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38073 if (value != NULL_RTX)
38074 call[0] = gen_rtx_SET (value, call[0]);
38075 n_call = 1;
38076
38077 if (toc_load)
38078 call[n_call++] = toc_load;
38079 if (toc_restore)
38080 call[n_call++] = toc_restore;
38081
38082 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38083
38084 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
38085 insn = emit_call_insn (insn);
38086
38087 /* Mention all registers defined by the ABI to hold information
38088 as uses in CALL_INSN_FUNCTION_USAGE. */
38089 if (abi_reg)
38090 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38091 }
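
/* For illustration only: with the above, an ELFv2 indirect call through a
   function pointer held in r9 typically assembles to something like

	std 2,24(1)		(save TOC pointer in its stack slot)
	mr 12,9			(ELFv2 requires the address in r12)
	mtctr 12
	bctrl			(call through CTR, clobbering LR)
	ld 2,24(1)		(restore our TOC pointer)

   This is a sketch: the register numbers and the TOC save slot offset
   depend on the ABI and options in effect.  */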
38092
38093 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
38094
38095 void
38096 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38097 {
38098 rtx call[2];
38099 rtx insn;
38100
38101 gcc_assert (INTVAL (cookie) == 0);
38102
38103 if (global_tlsarg)
38104 tlsarg = global_tlsarg;
38105
38106 /* Create the call. */
38107 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
38108 if (value != NULL_RTX)
38109 call[0] = gen_rtx_SET (value, call[0]);
38110
38111 call[1] = simple_return_rtx;
38112
38113 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
38114 insn = emit_call_insn (insn);
38115
38116 /* Note use of the TOC register. */
38117 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
38118 }
38119
38120 /* Expand code to perform a call under the SysV4 ABI. */
38121
38122 void
38123 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38124 {
38125 rtx func = func_desc;
38126 rtx func_addr;
38127 rtx call[4];
38128 rtx insn;
38129 rtx abi_reg = NULL_RTX;
38130 int n;
38131
38132 if (global_tlsarg)
38133 tlsarg = global_tlsarg;
38134
38135 /* Handle longcall attributes. */
38136 if ((INTVAL (cookie) & CALL_LONG) != 0
38137 && GET_CODE (func_desc) == SYMBOL_REF)
38138 {
38139 func = rs6000_longcall_ref (func_desc, tlsarg);
38140 /* If the longcall was implemented as an inline PLT call using
38141 PLT unspecs then func will be REG:r11. If not, func will be
38142 a pseudo reg. The inline PLT call sequence supports lazy
38143 linking (and longcalls to functions in dlopen'd libraries).
38144 	 The other style of longcall doesn't.  The lazy linking entry
38145 to the dynamic symbol resolver requires r11 be the function
38146 address (as it is for linker generated PLT stubs). Ensure
38147 r11 stays valid to the bctrl by marking r11 used by the call. */
38148 if (TARGET_PLTSEQ)
38149 abi_reg = func;
38150 }
38151
38152 /* Handle indirect calls. */
38153 if (GET_CODE (func) != SYMBOL_REF)
38154 {
38155 func = force_reg (Pmode, func);
38156
38157 /* Indirect calls via CTR are strongly preferred over indirect
38158 calls via LR, so move the address there. That can't be left
38159 to reload because we want to mark every instruction in an
38160 inline PLT call sequence with a reloc, enabling the linker to
38161 edit the sequence back to a direct call when that makes sense. */
38162 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38163 if (abi_reg)
38164 {
38165 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38166 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38167 emit_insn (gen_rtx_SET (func_addr, mark_func));
38168 v = gen_rtvec (2, func_addr, func_desc);
38169 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38170 }
38171 else
38172 emit_move_insn (func_addr, func);
38173 }
38174 else
38175 func_addr = func;
38176
38177 /* Create the call. */
38178 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38179 if (value != NULL_RTX)
38180 call[0] = gen_rtx_SET (value, call[0]);
38181
38182 call[1] = gen_rtx_USE (VOIDmode, cookie);
38183 n = 2;
38184 if (TARGET_SECURE_PLT
38185 && flag_pic
38186 && GET_CODE (func_addr) == SYMBOL_REF
38187 && !SYMBOL_REF_LOCAL_P (func_addr))
38188 call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
38189
38190 call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38191
38192 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
38193 insn = emit_call_insn (insn);
38194 if (abi_reg)
38195 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38196 }
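
/* For illustration only: a SysV -mlongcall to "foo" that does not use the
   inline PLT sequences loads the address and calls through CTR, roughly

	lis 9,foo@ha
	addi 9,9,foo@l
	mtctr 9
	bctrl

   This is a sketch; the register is chosen by the allocator, and the
   inline PLT variant instead tags each insn with relocs as described in
   the comments above.  */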
38197
38198 /* Expand code to perform a sibling call under the SysV4 ABI. */
38199
38200 void
38201 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38202 {
38203 rtx func = func_desc;
38204 rtx func_addr;
38205 rtx call[3];
38206 rtx insn;
38207 rtx abi_reg = NULL_RTX;
38208
38209 if (global_tlsarg)
38210 tlsarg = global_tlsarg;
38211
38212 /* Handle longcall attributes. */
38213 if ((INTVAL (cookie) & CALL_LONG) != 0
38214 && GET_CODE (func_desc) == SYMBOL_REF)
38215 {
38216 func = rs6000_longcall_ref (func_desc, tlsarg);
38217 /* If the longcall was implemented as an inline PLT call using
38218 PLT unspecs then func will be REG:r11. If not, func will be
38219 a pseudo reg. The inline PLT call sequence supports lazy
38220 linking (and longcalls to functions in dlopen'd libraries).
38221 	 The other style of longcall doesn't.  The lazy linking entry
38222 to the dynamic symbol resolver requires r11 be the function
38223 address (as it is for linker generated PLT stubs). Ensure
38224 r11 stays valid to the bctr by marking r11 used by the call. */
38225 if (TARGET_PLTSEQ)
38226 abi_reg = func;
38227 }
38228
38229 /* Handle indirect calls. */
38230 if (GET_CODE (func) != SYMBOL_REF)
38231 {
38232 func = force_reg (Pmode, func);
38233
38234 /* Indirect sibcalls must go via CTR. That can't be left to
38235 reload because we want to mark every instruction in an inline
38236 PLT call sequence with a reloc, enabling the linker to edit
38237 the sequence back to a direct call when that makes sense. */
38238 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38239 if (abi_reg)
38240 {
38241 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38242 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38243 emit_insn (gen_rtx_SET (func_addr, mark_func));
38244 v = gen_rtvec (2, func_addr, func_desc);
38245 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38246 }
38247 else
38248 emit_move_insn (func_addr, func);
38249 }
38250 else
38251 func_addr = func;
38252
38253 /* Create the call. */
38254 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38255 if (value != NULL_RTX)
38256 call[0] = gen_rtx_SET (value, call[0]);
38257
38258 call[1] = gen_rtx_USE (VOIDmode, cookie);
38259 call[2] = simple_return_rtx;
38260
38261 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38262 insn = emit_call_insn (insn);
38263 if (abi_reg)
38264 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38265 }
38266
38267 #if TARGET_MACHO
38268
38269 /* Expand code to perform a call under the Darwin ABI.
38270 Modulo handling of mlongcall, this is much the same as sysv.
38271    If/when the longcall optimisation is removed, we could drop this
38272    code and use the sysv case (taking care to avoid the TLS handling).
38273
38274 We can use this for sibcalls too, if needed. */
38275
38276 void
38277 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
38278 rtx cookie, bool sibcall)
38279 {
38280 rtx func = func_desc;
38281 rtx func_addr;
38282 rtx call[3];
38283 rtx insn;
38284 int cookie_val = INTVAL (cookie);
38285 bool make_island = false;
38286
38287   /* Handle longcall attributes; there are two cases for Darwin:
38288 1) Newer linkers are capable of synthesising any branch islands needed.
38289 2) We need a helper branch island synthesised by the compiler.
38290 The second case has mostly been retired and we don't use it for m64.
38291      In fact, it is just an optimisation; we could simply indirect as sysv
38292      does, but we keep it for backwards compatibility for now.
38293 If we're going to use this, then we need to keep the CALL_LONG bit set,
38294 so that we can pick up the special insn form later. */
38295 if ((cookie_val & CALL_LONG) != 0
38296 && GET_CODE (func_desc) == SYMBOL_REF)
38297 {
38298 if (darwin_emit_branch_islands && TARGET_32BIT)
38299 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
38300 else
38301 {
38302 /* The linker is capable of doing this, but the user explicitly
38303 asked for -mlongcall, so we'll do the 'normal' version. */
38304 func = rs6000_longcall_ref (func_desc, NULL_RTX);
38305 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
38306 }
38307 }
38308
38309 /* Handle indirect calls. */
38310 if (GET_CODE (func) != SYMBOL_REF)
38311 {
38312 func = force_reg (Pmode, func);
38313
38314 /* Indirect calls via CTR are strongly preferred over indirect
38315 calls via LR, and are required for indirect sibcalls, so move
38316 the address there. */
38317 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38318 emit_move_insn (func_addr, func);
38319 }
38320 else
38321 func_addr = func;
38322
38323 /* Create the call. */
38324 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38325 if (value != NULL_RTX)
38326 call[0] = gen_rtx_SET (value, call[0]);
38327
38328 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
38329
38330 if (sibcall)
38331 call[2] = simple_return_rtx;
38332 else
38333 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38334
38335 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38336 insn = emit_call_insn (insn);
38337   /* Now that we have the debug info in the insn, we can set up the branch island
38338 if we're using one. */
38339 if (make_island)
38340 {
38341 tree funname = get_identifier (XSTR (func_desc, 0));
38342
38343 if (no_previous_def (funname))
38344 {
38345 rtx label_rtx = gen_label_rtx ();
38346 char *label_buf, temp_buf[256];
38347 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
38348 CODE_LABEL_NUMBER (label_rtx));
38349 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
38350 tree labelname = get_identifier (label_buf);
38351 add_compiler_branch_island (labelname, funname,
38352 insn_line ((const rtx_insn*)insn));
38353 }
38354 }
38355 }
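
/* For illustration only: the branch island synthesised at the end of
   compilation for "foo" on 32-bit Darwin is roughly

	Lxx:	lis r12,hi16(foo)
		ori r12,r12,lo16(foo)
		mtctr r12
		bctr

   with the call site redirected to Lxx.  This is a sketch with a
   placeholder label, showing the non-PIC form; the PIC variant
   materialises the address relative to the island itself.  */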
38356 #endif
38357
38358 void
38359 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38360 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38361 {
38362 #if TARGET_MACHO
38363 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
38364 #else
38365   gcc_unreachable ();
38366 #endif
38367 }
38368
38369
38370 void
38371 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38372 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38373 {
38374 #if TARGET_MACHO
38375 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
38376 #else
38377   gcc_unreachable ();
38378 #endif
38379 }
38380
38381
38382 /* Return whether we always need to update the saved TOC pointer when we
38383    update the stack pointer.  */
38384
38385 static bool
38386 rs6000_save_toc_in_prologue_p (void)
38387 {
38388 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38389 }
38390
38391 #ifdef HAVE_GAS_HIDDEN
38392 # define USE_HIDDEN_LINKONCE 1
38393 #else
38394 # define USE_HIDDEN_LINKONCE 0
38395 #endif
38396
38397 /* Fills in the label name that should be used for a 476 link stack thunk. */
38398
38399 void
38400 get_ppc476_thunk_name (char name[32])
38401 {
38402 gcc_assert (TARGET_LINK_STACK);
38403
38404 if (USE_HIDDEN_LINKONCE)
38405 sprintf (name, "__ppc476.get_thunk");
38406 else
38407 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38408 }
38409
38410 /* This function emits the simple thunk routine that is used to preserve
38411 the link stack on the 476 cpu. */
38412
38413 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38414 static void
38415 rs6000_code_end (void)
38416 {
38417 char name[32];
38418 tree decl;
38419
38420 if (!TARGET_LINK_STACK)
38421 return;
38422
38423 get_ppc476_thunk_name (name);
38424
38425 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38426 build_function_type_list (void_type_node, NULL_TREE));
38427 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38428 NULL_TREE, void_type_node);
38429 TREE_PUBLIC (decl) = 1;
38430 TREE_STATIC (decl) = 1;
38431
38432 #if RS6000_WEAK
38433 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38434 {
38435 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38436 targetm.asm_out.unique_section (decl, 0);
38437 switch_to_section (get_named_section (decl, NULL, 0));
38438 DECL_WEAK (decl) = 1;
38439 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38440 targetm.asm_out.globalize_label (asm_out_file, name);
38441 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38442 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38443 }
38444 else
38445 #endif
38446 {
38447 switch_to_section (text_section);
38448 ASM_OUTPUT_LABEL (asm_out_file, name);
38449 }
38450
38451 DECL_INITIAL (decl) = make_node (BLOCK);
38452 current_function_decl = decl;
38453 allocate_struct_function (decl, false);
38454 init_function_start (decl);
38455 first_function_block_is_cold = false;
38456 /* Make sure unwind info is emitted for the thunk if needed. */
38457 final_start_function (emit_barrier (), asm_out_file, 1);
38458
38459 fputs ("\tblr\n", asm_out_file);
38460
38461 final_end_function ();
38462 init_insn_lengths ();
38463 free_after_compilation (cfun);
38464 set_cfun (NULL);
38465 current_function_decl = NULL;
38466 }
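
/* For illustration only: the thunk emitted above is simply

	__ppc476.get_thunk:
		blr

   A caller needing its own address pairs a "bl __ppc476.get_thunk" with a
   following mflr, keeping the 476 link stack (its return-address
   predictor) balanced.  This is a sketch of the intent; the callers live
   in the .md patterns guarded by TARGET_LINK_STACK.  */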
38467
38468 /* Add r30 to hard reg set if the prologue sets it up and it is not
38469 pic_offset_table_rtx. */
38470
38471 static void
38472 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38473 {
38474 if (!TARGET_SINGLE_PIC_BASE
38475 && TARGET_TOC
38476 && TARGET_MINIMAL_TOC
38477 && !constant_pool_empty_p ())
38478 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38479 if (cfun->machine->split_stack_argp_used)
38480 add_to_hard_reg_set (&set->set, Pmode, 12);
38481
38482 /* Make sure the hard reg set doesn't include r2, which was possibly added
38483 via PIC_OFFSET_TABLE_REGNUM. */
38484 if (TARGET_TOC)
38485 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38486 }
38487
38488 \f
38489 /* Helper function for rs6000_split_logical to emit a logical instruction after
38490    splitting the operation into single GPR registers.
38491
38492 DEST is the destination register.
38493 OP1 and OP2 are the input source registers.
38494 CODE is the base operation (AND, IOR, XOR, NOT).
38495 MODE is the machine mode.
38496 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38497 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38498 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38499
38500 static void
38501 rs6000_split_logical_inner (rtx dest,
38502 rtx op1,
38503 rtx op2,
38504 enum rtx_code code,
38505 machine_mode mode,
38506 bool complement_final_p,
38507 bool complement_op1_p,
38508 bool complement_op2_p)
38509 {
38510 rtx bool_rtx;
38511
38512 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38513 if (op2 && CONST_INT_P (op2)
38514 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38515 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38516 {
38517 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38518 HOST_WIDE_INT value = INTVAL (op2) & mask;
38519
38520 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38521 if (code == AND)
38522 {
38523 if (value == 0)
38524 {
38525 emit_insn (gen_rtx_SET (dest, const0_rtx));
38526 return;
38527 }
38528
38529 else if (value == mask)
38530 {
38531 if (!rtx_equal_p (dest, op1))
38532 emit_insn (gen_rtx_SET (dest, op1));
38533 return;
38534 }
38535 }
38536
38537 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38538 	 into separate ORI/ORIS or XORI/XORIS instructions.  */
38539 else if (code == IOR || code == XOR)
38540 {
38541 if (value == 0)
38542 {
38543 if (!rtx_equal_p (dest, op1))
38544 emit_insn (gen_rtx_SET (dest, op1));
38545 return;
38546 }
38547 }
38548 }
38549
38550 if (code == AND && mode == SImode
38551 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38552 {
38553 emit_insn (gen_andsi3 (dest, op1, op2));
38554 return;
38555 }
38556
38557 if (complement_op1_p)
38558 op1 = gen_rtx_NOT (mode, op1);
38559
38560 if (complement_op2_p)
38561 op2 = gen_rtx_NOT (mode, op2);
38562
38563 /* For canonical RTL, if only one arm is inverted it is the first. */
38564 if (!complement_op1_p && complement_op2_p)
38565 std::swap (op1, op2);
38566
38567 bool_rtx = ((code == NOT)
38568 ? gen_rtx_NOT (mode, op1)
38569 : gen_rtx_fmt_ee (code, mode, op1, op2));
38570
38571 if (complement_final_p)
38572 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38573
38574 emit_insn (gen_rtx_SET (dest, bool_rtx));
38575 }
38576
38577 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38578 operations are split immediately during RTL generation to allow for more
38579 optimizations of the AND/IOR/XOR.
38580
38581 OPERANDS is an array containing the destination and two input operands.
38582 CODE is the base operation (AND, IOR, XOR, NOT).
38584    If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38585    If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38586    If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */
38589
38590 static void
38591 rs6000_split_logical_di (rtx operands[3],
38592 enum rtx_code code,
38593 bool complement_final_p,
38594 bool complement_op1_p,
38595 bool complement_op2_p)
38596 {
38597 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38598 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38599 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38600 enum hi_lo { hi = 0, lo = 1 };
38601 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38602 size_t i;
38603
38604 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38605 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38606 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38607 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38608
38609 if (code == NOT)
38610 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38611 else
38612 {
38613 if (!CONST_INT_P (operands[2]))
38614 {
38615 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38616 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38617 }
38618 else
38619 {
38620 HOST_WIDE_INT value = INTVAL (operands[2]);
38621 HOST_WIDE_INT value_hi_lo[2];
38622
38623 gcc_assert (!complement_final_p);
38624 gcc_assert (!complement_op1_p);
38625 gcc_assert (!complement_op2_p);
38626
38627 value_hi_lo[hi] = value >> 32;
38628 value_hi_lo[lo] = value & lower_32bits;
38629
38630 for (i = 0; i < 2; i++)
38631 {
38632 HOST_WIDE_INT sub_value = value_hi_lo[i];
38633
38634 if (sub_value & sign_bit)
38635 sub_value |= upper_32bits;
38636
38637 op2_hi_lo[i] = GEN_INT (sub_value);
38638
38639 /* If this is an AND instruction, check to see if we need to load
38640 the value in a register. */
38641 if (code == AND && sub_value != -1 && sub_value != 0
38642 && !and_operand (op2_hi_lo[i], SImode))
38643 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38644 }
38645 }
38646 }
38647
38648 for (i = 0; i < 2; i++)
38649 {
38650 /* Split large IOR/XOR operations. */
38651 if ((code == IOR || code == XOR)
38652 && CONST_INT_P (op2_hi_lo[i])
38653 && !complement_final_p
38654 && !complement_op1_p
38655 && !complement_op2_p
38656 && !logical_const_operand (op2_hi_lo[i], SImode))
38657 {
38658 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38659 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38660 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38661 rtx tmp = gen_reg_rtx (SImode);
38662
38663 /* Make sure the constant is sign extended. */
38664 if ((hi_16bits & sign_bit) != 0)
38665 hi_16bits |= upper_32bits;
38666
38667 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38668 code, SImode, false, false, false);
38669
38670 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38671 code, SImode, false, false, false);
38672 }
38673 else
38674 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38675 code, SImode, complement_final_p,
38676 complement_op1_p, complement_op2_p);
38677 }
38678
38679 return;
38680 }
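
/* For example (a sketch only): on a 32-bit target an insn such as

	(set (reg:DI 3) (ior:DI (reg:DI 3) (const_int 0x12345678)))

   is split into two SImode halves.  The half ORed with 0 becomes a simple
   move (here a no-op), while 0x12345678 is not a 16-bit logical constant
   and is further split into an ORIS/ORI pair along the lines of

	oris 4,4,0x1234
	ori 4,4,0x5678

   assuming the DImode value lives in the r3/r4 pair.  */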
38681
38682 /* Split the insns that make up boolean operations operating on multiple GPR
38683 registers. The boolean MD patterns ensure that the inputs either are
38684 exactly the same as the output registers, or there is no overlap.
38685
38686 OPERANDS is an array containing the destination and two input operands.
38687 CODE is the base operation (AND, IOR, XOR, NOT).
38688 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38689 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38690 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38691
38692 void
38693 rs6000_split_logical (rtx operands[3],
38694 enum rtx_code code,
38695 bool complement_final_p,
38696 bool complement_op1_p,
38697 bool complement_op2_p)
38698 {
38699 machine_mode mode = GET_MODE (operands[0]);
38700 machine_mode sub_mode;
38701 rtx op0, op1, op2;
38702 int sub_size, regno0, regno1, nregs, i;
38703
38704 /* If this is DImode, use the specialized version that can run before
38705 register allocation. */
38706 if (mode == DImode && !TARGET_POWERPC64)
38707 {
38708 rs6000_split_logical_di (operands, code, complement_final_p,
38709 complement_op1_p, complement_op2_p);
38710 return;
38711 }
38712
38713 op0 = operands[0];
38714 op1 = operands[1];
38715 op2 = (code == NOT) ? NULL_RTX : operands[2];
38716 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38717 sub_size = GET_MODE_SIZE (sub_mode);
38718 regno0 = REGNO (op0);
38719 regno1 = REGNO (op1);
38720
38721 gcc_assert (reload_completed);
38722 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38723 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38724
38725 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38726 gcc_assert (nregs > 1);
38727
38728 if (op2 && REG_P (op2))
38729 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38730
38731 for (i = 0; i < nregs; i++)
38732 {
38733 int offset = i * sub_size;
38734 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38735 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38736 rtx sub_op2 = ((code == NOT)
38737 ? NULL_RTX
38738 : simplify_subreg (sub_mode, op2, mode, offset));
38739
38740 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38741 complement_final_p, complement_op1_p,
38742 complement_op2_p);
38743 }
38744
38745 return;
38746 }
38747
38748 \f
38749 /* Return true if the peephole2 pass can combine an addis instruction and
38750    a load with an offset into a single load that can be fused together
38751    on a power8.  */
38752
38753 bool
38754 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38755 rtx addis_value, /* addis value. */
38756 rtx target, /* target register that is loaded. */
38757 rtx mem) /* bottom part of the memory addr. */
38758 {
38759 rtx addr;
38760 rtx base_reg;
38761
38762 /* Validate arguments. */
38763 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38764 return false;
38765
38766 if (!base_reg_operand (target, GET_MODE (target)))
38767 return false;
38768
38769 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38770 return false;
38771
38772 /* Allow sign/zero extension. */
38773 if (GET_CODE (mem) == ZERO_EXTEND
38774 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38775 mem = XEXP (mem, 0);
38776
38777 if (!MEM_P (mem))
38778 return false;
38779
38780 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38781 return false;
38782
38783 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38784 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38785 return false;
38786
38787 /* Validate that the register used to load the high value is either the
38788 register being loaded, or we can safely replace its use.
38789
38790 This function is only called from the peephole2 pass and we assume that
38791 there are 2 instructions in the peephole (addis and load), so we want to
38792 check if the target register was not used in the memory address and the
38793 register to hold the addis result is dead after the peephole. */
38794 if (REGNO (addis_reg) != REGNO (target))
38795 {
38796 if (reg_mentioned_p (target, mem))
38797 return false;
38798
38799 if (!peep2_reg_dead_p (2, addis_reg))
38800 return false;
38801
38802 /* If the target register being loaded is the stack pointer, we must
38803 avoid loading any other value into it, even temporarily. */
38804 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38805 return false;
38806 }
38807
38808 base_reg = XEXP (addr, 0);
38809 return REGNO (addis_reg) == REGNO (base_reg);
38810 }
38811
38812 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38813 sequence. We adjust the addis register to use the target register. If the
38814    load sign extends, we adjust the code to do a zero extending load followed
38815    by an explicit sign extension, since the fusion only covers zero extending
38816    loads.
38817
38818 The operands are:
38819 operands[0] register set with addis (to be replaced with target)
38820 operands[1] value set via addis
38821 operands[2] target register being loaded
38822 operands[3] D-form memory reference using operands[0]. */
38823
38824 void
38825 expand_fusion_gpr_load (rtx *operands)
38826 {
38827 rtx addis_value = operands[1];
38828 rtx target = operands[2];
38829 rtx orig_mem = operands[3];
38830 rtx new_addr, new_mem, orig_addr, offset;
38831 enum rtx_code plus_or_lo_sum;
38832 machine_mode target_mode = GET_MODE (target);
38833 machine_mode extend_mode = target_mode;
38834 machine_mode ptr_mode = Pmode;
38835 enum rtx_code extend = UNKNOWN;
38836
38837 if (GET_CODE (orig_mem) == ZERO_EXTEND
38838 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38839 {
38840 extend = GET_CODE (orig_mem);
38841 orig_mem = XEXP (orig_mem, 0);
38842 target_mode = GET_MODE (orig_mem);
38843 }
38844
38845 gcc_assert (MEM_P (orig_mem));
38846
38847 orig_addr = XEXP (orig_mem, 0);
38848 plus_or_lo_sum = GET_CODE (orig_addr);
38849 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38850
38851 offset = XEXP (orig_addr, 1);
38852 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38853 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38854
38855 if (extend != UNKNOWN)
38856 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38857
38858 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38859 UNSPEC_FUSION_GPR);
38860 emit_insn (gen_rtx_SET (target, new_mem));
38861
38862 if (extend == SIGN_EXTEND)
38863 {
38864 int sub_off = ((BYTES_BIG_ENDIAN)
38865 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38866 : 0);
38867 rtx sign_reg
38868 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38869
38870 emit_insn (gen_rtx_SET (target,
38871 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38872 }
38873
38874 return;
38875 }
38876
38877 /* Emit the addis instruction that will be part of a fused instruction
38878 sequence. */
38879
38880 void
38881 emit_fusion_addis (rtx target, rtx addis_value)
38882 {
38883 rtx fuse_ops[10];
38884 const char *addis_str = NULL;
38885
38886 /* Emit the addis instruction. */
38887 fuse_ops[0] = target;
38888 if (satisfies_constraint_L (addis_value))
38889 {
38890 fuse_ops[1] = addis_value;
38891 addis_str = "lis %0,%v1";
38892 }
38893
38894 else if (GET_CODE (addis_value) == PLUS)
38895 {
38896 rtx op0 = XEXP (addis_value, 0);
38897 rtx op1 = XEXP (addis_value, 1);
38898
38899 if (REG_P (op0) && CONST_INT_P (op1)
38900 && satisfies_constraint_L (op1))
38901 {
38902 fuse_ops[1] = op0;
38903 fuse_ops[2] = op1;
38904 addis_str = "addis %0,%1,%v2";
38905 }
38906 }
38907
38908 else if (GET_CODE (addis_value) == HIGH)
38909 {
38910 rtx value = XEXP (addis_value, 0);
38911 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38912 {
38913 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38914 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38915 if (TARGET_ELF)
38916 addis_str = "addis %0,%2,%1@toc@ha";
38917
38918 else if (TARGET_XCOFF)
38919 addis_str = "addis %0,%1@u(%2)";
38920
38921 else
38922 gcc_unreachable ();
38923 }
38924
38925 else if (GET_CODE (value) == PLUS)
38926 {
38927 rtx op0 = XEXP (value, 0);
38928 rtx op1 = XEXP (value, 1);
38929
38930 if (GET_CODE (op0) == UNSPEC
38931 && XINT (op0, 1) == UNSPEC_TOCREL
38932 && CONST_INT_P (op1))
38933 {
38934 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38935 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38936 fuse_ops[3] = op1;
38937 if (TARGET_ELF)
38938 addis_str = "addis %0,%2,%1+%3@toc@ha";
38939
38940 else if (TARGET_XCOFF)
38941 addis_str = "addis %0,%1+%3@u(%2)";
38942
38943 else
38944 gcc_unreachable ();
38945 }
38946 }
38947
38948 else if (satisfies_constraint_L (value))
38949 {
38950 fuse_ops[1] = value;
38951 addis_str = "lis %0,%v1";
38952 }
38953
38954 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38955 {
38956 fuse_ops[1] = value;
38957 addis_str = "lis %0,%1@ha";
38958 }
38959 }
38960
38961 if (!addis_str)
38962 fatal_insn ("Could not generate addis value for fusion", addis_value);
38963
38964 output_asm_insn (addis_str, fuse_ops);
38965 }
38966
38967 /* Emit a D-form load or store instruction that is the second instruction
38968 of a fusion sequence. */
38969
38970 static void
38971 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38972 {
38973 rtx fuse_ops[10];
38974 char insn_template[80];
38975
38976 fuse_ops[0] = load_reg;
38977 fuse_ops[1] = addis_reg;
38978
38979 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38980 {
38981 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38982 fuse_ops[2] = offset;
38983 output_asm_insn (insn_template, fuse_ops);
38984 }
38985
38986 else if (GET_CODE (offset) == UNSPEC
38987 && XINT (offset, 1) == UNSPEC_TOCREL)
38988 {
38989 if (TARGET_ELF)
38990 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38991
38992 else if (TARGET_XCOFF)
38993 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38994
38995 else
38996 gcc_unreachable ();
38997
38998 fuse_ops[2] = XVECEXP (offset, 0, 0);
38999 output_asm_insn (insn_template, fuse_ops);
39000 }
39001
39002 else if (GET_CODE (offset) == PLUS
39003 && GET_CODE (XEXP (offset, 0)) == UNSPEC
39004 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
39005 && CONST_INT_P (XEXP (offset, 1)))
39006 {
39007 rtx tocrel_unspec = XEXP (offset, 0);
39008 if (TARGET_ELF)
39009 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
39010
39011 else if (TARGET_XCOFF)
39012 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
39013
39014 else
39015 gcc_unreachable ();
39016
39017 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
39018 fuse_ops[3] = XEXP (offset, 1);
39019 output_asm_insn (insn_template, fuse_ops);
39020 }
39021
39022 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
39023 {
39024 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
39025
39026 fuse_ops[2] = offset;
39027 output_asm_insn (insn_template, fuse_ops);
39028 }
39029
39030 else
39031 fatal_insn ("Unable to generate load/store offset for fusion", offset);
39032
39033 return;
39034 }
39035
39036 /* Given an address, convert it into the addis and load offset parts. Addresses
39037 created during the peephole2 process look like:
39038 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
39039 (unspec [(...)] UNSPEC_TOCREL)) */
39040
39041 static void
39042 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
39043 {
39044 rtx hi, lo;
39045
39046 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
39047 {
39048 hi = XEXP (addr, 0);
39049 lo = XEXP (addr, 1);
39050 }
39051 else
39052 gcc_unreachable ();
39053
39054 *p_hi = hi;
39055 *p_lo = lo;
39056 }
39057
39058 /* Return a string to fuse an addis instruction with a gpr load into the
39059    same register that the addis instruction set up.  The address used
39060 is the logical address that was formed during peephole2:
39061 (lo_sum (high) (low-part))
39062
39063 The code is complicated, so we call output_asm_insn directly, and just
39064 return "". */
39065
39066 const char *
39067 emit_fusion_gpr_load (rtx target, rtx mem)
39068 {
39069 rtx addis_value;
39070 rtx addr;
39071 rtx load_offset;
39072 const char *load_str = NULL;
39073 machine_mode mode;
39074
39075 if (GET_CODE (mem) == ZERO_EXTEND)
39076 mem = XEXP (mem, 0);
39077
39078 gcc_assert (REG_P (target) && MEM_P (mem));
39079
39080 addr = XEXP (mem, 0);
39081 fusion_split_address (addr, &addis_value, &load_offset);
39082
39083 /* Now emit the load instruction to the same register. */
39084 mode = GET_MODE (mem);
39085 switch (mode)
39086 {
39087 case E_QImode:
39088 load_str = "lbz";
39089 break;
39090
39091 case E_HImode:
39092 load_str = "lhz";
39093 break;
39094
39095 case E_SImode:
39096 case E_SFmode:
39097 load_str = "lwz";
39098 break;
39099
39100 case E_DImode:
39101 case E_DFmode:
39102 gcc_assert (TARGET_POWERPC64);
39103 load_str = "ld";
39104 break;
39105
39106 default:
39107 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
39108 }
39109
39110 /* Emit the addis instruction. */
39111 emit_fusion_addis (target, addis_value);
39112
39113 /* Emit the D-form load instruction. */
39114 emit_fusion_load (target, target, load_offset, load_str);
39115
39116 return "";
39117 }
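
/* For illustration: for a TOC-relative SImode load, the routines above
   emit a fused pair along the lines of

	addis 9,2,var@toc@ha
	lwz 9,var@toc@l(9)

   which a power8 can fuse because the load's base register is exactly
   the register set by the immediately preceding addis.  This is a
   sketch; "var" and the register number are placeholders.  */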
39118 \f
39119
39120 #ifdef RS6000_GLIBC_ATOMIC_FENV
39121 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
39122 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
39123 #endif
39124
39125 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
39126
39127 static void
39128 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
39129 {
39130 if (!TARGET_HARD_FLOAT)
39131 {
39132 #ifdef RS6000_GLIBC_ATOMIC_FENV
39133 if (atomic_hold_decl == NULL_TREE)
39134 {
39135 atomic_hold_decl
39136 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39137 get_identifier ("__atomic_feholdexcept"),
39138 build_function_type_list (void_type_node,
39139 double_ptr_type_node,
39140 NULL_TREE));
39141 TREE_PUBLIC (atomic_hold_decl) = 1;
39142 DECL_EXTERNAL (atomic_hold_decl) = 1;
39143 }
39144
39145 if (atomic_clear_decl == NULL_TREE)
39146 {
39147 atomic_clear_decl
39148 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39149 get_identifier ("__atomic_feclearexcept"),
39150 build_function_type_list (void_type_node,
39151 NULL_TREE));
39152 TREE_PUBLIC (atomic_clear_decl) = 1;
39153 DECL_EXTERNAL (atomic_clear_decl) = 1;
39154 }
39155
39156 tree const_double = build_qualified_type (double_type_node,
39157 TYPE_QUAL_CONST);
39158 tree const_double_ptr = build_pointer_type (const_double);
39159 if (atomic_update_decl == NULL_TREE)
39160 {
39161 atomic_update_decl
39162 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39163 get_identifier ("__atomic_feupdateenv"),
39164 build_function_type_list (void_type_node,
39165 const_double_ptr,
39166 NULL_TREE));
39167 TREE_PUBLIC (atomic_update_decl) = 1;
39168 DECL_EXTERNAL (atomic_update_decl) = 1;
39169 }
39170
39171 tree fenv_var = create_tmp_var_raw (double_type_node);
39172 TREE_ADDRESSABLE (fenv_var) = 1;
39173 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
39174
39175 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
39176 *clear = build_call_expr (atomic_clear_decl, 0);
39177 *update = build_call_expr (atomic_update_decl, 1,
39178 fold_convert (const_double_ptr, fenv_addr));
39179 #endif
39180 return;
39181 }
39182
39183 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
39184 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
39185 tree call_mffs = build_call_expr (mffs, 0);
39186
39187 /* Generates the equivalent of feholdexcept (&fenv_var)
39188
39189 *fenv_var = __builtin_mffs ();
39190 double fenv_hold;
39191 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
39192 __builtin_mtfsf (0xff, fenv_hold); */
39193
39194 /* Mask to clear everything except for the rounding modes and non-IEEE
39195 arithmetic flag. */
39196 const unsigned HOST_WIDE_INT hold_exception_mask =
39197 HOST_WIDE_INT_C (0xffffffff00000007);
39198
39199 tree fenv_var = create_tmp_var_raw (double_type_node);
39200
39201 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
39202
39203 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
39204 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39205 build_int_cst (uint64_type_node,
39206 hold_exception_mask));
39207
39208 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39209 fenv_llu_and);
39210
39211 tree hold_mtfsf = build_call_expr (mtfsf, 2,
39212 build_int_cst (unsigned_type_node, 0xff),
39213 fenv_hold_mtfsf);
39214
39215 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
39216
39217 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
39218
39219 double fenv_clear = __builtin_mffs ();
39220      *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
39221 __builtin_mtfsf (0xff, fenv_clear); */
39222
39223   /* Mask that clears the entire lower 32 bits of the mffs result,
39224      i.e. the whole FPSCR image.  */
39225 const unsigned HOST_WIDE_INT clear_exception_mask =
39226 HOST_WIDE_INT_C (0xffffffff00000000);
39227
39228 tree fenv_clear = create_tmp_var_raw (double_type_node);
39229
39230 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39231
39232 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39233 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39234 fenv_clean_llu,
39235 build_int_cst (uint64_type_node,
39236 clear_exception_mask));
39237
39238 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39239 fenv_clear_llu_and);
39240
39241 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39242 build_int_cst (unsigned_type_node, 0xff),
39243 fenv_clear_mtfsf);
39244
39245 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39246
39247 /* Generates the equivalent of feupdateenv (&fenv_var)
39248
39249 double old_fenv = __builtin_mffs ();
39250 double fenv_update;
39251      *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39252        (*(uint64_t*)fenv_var & 0x1ff80fff);
39253 __builtin_mtfsf (0xff, fenv_update); */
39254
39255 const unsigned HOST_WIDE_INT update_exception_mask =
39256 HOST_WIDE_INT_C (0xffffffff1fffff00);
39257 const unsigned HOST_WIDE_INT new_exception_mask =
39258 HOST_WIDE_INT_C (0x1ff80fff);
39259
39260 tree old_fenv = create_tmp_var_raw (double_type_node);
39261 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39262
39263 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39264 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39265 build_int_cst (uint64_type_node,
39266 update_exception_mask));
39267
39268 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39269 build_int_cst (uint64_type_node,
39270 new_exception_mask));
39271
39272 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39273 old_llu_and, new_llu_and);
39274
39275 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39276 new_llu_mask);
39277
39278 tree update_mtfsf = build_call_expr (mtfsf, 2,
39279 build_int_cst (unsigned_type_node, 0xff),
39280 fenv_update_mtfsf);
39281
39282 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39283 }
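
/* The hook above is used when expanding C11 atomic compound assignment on
   floating point types, e.g. (illustrative)

	_Atomic double d;
	d += 1.0;

   where HOLD is emitted before the compare-and-swap loop, CLEAR when an
   iteration has to be retried, and UPDATE once the store has succeeded,
   so the accumulated exceptions become visible exactly once.  */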
39284
39285 void
39286 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39287 {
39288 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39289
39290 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39291 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39292
39293   /* The layout of the destination of the vmrgew instruction is:
39294      rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39295      Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
39296      the vmrgew instruction will be correct.  */
39297 if (BYTES_BIG_ENDIAN)
39298 {
39299 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39300 GEN_INT (0)));
39301 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39302 GEN_INT (3)));
39303 }
39304 else
39305 {
39306 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39307 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39308 }
39309
39310 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39311 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39312
39313 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39314 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39315
39316 if (BYTES_BIG_ENDIAN)
39317 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39318 else
39319 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39320 }
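
/* Net effect of the above (illustrative): for V2DF inputs A and B the
   result is the V4SF value

	{ (float) A[0], (float) A[1], (float) B[0], (float) B[1] }

   with the xxpermdi/vmrgew shuffles compensating for where xvcvdpsp
   deposits its results within each vector half.  */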
39321
39322 void
39323 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39324 {
39325 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39326
39327 rtx_tmp0 = gen_reg_rtx (V2DImode);
39328 rtx_tmp1 = gen_reg_rtx (V2DImode);
39329
39330   /* The layout of the destination of the vmrgew instruction is:
39331      rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39332      Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
39333      the vmrgew instruction will be correct.  */
39334 if (BYTES_BIG_ENDIAN)
39335 {
39336 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39337 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39338 }
39339 else
39340 {
39341 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39342 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39343 }
39344
39345 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39346 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39347
39348 if (signed_convert)
39349 {
39350 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39351 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39352 }
39353 else
39354 {
39355 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39356 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39357 }
39358
39359 if (BYTES_BIG_ENDIAN)
39360 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39361 else
39362 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39363 }
39364
39365 void
39366 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39367 rtx src2)
39368 {
39369 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39370
39371 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39372 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39373
39374 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39375 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39376
39377 rtx_tmp2 = gen_reg_rtx (V4SImode);
39378 rtx_tmp3 = gen_reg_rtx (V4SImode);
39379
39380 if (signed_convert)
39381 {
39382 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39383 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39384 }
39385 else
39386 {
39387 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39388 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39389 }
39390
39391 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39392 }
39393
39394 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39395
39396 static bool
39397 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39398 optimization_type opt_type)
39399 {
39400 switch (op)
39401 {
39402 case rsqrt_optab:
39403 return (opt_type == OPTIMIZE_FOR_SPEED
39404 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39405
39406 default:
39407 return true;
39408 }
39409 }
39410
39411 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39412
39413 static HOST_WIDE_INT
39414 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39415 {
39416 if (TREE_CODE (exp) == STRING_CST
39417 && (STRICT_ALIGNMENT || !optimize_size))
39418 return MAX (align, BITS_PER_WORD);
39419 return align;
39420 }
39421
39422 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39423
39424 static HOST_WIDE_INT
39425 rs6000_starting_frame_offset (void)
39426 {
39427 if (FRAME_GROWS_DOWNWARD)
39428 return 0;
39429 return RS6000_STARTING_FRAME_OFFSET;
39430 }
39431 \f
39432
39433 /* Create an alias for a mangled name where we have changed the mangling (in
39434 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39435 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
39436
39437 #if TARGET_ELF && RS6000_WEAK
39438 static void
39439 rs6000_globalize_decl_name (FILE * stream, tree decl)
39440 {
39441 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39442
39443 targetm.asm_out.globalize_label (stream, name);
39444
39445 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39446 {
39447 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39448 const char *old_name;
39449
39450 ieee128_mangling_gcc_8_1 = true;
39451 lang_hooks.set_decl_assembler_name (decl);
39452 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39453 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39454 ieee128_mangling_gcc_8_1 = false;
39455
39456 if (strcmp (name, old_name) != 0)
39457 {
39458 fprintf (stream, "\t.weak %s\n", old_name);
39459 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39460 }
39461 }
39462 }
39463 #endif
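
/* For example (the mangled names are illustrative): for a function "foo"
   taking an __ieee128 argument, current GCC emits _Z3foou9__ieee128, and
   the code above additionally emits

	.weak _Z3fooU10__float128
	.set _Z3fooU10__float128,_Z3foou9__ieee128

   so that objects built against the GCC 8.1 mangling continue to link.  */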
39464
39465 \f
39466 /* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
39467 function names from <foo>l to <foo>f128 if the default long double type is
39468 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39469 include file switches the names on systems that support long double as IEEE
39470 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39471 In the future, glibc will export names like __ieee128_sinf128 and we can
39472 switch to using those instead of using sinf128, which pollutes the user's
39473 namespace.
39474
39475 This will switch the names for Fortran math functions as well (which doesn't
39476 use math.h). However, Fortran needs other changes to the compiler and
39477 library before you can switch the real*16 type at compile time.
39478
39479 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39480 only do this if the default is that long double is IBM extended double, and
39481 the user asked for IEEE 128-bit. */
39482
39483 static tree
39484 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39485 {
39486 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
39487       && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39488 {
39489 size_t len = IDENTIFIER_LENGTH (id);
39490 const char *name = IDENTIFIER_POINTER (id);
39491
39492 if (name[len - 1] == 'l')
39493 {
39494 bool uses_ieee128_p = false;
39495 tree type = TREE_TYPE (decl);
39496 machine_mode ret_mode = TYPE_MODE (type);
39497
39498 	  /* See if the function returns an IEEE 128-bit floating point type or
39499 complex type. */
39500 if (ret_mode == TFmode || ret_mode == TCmode)
39501 uses_ieee128_p = true;
39502 else
39503 {
39504 function_args_iterator args_iter;
39505 tree arg;
39506
39507 	      /* See if the function passes an IEEE 128-bit floating point type
39508 or complex type. */
39509 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39510 {
39511 machine_mode arg_mode = TYPE_MODE (arg);
39512 if (arg_mode == TFmode || arg_mode == TCmode)
39513 {
39514 uses_ieee128_p = true;
39515 break;
39516 }
39517 }
39518 }
39519
39520 /* If we passed or returned an IEEE 128-bit floating point type,
39521 change the name. */
39522 if (uses_ieee128_p)
39523 {
39524 char *name2 = (char *) alloca (len + 4);
39525 memcpy (name2, name, len - 1);
39526 strcpy (name2 + len - 1, "f128");
39527 id = get_identifier (name2);
39528 }
39529 }
39530 }
39531
39532 return id;
39533 }
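
/* For example: with -mabi=ieeelongdouble on a target whose default long
   double is IBM extended double, a call to __builtin_sinl is renamed by
   the code above from "sinl" to "sinf128", the trailing "l" being
   replaced with "f128".  (Illustrative; any <foo>l built-in that passes
   or returns TFmode is renamed the same way.)  */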
39534
39535 \f
39536 struct gcc_target targetm = TARGET_INITIALIZER;
39537
39538 #include "gt-rs6000.h"