rs6000 - Fix PR target/88343
[gcc.git] / gcc / config / rs6000 / rs6000.c
/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;         /* stack info won't change from here on */
  int first_gp_reg_save;        /* first callee saved GP register used */
  int first_fp_reg_save;        /* first callee saved FP register used */
  int first_altivec_reg_save;   /* first callee saved AltiVec register used */
  int lr_save_p;                /* true if the link reg needs to be saved */
  int cr_save_p;                /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;     /* mask of vec registers to save */
  int push_p;                   /* true if we need to allocate stack space */
  int calls_p;                  /* true if the function makes any calls */
  int world_save_p;             /* true if we're saving *everything*:
                                   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;          /* which ABI to use */
  int gp_save_offset;           /* offset to save GP regs from initial SP */
  int fp_save_offset;           /* offset to save FP regs from initial SP */
  int altivec_save_offset;      /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;           /* offset to save LR from initial SP */
  int cr_save_offset;           /* offset to save CR from initial SP */
  int vrsave_save_offset;       /* offset to save VRSAVE from initial SP */
  int varargs_save_offset;      /* offset to save the varargs registers */
  int ehrd_offset;              /* offset to EH return data */
  int ehcr_offset;              /* offset to EH CR field data */
  int reg_size;                 /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;      /* variable save area size */
  int parm_size;                /* outgoing parameter size */
  int save_size;                /* save area size */
  int fixed_size;               /* fixed size of stack frame */
  int gp_size;                  /* size of saved GP registers */
  int fp_size;                  /* size of saved FP registers */
  int altivec_size;             /* size of saved AltiVec registers */
  int cr_size;                  /* size to hold CR if not in fixed area */
  int vrsave_size;              /* size to hold VRSAVE */
  int altivec_padding_size;     /* size of altivec alignment padding */
  HOST_WIDE_INT total_size;     /* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;
/* Label number of the label created for -mrelocatable, which we call
   to obtain the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use a variant of the AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;
/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV       = 0x001,   /* Use divide estimate */
  RECIP_DF_DIV       = 0x002,
  RECIP_V4SF_DIV     = 0x004,
  RECIP_V2DF_DIV     = 0x008,

  RECIP_SF_RSQRT     = 0x010,   /* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT     = 0x020,
  RECIP_V4SF_RSQRT   = 0x040,
  RECIP_V2DF_RSQRT   = 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE         = 0,
  RECIP_ALL          = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
                        | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
                        | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;           /* option name */
  unsigned int mask;            /* mask bits to set */
} recip_options[] = {
  { "all",    RECIP_ALL },
  { "none",   RECIP_NONE },
  { "div",    (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
               | RECIP_V2DF_DIV) },
  { "divf",   (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",   (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",  (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
               | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
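
/* A worked example of how these entries combine (illustrative only):
   -mrecip=divf,rsqrtd ORs together the "divf" and "rsqrtd" masks above,
   i.e. (RECIP_SF_DIV | RECIP_V4SF_DIV) | (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT)
   = 0x005 | 0x0a0 = 0x0a5, enabling single-precision divide estimates and
   double-precision reciprocal square root estimates.  */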

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",      PPC_PLATFORM_POWER9 },
  { "power8",      PPC_PLATFORM_POWER8 },
  { "power7",      PPC_PLATFORM_POWER7 },
  { "power6x",     PPC_PLATFORM_POWER6X },
  { "power6",      PPC_PLATFORM_POWER6 },
  { "power5+",     PPC_PLATFORM_POWER5_PLUS },
  { "power5",      PPC_PLATFORM_POWER5 },
  { "ppc970",      PPC_PLATFORM_PPC970 },
  { "power4",      PPC_PLATFORM_POWER4 },
  { "ppca2",       PPC_PLATFORM_PPCA2 },
  { "ppc476",      PPC_PLATFORM_PPC476 },
  { "ppc464",      PPC_PLATFORM_PPC464 },
  { "ppc440",      PPC_PLATFORM_PPC440 },
  { "ppc405",      PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
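
/* A minimal user-level sketch of how this table is consumed (user code,
   not part of this file; the called functions are hypothetical):

     if (__builtin_cpu_is ("power9"))
       use_power9_path ();
     else
       use_generic_path ();

   The string argument must match one of the "cpu" names above.  */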

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",          PPC_FEATURE_HAS_4xxMAC,       0 },
  { "altivec",         PPC_FEATURE_HAS_ALTIVEC,      0 },
  { "arch_2_05",       PPC_FEATURE_ARCH_2_05,        0 },
  { "arch_2_06",       PPC_FEATURE_ARCH_2_06,        0 },
  { "archpmu",         PPC_FEATURE_PERFMON_COMPAT,   0 },
  { "booke",           PPC_FEATURE_BOOKE,            0 },
  { "cellbe",          PPC_FEATURE_CELL_BE,          0 },
  { "dfp",             PPC_FEATURE_HAS_DFP,          0 },
  { "efpdouble",       PPC_FEATURE_HAS_EFP_DOUBLE,   0 },
  { "efpsingle",       PPC_FEATURE_HAS_EFP_SINGLE,   0 },
  { "fpu",             PPC_FEATURE_HAS_FPU,          0 },
  { "ic_snoop",        PPC_FEATURE_ICACHE_SNOOP,     0 },
  { "mmu",             PPC_FEATURE_HAS_MMU,          0 },
  { "notb",            PPC_FEATURE_NO_TB,            0 },
  { "pa6t",            PPC_FEATURE_PA6T,             0 },
  { "power4",          PPC_FEATURE_POWER4,           0 },
  { "power5",          PPC_FEATURE_POWER5,           0 },
  { "power5+",         PPC_FEATURE_POWER5_PLUS,      0 },
  { "power6x",         PPC_FEATURE_POWER6_EXT,       0 },
  { "ppc32",           PPC_FEATURE_32,               0 },
  { "ppc601",          PPC_FEATURE_601_INSTR,        0 },
  { "ppc64",           PPC_FEATURE_64,               0 },
  { "ppcle",           PPC_FEATURE_PPC_LE,           0 },
  { "smt",             PPC_FEATURE_SMT,              0 },
  { "spe",             PPC_FEATURE_HAS_SPE,          0 },
  { "true_le",         PPC_FEATURE_TRUE_LE,          0 },
  { "ucache",          PPC_FEATURE_UNIFIED_CACHE,    0 },
  { "vsx",             PPC_FEATURE_HAS_VSX,          0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",       PPC_FEATURE2_ARCH_2_07,       1 },
  { "dscr",            PPC_FEATURE2_HAS_DSCR,        1 },
  { "ebb",             PPC_FEATURE2_HAS_EBB,         1 },
  { "htm",             PPC_FEATURE2_HAS_HTM,         1 },
  { "htm-nosc",        PPC_FEATURE2_HTM_NOSC,        1 },
  { "htm-no-suspend",  PPC_FEATURE2_HTM_NO_SUSPEND,  1 },
  { "isel",            PPC_FEATURE2_HAS_ISEL,        1 },
  { "tar",             PPC_FEATURE2_HAS_TAR,         1 },
  { "vcrypto",         PPC_FEATURE2_HAS_VEC_CRYPTO,  1 },
  { "arch_3_00",       PPC_FEATURE2_ARCH_3_00,       1 },
  { "ieee128",         PPC_FEATURE2_HAS_IEEE128,     1 },
  { "darn",            PPC_FEATURE2_DARN,            1 },
  { "scv",             PPC_FEATURE2_SCV,             1 }
};
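
/* Likewise, a sketch of user code driving this table (not part of this
   file; run_vsx_kernel is a hypothetical user function):

     if (__builtin_cpu_supports ("vsx"))
       run_vsx_kernel ();

   The name must be one of the hwcap strings above; the builtin tests the
   corresponding AT_HWCAP/AT_HWCAP2 bit recorded in the TCB.  */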

/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,            /* default clone.  */
  CLONE_ISA_2_05,               /* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,               /* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,               /* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,               /* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;       /* rs6000_isa mask */
  const char *name;             /* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,                       "" },           /* Default options.  */
  { OPTION_MASK_CMPB,        "arch_2_05" },  /* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,     "arch_2_06" },  /* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,   "arch_2_07" },  /* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,   "arch_3_00" },  /* ISA 3.00 (power9).  */
};
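
/* Example (assumption: user code, with a hypothetical function name) of a
   function cloned across these ISA levels via the target_clones attribute;
   GCC emits one clone per listed target plus an ifunc resolver that uses
   the CPU support checks above:

     __attribute__ ((target_clones ("cpu=power9,cpu=power8,default")))
     int
     hypothetical_sum (int *a, int n)
     {
       int i, s = 0;
       for (i = 0; i < n; i++)
         s += a[i];
       return s;
     }
   */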

/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)

/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the three
   register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,               /* General purpose registers.  */
  RELOAD_REG_FPR,               /* Traditional floating point regs.  */
  RELOAD_REG_VMX,               /* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,               /* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS  RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS   RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;             /* Register class name.  */
  int reg;                      /* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr", FIRST_GPR_REGNO },           /* RELOAD_REG_GPR.  */
  { "Fpr", FIRST_FPR_REGNO },           /* RELOAD_REG_FPR.  */
  { "VMX", FIRST_ALTIVEC_REGNO },       /* RELOAD_REG_VMX.  */
  { "Any", -1 },                        /* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID        0x01    /* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE     0x02    /* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED      0x04    /* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET       0x08    /* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC   0x10    /* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY   0x20    /* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16      0x40    /* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET  0x80    /* quad offset is limited.  */

/* Masks of valid addressing modes, indexed by register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;           /* INSN to reload for loading.  */
  enum insn_code reload_store;          /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;        /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;        /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;        /* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;                 /* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
          != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
          != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
          != 0);
}
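
/* For orientation, the RTL address forms these predicates gate look like
   (a sketch using standard RTL syntax, not output from this port):

     (mem (pre_inc (reg R)))                      ;; PRE_INC
     (mem (pre_dec (reg R)))                      ;; PRE_DEC
     (mem (pre_modify (reg R) (plus (reg R) X)))  ;; PRE_MODIFY, X reg or const

   A mode only admits these if the corresponding RELOAD_REG_PRE_* bit is set
   in reg_addr for that mode.  */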

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
        {
          out_set = single_set (out_insn);
          if (!out_set)
            {
              out_pat = PATTERN (out_insn);
              if (GET_CODE (out_pat) == PARALLEL)
                {
                  for (i = 0; i < XVECLEN (out_pat, 0); i++)
                    {
                      out_exp = XVECEXP (out_pat, 0, i);
                      if ((GET_CODE (out_exp) == CLOBBER)
                          || (GET_CODE (out_exp) == USE))
                        continue;
                      else if (GET_CODE (out_exp) != SET)
                        return false;
                    }
                }
            }
        }
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
        return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
        {
          in_exp = XVECEXP (in_pat, 0, i);
          if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
            continue;
          else if (GET_CODE (in_exp) != SET)
            return false;

          if (MEM_P (SET_DEST (in_exp)))
            {
              out_set = single_set (out_insn);
              if (!out_set)
                {
                  out_pat = PATTERN (out_insn);
                  if (GET_CODE (out_pat) != PARALLEL)
                    return false;
                  for (j = 0; j < XVECLEN (out_pat, 0); j++)
                    {
                      out_exp = XVECEXP (out_pat, 0, j);
                      if ((GET_CODE (out_exp) == CLOBBER)
                          || (GET_CODE (out_exp) == USE))
                        continue;
                      else if (GET_CODE (out_exp) != SET)
                        return false;
                    }
                }
            }
        }
    }
  return store_data_bypass_p (out_insn, in_insn);
}
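
/* Illustrative producer/consumer pair for the check above (a sketch in
   generic RTL, not output from this port): with
     OUT_INSN: (set (reg 5) (plus (reg 6) (reg 7)))
     IN_INSN:  (set (mem (reg 9)) (reg 5))
   reg 5 feeds the data being stored, so the bypass test can succeed; if
   IN_INSN were (set (mem (reg 5)) (reg 9)) instead, reg 5 would feed the
   store address and the combined check yields false.  */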

/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,                   /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,                  /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,                  /* cache line size */
  128,                  /* l1 cache */
  2048,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* l1 cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  8,                    /* l1 cache */
  64,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),      /* mulsi_const */
  COSTS_N_INSNS (6/2),      /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),     /* divsi */
  COSTS_N_INSNS (70/2),     /* divdi */
  COSTS_N_INSNS (10/2),     /* fp */
  COSTS_N_INSNS (10/2),     /* dmul */
  COSTS_N_INSNS (74/2),     /* sdiv */
  COSTS_N_INSNS (74/2),     /* ddiv */
  128,                      /* cache line size */
  32,                       /* l1 cache */
  512,                      /* l2 cache */
  6,                        /* streams */
  0,                        /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  8,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (14),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (8),    /* divsi */
  COSTS_N_INSNS (12),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (18),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  8,                    /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,                   /* cache line size */
  16,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
  0,                    /* SF->DF convert */
};
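
/* To make the tables above concrete: COSTS_N_INSNS (N) expresses a cost of
   roughly N instructions relative to a single add, so e.g. power9_cost
   records a 64-bit integer divide (divdi) as about 12 adds and a
   double-precision divide (ddiv) as about 18.  */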

/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
                                      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
                                     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
                                   machine_mode, machine_mode,
                                   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
                                       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
static tree get_prev_label (tree);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
                                             int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
                                                   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
                                                     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
                                                           machine_mode,
                                                           rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
                                                           enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
                                                  reg_class_t,
                                                  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
                                                machine_mode,
                                                reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
                                             int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
                                                     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
                                      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
                                          HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
                                          enum rs6000_reg_type,
                                          machine_mode,
                                          secondary_reload_info *,
                                          bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];         /* return value + 3 arguments.  */
  unsigned char uns_p[4];       /* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9",  "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9",  "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "ca",
  /* AltiVec registers.  */
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9",  "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
  "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
  "%f8",  "%f9",  "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq",   "lr",   "ctr",  "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
  "%v8",  "%v9",  "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",    1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",   0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct",  0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,         0, 0, false, false, false, false, NULL, NULL }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
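
/* Worked example: for REGNO == FIRST_ALTIVEC_REGNO (%v0) this yields
   0x80000000, and for FIRST_ALTIVEC_REGNO + 31 (%v31) it yields 0x1,
   matching the VRSAVE convention of bit 0 (the MSB) being %v0.  */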

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1673 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1674 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1675
1676 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1677 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1678 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1679 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1680 rs6000_builtin_support_vector_misalignment
1681 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1682 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1683 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1684 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1685 rs6000_builtin_vectorization_cost
1686 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1687 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1688 rs6000_preferred_simd_mode
1689 #undef TARGET_VECTORIZE_INIT_COST
1690 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1691 #undef TARGET_VECTORIZE_ADD_STMT_COST
1692 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1693 #undef TARGET_VECTORIZE_FINISH_COST
1694 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1695 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1696 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1697
1698 #undef TARGET_INIT_BUILTINS
1699 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1700 #undef TARGET_BUILTIN_DECL
1701 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1702
1703 #undef TARGET_FOLD_BUILTIN
1704 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1705 #undef TARGET_GIMPLE_FOLD_BUILTIN
1706 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1707
1708 #undef TARGET_EXPAND_BUILTIN
1709 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1710
1711 #undef TARGET_MANGLE_TYPE
1712 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1713
1714 #undef TARGET_INIT_LIBFUNCS
1715 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1716
1717 #if TARGET_MACHO
1718 #undef TARGET_BINDS_LOCAL_P
1719 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1720 #endif
1721
1722 #undef TARGET_MS_BITFIELD_LAYOUT_P
1723 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1724
1725 #undef TARGET_ASM_OUTPUT_MI_THUNK
1726 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1727
1728 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1729 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1730
1731 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1732 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1733
1734 #undef TARGET_REGISTER_MOVE_COST
1735 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1736 #undef TARGET_MEMORY_MOVE_COST
1737 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1738 #undef TARGET_CANNOT_COPY_INSN_P
1739 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1740 #undef TARGET_RTX_COSTS
1741 #define TARGET_RTX_COSTS rs6000_rtx_costs
1742 #undef TARGET_ADDRESS_COST
1743 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1744 #undef TARGET_INSN_COST
1745 #define TARGET_INSN_COST rs6000_insn_cost
1746
1747 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1748 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1749
1750 #undef TARGET_PROMOTE_FUNCTION_MODE
1751 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1752
1753 #undef TARGET_RETURN_IN_MEMORY
1754 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1755
1756 #undef TARGET_RETURN_IN_MSB
1757 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1758
1759 #undef TARGET_SETUP_INCOMING_VARARGS
1760 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1761
1762 /* Always strict argument naming on rs6000. */
1763 #undef TARGET_STRICT_ARGUMENT_NAMING
1764 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1765 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1766 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1767 #undef TARGET_SPLIT_COMPLEX_ARG
1768 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1769 #undef TARGET_MUST_PASS_IN_STACK
1770 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1771 #undef TARGET_PASS_BY_REFERENCE
1772 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1773 #undef TARGET_ARG_PARTIAL_BYTES
1774 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1775 #undef TARGET_FUNCTION_ARG_ADVANCE
1776 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1777 #undef TARGET_FUNCTION_ARG
1778 #define TARGET_FUNCTION_ARG rs6000_function_arg
1779 #undef TARGET_FUNCTION_ARG_PADDING
1780 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1781 #undef TARGET_FUNCTION_ARG_BOUNDARY
1782 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1783
1784 #undef TARGET_BUILD_BUILTIN_VA_LIST
1785 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1786
1787 #undef TARGET_EXPAND_BUILTIN_VA_START
1788 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1789
1790 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1791 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1792
1793 #undef TARGET_EH_RETURN_FILTER_MODE
1794 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1795
1796 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1797 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1798
1799 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1800 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1801
1802 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1803 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1804
1805 #undef TARGET_FLOATN_MODE
1806 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1807
1808 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1809 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1810
1811 #undef TARGET_MD_ASM_ADJUST
1812 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1813
1814 #undef TARGET_OPTION_OVERRIDE
1815 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1816
1817 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1818 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1819 rs6000_builtin_vectorized_function
1820
1821 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1822 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1823 rs6000_builtin_md_vectorized_function
1824
1825 #undef TARGET_STACK_PROTECT_GUARD
1826 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1827
1828 #if !TARGET_MACHO
1829 #undef TARGET_STACK_PROTECT_FAIL
1830 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1831 #endif
1832
1833 #ifdef HAVE_AS_TLS
1834 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1835 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1836 #endif
1837
1838 /* Use a 32-bit anchor range. This leads to sequences like:
1839
1840 addis tmp,anchor,high
1841 add dest,tmp,low
1842
1843 where tmp itself acts as an anchor, and can be shared between
1844 accesses to the same 64k page. */
1845 #undef TARGET_MIN_ANCHOR_OFFSET
1846 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1847 #undef TARGET_MAX_ANCHOR_OFFSET
1848 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1849 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1850 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1851 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1852 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1853
1854 #undef TARGET_BUILTIN_RECIPROCAL
1855 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1856
1857 #undef TARGET_SECONDARY_RELOAD
1858 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1859 #undef TARGET_SECONDARY_MEMORY_NEEDED
1860 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1861 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1862 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1863
1864 #undef TARGET_LEGITIMATE_ADDRESS_P
1865 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1866
1867 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1868 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1869
1870 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1871 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1872
1873 #undef TARGET_CAN_ELIMINATE
1874 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1875
1876 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1877 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1878
1879 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1880 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1881
1882 #undef TARGET_TRAMPOLINE_INIT
1883 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1884
1885 #undef TARGET_FUNCTION_VALUE
1886 #define TARGET_FUNCTION_VALUE rs6000_function_value
1887
1888 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1889 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1890
1891 #undef TARGET_OPTION_SAVE
1892 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1893
1894 #undef TARGET_OPTION_RESTORE
1895 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1896
1897 #undef TARGET_OPTION_PRINT
1898 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1899
1900 #undef TARGET_CAN_INLINE_P
1901 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1902
1903 #undef TARGET_SET_CURRENT_FUNCTION
1904 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1905
1906 #undef TARGET_LEGITIMATE_CONSTANT_P
1907 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1908
1909 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1910 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1911
1912 #undef TARGET_CAN_USE_DOLOOP_P
1913 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1914
1915 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1916 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1917
1918 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1919 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1920 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1921 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1922 #undef TARGET_UNWIND_WORD_MODE
1923 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1924
1925 #undef TARGET_OFFLOAD_OPTIONS
1926 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1927
1928 #undef TARGET_C_MODE_FOR_SUFFIX
1929 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1930
1931 #undef TARGET_INVALID_BINARY_OP
1932 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1933
1934 #undef TARGET_OPTAB_SUPPORTED_P
1935 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1936
1937 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1938 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1939
1940 #undef TARGET_COMPARE_VERSION_PRIORITY
1941 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1942
1943 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1944 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1945 rs6000_generate_version_dispatcher_body
1946
1947 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1948 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1949 rs6000_get_function_versions_dispatcher
1950
1951 #undef TARGET_OPTION_FUNCTION_VERSIONS
1952 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1953
1954 #undef TARGET_HARD_REGNO_NREGS
1955 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1956 #undef TARGET_HARD_REGNO_MODE_OK
1957 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1958
1959 #undef TARGET_MODES_TIEABLE_P
1960 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1961
1962 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1963 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1964 rs6000_hard_regno_call_part_clobbered
1965
1966 #undef TARGET_SLOW_UNALIGNED_ACCESS
1967 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1968
1969 #undef TARGET_CAN_CHANGE_MODE_CLASS
1970 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1971
1972 #undef TARGET_CONSTANT_ALIGNMENT
1973 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1974
1975 #undef TARGET_STARTING_FRAME_OFFSET
1976 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1977
1978 #if TARGET_ELF && RS6000_WEAK
1979 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1980 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1981 #endif
1982
1983 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1984 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1985
1986 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1987 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1988 \f
1989
1990 /* Processor table. */
1991 struct rs6000_ptt
1992 {
1993 const char *const name; /* Canonical processor name. */
1994 const enum processor_type processor; /* Processor type enum value. */
1995 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1996 };
1997
1998 static struct rs6000_ptt const processor_target_table[] =
1999 {
2000 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
2001 #include "rs6000-cpus.def"
2002 #undef RS6000_CPU
2003 };
2004
2005 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2006 name is invalid. */
2007
2008 static int
2009 rs6000_cpu_name_lookup (const char *name)
2010 {
2011 size_t i;
2012
2013 if (name != NULL)
2014 {
2015 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2016 if (! strcmp (name, processor_target_table[i].name))
2017 return (int)i;
2018 }
2019
2020 return -1;
2021 }
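/* Typical use (sketch), resolving a -mcpu= argument:

     int idx = rs6000_cpu_name_lookup ("power9");
     if (idx >= 0)
       flags = processor_target_table[idx].target_enable;

   where "power9" is assumed to be one of the names in
   rs6000-cpus.def.  */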
2022
2023 \f
2024 /* Return number of consecutive hard regs needed starting at reg REGNO
2025 to hold something of mode MODE.
2026 This is ordinarily the length in words of a value of mode MODE
2027 but can be less for certain modes in special long registers.
2028
2029 POWER and PowerPC GPRs hold 32 bits worth;
2030 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2031
2032 static int
2033 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2034 {
2035 unsigned HOST_WIDE_INT reg_size;
2036
2037 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2038 128-bit floating point that can go in vector registers, which has VSX
2039 memory addressing. */
2040 if (FP_REGNO_P (regno))
2041 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2042 ? UNITS_PER_VSX_WORD
2043 : UNITS_PER_FP_WORD);
2044
2045 else if (ALTIVEC_REGNO_P (regno))
2046 reg_size = UNITS_PER_ALTIVEC_WORD;
2047
2048 else
2049 reg_size = UNITS_PER_WORD;
2050
2051 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2052 }
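/* Worked example (sketch): V4SImode (16 bytes) starting in a GPR on a
   32-bit target needs (16 + 4 - 1) / 4 = 4 registers, while the same
   mode in an Altivec register needs (16 + 16 - 1) / 16 = 1.  */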
2053
2054 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2055 MODE. */
2056 static int
2057 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2058 {
2059 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2060
2061 if (COMPLEX_MODE_P (mode))
2062 mode = GET_MODE_INNER (mode);
2063
2064 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2065 register combinations, and use PTImode where we need to deal with quad
2066 word memory operations. Don't allow quad words in the argument or frame
2067 pointer registers, just registers 0..31. */
2068 if (mode == PTImode)
2069 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2070 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2071 && ((regno & 1) == 0));
2072
2073 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2074 implementations. Don't allow an item to be split between an FP register
2075 and an Altivec register. Allow TImode in all VSX registers if the user
2076 asked for it. */
2077 if (TARGET_VSX && VSX_REGNO_P (regno)
2078 && (VECTOR_MEM_VSX_P (mode)
2079 || FLOAT128_VECTOR_P (mode)
2080 || reg_addr[mode].scalar_in_vmx_p
2081 || mode == TImode
2082 || (TARGET_VADDUQM && mode == V1TImode)))
2083 {
2084 if (FP_REGNO_P (regno))
2085 return FP_REGNO_P (last_regno);
2086
2087 if (ALTIVEC_REGNO_P (regno))
2088 {
2089 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2090 return 0;
2091
2092 return ALTIVEC_REGNO_P (last_regno);
2093 }
2094 }
2095
2096 /* The GPRs can hold any mode, but values bigger than one register
2097 cannot go past R31. */
2098 if (INT_REGNO_P (regno))
2099 return INT_REGNO_P (last_regno);
2100
2101 /* The float registers (except for VSX vector modes) can only hold floating
2102 modes and DImode. */
2103 if (FP_REGNO_P (regno))
2104 {
2105 if (FLOAT128_VECTOR_P (mode))
2106 return false;
2107
2108 if (SCALAR_FLOAT_MODE_P (mode)
2109 && (mode != TDmode || (regno % 2) == 0)
2110 && FP_REGNO_P (last_regno))
2111 return 1;
2112
2113 if (GET_MODE_CLASS (mode) == MODE_INT)
2114 {
2115 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2116 return 1;
2117
2118 if (TARGET_P8_VECTOR && (mode == SImode))
2119 return 1;
2120
2121 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2122 return 1;
2123 }
2124
2125 return 0;
2126 }
2127
2128 /* The CR register can only hold CC modes. */
2129 if (CR_REGNO_P (regno))
2130 return GET_MODE_CLASS (mode) == MODE_CC;
2131
2132 if (CA_REGNO_P (regno))
2133 return mode == Pmode || mode == SImode;
2134
2135 /* AltiVec modes can go only in AltiVec registers. */
2136 if (ALTIVEC_REGNO_P (regno))
2137 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2138 || mode == V1TImode);
2139
2140 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2141 registers, and the value must fit within the register set. */
2142
2143 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2144 }
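/* Illustration (sketch): PTImode starting at r4 is accepted (r4/r5 form
   an even/odd GPR pair), while r5 fails the (regno & 1) == 0 test, and
   any FPR or Altivec register is rejected outright.  */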
2145
2146 /* Implement TARGET_HARD_REGNO_NREGS. */
2147
2148 static unsigned int
2149 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2150 {
2151 return rs6000_hard_regno_nregs[mode][regno];
2152 }
2153
2154 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2155
2156 static bool
2157 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2158 {
2159 return rs6000_hard_regno_mode_ok_p[mode][regno];
2160 }
2161
2162 /* Implement TARGET_MODES_TIEABLE_P.
2163
2164 PTImode cannot tie with other modes because PTImode is restricted to even
2165 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2166 57744).
2167
2168 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2169 128-bit floating point on VSX systems ties with other vectors. */
2170
2171 static bool
2172 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2173 {
2174 if (mode1 == PTImode)
2175 return mode2 == PTImode;
2176 if (mode2 == PTImode)
2177 return false;
2178
2179 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2180 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2181 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2182 return false;
2183
2184 if (SCALAR_FLOAT_MODE_P (mode1))
2185 return SCALAR_FLOAT_MODE_P (mode2);
2186 if (SCALAR_FLOAT_MODE_P (mode2))
2187 return false;
2188
2189 if (GET_MODE_CLASS (mode1) == MODE_CC)
2190 return GET_MODE_CLASS (mode2) == MODE_CC;
2191 if (GET_MODE_CLASS (mode2) == MODE_CC)
2192 return false;
2193
2194 return true;
2195 }
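/* Examples (sketch): SFmode ties with DFmode (both scalar float),
   V2DFmode ties with V4SImode (both Altivec/VSX vector modes), but
   TImode never ties with PTImode, since PTImode ties only with
   itself.  */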
2196
2197 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2198
2199 static bool
2200 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2201 {
2202 if (TARGET_32BIT
2203 && TARGET_POWERPC64
2204 && GET_MODE_SIZE (mode) > 4
2205 && INT_REGNO_P (regno))
2206 return true;
2207
2208 if (TARGET_VSX
2209 && FP_REGNO_P (regno)
2210 && GET_MODE_SIZE (mode) > 8
2211 && !FLOAT128_2REG_P (mode))
2212 return true;
2213
2214 return false;
2215 }
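/* For instance (sketch): with -m32 -mpowerpc64, a DImode value occupies
   one 64-bit GPR but the 32-bit ABI only preserves the low 32 bits
   across calls, so the hook returns true for (r14, DImode).  Similarly,
   under VSX a KFmode value in an FPR is part-clobbered, because the
   callee saves only the low 64 bits of the VSX register.  */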
2216
2217 /* Print interesting facts about registers. */
2218 static void
2219 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2220 {
2221 int r, m;
2222
2223 for (r = first_regno; r <= last_regno; ++r)
2224 {
2225 const char *comma = "";
2226 int len;
2227
2228 if (first_regno == last_regno)
2229 fprintf (stderr, "%s:\t", reg_name);
2230 else
2231 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2232
2233 len = 8;
2234 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2235 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2236 {
2237 if (len > 70)
2238 {
2239 fprintf (stderr, ",\n\t");
2240 len = 8;
2241 comma = "";
2242 }
2243
2244 if (rs6000_hard_regno_nregs[m][r] > 1)
2245 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2246 rs6000_hard_regno_nregs[m][r]);
2247 else
2248 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2249
2250 comma = ", ";
2251 }
2252
2253 if (call_used_regs[r])
2254 {
2255 if (len > 70)
2256 {
2257 fprintf (stderr, ",\n\t");
2258 len = 8;
2259 comma = "";
2260 }
2261
2262 len += fprintf (stderr, "%s%s", comma, "call-used");
2263 comma = ", ";
2264 }
2265
2266 if (fixed_regs[r])
2267 {
2268 if (len > 70)
2269 {
2270 fprintf (stderr, ",\n\t");
2271 len = 8;
2272 comma = "";
2273 }
2274
2275 len += fprintf (stderr, "%s%s", comma, "fixed");
2276 comma = ", ";
2277 }
2278
2279 if (len > 70)
2280 {
2281 fprintf (stderr, ",\n\t");
2282 comma = "";
2283 }
2284
2285 len += fprintf (stderr, "%sreg-class = %s", comma,
2286 reg_class_names[(int)rs6000_regno_regclass[r]]);
2287 comma = ", ";
2288
2289 if (len > 70)
2290 {
2291 fprintf (stderr, ",\n\t");
2292 comma = "";
2293 }
2294
2295 fprintf (stderr, "%sregno = %d\n", comma, r);
2296 }
2297 }
2298
2299 static const char *
2300 rs6000_debug_vector_unit (enum rs6000_vector v)
2301 {
2302 const char *ret;
2303
2304 switch (v)
2305 {
2306 case VECTOR_NONE: ret = "none"; break;
2307 case VECTOR_ALTIVEC: ret = "altivec"; break;
2308 case VECTOR_VSX: ret = "vsx"; break;
2309 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2310 default: ret = "unknown"; break;
2311 }
2312
2313 return ret;
2314 }
2315
2316 /* Inner function printing just the address mask for a particular reload
2317 register class. */
2318 DEBUG_FUNCTION char *
2319 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2320 {
2321 static char ret[8];
2322 char *p = ret;
2323
2324 if ((mask & RELOAD_REG_VALID) != 0)
2325 *p++ = 'v';
2326 else if (keep_spaces)
2327 *p++ = ' ';
2328
2329 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2330 *p++ = 'm';
2331 else if (keep_spaces)
2332 *p++ = ' ';
2333
2334 if ((mask & RELOAD_REG_INDEXED) != 0)
2335 *p++ = 'i';
2336 else if (keep_spaces)
2337 *p++ = ' ';
2338
2339 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2340 *p++ = 'O';
2341 else if ((mask & RELOAD_REG_OFFSET) != 0)
2342 *p++ = 'o';
2343 else if (keep_spaces)
2344 *p++ = ' ';
2345
2346 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2347 *p++ = '+';
2348 else if (keep_spaces)
2349 *p++ = ' ';
2350
2351 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2352 *p++ = '+';
2353 else if (keep_spaces)
2354 *p++ = ' ';
2355
2356 if ((mask & RELOAD_REG_AND_M16) != 0)
2357 *p++ = '&';
2358 else if (keep_spaces)
2359 *p++ = ' ';
2360
2361 *p = '\0';
2362
2363 return ret;
2364 }
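/* Sample decoding (sketch): a mask with RELOAD_REG_VALID,
   RELOAD_REG_INDEXED, RELOAD_REG_OFFSET and RELOAD_REG_PRE_INCDEC
   prints as "v io+  " with KEEP_SPACES and as "vio+" without.  */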
2365
2366 /* Print the address masks in a human-readable fashion. */
2367 DEBUG_FUNCTION void
2368 rs6000_debug_print_mode (ssize_t m)
2369 {
2370 ssize_t rc;
2371 int spaces = 0;
2372
2373 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2374 for (rc = 0; rc < N_RELOAD_REG; rc++)
2375 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2376 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2377
2378 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2379 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2380 {
2381 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2382 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2383 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2384 spaces = 0;
2385 }
2386 else
2387 spaces += sizeof (" Reload=sl") - 1;
2388
2389 if (reg_addr[m].scalar_in_vmx_p)
2390 {
2391 fprintf (stderr, "%*s Upper=y", spaces, "");
2392 spaces = 0;
2393 }
2394 else
2395 spaces += sizeof (" Upper=y") - 1;
2396
2397 if (rs6000_vector_unit[m] != VECTOR_NONE
2398 || rs6000_vector_mem[m] != VECTOR_NONE)
2399 {
2400 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2401 spaces, "",
2402 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2403 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2404 }
2405
2406 fputs ("\n", stderr);
2407 }
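/* A -mdebug=reg dump line produced here looks roughly like (sketch):

     Mode: DI    GPR: v io++  FPR: v io    VMX: v i     Any: v io++

   with the exact columns depending on the target flags in effect.  */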
2408
2409 #define DEBUG_FMT_ID "%-32s= "
2410 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2411 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2412 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
2413
2414 /* Print various interesting information with -mdebug=reg. */
2415 static void
2416 rs6000_debug_reg_global (void)
2417 {
2418 static const char *const tf[2] = { "false", "true" };
2419 const char *nl = (const char *)0;
2420 int m;
2421 size_t m1, m2, v;
2422 char costly_num[20];
2423 char nop_num[20];
2424 char flags_buffer[40];
2425 const char *costly_str;
2426 const char *nop_str;
2427 const char *trace_str;
2428 const char *abi_str;
2429 const char *cmodel_str;
2430 struct cl_target_option cl_opts;
2431
2432 /* Modes we want tieable information on. */
2433 static const machine_mode print_tieable_modes[] = {
2434 QImode,
2435 HImode,
2436 SImode,
2437 DImode,
2438 TImode,
2439 PTImode,
2440 SFmode,
2441 DFmode,
2442 TFmode,
2443 IFmode,
2444 KFmode,
2445 SDmode,
2446 DDmode,
2447 TDmode,
2448 V16QImode,
2449 V8HImode,
2450 V4SImode,
2451 V2DImode,
2452 V1TImode,
2453 V32QImode,
2454 V16HImode,
2455 V8SImode,
2456 V4DImode,
2457 V2TImode,
2458 V4SFmode,
2459 V2DFmode,
2460 V8SFmode,
2461 V4DFmode,
2462 CCmode,
2463 CCUNSmode,
2464 CCEQmode,
2465 };
2466
2467 /* Virtual regs we are interested in. */
2468 static const struct {
2469 int regno; /* register number. */
2470 const char *name; /* register name. */
2471 } virtual_regs[] = {
2472 { STACK_POINTER_REGNUM, "stack pointer:" },
2473 { TOC_REGNUM, "toc: " },
2474 { STATIC_CHAIN_REGNUM, "static chain: " },
2475 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2476 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2477 { ARG_POINTER_REGNUM, "arg pointer: " },
2478 { FRAME_POINTER_REGNUM, "frame pointer:" },
2479 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2480 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2481 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2482 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2483 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2484 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2485 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2486 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2487 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2488 };
2489
2490 fputs ("\nHard register information:\n", stderr);
2491 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2492 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2493 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2494 LAST_ALTIVEC_REGNO,
2495 "vs");
2496 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2497 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2498 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2499 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2500 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2501 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2502
2503 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2504 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2505 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2506
2507 fprintf (stderr,
2508 "\n"
2509 "d reg_class = %s\n"
2510 "f reg_class = %s\n"
2511 "v reg_class = %s\n"
2512 "wa reg_class = %s\n"
2513 "wb reg_class = %s\n"
2514 "wd reg_class = %s\n"
2515 "we reg_class = %s\n"
2516 "wf reg_class = %s\n"
2517 "wg reg_class = %s\n"
2518 "wh reg_class = %s\n"
2519 "wi reg_class = %s\n"
2520 "wj reg_class = %s\n"
2521 "wk reg_class = %s\n"
2522 "wl reg_class = %s\n"
2523 "wm reg_class = %s\n"
2524 "wo reg_class = %s\n"
2525 "wp reg_class = %s\n"
2526 "wq reg_class = %s\n"
2527 "wr reg_class = %s\n"
2528 "ws reg_class = %s\n"
2529 "wt reg_class = %s\n"
2530 "wu reg_class = %s\n"
2531 "wv reg_class = %s\n"
2532 "ww reg_class = %s\n"
2533 "wx reg_class = %s\n"
2534 "wy reg_class = %s\n"
2535 "wz reg_class = %s\n"
2536 "wA reg_class = %s\n"
2537 "wH reg_class = %s\n"
2538 "wI reg_class = %s\n"
2539 "wJ reg_class = %s\n"
2540 "wK reg_class = %s\n"
2541 "\n",
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2544 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2573 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2574
2575 nl = "\n";
2576 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2577 rs6000_debug_print_mode (m);
2578
2579 fputs ("\n", stderr);
2580
2581 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2582 {
2583 machine_mode mode1 = print_tieable_modes[m1];
2584 bool first_time = true;
2585
2586 nl = (const char *)0;
2587 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2588 {
2589 machine_mode mode2 = print_tieable_modes[m2];
2590 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2591 {
2592 if (first_time)
2593 {
2594 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2595 nl = "\n";
2596 first_time = false;
2597 }
2598
2599 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2600 }
2601 }
2602
2603 if (!first_time)
2604 fputs ("\n", stderr);
2605 }
2606
2607 if (nl)
2608 fputs (nl, stderr);
2609
2610 if (rs6000_recip_control)
2611 {
2612 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2613
2614 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2615 if (rs6000_recip_bits[m])
2616 {
2617 fprintf (stderr,
2618 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2619 GET_MODE_NAME (m),
2620 (RS6000_RECIP_AUTO_RE_P (m)
2621 ? "auto"
2622 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2623 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2624 ? "auto"
2625 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2626 }
2627
2628 fputs ("\n", stderr);
2629 }
2630
2631 if (rs6000_cpu_index >= 0)
2632 {
2633 const char *name = processor_target_table[rs6000_cpu_index].name;
2634 HOST_WIDE_INT flags
2635 = processor_target_table[rs6000_cpu_index].target_enable;
2636
2637 sprintf (flags_buffer, "-mcpu=%s flags", name);
2638 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2639 }
2640 else
2641 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2642
2643 if (rs6000_tune_index >= 0)
2644 {
2645 const char *name = processor_target_table[rs6000_tune_index].name;
2646 HOST_WIDE_INT flags
2647 = processor_target_table[rs6000_tune_index].target_enable;
2648
2649 sprintf (flags_buffer, "-mtune=%s flags", name);
2650 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2651 }
2652 else
2653 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2654
2655 cl_target_option_save (&cl_opts, &global_options);
2656 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2657 rs6000_isa_flags);
2658
2659 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2660 rs6000_isa_flags_explicit);
2661
2662 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2663 rs6000_builtin_mask);
2664
2665 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2666
2667 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2668 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2669
2670 switch (rs6000_sched_costly_dep)
2671 {
2672 case max_dep_latency:
2673 costly_str = "max_dep_latency";
2674 break;
2675
2676 case no_dep_costly:
2677 costly_str = "no_dep_costly";
2678 break;
2679
2680 case all_deps_costly:
2681 costly_str = "all_deps_costly";
2682 break;
2683
2684 case true_store_to_load_dep_costly:
2685 costly_str = "true_store_to_load_dep_costly";
2686 break;
2687
2688 case store_to_load_dep_costly:
2689 costly_str = "store_to_load_dep_costly";
2690 break;
2691
2692 default:
2693 costly_str = costly_num;
2694 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2695 break;
2696 }
2697
2698 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2699
2700 switch (rs6000_sched_insert_nops)
2701 {
2702 case sched_finish_regroup_exact:
2703 nop_str = "sched_finish_regroup_exact";
2704 break;
2705
2706 case sched_finish_pad_groups:
2707 nop_str = "sched_finish_pad_groups";
2708 break;
2709
2710 case sched_finish_none:
2711 nop_str = "sched_finish_none";
2712 break;
2713
2714 default:
2715 nop_str = nop_num;
2716 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2717 break;
2718 }
2719
2720 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2721
2722 switch (rs6000_sdata)
2723 {
2724 default:
2725 case SDATA_NONE:
2726 break;
2727
2728 case SDATA_DATA:
2729 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2730 break;
2731
2732 case SDATA_SYSV:
2733 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2734 break;
2735
2736 case SDATA_EABI:
2737 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2738 break;
2739
2740 }
2741
2742 switch (rs6000_traceback)
2743 {
2744 case traceback_default: trace_str = "default"; break;
2745 case traceback_none: trace_str = "none"; break;
2746 case traceback_part: trace_str = "part"; break;
2747 case traceback_full: trace_str = "full"; break;
2748 default: trace_str = "unknown"; break;
2749 }
2750
2751 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2752
2753 switch (rs6000_current_cmodel)
2754 {
2755 case CMODEL_SMALL: cmodel_str = "small"; break;
2756 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2757 case CMODEL_LARGE: cmodel_str = "large"; break;
2758 default: cmodel_str = "unknown"; break;
2759 }
2760
2761 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2762
2763 switch (rs6000_current_abi)
2764 {
2765 case ABI_NONE: abi_str = "none"; break;
2766 case ABI_AIX: abi_str = "aix"; break;
2767 case ABI_ELFv2: abi_str = "ELFv2"; break;
2768 case ABI_V4: abi_str = "V4"; break;
2769 case ABI_DARWIN: abi_str = "darwin"; break;
2770 default: abi_str = "unknown"; break;
2771 }
2772
2773 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2774
2775 if (rs6000_altivec_abi)
2776 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2777
2778 if (rs6000_darwin64_abi)
2779 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2780
2781 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2782 (TARGET_SOFT_FLOAT ? "true" : "false"));
2783
2784 if (TARGET_LINK_STACK)
2785 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2786
2787 if (TARGET_P8_FUSION)
2788 {
2789 char options[80];
2790
2791 strcpy (options, "power8");
2792 if (TARGET_P8_FUSION_SIGN)
2793 strcat (options, ", sign");
2794
2795 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2796 }
2797
2798 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2799 TARGET_SECURE_PLT ? "secure" : "bss");
2800 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2801 aix_struct_return ? "aix" : "sysv");
2802 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2803 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2804 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2805 tf[!!rs6000_align_branch_targets]);
2806 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2807 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2808 rs6000_long_double_type_size);
2809 if (rs6000_long_double_type_size > 64)
2810 {
2811 fprintf (stderr, DEBUG_FMT_S, "long double type",
2812 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2813 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2814 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2815 }
2816 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2817 (int)rs6000_sched_restricted_insns_priority);
2818 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2819 (int)END_BUILTINS);
2820 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2821 (int)RS6000_BUILTIN_COUNT);
2822
2823 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2824 (int)TARGET_FLOAT128_ENABLE_TYPE);
2825
2826 if (TARGET_VSX)
2827 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2828 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2829
2830 if (TARGET_DIRECT_MOVE_128)
2831 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2832 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2833 }
2834
2835 \f
2836 /* Update the addr mask bits in reg_addr to help secondary reload and the
2837 legitimate address support figure out the appropriate addressing to
2838 use. */
2839
2840 static void
2841 rs6000_setup_reg_addr_masks (void)
2842 {
2843 ssize_t rc, reg, m, nregs;
2844 addr_mask_type any_addr_mask, addr_mask;
2845
2846 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2847 {
2848 machine_mode m2 = (machine_mode) m;
2849 bool complex_p = false;
2850 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2851 size_t msize;
2852
2853 if (COMPLEX_MODE_P (m2))
2854 {
2855 complex_p = true;
2856 m2 = GET_MODE_INNER (m2);
2857 }
2858
2859 msize = GET_MODE_SIZE (m2);
2860
2861 /* SDmode is special in that we want to access it only via REG+REG
2862 addressing on power7 and above, since we want to use the LFIWZX and
2863 STFIWZX instructions to load it. */
2864 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2865
2866 any_addr_mask = 0;
2867 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2868 {
2869 addr_mask = 0;
2870 reg = reload_reg_map[rc].reg;
2871
2872 /* Can mode values go in the GPR/FPR/Altivec registers? */
2873 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2874 {
2875 bool small_int_vsx_p = (small_int_p
2876 && (rc == RELOAD_REG_FPR
2877 || rc == RELOAD_REG_VMX));
2878
2879 nregs = rs6000_hard_regno_nregs[m][reg];
2880 addr_mask |= RELOAD_REG_VALID;
2881
2882 /* Indicate if the mode takes more than 1 physical register. If
2883 it takes a single register, indicate it can do REG+REG
2884 addressing. Small integers in VSX registers can only do
2885 REG+REG addressing. */
2886 if (small_int_vsx_p)
2887 addr_mask |= RELOAD_REG_INDEXED;
2888 else if (nregs > 1 || m == BLKmode || complex_p)
2889 addr_mask |= RELOAD_REG_MULTIPLE;
2890 else
2891 addr_mask |= RELOAD_REG_INDEXED;
2892
2893 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2894 addressing. If we allow scalars into Altivec registers,
2895 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2896
2897 For VSX systems, we don't allow update addressing for
2898 DFmode/SFmode if those registers can go in both the
2899 traditional floating point registers and Altivec registers.
2900 The load/store instructions for the Altivec registers do not
2901 have update forms. If we allowed update addressing, it seems
2902 to break IV-OPT code using floating point if the index type is
2903 int instead of long (PR target/81550 and target/84042). */
2904
2905 if (TARGET_UPDATE
2906 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2907 && msize <= 8
2908 && !VECTOR_MODE_P (m2)
2909 && !FLOAT128_VECTOR_P (m2)
2910 && !complex_p
2911 && (m != E_DFmode || !TARGET_VSX)
2912 && (m != E_SFmode || !TARGET_P8_VECTOR)
2913 && !small_int_vsx_p)
2914 {
2915 addr_mask |= RELOAD_REG_PRE_INCDEC;
2916
2917 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2918 we don't allow PRE_MODIFY for some multi-register
2919 operations. */
2920 switch (m)
2921 {
2922 default:
2923 addr_mask |= RELOAD_REG_PRE_MODIFY;
2924 break;
2925
2926 case E_DImode:
2927 if (TARGET_POWERPC64)
2928 addr_mask |= RELOAD_REG_PRE_MODIFY;
2929 break;
2930
2931 case E_DFmode:
2932 case E_DDmode:
2933 if (TARGET_HARD_FLOAT)
2934 addr_mask |= RELOAD_REG_PRE_MODIFY;
2935 break;
2936 }
2937 }
2938 }
2939
2940 /* GPR and FPR registers can do REG+OFFSET addressing, except
2941 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2942 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2943 if ((addr_mask != 0) && !indexed_only_p
2944 && msize <= 8
2945 && (rc == RELOAD_REG_GPR
2946 || ((msize == 8 || m2 == SFmode)
2947 && (rc == RELOAD_REG_FPR
2948 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2949 addr_mask |= RELOAD_REG_OFFSET;
2950
2951 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2952 instructions are enabled. The offset for 128-bit VSX registers is
2953 only 12-bits. While GPRs can handle the full offset range, VSX
2954 registers can only handle the restricted range. */
2955 else if ((addr_mask != 0) && !indexed_only_p
2956 && msize == 16 && TARGET_P9_VECTOR
2957 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2958 || (m2 == TImode && TARGET_VSX)))
2959 {
2960 addr_mask |= RELOAD_REG_OFFSET;
2961 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2962 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2963 }
2964
2965 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2966 addressing on 128-bit types. */
2967 if (rc == RELOAD_REG_VMX && msize == 16
2968 && (addr_mask & RELOAD_REG_VALID) != 0)
2969 addr_mask |= RELOAD_REG_AND_M16;
2970
2971 reg_addr[m].addr_mask[rc] = addr_mask;
2972 any_addr_mask |= addr_mask;
2973 }
2974
2975 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2976 }
2977 }
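/* Resulting masks, for example (sketch): on a 64-bit power9, DImode in
   the GPR class ends up valid, indexed, offsettable and allows
   pre-increment/pre-modify, while V2DFmode in the FPR and VMX classes
   gets RELOAD_REG_QUAD_OFFSET rather than plain RELOAD_REG_OFFSET,
   matching the DQ-form offset restriction of lxv/stxv.  */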
2978
2979 \f
2980 /* Initialize the various global tables that are based on register size. */
2981 static void
2982 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2983 {
2984 ssize_t r, m, c;
2985 int align64;
2986 int align32;
2987
2988 /* Precalculate REGNO_REG_CLASS. */
2989 rs6000_regno_regclass[0] = GENERAL_REGS;
2990 for (r = 1; r < 32; ++r)
2991 rs6000_regno_regclass[r] = BASE_REGS;
2992
2993 for (r = 32; r < 64; ++r)
2994 rs6000_regno_regclass[r] = FLOAT_REGS;
2995
2996 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2997 rs6000_regno_regclass[r] = NO_REGS;
2998
2999 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3000 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3001
3002 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3003 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3004 rs6000_regno_regclass[r] = CR_REGS;
3005
3006 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3007 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3008 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3009 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3010 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3011 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3012 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3013 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3014 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3015 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3016
3017 /* Precalculate the mapping from register class to the simpler reload
3018 register class. We don't need the classes that are combinations of
3019 other classes, just the simple ones that have constraint letters. */
3020 for (c = 0; c < N_REG_CLASSES; c++)
3021 reg_class_to_reg_type[c] = NO_REG_TYPE;
3022
3023 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3024 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3025 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3026 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3027 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3028 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3029 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3030 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3031 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3032 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3033
3034 if (TARGET_VSX)
3035 {
3036 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3037 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3038 }
3039 else
3040 {
3041 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3042 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3043 }
3044
3045 /* Precalculate the valid memory formats as well as the vector information;
3046 this must be set up before the rs6000_hard_regno_nregs_internal calls
3047 below. */
3048 gcc_assert ((int)VECTOR_NONE == 0);
3049 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3050 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
3051
3052 gcc_assert ((int)CODE_FOR_nothing == 0);
3053 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3054
3055 gcc_assert ((int)NO_REGS == 0);
3056 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3057
3058 /* The VSX hardware allows native alignment for vectors; control here whether the
3059 compiler believes it can use native alignment or must still use 128-bit alignment. */
3060 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3061 {
3062 align64 = 64;
3063 align32 = 32;
3064 }
3065 else
3066 {
3067 align64 = 128;
3068 align32 = 128;
3069 }
3070
3071 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3072 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3073 if (TARGET_FLOAT128_TYPE)
3074 {
3075 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3076 rs6000_vector_align[KFmode] = 128;
3077
3078 if (FLOAT128_IEEE_P (TFmode))
3079 {
3080 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3081 rs6000_vector_align[TFmode] = 128;
3082 }
3083 }
3084
3085 /* V2DF mode, VSX only. */
3086 if (TARGET_VSX)
3087 {
3088 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3089 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3090 rs6000_vector_align[V2DFmode] = align64;
3091 }
3092
3093 /* V4SF mode, either VSX or Altivec. */
3094 if (TARGET_VSX)
3095 {
3096 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3097 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3098 rs6000_vector_align[V4SFmode] = align32;
3099 }
3100 else if (TARGET_ALTIVEC)
3101 {
3102 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3103 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3104 rs6000_vector_align[V4SFmode] = align32;
3105 }
3106
3107 /* V16QImode, V8HImode, V4SImode are Altivec only, but the loads and stores
3108 may use VSX instructions. */
3109 if (TARGET_ALTIVEC)
3110 {
3111 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3112 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3113 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3114 rs6000_vector_align[V4SImode] = align32;
3115 rs6000_vector_align[V8HImode] = align32;
3116 rs6000_vector_align[V16QImode] = align32;
3117
3118 if (TARGET_VSX)
3119 {
3120 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3121 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3122 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3123 }
3124 else
3125 {
3126 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3127 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3128 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3129 }
3130 }
3131
3132 /* V2DImode: full arithmetic needs the ISA 2.07 vector unit, but allow it
3133 under VSX for insert/splat/extract. Altivec has no 64-bit integer support. */
3134 if (TARGET_VSX)
3135 {
3136 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3137 rs6000_vector_unit[V2DImode]
3138 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3139 rs6000_vector_align[V2DImode] = align64;
3140
3141 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3142 rs6000_vector_unit[V1TImode]
3143 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3144 rs6000_vector_align[V1TImode] = 128;
3145 }
3146
3147 /* DFmode, see if we want to use the VSX unit. Memory is handled
3148 differently, so don't set rs6000_vector_mem. */
3149 if (TARGET_VSX)
3150 {
3151 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3152 rs6000_vector_align[DFmode] = 64;
3153 }
3154
3155 /* SFmode, see if we want to use the VSX unit. */
3156 if (TARGET_P8_VECTOR)
3157 {
3158 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3159 rs6000_vector_align[SFmode] = 32;
3160 }
3161
3162 /* Allow TImode in VSX register and set the VSX memory macros. */
3163 if (TARGET_VSX)
3164 {
3165 rs6000_vector_mem[TImode] = VECTOR_VSX;
3166 rs6000_vector_align[TImode] = align64;
3167 }
3168
3169 /* Register class constraints for the constraints that depend on compile
3170 switches. When the VSX code was added, different constraints were added
3171 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3172 of the VSX registers are used. The register classes for scalar floating
3173 point types are set based on whether we allow that type into the upper
3174 (Altivec) registers. GCC has register classes to target the Altivec
3175 registers for load/store operations, to select using a VSX memory
3176 operation instead of the traditional floating point operation. The
3177 constraints are:
3178
3179 d - Register class to use with traditional DFmode instructions.
3180 f - Register class to use with traditional SFmode instructions.
3181 v - Altivec register.
3182 wa - Any VSX register.
3183 wc - Reserved to represent individual CR bits (used in LLVM).
3184 wd - Preferred register class for V2DFmode.
3185 wf - Preferred register class for V4SFmode.
3186 wg - Float register for power6x move insns.
3187 wh - FP register for direct move instructions.
3188 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3189 wj - FP or VSX register to hold 64-bit integers for direct moves.
3190 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3191 wl - Float register if we can do 32-bit signed int loads.
3192 wm - VSX register for ISA 2.07 direct move operations.
3193 wn - always NO_REGS.
3194 wr - GPR if 64-bit mode is permitted.
3195 ws - Register class to do ISA 2.06 DF operations.
3196 wt - VSX register for TImode in VSX registers.
3197 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3198 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3199 ww - Register class to do SF conversions in with VSX operations.
3200 wx - Float register if we can do 32-bit int stores.
3201 wy - Register class to do ISA 2.07 SF operations.
3202 wz - Float register if we can do 32-bit unsigned int loads.
3203 wH - Altivec register if SImode is allowed in VSX registers.
3204 wI - VSX register if SImode is allowed in VSX registers.
3205 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3206 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3207
3208 if (TARGET_HARD_FLOAT)
3209 {
3210 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3211 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3212 }
3213
3214 if (TARGET_VSX)
3215 {
3216 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3217 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3218 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3219 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3220 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3221 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3222 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3223 }
3224
3225 /* Add conditional constraints based on various options, to allow us to
3226 collapse multiple insn patterns. */
3227 if (TARGET_ALTIVEC)
3228 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3229
3230 if (TARGET_MFPGPR) /* DFmode */
3231 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3232
3233 if (TARGET_LFIWAX)
3234 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3235
3236 if (TARGET_DIRECT_MOVE)
3237 {
3238 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3239 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3240 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3241 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3242 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3243 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3244 }
3245
3246 if (TARGET_POWERPC64)
3247 {
3248 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3249 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3250 }
3251
3252 if (TARGET_P8_VECTOR) /* SFmode */
3253 {
3254 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3255 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3256 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3257 }
3258 else if (TARGET_VSX)
3259 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3260
3261 if (TARGET_STFIWX)
3262 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3263
3264 if (TARGET_LFIWZX)
3265 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3266
3267 if (TARGET_FLOAT128_TYPE)
3268 {
3269 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3270 if (FLOAT128_IEEE_P (TFmode))
3271 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3272 }
3273
3274 if (TARGET_P9_VECTOR)
3275 {
3276 /* Support for new D-form instructions. */
3277 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3278
3279 /* Support for ISA 3.0 (power9) vectors. */
3280 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3281 }
3282
3283 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3284 if (TARGET_DIRECT_MOVE_128)
3285 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3286
3287 /* Support small integers in VSX registers. */
3288 if (TARGET_P8_VECTOR)
3289 {
3290 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3291 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3292 if (TARGET_P9_VECTOR)
3293 {
3294 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3295 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3296 }
3297 }
3298
3299 /* Set up the reload helper and direct move functions. */
3300 if (TARGET_VSX || TARGET_ALTIVEC)
3301 {
3302 if (TARGET_64BIT)
3303 {
3304 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3305 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3306 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3307 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3308 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3309 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3310 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3311 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3312 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3313 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3314 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3315 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3316 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3317 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3318 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3319 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3320 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3321 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3322 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3323 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3324
3325 if (FLOAT128_VECTOR_P (KFmode))
3326 {
3327 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3328 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3329 }
3330
3331 if (FLOAT128_VECTOR_P (TFmode))
3332 {
3333 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3334 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3335 }
3336
3337 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3338 available. */
3339 if (TARGET_NO_SDMODE_STACK)
3340 {
3341 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3342 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3343 }
3344
3345 if (TARGET_VSX)
3346 {
3347 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3348 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3349 }
3350
3351 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3352 {
3353 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3354 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3355 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3356 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3357 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3358 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3359 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3360 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3361 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3362
3363 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3364 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3365 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3366 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3367 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3368 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3369 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3370 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3371 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3372
3373 if (FLOAT128_VECTOR_P (KFmode))
3374 {
3375 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3376 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3377 }
3378
3379 if (FLOAT128_VECTOR_P (TFmode))
3380 {
3381 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3382 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3383 }
3384 }
3385 }
3386 else
3387 {
3388 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3389 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3390 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3391 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3392 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3393 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3394 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3395 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3396 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3397 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3398 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3399 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3400 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3401 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3402 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3403 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3404 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3405 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3406 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3407 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3408
3409 if (FLOAT128_VECTOR_P (KFmode))
3410 {
3411 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3412 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3413 }
3414
3415 if (FLOAT128_IEEE_P (TFmode))
3416 {
3417 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3418 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3419 }
3420
3421 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3422 available. */
3423 if (TARGET_NO_SDMODE_STACK)
3424 {
3425 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3426 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3427 }
3428
3429 if (TARGET_VSX)
3430 {
3431 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3432 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3433 }
3434
3435 if (TARGET_DIRECT_MOVE)
3436 {
3437 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3438 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3439 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3440 }
3441 }
3442
3443 reg_addr[DFmode].scalar_in_vmx_p = true;
3444 reg_addr[DImode].scalar_in_vmx_p = true;
3445
3446 if (TARGET_P8_VECTOR)
3447 {
3448 reg_addr[SFmode].scalar_in_vmx_p = true;
3449 reg_addr[SImode].scalar_in_vmx_p = true;
3450
3451 if (TARGET_P9_VECTOR)
3452 {
3453 reg_addr[HImode].scalar_in_vmx_p = true;
3454 reg_addr[QImode].scalar_in_vmx_p = true;
3455 }
3456 }
3457 }
3458
3459 /* Precalculate HARD_REGNO_NREGS. */
3460 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3461 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3462 rs6000_hard_regno_nregs[m][r]
3463 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3464
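/* A hedged lookup sketch (editorial, not part of this file): the cached
table avoids calling rs6000_hard_regno_nregs_internal on every query.
GP_REG_FIRST is register 0 on this port. */
#if 0
int di_in_gpr = rs6000_hard_regno_nregs[DImode][GP_REG_FIRST];
/* 2 on 32-bit targets, 1 when TARGET_POWERPC64 holds. */
#endif
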
3465 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3466 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3467 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3468 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3469 rs6000_hard_regno_mode_ok_p[m][r] = true;
3470
3471 /* Precalculate CLASS_MAX_NREGS sizes. */
3472 for (c = 0; c < LIM_REG_CLASSES; ++c)
3473 {
3474 int reg_size;
3475
3476 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3477 reg_size = UNITS_PER_VSX_WORD;
3478
3479 else if (c == ALTIVEC_REGS)
3480 reg_size = UNITS_PER_ALTIVEC_WORD;
3481
3482 else if (c == FLOAT_REGS)
3483 reg_size = UNITS_PER_FP_WORD;
3484
3485 else
3486 reg_size = UNITS_PER_WORD;
3487
3488 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3489 {
3490 machine_mode m2 = (machine_mode)m;
3491 int reg_size2 = reg_size;
3492
3493 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3494 in VSX. */
3495 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3496 reg_size2 = UNITS_PER_FP_WORD;
3497
3498 rs6000_class_max_nregs[m][c]
3499 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3500 }
3501 }
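/* A hedged numeric check (editorial sketch, not part of this file) of the
round-up division above: the register count is the mode size divided by
the register size, rounded up. */
#if 0
int n_fpr = (16 + 8 - 1) / 8;   /* V2DF in 8-byte FLOAT_REGS: 2 regs. */
int n_vsx = (16 + 16 - 1) / 16; /* V2DF in 16-byte VSX_REGS: 1 reg. */
#endif
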
3502
3503 /* Calculate which modes to automatically generate code to use the
3504 reciprocal divide and square root instructions. In the future, possibly
3505 automatically generate the instructions even if the user did not specify
3506 -mrecip. The double precision reciprocal sqrt estimate on older machines
3507 is not accurate enough. */
3508 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3509 if (TARGET_FRES)
3510 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3511 if (TARGET_FRE)
3512 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3513 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3514 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3515 if (VECTOR_UNIT_VSX_P (V2DFmode))
3516 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3517
3518 if (TARGET_FRSQRTES)
3519 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3520 if (TARGET_FRSQRTE)
3521 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3522 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3523 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3524 if (VECTOR_UNIT_VSX_P (V2DFmode))
3525 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3526
3527 if (rs6000_recip_control)
3528 {
3529 if (!flag_finite_math_only)
3530 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3531 "-ffast-math");
3532 if (flag_trapping_math)
3533 warning (0, "%qs requires %qs or %qs", "-mrecip",
3534 "-fno-trapping-math", "-ffast-math");
3535 if (!flag_reciprocal_math)
3536 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3537 "-ffast-math");
3538 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3539 {
3540 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3541 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3542 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3543
3544 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3545 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3546 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3547
3548 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3549 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3550 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3551
3552 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3553 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3554 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3555
3556 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3557 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3558 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3559
3560 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3561 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3562 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3563
3564 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3565 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3566 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3567
3568 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3569 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3570 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3571 }
3572 }
3573
3574 /* Update the addr mask bits in reg_addr to help secondary reload and the
3575 legitimate address (GO_IF_LEGITIMATE_ADDRESS) support figure out the
3576 appropriate addressing to use. */
3577 rs6000_setup_reg_addr_masks ();
3578
3579 if (global_init_p || TARGET_DEBUG_TARGET)
3580 {
3581 if (TARGET_DEBUG_REG)
3582 rs6000_debug_reg_global ();
3583
3584 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3585 fprintf (stderr,
3586 "SImode variable mult cost = %d\n"
3587 "SImode constant mult cost = %d\n"
3588 "SImode short constant mult cost = %d\n"
3589 "DImode multipliciation cost = %d\n"
3590 "SImode division cost = %d\n"
3591 "DImode division cost = %d\n"
3592 "Simple fp operation cost = %d\n"
3593 "DFmode multiplication cost = %d\n"
3594 "SFmode division cost = %d\n"
3595 "DFmode division cost = %d\n"
3596 "cache line size = %d\n"
3597 "l1 cache size = %d\n"
3598 "l2 cache size = %d\n"
3599 "simultaneous prefetches = %d\n"
3600 "\n",
3601 rs6000_cost->mulsi,
3602 rs6000_cost->mulsi_const,
3603 rs6000_cost->mulsi_const9,
3604 rs6000_cost->muldi,
3605 rs6000_cost->divsi,
3606 rs6000_cost->divdi,
3607 rs6000_cost->fp,
3608 rs6000_cost->dmul,
3609 rs6000_cost->sdiv,
3610 rs6000_cost->ddiv,
3611 rs6000_cost->cache_line_size,
3612 rs6000_cost->l1_cache_size,
3613 rs6000_cost->l2_cache_size,
3614 rs6000_cost->simultaneous_prefetches);
3615 }
3616 }
3617
3618 #if TARGET_MACHO
3619 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3620
3621 static void
3622 darwin_rs6000_override_options (void)
3623 {
3624 /* The Darwin ABI always includes AltiVec, so it can't be (validly) turned
3625 off. */
3626 rs6000_altivec_abi = 1;
3627 TARGET_ALTIVEC_VRSAVE = 1;
3628 rs6000_current_abi = ABI_DARWIN;
3629
3630 if (DEFAULT_ABI == ABI_DARWIN
3631 && TARGET_64BIT)
3632 darwin_one_byte_bool = 1;
3633
3634 if (TARGET_64BIT && ! TARGET_POWERPC64)
3635 {
3636 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3637 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3638 }
3639 if (flag_mkernel)
3640 {
3641 rs6000_default_long_calls = 1;
3642 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3643 }
3644
3645 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3646 Altivec. */
3647 if (!flag_mkernel && !flag_apple_kext
3648 && TARGET_64BIT
3649 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3650 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3651
3652 /* Unless the user (not the configurer) has explicitly overridden
3653 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3654 G4 unless targeting the kernel. */
3655 if (!flag_mkernel
3656 && !flag_apple_kext
3657 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3658 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3659 && ! global_options_set.x_rs6000_cpu_index)
3660 {
3661 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3662 }
3663 }
3664 #endif
3665
3666 /* If not otherwise specified by a target, make 'long double' equivalent to
3667 'double'. */
3668
3669 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3670 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3671 #endif
3672
3673 /* Return the builtin mask of the various options that could affect which
3674 builtins are available. In the past we used target_flags, but we've run out
3675 of bits, and some options are no longer in target_flags. */
3676
3677 HOST_WIDE_INT
3678 rs6000_builtin_mask_calculate (void)
3679 {
3680 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3681 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3682 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3683 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3684 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3685 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3686 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3687 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3688 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3689 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3690 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3691 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3692 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3693 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3694 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3695 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3696 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3697 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3698 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3699 | ((TARGET_LONG_DOUBLE_128
3700 && TARGET_HARD_FLOAT
3701 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3702 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3703 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3704 }
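/* A hedged usage sketch (editorial, not part of this file): callers test
individual RS6000_BTM_* bits of the mask computed above to decide whether
a given builtin may be expanded. */
#if 0
HOST_WIDE_INT bmask = rs6000_builtin_mask_calculate ();
bool altivec_builtins_ok = (bmask & RS6000_BTM_ALTIVEC) != 0;
#endif
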
3705
3706 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3707 to clobber the XER[CA] bit because clobbering that bit without telling
3708 the compiler worked just fine with versions of GCC before GCC 5, and
3709 breaking a lot of older code in ways that are hard to track down is
3710 not such a great idea. */
3711
3712 static rtx_insn *
3713 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3714 vec<const char *> &/*constraints*/,
3715 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3716 {
3717 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3718 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3719 return NULL;
3720 }
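/* A hedged illustration (editorial, not part of this file) of why the hook
above clobbers CA: user asm such as "addic" writes XER[CA] without any
declared clobber, and such code predates GCC 5's carry-bit tracking. */
#if 0
long r, x = 42;
__asm__ ("addic %0,%1,-1" : "=r" (r) : "r" (x)); /* silently sets XER[CA] */
#endif
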
3721
3722 /* Override command line options.
3723
3724 Combine build-specific configuration information with options
3725 specified on the command line to set various state variables which
3726 influence code generation, optimization, and expansion of built-in
3727 functions. Assure that command-line configuration preferences are
3728 compatible with each other and with the build configuration; issue
3729 warnings while adjusting configuration or error messages while
3730 rejecting configuration.
3731
3732 Upon entry to this function:
3733
3734 This function is called once at the beginning of
3735 compilation, and then again at the start and end of compiling
3736 each section of code that has a different configuration, as
3737 indicated, for example, by adding the
3738
3739 __attribute__((__target__("cpu=power9")))
3740
3741 qualifier to a function definition or, for example, by bracketing
3742 code between
3743
3744 #pragma GCC target("altivec")
3745
3746 and
3747
3748 #pragma GCC reset_options
3749
3750 directives. Parameter global_init_p is true for the initial
3751 invocation, which initializes global variables, and false for all
3752 subsequent invocations.
3753
3754
3755 Various global state information is assumed to be valid. This
3756 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3757 default CPU specified at build configure time, TARGET_DEFAULT,
3758 representing the default set of option flags for the default
3759 target, and global_options_set.x_rs6000_isa_flags, representing
3760 which options were requested on the command line.
3761
3762 Upon return from this function:
3763
3764 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3765 was set by name on the command line. Additionally, if certain
3766 attributes are automatically enabled or disabled by this function
3767 in order to assure compatibility between options and
3768 configuration, the flags associated with those attributes are
3769 also set. By setting these "explicit bits", we avoid the risk
3770 that other code might accidentally overwrite these particular
3771 attributes with "default values".
3772
3773 The various bits of rs6000_isa_flags are set to indicate the
3774 target options that have been selected for the most current
3775 compilation efforts. This has the effect of also turning on the
3776 associated TARGET_XXX values since these are macros which are
3777 generally defined to test the corresponding bit of the
3778 rs6000_isa_flags variable.
3779
3780 The variable rs6000_builtin_mask is set to represent the target
3781 options for the most current compilation efforts, consistent with
3782 the current contents of rs6000_isa_flags. This variable controls
3783 expansion of built-in functions.
3784
3785 Various other global variables and fields of global structures
3786 (over 50 in all) are initialized to reflect the desired options
3787 for the most current compilation efforts. */
3788
3789 static bool
3790 rs6000_option_override_internal (bool global_init_p)
3791 {
3792 bool ret = true;
3793
3794 HOST_WIDE_INT set_masks;
3795 HOST_WIDE_INT ignore_masks;
3796 int cpu_index = -1;
3797 int tune_index;
3798 struct cl_target_option *main_target_opt
3799 = ((global_init_p || target_option_default_node == NULL)
3800 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3801
3802 /* Print defaults. */
3803 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3804 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3805
3806 /* Remember the explicit arguments. */
3807 if (global_init_p)
3808 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3809
3810 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3811 library functions, so warn about it. The flag may be useful for
3812 performance studies from time to time though, so don't disable it
3813 entirely. */
3814 if (global_options_set.x_rs6000_alignment_flags
3815 && rs6000_alignment_flags == MASK_ALIGN_POWER
3816 && DEFAULT_ABI == ABI_DARWIN
3817 && TARGET_64BIT)
3818 warning (0, "%qs is not supported for 64-bit Darwin;"
3819 " it is incompatible with the installed C and C++ libraries",
3820 "-malign-power");
3821
3822 /* Numerous experiments show that IRA-based loop pressure
3823 calculation works better for RTL loop invariant motion on targets
3824 with enough (>= 32) registers. It is an expensive optimization,
3825 so it is enabled only when optimizing for peak performance. */
3826 if (optimize >= 3 && global_init_p
3827 && !global_options_set.x_flag_ira_loop_pressure)
3828 flag_ira_loop_pressure = 1;
3829
3830 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3831 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3832 options were already specified. */
3833 if (flag_sanitize & SANITIZE_USER_ADDRESS
3834 && !global_options_set.x_flag_asynchronous_unwind_tables)
3835 flag_asynchronous_unwind_tables = 1;
3836
3837 /* Set the pointer size. */
3838 if (TARGET_64BIT)
3839 {
3840 rs6000_pmode = DImode;
3841 rs6000_pointer_size = 64;
3842 }
3843 else
3844 {
3845 rs6000_pmode = SImode;
3846 rs6000_pointer_size = 32;
3847 }
3848
3849 /* Some OSs don't support saving the high part of 64-bit registers on context
3850 switch. Other OSs don't support saving Altivec registers. On those OSs,
3851 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3852 if the user wants either, the user must explicitly specify them and we
3853 won't interfere with the user's specification. */
3854
3855 set_masks = POWERPC_MASKS;
3856 #ifdef OS_MISSING_POWERPC64
3857 if (OS_MISSING_POWERPC64)
3858 set_masks &= ~OPTION_MASK_POWERPC64;
3859 #endif
3860 #ifdef OS_MISSING_ALTIVEC
3861 if (OS_MISSING_ALTIVEC)
3862 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3863 | OTHER_VSX_VECTOR_MASKS);
3864 #endif
3865
3866 /* Don't let the processor default override options given explicitly. */
3867 set_masks &= ~rs6000_isa_flags_explicit;
3868
3869 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3870 the cpu in a target attribute or pragma, but did not specify a tuning
3871 option, use the cpu for the tuning option rather than the option specified
3872 with -mtune on the command line. Process a '--with-cpu' configuration
3873 request as an implicit --cpu. */
3874 if (rs6000_cpu_index >= 0)
3875 cpu_index = rs6000_cpu_index;
3876 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3877 cpu_index = main_target_opt->x_rs6000_cpu_index;
3878 else if (OPTION_TARGET_CPU_DEFAULT)
3879 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3880
3881 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3882 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3883 with those from the cpu, except for options that were explicitly set. If
3884 we don't have a cpu, do not override the target bits set in
3885 TARGET_DEFAULT. */
3886 if (cpu_index >= 0)
3887 {
3888 rs6000_cpu_index = cpu_index;
3889 rs6000_isa_flags &= ~set_masks;
3890 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3891 & set_masks);
3892 }
3893 else
3894 {
3895 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3896 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3897 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
3898 to using rs6000_isa_flags, we need to do the initialization here.
3899
3900 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3901 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3902 HOST_WIDE_INT flags;
3903 if (TARGET_DEFAULT)
3904 flags = TARGET_DEFAULT;
3905 else
3906 {
3907 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3908 const char *default_cpu = (!TARGET_POWERPC64
3909 ? "powerpc"
3910 : (BYTES_BIG_ENDIAN
3911 ? "powerpc64"
3912 : "powerpc64le"));
3913 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3914 flags = processor_target_table[default_cpu_index].target_enable;
3915 }
3916 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3917 }
3918
3919 if (rs6000_tune_index >= 0)
3920 tune_index = rs6000_tune_index;
3921 else if (cpu_index >= 0)
3922 rs6000_tune_index = tune_index = cpu_index;
3923 else
3924 {
3925 size_t i;
3926 enum processor_type tune_proc
3927 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3928
3929 tune_index = -1;
3930 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3931 if (processor_target_table[i].processor == tune_proc)
3932 {
3933 tune_index = i;
3934 break;
3935 }
3936 }
3937
3938 if (cpu_index >= 0)
3939 rs6000_cpu = processor_target_table[cpu_index].processor;
3940 else
3941 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3942
3943 gcc_assert (tune_index >= 0);
3944 rs6000_tune = processor_target_table[tune_index].processor;
3945
3946 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3947 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3948 || rs6000_cpu == PROCESSOR_PPCE5500)
3949 {
3950 if (TARGET_ALTIVEC)
3951 error ("AltiVec not supported in this target");
3952 }
3953
3954 /* If we are optimizing big endian systems for space, use the load/store
3955 multiple instructions. */
3956 if (BYTES_BIG_ENDIAN && optimize_size)
3957 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3958
3959 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3960 because the hardware doesn't support the instructions used in little
3961 endian mode, and they cause an alignment trap. The 750 does not cause an
3962 alignment trap (except when the target address is unaligned). */
3963
3964 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3965 {
3966 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3967 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3968 warning (0, "%qs is not supported on little endian systems",
3969 "-mmultiple");
3970 }
3971
3972 /* If little-endian, default to -mstrict-align on older processors.
3973 Testing for htm matches power8 and later. */
3974 if (!BYTES_BIG_ENDIAN
3975 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3976 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3977
3978 if (!rs6000_fold_gimple)
3979 fprintf (stderr,
3980 "gimple folding of rs6000 builtins has been disabled.\n");
3981
3982 /* Add some warnings for VSX. */
3983 if (TARGET_VSX)
3984 {
3985 const char *msg = NULL;
3986 if (!TARGET_HARD_FLOAT)
3987 {
3988 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3989 msg = N_("-mvsx requires hardware floating point");
3990 else
3991 {
3992 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3993 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3994 }
3995 }
3996 else if (TARGET_AVOID_XFORM > 0)
3997 msg = N_("-mvsx needs indexed addressing");
3998 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3999 & OPTION_MASK_ALTIVEC))
4000 {
4001 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4002 msg = N_("-mvsx and -mno-altivec are incompatible");
4003 else
4004 msg = N_("-mno-altivec disables vsx");
4005 }
4006
4007 if (msg)
4008 {
4009 warning (0, msg);
4010 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4011 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4012 }
4013 }
4014
4015 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4016 the -mcpu setting to enable options that conflict. */
4017 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4018 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4019 | OPTION_MASK_ALTIVEC
4020 | OPTION_MASK_VSX)) != 0)
4021 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4022 | OPTION_MASK_DIRECT_MOVE)
4023 & ~rs6000_isa_flags_explicit);
4024
4025 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4026 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4027
4028 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4029 off all of the options that depend on those flags. */
4030 ignore_masks = rs6000_disable_incompatible_switches ();
4031
4032 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4033 unless the user explicitly used the -mno-<option> to disable the code. */
4034 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4035 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4036 else if (TARGET_P9_MINMAX)
4037 {
4038 if (cpu_index >= 0)
4039 {
4040 if (cpu_index == PROCESSOR_POWER9)
4041 {
4042 /* Legacy behavior: allow -mcpu=power9 with certain
4043 capabilities explicitly disabled. */
4044 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4045 }
4046 else
4047 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4048 "for <xxx> less than power9", "-mcpu");
4049 }
4050 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4051 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4052 & rs6000_isa_flags_explicit))
4053 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4054 were explicitly cleared. */
4055 error ("%qs incompatible with explicitly disabled options",
4056 "-mpower9-minmax");
4057 else
4058 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4059 }
4060 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4061 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4062 else if (TARGET_VSX)
4063 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4064 else if (TARGET_POPCNTD)
4065 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4066 else if (TARGET_DFP)
4067 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4068 else if (TARGET_CMPB)
4069 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4070 else if (TARGET_FPRND)
4071 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4072 else if (TARGET_POPCNTB)
4073 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4074 else if (TARGET_ALTIVEC)
4075 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4076
4077 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4078 {
4079 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4080 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4081 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4082 }
4083
4084 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4085 {
4086 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4087 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4088 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4089 }
4090
4091 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4092 {
4093 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4094 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4095 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4096 }
4097
4098 if (TARGET_P8_VECTOR && !TARGET_VSX)
4099 {
4100 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4101 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4102 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4103 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4104 {
4105 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4106 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4107 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4108 }
4109 else
4110 {
4111 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4112 not explicit. */
4113 rs6000_isa_flags |= OPTION_MASK_VSX;
4114 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4115 }
4116 }
4117
4118 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4119 {
4120 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4121 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4122 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4123 }
4124
4125 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4126 silently turn off quad memory mode. */
4127 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4128 {
4129 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4130 warning (0, N_("-mquad-memory requires 64-bit mode"));
4131
4132 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4133 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4134
4135 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4136 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4137 }
4138
4139 /* Non-atomic quad memory load/store insns are disabled for little endian, since
4140 the words are reversed, but atomic operations can still be done by
4141 swapping the words. */
4142 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4143 {
4144 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4145 warning (0, N_("-mquad-memory is not available in little endian "
4146 "mode"));
4147
4148 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4149 }
4150
4151 /* Assume that if the user asked for normal quad memory instructions, they
4152 want the atomic versions as well, unless they explicitly told us not to use
4153 quad word atomic instructions. */
4154 if (TARGET_QUAD_MEMORY
4155 && !TARGET_QUAD_MEMORY_ATOMIC
4156 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4157 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4158
4159 /* If we can shrink-wrap the TOC register save separately, then use
4160 -msave-toc-indirect unless explicitly disabled. */
4161 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4162 && flag_shrink_wrap_separate
4163 && optimize_function_for_speed_p (cfun))
4164 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4165
4166 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4167 generating power8 instructions. Power9 does not optimize power8 fusion
4168 cases. */
4169 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4170 {
4171 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4172 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4173 else
4174 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4175 }
4176
4177 /* Setting additional fusion flags turns on base fusion. */
4178 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4179 {
4180 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4181 {
4182 if (TARGET_P8_FUSION_SIGN)
4183 error ("%qs requires %qs", "-mpower8-fusion-sign",
4184 "-mpower8-fusion");
4185
4186 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4187 }
4188 else
4189 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4190 }
4191
4192 /* Power8 does not fuse sign extended loads with the addis. If we are
4193 optimizing at high levels for speed, convert a sign extended load into a
4194 zero extending load, and an explicit sign extension. */
4195 if (TARGET_P8_FUSION
4196 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4197 && optimize_function_for_speed_p (cfun)
4198 && optimize >= 3)
4199 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4200
4201 /* ISA 3.0 vector instructions include ISA 2.07. */
4202 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4203 {
4204 /* We prefer not to mention undocumented options in
4205 error messages. However, if users have managed to select
4206 power9-vector without selecting power8-vector, they
4207 already know about undocumented flags. */
4208 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4209 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4210 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4211 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4212 {
4213 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4214 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4215 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4216 }
4217 else
4218 {
4219 /* OPTION_MASK_P9_VECTOR is explicit and
4220 OPTION_MASK_P8_VECTOR is not explicit. */
4221 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4222 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4223 }
4224 }
4225
4226 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4227 support. If we only have ISA 2.06 support, and the user did not specify
4228 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4229 but we don't enable the full vectorization support. */
4230 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4231 TARGET_ALLOW_MOVMISALIGN = 1;
4232
4233 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4234 {
4235 if (TARGET_ALLOW_MOVMISALIGN > 0
4236 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4237 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4238
4239 TARGET_ALLOW_MOVMISALIGN = 0;
4240 }
4241
4242 /* Determine when unaligned vector accesses are permitted, and when
4243 they are preferred over masked Altivec loads. Note that if
4244 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4245 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4246 not true. */
4247 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4248 {
4249 if (!TARGET_VSX)
4250 {
4251 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4252 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4253
4254 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4255 }
4256
4257 else if (!TARGET_ALLOW_MOVMISALIGN)
4258 {
4259 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4260 error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4261 "-mallow-movmisalign");
4262
4263 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4264 }
4265 }
4266
4267 /* Use long double size to select the appropriate long double. We use
4268 TYPE_PRECISION to differentiate the 3 different long double types. We map
4269 128 into the precision used for TFmode. */
4270 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4271 ? 64
4272 : FLOAT_PRECISION_TFmode);
4273
4274 /* Set long double size before the IEEE 128-bit tests. */
4275 if (!global_options_set.x_rs6000_long_double_type_size)
4276 {
4277 if (main_target_opt != NULL
4278 && (main_target_opt->x_rs6000_long_double_type_size
4279 != default_long_double_size))
4280 error ("target attribute or pragma changes long double size");
4281 else
4282 rs6000_long_double_type_size = default_long_double_size;
4283 }
4284 else if (rs6000_long_double_type_size == 128)
4285 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4286 else if (global_options_set.x_rs6000_ieeequad)
4287 {
4288 if (global_options.x_rs6000_ieeequad)
4289 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4290 else
4291 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4292 }
4293
4294 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4295 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4296 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4297 those systems will not pick up this default. Warn if the user changes the
4298 default unless -Wno-psabi. */
4299 if (!global_options_set.x_rs6000_ieeequad)
4300 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4301
4302 else
4303 {
4304 if (global_options.x_rs6000_ieeequad
4305 && (!TARGET_POPCNTD || !TARGET_VSX))
4306 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4307
4308 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4309 {
4310 static bool warned_change_long_double;
4311 if (!warned_change_long_double)
4312 {
4313 warned_change_long_double = true;
4314 if (TARGET_IEEEQUAD)
4315 warning (OPT_Wpsabi, "using IEEE extended precision long double");
4316 else
4317 warning (OPT_Wpsabi, "using IBM extended precision long double");
4318 }
4319 }
4320 }
4321
4322 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4323 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4324 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4325 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4326 the keyword and the type. */
4327 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4328
4329 /* IEEE 128-bit floating point requires VSX support. */
4330 if (TARGET_FLOAT128_KEYWORD)
4331 {
4332 if (!TARGET_VSX)
4333 {
4334 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4335 error ("%qs requires VSX support", "-mfloat128");
4336
4337 TARGET_FLOAT128_TYPE = 0;
4338 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4339 | OPTION_MASK_FLOAT128_HW);
4340 }
4341 else if (!TARGET_FLOAT128_TYPE)
4342 {
4343 TARGET_FLOAT128_TYPE = 1;
4344 warning (0, "the -mfloat128 option may not be fully supported");
4345 }
4346 }
4347
4348 /* Enable the __float128 keyword under Linux by default. */
4349 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4350 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4351 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4352
4353 /* If we are supporting the float128 type and have full ISA 3.0 support,
4354 enable -mfloat128-hardware by default. However, don't enable the
4355 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4356 because sometimes the compiler wants to put things in an integer
4357 container, and if we don't have __int128 support, it is impossible. */
4358 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4359 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4360 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4361 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4362
4363 if (TARGET_FLOAT128_HW
4364 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4365 {
4366 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4367 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4368
4369 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4370 }
4371
4372 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4373 {
4374 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4375 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4376
4377 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4378 }
4379
4380 /* Print the options after updating the defaults. */
4381 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4382 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4383
4384 /* E500mc does "better" if we inline more aggressively. Respect the
4385 user's opinion, though. */
4386 if (rs6000_block_move_inline_limit == 0
4387 && (rs6000_tune == PROCESSOR_PPCE500MC
4388 || rs6000_tune == PROCESSOR_PPCE500MC64
4389 || rs6000_tune == PROCESSOR_PPCE5500
4390 || rs6000_tune == PROCESSOR_PPCE6500))
4391 rs6000_block_move_inline_limit = 128;
4392
4393 /* store_one_arg depends on expand_block_move to handle at least the
4394 size of reg_parm_stack_space. */
4395 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4396 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4397
4398 if (global_init_p)
4399 {
4400 /* If the appropriate debug option is enabled, replace the target hooks
4401 with debug versions that call the real version and then print
4402 debugging information. */
4403 if (TARGET_DEBUG_COST)
4404 {
4405 targetm.rtx_costs = rs6000_debug_rtx_costs;
4406 targetm.address_cost = rs6000_debug_address_cost;
4407 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4408 }
4409
4410 if (TARGET_DEBUG_ADDR)
4411 {
4412 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4413 targetm.legitimize_address = rs6000_debug_legitimize_address;
4414 rs6000_secondary_reload_class_ptr
4415 = rs6000_debug_secondary_reload_class;
4416 targetm.secondary_memory_needed
4417 = rs6000_debug_secondary_memory_needed;
4418 targetm.can_change_mode_class
4419 = rs6000_debug_can_change_mode_class;
4420 rs6000_preferred_reload_class_ptr
4421 = rs6000_debug_preferred_reload_class;
4422 rs6000_legitimize_reload_address_ptr
4423 = rs6000_debug_legitimize_reload_address;
4424 rs6000_mode_dependent_address_ptr
4425 = rs6000_debug_mode_dependent_address;
4426 }
4427
4428 if (rs6000_veclibabi_name)
4429 {
4430 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4431 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4432 else
4433 {
4434 error ("unknown vectorization library ABI type (%qs) for "
4435 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4436 ret = false;
4437 }
4438 }
4439 }
4440
4441 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4442 target attribute or pragma which automatically enables both options,
4443 unless the altivec ABI was set. This is set by default for 64-bit, but
4444 not for 32-bit. */
4445 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4446 {
4447 TARGET_FLOAT128_TYPE = 0;
4448 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4449 | OPTION_MASK_FLOAT128_KEYWORD)
4450 & ~rs6000_isa_flags_explicit);
4451 }
4452
4453 /* Enable Altivec ABI for AIX -maltivec. */
4454 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4455 {
4456 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4457 error ("target attribute or pragma changes AltiVec ABI");
4458 else
4459 rs6000_altivec_abi = 1;
4460 }
4461
4462 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4463 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4464 be explicitly overridden in either case. */
4465 if (TARGET_ELF)
4466 {
4467 if (!global_options_set.x_rs6000_altivec_abi
4468 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4469 {
4470 if (main_target_opt != NULL
4471 && !main_target_opt->x_rs6000_altivec_abi)
4472 error ("target attribute or pragma changes AltiVec ABI");
4473 else
4474 rs6000_altivec_abi = 1;
4475 }
4476 }
4477
4478 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4479 So far, the only darwin64 targets are also MACH-O. */
4480 if (TARGET_MACHO
4481 && DEFAULT_ABI == ABI_DARWIN
4482 && TARGET_64BIT)
4483 {
4484 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4485 error ("target attribute or pragma changes darwin64 ABI");
4486 else
4487 {
4488 rs6000_darwin64_abi = 1;
4489 /* Default to natural alignment, for better performance. */
4490 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4491 }
4492 }
4493
4494 /* Place FP constants in the constant pool instead of TOC
4495 if section anchors are enabled. */
4496 if (flag_section_anchors
4497 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4498 TARGET_NO_FP_IN_TOC = 1;
4499
4500 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4501 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4502
4503 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4504 SUBTARGET_OVERRIDE_OPTIONS;
4505 #endif
4506 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4507 SUBSUBTARGET_OVERRIDE_OPTIONS;
4508 #endif
4509 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4510 SUB3TARGET_OVERRIDE_OPTIONS;
4511 #endif
4512
4513 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4514 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4515
4516 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4517 && rs6000_tune != PROCESSOR_POWER5
4518 && rs6000_tune != PROCESSOR_POWER6
4519 && rs6000_tune != PROCESSOR_POWER7
4520 && rs6000_tune != PROCESSOR_POWER8
4521 && rs6000_tune != PROCESSOR_POWER9
4522 && rs6000_tune != PROCESSOR_PPCA2
4523 && rs6000_tune != PROCESSOR_CELL
4524 && rs6000_tune != PROCESSOR_PPC476);
4525 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4526 || rs6000_tune == PROCESSOR_POWER5
4527 || rs6000_tune == PROCESSOR_POWER7
4528 || rs6000_tune == PROCESSOR_POWER8);
4529 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4530 || rs6000_tune == PROCESSOR_POWER5
4531 || rs6000_tune == PROCESSOR_POWER6
4532 || rs6000_tune == PROCESSOR_POWER7
4533 || rs6000_tune == PROCESSOR_POWER8
4534 || rs6000_tune == PROCESSOR_POWER9
4535 || rs6000_tune == PROCESSOR_PPCE500MC
4536 || rs6000_tune == PROCESSOR_PPCE500MC64
4537 || rs6000_tune == PROCESSOR_PPCE5500
4538 || rs6000_tune == PROCESSOR_PPCE6500);
4539
4540 /* Allow debug switches to override the above settings. These are set to -1
4541 in rs6000.opt to indicate the user hasn't directly set the switch. */
4542 if (TARGET_ALWAYS_HINT >= 0)
4543 rs6000_always_hint = TARGET_ALWAYS_HINT;
4544
4545 if (TARGET_SCHED_GROUPS >= 0)
4546 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4547
4548 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4549 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4550
4551 rs6000_sched_restricted_insns_priority
4552 = (rs6000_sched_groups ? 1 : 0);
4553
4554 /* Handle -msched-costly-dep option. */
4555 rs6000_sched_costly_dep
4556 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4557
4558 if (rs6000_sched_costly_dep_str)
4559 {
4560 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4561 rs6000_sched_costly_dep = no_dep_costly;
4562 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4563 rs6000_sched_costly_dep = all_deps_costly;
4564 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4565 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4566 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4567 rs6000_sched_costly_dep = store_to_load_dep_costly;
4568 else
4569 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4570 atoi (rs6000_sched_costly_dep_str));
4571 }
4572
4573 /* Handle -minsert-sched-nops option. */
4574 rs6000_sched_insert_nops
4575 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4576
4577 if (rs6000_sched_insert_nops_str)
4578 {
4579 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4580 rs6000_sched_insert_nops = sched_finish_none;
4581 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4582 rs6000_sched_insert_nops = sched_finish_pad_groups;
4583 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4584 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4585 else
4586 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4587 atoi (rs6000_sched_insert_nops_str));
4588 }
4589
4590 /* Handle stack protector. */
4591 if (!global_options_set.x_rs6000_stack_protector_guard)
4592 #ifdef TARGET_THREAD_SSP_OFFSET
4593 rs6000_stack_protector_guard = SSP_TLS;
4594 #else
4595 rs6000_stack_protector_guard = SSP_GLOBAL;
4596 #endif
4597
4598 #ifdef TARGET_THREAD_SSP_OFFSET
4599 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4600 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4601 #endif
4602
4603 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4604 {
4605 char *endp;
4606 const char *str = rs6000_stack_protector_guard_offset_str;
4607
4608 errno = 0;
4609 long offset = strtol (str, &endp, 0);
4610 if (!*str || *endp || errno)
4611 error ("%qs is not a valid number in %qs", str,
4612 "-mstack-protector-guard-offset=");
4613
4614 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4615 || (TARGET_64BIT && (offset & 3)))
4616 error ("%qs is not a valid offset in %qs", str,
4617 "-mstack-protector-guard-offset=");
4618
4619 rs6000_stack_protector_guard_offset = offset;
4620 }
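/* A hedged standalone restatement (editorial sketch, not part of this
file) of the validation above: base 0 lets strtol accept decimal, octal
and 0x-prefixed hex, and the range check keeps the offset inside a signed
16-bit displacement field, word-aligned for 64-bit DS-form accesses. A
typical invocation combines -mstack-protector-guard=tls with
-mstack-protector-guard-reg= and -mstack-protector-guard-offset= (values
are target-specific). */
#if 0
static bool
guard_offset_ok (const char *str, bool is_64bit, long *out)
{
  char *endp;
  errno = 0;
  long offset = strtol (str, &endp, 0);
  if (!*str || *endp || errno)
    return false; /* not a number */
  if (offset < -0x8000 || offset > 0x7fff || (is_64bit && (offset & 3)))
    return false; /* does not fit the insn encoding */
  *out = offset;
  return true;
}
#endif
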
4621
4622 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4623 {
4624 const char *str = rs6000_stack_protector_guard_reg_str;
4625 int reg = decode_reg_name (str);
4626
4627 if (!IN_RANGE (reg, 1, 31))
4628 error ("%qs is not a valid base register in %qs", str,
4629 "-mstack-protector-guard-reg=");
4630
4631 rs6000_stack_protector_guard_reg = reg;
4632 }
4633
4634 if (rs6000_stack_protector_guard == SSP_TLS
4635 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4636 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4637
4638 if (global_init_p)
4639 {
4640 #ifdef TARGET_REGNAMES
4641 /* If the user desires alternate register names, copy in the
4642 alternate names now. */
4643 if (TARGET_REGNAMES)
4644 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4645 #endif
4646
4647 /* Set aix_struct_return last, after the ABI is determined.
4648 If -maix-struct-return or -msvr4-struct-return was explicitly
4649 used, don't override with the ABI default. */
4650 if (!global_options_set.x_aix_struct_return)
4651 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4652
4653 #if 0
4654 /* IBM XL compiler defaults to unsigned bitfields. */
4655 if (TARGET_XL_COMPAT)
4656 flag_signed_bitfields = 0;
4657 #endif
4658
4659 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4660 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4661
4662 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4663
4664 /* We can only guarantee the availability of DI pseudo-ops when
4665 assembling for 64-bit targets. */
4666 if (!TARGET_64BIT)
4667 {
4668 targetm.asm_out.aligned_op.di = NULL;
4669 targetm.asm_out.unaligned_op.di = NULL;
4670 }
4671
4672
4673 /* Set branch target alignment, if not optimizing for size. */
4674 if (!optimize_size)
4675 {
4676 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
4677 aligned 8-byte to avoid misprediction by the branch predictor. */
4678 if (rs6000_tune == PROCESSOR_TITAN
4679 || rs6000_tune == PROCESSOR_CELL)
4680 {
4681 if (flag_align_functions && !str_align_functions)
4682 str_align_functions = "8";
4683 if (flag_align_jumps && !str_align_jumps)
4684 str_align_jumps = "8";
4685 if (flag_align_loops && !str_align_loops)
4686 str_align_loops = "8";
4687 }
4688 if (rs6000_align_branch_targets)
4689 {
4690 if (flag_align_functions && !str_align_functions)
4691 str_align_functions = "16";
4692 if (flag_align_jumps && !str_align_jumps)
4693 str_align_jumps = "16";
4694 if (flag_align_loops && !str_align_loops)
4695 {
4696 can_override_loop_align = 1;
4697 str_align_loops = "16";
4698 }
4699 }
4700
4701 if (flag_align_jumps && !str_align_jumps)
4702 str_align_jumps = "16";
4703 if (flag_align_loops && !str_align_loops)
4704 str_align_loops = "16";
4705 }
4706
4707 /* Arrange to save and restore machine status around nested functions. */
4708 init_machine_status = rs6000_init_machine_status;
4709
4710 /* We should always be splitting complex arguments, but we can't break
4711 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4712 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4713 targetm.calls.split_complex_arg = NULL;
4714
4715 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4716 if (DEFAULT_ABI == ABI_AIX)
4717 targetm.calls.custom_function_descriptors = 0;
4718 }
4719
4720 /* Initialize rs6000_cost with the appropriate target costs. */
4721 if (optimize_size)
4722 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4723 else
4724 switch (rs6000_tune)
4725 {
4726 case PROCESSOR_RS64A:
4727 rs6000_cost = &rs64a_cost;
4728 break;
4729
4730 case PROCESSOR_MPCCORE:
4731 rs6000_cost = &mpccore_cost;
4732 break;
4733
4734 case PROCESSOR_PPC403:
4735 rs6000_cost = &ppc403_cost;
4736 break;
4737
4738 case PROCESSOR_PPC405:
4739 rs6000_cost = &ppc405_cost;
4740 break;
4741
4742 case PROCESSOR_PPC440:
4743 rs6000_cost = &ppc440_cost;
4744 break;
4745
4746 case PROCESSOR_PPC476:
4747 rs6000_cost = &ppc476_cost;
4748 break;
4749
4750 case PROCESSOR_PPC601:
4751 rs6000_cost = &ppc601_cost;
4752 break;
4753
4754 case PROCESSOR_PPC603:
4755 rs6000_cost = &ppc603_cost;
4756 break;
4757
4758 case PROCESSOR_PPC604:
4759 rs6000_cost = &ppc604_cost;
4760 break;
4761
4762 case PROCESSOR_PPC604e:
4763 rs6000_cost = &ppc604e_cost;
4764 break;
4765
4766 case PROCESSOR_PPC620:
4767 rs6000_cost = &ppc620_cost;
4768 break;
4769
4770 case PROCESSOR_PPC630:
4771 rs6000_cost = &ppc630_cost;
4772 break;
4773
4774 case PROCESSOR_CELL:
4775 rs6000_cost = &ppccell_cost;
4776 break;
4777
4778 case PROCESSOR_PPC750:
4779 case PROCESSOR_PPC7400:
4780 rs6000_cost = &ppc750_cost;
4781 break;
4782
4783 case PROCESSOR_PPC7450:
4784 rs6000_cost = &ppc7450_cost;
4785 break;
4786
4787 case PROCESSOR_PPC8540:
4788 case PROCESSOR_PPC8548:
4789 rs6000_cost = &ppc8540_cost;
4790 break;
4791
4792 case PROCESSOR_PPCE300C2:
4793 case PROCESSOR_PPCE300C3:
4794 rs6000_cost = &ppce300c2c3_cost;
4795 break;
4796
4797 case PROCESSOR_PPCE500MC:
4798 rs6000_cost = &ppce500mc_cost;
4799 break;
4800
4801 case PROCESSOR_PPCE500MC64:
4802 rs6000_cost = &ppce500mc64_cost;
4803 break;
4804
4805 case PROCESSOR_PPCE5500:
4806 rs6000_cost = &ppce5500_cost;
4807 break;
4808
4809 case PROCESSOR_PPCE6500:
4810 rs6000_cost = &ppce6500_cost;
4811 break;
4812
4813 case PROCESSOR_TITAN:
4814 rs6000_cost = &titan_cost;
4815 break;
4816
4817 case PROCESSOR_POWER4:
4818 case PROCESSOR_POWER5:
4819 rs6000_cost = &power4_cost;
4820 break;
4821
4822 case PROCESSOR_POWER6:
4823 rs6000_cost = &power6_cost;
4824 break;
4825
4826 case PROCESSOR_POWER7:
4827 rs6000_cost = &power7_cost;
4828 break;
4829
4830 case PROCESSOR_POWER8:
4831 rs6000_cost = &power8_cost;
4832 break;
4833
4834 case PROCESSOR_POWER9:
4835 rs6000_cost = &power9_cost;
4836 break;
4837
4838 case PROCESSOR_PPCA2:
4839 rs6000_cost = &ppca2_cost;
4840 break;
4841
4842 default:
4843 gcc_unreachable ();
4844 }
4845
4846 if (global_init_p)
4847 {
4848 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4849 rs6000_cost->simultaneous_prefetches,
4850 global_options.x_param_values,
4851 global_options_set.x_param_values);
4852 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4853 global_options.x_param_values,
4854 global_options_set.x_param_values);
4855 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4856 rs6000_cost->cache_line_size,
4857 global_options.x_param_values,
4858 global_options_set.x_param_values);
4859 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4860 global_options.x_param_values,
4861 global_options_set.x_param_values);
4862
4863 /* Increase loop peeling limits based on performance analysis. */
4864 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4865 global_options.x_param_values,
4866 global_options_set.x_param_values);
4867 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4868 global_options.x_param_values,
4869 global_options_set.x_param_values);
4870
4871 /* Use the 'model' -fsched-pressure algorithm by default. */
4872 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4873 SCHED_PRESSURE_MODEL,
4874 global_options.x_param_values,
4875 global_options_set.x_param_values);
4876
4877 /* If using typedef char *va_list, signal that
4878 __builtin_va_start (&ap, 0) can be optimized to
4879 ap = __builtin_next_arg (0). */
4880 if (DEFAULT_ABI != ABI_V4)
4881 targetm.expand_builtin_va_start = NULL;
4882 }
4883
4884 /* If not explicitly specified via option, decide whether to generate indexed
4885 load/store instructions. A value of -1 indicates that the
4886 initial value of this variable has not been overwritten. During
4887 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4888 if (TARGET_AVOID_XFORM == -1)
4889 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4890 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4891 need indexed accesses and the type used is the scalar type of the element
4892 being loaded or stored. */
4893 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4894 && !TARGET_ALTIVEC);
4895
4896 /* Set the -mrecip options. */
4897 if (rs6000_recip_name)
4898 {
4899 char *p = ASTRDUP (rs6000_recip_name);
4900 char *q;
4901 unsigned int mask, i;
4902 bool invert;
4903
4904 while ((q = strtok (p, ",")) != NULL)
4905 {
4906 p = NULL;
4907 if (*q == '!')
4908 {
4909 invert = true;
4910 q++;
4911 }
4912 else
4913 invert = false;
4914
4915 if (!strcmp (q, "default"))
4916 mask = ((TARGET_RECIP_PRECISION)
4917 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4918 else
4919 {
4920 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4921 if (!strcmp (q, recip_options[i].string))
4922 {
4923 mask = recip_options[i].mask;
4924 break;
4925 }
4926
4927 if (i == ARRAY_SIZE (recip_options))
4928 {
4929 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4930 invert = false;
4931 mask = 0;
4932 ret = false;
4933 }
4934 }
4935
4936 if (invert)
4937 rs6000_recip_control &= ~mask;
4938 else
4939 rs6000_recip_control |= mask;
4940 }
4941 }
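/* A hedged illustration of the parsing loop above, assuming a command line
   of -mrecip=default,!div: the first token ORs in RECIP_HIGH_PRECISION or
   RECIP_LOW_PRECISION depending on TARGET_RECIP_PRECISION, and the "!"
   prefix on the second token clears whatever mask recip_options maps "div"
   to. The sketch below is not compiled; recip_mask_for is a hypothetical
   helper standing in for the recip_options table lookup. */
#if 0
rs6000_recip_control |= (TARGET_RECIP_PRECISION
                         ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
rs6000_recip_control &= ~recip_mask_for ("div"); /* hypothetical helper */
#endif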
4942
4943 /* Set the builtin mask of the various options used that could affect which
4944 builtins were used. In the past we used target_flags, but we've run out
4945 of bits, and some options are no longer in target_flags. */
4946 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4947 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4948 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4949 rs6000_builtin_mask);
4950
4951 /* Initialize all of the registers. */
4952 rs6000_init_hard_regno_mode_ok (global_init_p);
4953
4954 /* Save the initial options in case the user uses function-specific options. */
4955 if (global_init_p)
4956 target_option_default_node = target_option_current_node
4957 = build_target_option_node (&global_options);
4958
4959 /* If not explicitly specified via option, decide whether to generate the
4960 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4961 if (TARGET_LINK_STACK == -1)
4962 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4963
4964 /* Deprecate use of -mno-speculate-indirect-jumps. */
4965 if (!rs6000_speculate_indirect_jumps)
4966 warning (0, "%qs is deprecated and not recommended in any circumstances",
4967 "-mno-speculate-indirect-jumps");
4968
4969 return ret;
4970 }
4971
4972 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4973 define the target cpu type. */
4974
4975 static void
4976 rs6000_option_override (void)
4977 {
4978 (void) rs6000_option_override_internal (true);
4979 }
4980
4981 \f
4982 /* Implement targetm.vectorize.builtin_mask_for_load. */
4983 static tree
4984 rs6000_builtin_mask_for_load (void)
4985 {
4986 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4987 if ((TARGET_ALTIVEC && !TARGET_VSX)
4988 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4989 return altivec_builtin_mask_for_load;
4990 else
4991 return 0;
4992 }
4993
4994 /* Implement LOOP_ALIGN. */
4995 align_flags
4996 rs6000_loop_align (rtx label)
4997 {
4998 basic_block bb;
4999 int ninsns;
5000
5001 /* Don't override loop alignment if -falign-loops was specified. */
5002 if (!can_override_loop_align)
5003 return align_loops;
5004
5005 bb = BLOCK_FOR_INSN (label);
5006 ninsns = num_loop_insns (bb->loop_father);
5007
5008 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
5009 if (ninsns > 4 && ninsns <= 8
5010 && (rs6000_tune == PROCESSOR_POWER4
5011 || rs6000_tune == PROCESSOR_POWER5
5012 || rs6000_tune == PROCESSOR_POWER6
5013 || rs6000_tune == PROCESSOR_POWER7
5014 || rs6000_tune == PROCESSOR_POWER8))
5015 return align_flags (5);
5016 else
5017 return align_loops;
5018 }
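/* A hedged note on the return value above: align_flags takes a log2 amount,
   so align_flags (5) requests 1 << 5 == 32-byte alignment, one icache
   sector on the processors listed; every other loop keeps the
   -falign-loops default. */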
5019
5020 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5021 after applying N iterations. This routine does not determine how many
5022 iterations are required to reach the desired alignment. */
5023
5024 static bool
5025 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5026 {
5027 if (is_packed)
5028 return false;
5029
5030 if (TARGET_32BIT)
5031 {
5032 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5033 return true;
5034
5035 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5036 return true;
5037
5038 return false;
5039 }
5040 else
5041 {
5042 if (TARGET_MACHO)
5043 return false;
5044
5045 /* Assume that all other types are naturally aligned. CHECKME! */
5046 return true;
5047 }
5048 }
5049
5050 /* Return true if the vector misalignment factor is supported by the
5051 target. */
5052 static bool
5053 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5054 const_tree type,
5055 int misalignment,
5056 bool is_packed)
5057 {
5058 if (TARGET_VSX)
5059 {
5060 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5061 return true;
5062
5063 /* Return false if the movmisalign pattern is not supported for this mode. */
5064 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5065 return false;
5066
5067 if (misalignment == -1)
5068 {
5069 /* Misalignment factor is unknown at compile time but we know
5070 it's word aligned. */
5071 if (rs6000_vector_alignment_reachable (type, is_packed))
5072 {
5073 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5074
5075 if (element_size == 64 || element_size == 32)
5076 return true;
5077 }
5078
5079 return false;
5080 }
5081
5082 /* VSX supports word-aligned vectors. */
5083 if (misalignment % 4 == 0)
5084 return true;
5085 }
5086 return false;
5087 }
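/* Hedged examples of the VSX checks above: a known misalignment of 8 bytes
   is supported (8 % 4 == 0, word aligned) while 2 bytes is not; an unknown
   misalignment (-1) is accepted only when the type can reach natural
   alignment and its element size is 32 or 64 bits. */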
5088
5089 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5090 static int
5091 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5092 tree vectype, int misalign)
5093 {
5094 unsigned elements;
5095 tree elem_type;
5096
5097 switch (type_of_cost)
5098 {
5099 case scalar_stmt:
5100 case scalar_load:
5101 case scalar_store:
5102 case vector_stmt:
5103 case vector_load:
5104 case vector_store:
5105 case vec_to_scalar:
5106 case scalar_to_vec:
5107 case cond_branch_not_taken:
5108 return 1;
5109
5110 case vec_perm:
5111 if (TARGET_VSX)
5112 return 3;
5113 else
5114 return 1;
5115
5116 case vec_promote_demote:
5117 if (TARGET_VSX)
5118 return 4;
5119 else
5120 return 1;
5121
5122 case cond_branch_taken:
5123 return 3;
5124
5125 case unaligned_load:
5126 case vector_gather_load:
5127 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5128 return 1;
5129
5130 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5131 {
5132 elements = TYPE_VECTOR_SUBPARTS (vectype);
5133 if (elements == 2)
5134 /* Double word aligned. */
5135 return 2;
5136
5137 if (elements == 4)
5138 {
5139 switch (misalign)
5140 {
5141 case 8:
5142 /* Double word aligned. */
5143 return 2;
5144
5145 case -1:
5146 /* Unknown misalignment. */
5147 case 4:
5148 case 12:
5149 /* Word aligned. */
5150 return 22;
5151
5152 default:
5153 gcc_unreachable ();
5154 }
5155 }
5156 }
5157
5158 if (TARGET_ALTIVEC)
5159 /* Misaligned loads are not supported. */
5160 gcc_unreachable ();
5161
5162 return 2;
5163
5164 case unaligned_store:
5165 case vector_scatter_store:
5166 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5167 return 1;
5168
5169 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5170 {
5171 elements = TYPE_VECTOR_SUBPARTS (vectype);
5172 if (elements == 2)
5173 /* Double word aligned. */
5174 return 2;
5175
5176 if (elements == 4)
5177 {
5178 switch (misalign)
5179 {
5180 case 8:
5181 /* Double word aligned. */
5182 return 2;
5183
5184 case -1:
5185 /* Unknown misalignment. */
5186 case 4:
5187 case 12:
5188 /* Word aligned. */
5189 return 23;
5190
5191 default:
5192 gcc_unreachable ();
5193 }
5194 }
5195 }
5196
5197 if (TARGET_ALTIVEC)
5198 /* Misaligned stores are not supported. */
5199 gcc_unreachable ();
5200
5201 return 2;
5202
5203 case vec_construct:
5204 /* This is a rough approximation assuming non-constant elements
5205 constructed into a vector via element insertion. FIXME:
5206 vec_construct is not granular enough for uniformly good
5207 decisions. If the initialization is a splat, this is
5208 cheaper than we estimate. Improve this someday. */
5209 elem_type = TREE_TYPE (vectype);
5210 /* Scalar 32-bit floats loaded into registers are stored as double
5211 precision, so we need 2 permutes, 2 converts, and 1 merge
5212 to construct a vector of short floats from them. */
5213 if (SCALAR_FLOAT_TYPE_P (elem_type)
5214 && TYPE_PRECISION (elem_type) == 32)
5215 return 5;
5216 /* On POWER9, integer vector types are built up in GPRs and then
5217 use a direct move (2 cycles). For POWER8 this is even worse,
5218 as we need two direct moves and a merge, and the direct moves
5219 are five cycles. */
5220 else if (INTEGRAL_TYPE_P (elem_type))
5221 {
5222 if (TARGET_P9_VECTOR)
5223 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5224 else
5225 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5226 }
5227 else
5228 /* V2DFmode doesn't need a direct move. */
5229 return 2;
5230
5231 default:
5232 gcc_unreachable ();
5233 }
5234 }
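/* Hedged worked examples of the vec_construct arithmetic above: a V4SImode
   constructor costs 4 - 1 + 2 == 5 on POWER9 and 4 - 1 + 5 == 8 otherwise
   (slower direct moves); a V4SFmode constructor is a flat 5 (2 permutes,
   2 converts, 1 merge) and V2DFmode a flat 2. */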
5235
5236 /* Implement targetm.vectorize.preferred_simd_mode. */
5237
5238 static machine_mode
5239 rs6000_preferred_simd_mode (scalar_mode mode)
5240 {
5241 if (TARGET_VSX)
5242 switch (mode)
5243 {
5244 case E_DFmode:
5245 return V2DFmode;
5246 default:;
5247 }
5248 if (TARGET_ALTIVEC || TARGET_VSX)
5249 switch (mode)
5250 {
5251 case E_SFmode:
5252 return V4SFmode;
5253 case E_TImode:
5254 return V1TImode;
5255 case E_DImode:
5256 return V2DImode;
5257 case E_SImode:
5258 return V4SImode;
5259 case E_HImode:
5260 return V8HImode;
5261 case E_QImode:
5262 return V16QImode;
5263 default:;
5264 }
5265 return word_mode;
5266 }
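/* Hedged examples of the mapping above: with VSX, DFmode vectorizes as
   V2DFmode (two 64-bit lanes in a 128-bit register); with AltiVec or VSX,
   SFmode maps to V4SFmode and QImode to V16QImode; any mode not listed
   falls back to word_mode, i.e. no vectorization. */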
5267
5268 typedef struct _rs6000_cost_data
5269 {
5270 struct loop *loop_info;
5271 unsigned cost[3];
5272 } rs6000_cost_data;
5273
5274 /* Test for likely overcommitment of vector hardware resources. If a
5275 loop iteration is relatively large, and too large a percentage of
5276 instructions in the loop are vectorized, the cost model may not
5277 adequately reflect delays from unavailable vector resources.
5278 Penalize the loop body cost for this case. */
5279
5280 static void
5281 rs6000_density_test (rs6000_cost_data *data)
5282 {
5283 const int DENSITY_PCT_THRESHOLD = 85;
5284 const int DENSITY_SIZE_THRESHOLD = 70;
5285 const int DENSITY_PENALTY = 10;
5286 struct loop *loop = data->loop_info;
5287 basic_block *bbs = get_loop_body (loop);
5288 int nbbs = loop->num_nodes;
5289 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5290 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5291 int i, density_pct;
5292
5293 for (i = 0; i < nbbs; i++)
5294 {
5295 basic_block bb = bbs[i];
5296 gimple_stmt_iterator gsi;
5297
5298 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5299 {
5300 gimple *stmt = gsi_stmt (gsi);
5301 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5302
5303 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5304 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5305 not_vec_cost++;
5306 }
5307 }
5308
5309 free (bbs);
5310 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5311
5312 if (density_pct > DENSITY_PCT_THRESHOLD
5313 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5314 {
5315 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5316 if (dump_enabled_p ())
5317 dump_printf_loc (MSG_NOTE, vect_location,
5318 "density %d%%, cost %d exceeds threshold, penalizing "
5319 "loop body cost by %d%%", density_pct,
5320 vec_cost + not_vec_cost, DENSITY_PENALTY);
5321 }
5322 }
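/* A hedged worked example of the penalty above (the counts are invented):
   with vec_cost == 90 and not_vec_cost == 10, density_pct is 90 and the
   total of 100 exceeds DENSITY_SIZE_THRESHOLD, so the body cost is
   rescaled to 90 * (100 + 10) / 100 == 99. */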
5323
5324 /* Implement targetm.vectorize.init_cost. */
5325
5326 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5327 instruction is needed by the vectorization. */
5328 static bool rs6000_vect_nonmem;
5329
5330 static void *
5331 rs6000_init_cost (struct loop *loop_info)
5332 {
5333 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5334 data->loop_info = loop_info;
5335 data->cost[vect_prologue] = 0;
5336 data->cost[vect_body] = 0;
5337 data->cost[vect_epilogue] = 0;
5338 rs6000_vect_nonmem = false;
5339 return data;
5340 }
5341
5342 /* Implement targetm.vectorize.add_stmt_cost. */
5343
5344 static unsigned
5345 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5346 struct _stmt_vec_info *stmt_info, int misalign,
5347 enum vect_cost_model_location where)
5348 {
5349 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5350 unsigned retval = 0;
5351
5352 if (flag_vect_cost_model)
5353 {
5354 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5355 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5356 misalign);
5357 /* Statements in an inner loop relative to the loop being
5358 vectorized are weighted more heavily. The value here is
5359 arbitrary and could potentially be improved with analysis. */
5360 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5361 count *= 50; /* FIXME. */
5362
5363 retval = (unsigned) (count * stmt_cost);
5364 cost_data->cost[where] += retval;
5365
5366 /* Check whether we're doing something other than just a copy loop.
5367 Not all such loops may be profitably vectorized; see
5368 rs6000_finish_cost. */
5369 if ((kind == vec_to_scalar || kind == vec_perm
5370 || kind == vec_promote_demote || kind == vec_construct
5371 || kind == scalar_to_vec)
5372 || (where == vect_body && kind == vector_stmt))
5373 rs6000_vect_nonmem = true;
5374 }
5375
5376 return retval;
5377 }
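/* A hedged numeric example of the weighting above: a vector_stmt of base
   cost 1 seen count == 2 times in the body of an inner loop is charged
   2 * 50 * 1 == 100 units against cost[vect_body]. */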
5378
5379 /* Implement targetm.vectorize.finish_cost. */
5380
5381 static void
5382 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5383 unsigned *body_cost, unsigned *epilogue_cost)
5384 {
5385 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5386
5387 if (cost_data->loop_info)
5388 rs6000_density_test (cost_data);
5389
5390 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5391 that require versioning for any reason. The vectorization is at
5392 best a wash inside the loop, and the versioning checks make
5393 profitability highly unlikely and potentially quite harmful. */
5394 if (cost_data->loop_info)
5395 {
5396 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5397 if (!rs6000_vect_nonmem
5398 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5399 && LOOP_REQUIRES_VERSIONING (vec_info))
5400 cost_data->cost[vect_body] += 10000;
5401 }
5402
5403 *prologue_cost = cost_data->cost[vect_prologue];
5404 *body_cost = cost_data->cost[vect_body];
5405 *epilogue_cost = cost_data->cost[vect_epilogue];
5406 }
5407
5408 /* Implement targetm.vectorize.destroy_cost_data. */
5409
5410 static void
5411 rs6000_destroy_cost_data (void *data)
5412 {
5413 free (data);
5414 }
5415
5416 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5417 library with vectorized intrinsics. */
5418
5419 static tree
5420 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5421 tree type_in)
5422 {
5423 char name[32];
5424 const char *suffix = NULL;
5425 tree fntype, new_fndecl, bdecl = NULL_TREE;
5426 int n_args = 1;
5427 const char *bname;
5428 machine_mode el_mode, in_mode;
5429 int n, in_n;
5430
5431 /* Libmass is suitable for unsafe math only as it does not correctly support
5432 parts of IEEE with the required precision such as denormals. Only support
5433 it if we have VSX to use the simd d2 or f4 functions.
5434 XXX: Add variable length support. */
5435 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5436 return NULL_TREE;
5437
5438 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5439 n = TYPE_VECTOR_SUBPARTS (type_out);
5440 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5441 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5442 if (el_mode != in_mode
5443 || n != in_n)
5444 return NULL_TREE;
5445
5446 switch (fn)
5447 {
5448 CASE_CFN_ATAN2:
5449 CASE_CFN_HYPOT:
5450 CASE_CFN_POW:
5451 n_args = 2;
5452 gcc_fallthrough ();
5453
5454 CASE_CFN_ACOS:
5455 CASE_CFN_ACOSH:
5456 CASE_CFN_ASIN:
5457 CASE_CFN_ASINH:
5458 CASE_CFN_ATAN:
5459 CASE_CFN_ATANH:
5460 CASE_CFN_CBRT:
5461 CASE_CFN_COS:
5462 CASE_CFN_COSH:
5463 CASE_CFN_ERF:
5464 CASE_CFN_ERFC:
5465 CASE_CFN_EXP2:
5466 CASE_CFN_EXP:
5467 CASE_CFN_EXPM1:
5468 CASE_CFN_LGAMMA:
5469 CASE_CFN_LOG10:
5470 CASE_CFN_LOG1P:
5471 CASE_CFN_LOG2:
5472 CASE_CFN_LOG:
5473 CASE_CFN_SIN:
5474 CASE_CFN_SINH:
5475 CASE_CFN_SQRT:
5476 CASE_CFN_TAN:
5477 CASE_CFN_TANH:
5478 if (el_mode == DFmode && n == 2)
5479 {
5480 bdecl = mathfn_built_in (double_type_node, fn);
5481 suffix = "d2"; /* pow -> powd2 */
5482 }
5483 else if (el_mode == SFmode && n == 4)
5484 {
5485 bdecl = mathfn_built_in (float_type_node, fn);
5486 suffix = "4"; /* powf -> powf4 */
5487 }
5488 else
5489 return NULL_TREE;
5490 if (!bdecl)
5491 return NULL_TREE;
5492 break;
5493
5494 default:
5495 return NULL_TREE;
5496 }
5497
5498 gcc_assert (suffix != NULL);
5499 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5500 if (!bname)
5501 return NULL_TREE;
5502
5503 strcpy (name, bname + sizeof ("__builtin_") - 1);
5504 strcat (name, suffix);
5505
5506 if (n_args == 1)
5507 fntype = build_function_type_list (type_out, type_in, NULL);
5508 else if (n_args == 2)
5509 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5510 else
5511 gcc_unreachable ();
5512
5513 /* Build a function declaration for the vectorized function. */
5514 new_fndecl = build_decl (BUILTINS_LOCATION,
5515 FUNCTION_DECL, get_identifier (name), fntype);
5516 TREE_PUBLIC (new_fndecl) = 1;
5517 DECL_EXTERNAL (new_fndecl) = 1;
5518 DECL_IS_NOVOPS (new_fndecl) = 1;
5519 TREE_READONLY (new_fndecl) = 1;
5520
5521 return new_fndecl;
5522 }
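/* Hedged examples of the name mangling above: CFN_POW on V2DFmode strips
   "__builtin_" from "__builtin_pow" and appends "d2", yielding the MASS
   routine name "powd2"; the V4SFmode variant turns "__builtin_powf" into
   "powf4". */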
5523
5524 /* Returns a function decl for a vectorized version of the builtin function
5525 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5526 if it is not available. */
5527
5528 static tree
5529 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5530 tree type_in)
5531 {
5532 machine_mode in_mode, out_mode;
5533 int in_n, out_n;
5534
5535 if (TARGET_DEBUG_BUILTIN)
5536 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5537 combined_fn_name (combined_fn (fn)),
5538 GET_MODE_NAME (TYPE_MODE (type_out)),
5539 GET_MODE_NAME (TYPE_MODE (type_in)));
5540
5541 if (TREE_CODE (type_out) != VECTOR_TYPE
5542 || TREE_CODE (type_in) != VECTOR_TYPE)
5543 return NULL_TREE;
5544
5545 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5546 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5547 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5548 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5549
5550 switch (fn)
5551 {
5552 CASE_CFN_COPYSIGN:
5553 if (VECTOR_UNIT_VSX_P (V2DFmode)
5554 && out_mode == DFmode && out_n == 2
5555 && in_mode == DFmode && in_n == 2)
5556 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5557 if (VECTOR_UNIT_VSX_P (V4SFmode)
5558 && out_mode == SFmode && out_n == 4
5559 && in_mode == SFmode && in_n == 4)
5560 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5561 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5562 && out_mode == SFmode && out_n == 4
5563 && in_mode == SFmode && in_n == 4)
5564 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5565 break;
5566 CASE_CFN_CEIL:
5567 if (VECTOR_UNIT_VSX_P (V2DFmode)
5568 && out_mode == DFmode && out_n == 2
5569 && in_mode == DFmode && in_n == 2)
5570 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5571 if (VECTOR_UNIT_VSX_P (V4SFmode)
5572 && out_mode == SFmode && out_n == 4
5573 && in_mode == SFmode && in_n == 4)
5574 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5575 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5576 && out_mode == SFmode && out_n == 4
5577 && in_mode == SFmode && in_n == 4)
5578 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5579 break;
5580 CASE_CFN_FLOOR:
5581 if (VECTOR_UNIT_VSX_P (V2DFmode)
5582 && out_mode == DFmode && out_n == 2
5583 && in_mode == DFmode && in_n == 2)
5584 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5585 if (VECTOR_UNIT_VSX_P (V4SFmode)
5586 && out_mode == SFmode && out_n == 4
5587 && in_mode == SFmode && in_n == 4)
5588 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5589 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5590 && out_mode == SFmode && out_n == 4
5591 && in_mode == SFmode && in_n == 4)
5592 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5593 break;
5594 CASE_CFN_FMA:
5595 if (VECTOR_UNIT_VSX_P (V2DFmode)
5596 && out_mode == DFmode && out_n == 2
5597 && in_mode == DFmode && in_n == 2)
5598 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5599 if (VECTOR_UNIT_VSX_P (V4SFmode)
5600 && out_mode == SFmode && out_n == 4
5601 && in_mode == SFmode && in_n == 4)
5602 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5603 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5604 && out_mode == SFmode && out_n == 4
5605 && in_mode == SFmode && in_n == 4)
5606 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5607 break;
5608 CASE_CFN_TRUNC:
5609 if (VECTOR_UNIT_VSX_P (V2DFmode)
5610 && out_mode == DFmode && out_n == 2
5611 && in_mode == DFmode && in_n == 2)
5612 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5613 if (VECTOR_UNIT_VSX_P (V4SFmode)
5614 && out_mode == SFmode && out_n == 4
5615 && in_mode == SFmode && in_n == 4)
5616 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5617 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5618 && out_mode == SFmode && out_n == 4
5619 && in_mode == SFmode && in_n == 4)
5620 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5621 break;
5622 CASE_CFN_NEARBYINT:
5623 if (VECTOR_UNIT_VSX_P (V2DFmode)
5624 && flag_unsafe_math_optimizations
5625 && out_mode == DFmode && out_n == 2
5626 && in_mode == DFmode && in_n == 2)
5627 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5628 if (VECTOR_UNIT_VSX_P (V4SFmode)
5629 && flag_unsafe_math_optimizations
5630 && out_mode == SFmode && out_n == 4
5631 && in_mode == SFmode && in_n == 4)
5632 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5633 break;
5634 CASE_CFN_RINT:
5635 if (VECTOR_UNIT_VSX_P (V2DFmode)
5636 && !flag_trapping_math
5637 && out_mode == DFmode && out_n == 2
5638 && in_mode == DFmode && in_n == 2)
5639 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5640 if (VECTOR_UNIT_VSX_P (V4SFmode)
5641 && !flag_trapping_math
5642 && out_mode == SFmode && out_n == 4
5643 && in_mode == SFmode && in_n == 4)
5644 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5645 break;
5646 default:
5647 break;
5648 }
5649
5650 /* Generate calls to libmass if appropriate. */
5651 if (rs6000_veclib_handler)
5652 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5653
5654 return NULL_TREE;
5655 }
5656
5657 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5658
5659 static tree
5660 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5661 tree type_in)
5662 {
5663 machine_mode in_mode, out_mode;
5664 int in_n, out_n;
5665
5666 if (TARGET_DEBUG_BUILTIN)
5667 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5668 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5669 GET_MODE_NAME (TYPE_MODE (type_out)),
5670 GET_MODE_NAME (TYPE_MODE (type_in)));
5671
5672 if (TREE_CODE (type_out) != VECTOR_TYPE
5673 || TREE_CODE (type_in) != VECTOR_TYPE)
5674 return NULL_TREE;
5675
5676 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5677 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5678 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5679 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5680
5681 enum rs6000_builtins fn
5682 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5683 switch (fn)
5684 {
5685 case RS6000_BUILTIN_RSQRTF:
5686 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5687 && out_mode == SFmode && out_n == 4
5688 && in_mode == SFmode && in_n == 4)
5689 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5690 break;
5691 case RS6000_BUILTIN_RSQRT:
5692 if (VECTOR_UNIT_VSX_P (V2DFmode)
5693 && out_mode == DFmode && out_n == 2
5694 && in_mode == DFmode && in_n == 2)
5695 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5696 break;
5697 case RS6000_BUILTIN_RECIPF:
5698 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5699 && out_mode == SFmode && out_n == 4
5700 && in_mode == SFmode && in_n == 4)
5701 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5702 break;
5703 case RS6000_BUILTIN_RECIP:
5704 if (VECTOR_UNIT_VSX_P (V2DFmode)
5705 && out_mode == DFmode && out_n == 2
5706 && in_mode == DFmode && in_n == 2)
5707 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5708 break;
5709 default:
5710 break;
5711 }
5712 return NULL_TREE;
5713 }
5714 \f
5715 /* Default CPU string for rs6000*_file_start functions. */
5716 static const char *rs6000_default_cpu;
5717
5718 /* Do anything needed at the start of the asm file. */
5719
5720 static void
5721 rs6000_file_start (void)
5722 {
5723 char buffer[80];
5724 const char *start = buffer;
5725 FILE *file = asm_out_file;
5726
5727 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5728
5729 default_file_start ();
5730
5731 if (flag_verbose_asm)
5732 {
5733 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5734
5735 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5736 {
5737 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5738 start = "";
5739 }
5740
5741 if (global_options_set.x_rs6000_cpu_index)
5742 {
5743 fprintf (file, "%s -mcpu=%s", start,
5744 processor_target_table[rs6000_cpu_index].name);
5745 start = "";
5746 }
5747
5748 if (global_options_set.x_rs6000_tune_index)
5749 {
5750 fprintf (file, "%s -mtune=%s", start,
5751 processor_target_table[rs6000_tune_index].name);
5752 start = "";
5753 }
5754
5755 if (PPC405_ERRATUM77)
5756 {
5757 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5758 start = "";
5759 }
5760
5761 #ifdef USING_ELFOS_H
5762 switch (rs6000_sdata)
5763 {
5764 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5765 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5766 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5767 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5768 }
5769
5770 if (rs6000_sdata && g_switch_value)
5771 {
5772 fprintf (file, "%s -G %d", start,
5773 g_switch_value);
5774 start = "";
5775 }
5776 #endif
5777
5778 if (*start == '\0')
5779 putc ('\n', file);
5780 }
5781
5782 #ifdef USING_ELFOS_H
5783 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5784 && !global_options_set.x_rs6000_cpu_index)
5785 {
5786 fputs ("\t.machine ", asm_out_file);
5787 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5788 fputs ("power9\n", asm_out_file);
5789 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5790 fputs ("power8\n", asm_out_file);
5791 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5792 fputs ("power7\n", asm_out_file);
5793 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5794 fputs ("power6\n", asm_out_file);
5795 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5796 fputs ("power5\n", asm_out_file);
5797 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5798 fputs ("power4\n", asm_out_file);
5799 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5800 fputs ("ppc64\n", asm_out_file);
5801 else
5802 fputs ("ppc\n", asm_out_file);
5803 }
5804 #endif
5805
5806 if (DEFAULT_ABI == ABI_ELFv2)
5807 fprintf (file, "\t.abiversion 2\n");
5808 }
5809
5810 \f
5811 /* Return nonzero if this function is known to have a null epilogue. */
5812
5813 int
5814 direct_return (void)
5815 {
5816 if (reload_completed)
5817 {
5818 rs6000_stack_t *info = rs6000_stack_info ();
5819
5820 if (info->first_gp_reg_save == 32
5821 && info->first_fp_reg_save == 64
5822 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5823 && ! info->lr_save_p
5824 && ! info->cr_save_p
5825 && info->vrsave_size == 0
5826 && ! info->push_p)
5827 return 1;
5828 }
5829
5830 return 0;
5831 }
5832
5833 /* Helper for num_insns_constant. Calculate number of instructions to
5834 load VALUE to a single gpr using combinations of addi, addis, ori,
5835 oris and sldi instructions. */
5836
5837 static int
5838 num_insns_constant_gpr (HOST_WIDE_INT value)
5839 {
5840 /* Signed constant loadable with addi. */
5841 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5842 return 1;
5843
5844 /* Constant loadable with addis. */
5845 else if ((value & 0xffff) == 0
5846 && (value >> 31 == -1 || value >> 31 == 0))
5847 return 1;
5848
5849 else if (TARGET_POWERPC64)
5850 {
5851 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5852 HOST_WIDE_INT high = value >> 31;
5853
5854 if (high == 0 || high == -1)
5855 return 2;
5856
5857 high >>= 1;
5858
5859 if (low == 0)
5860 return num_insns_constant_gpr (high) + 1;
5861 else if (high == 0)
5862 return num_insns_constant_gpr (low) + 1;
5863 else
5864 return (num_insns_constant_gpr (high)
5865 + num_insns_constant_gpr (low) + 1);
5866 }
5867
5868 else
5869 return 2;
5870 }
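/* Hedged worked examples for the function above (the last line assumes
   TARGET_POWERPC64; the asm and register number are illustrative only):
     0x7fff       -> 1  li  9,0x7fff
     0x12340000   -> 1  lis 9,0x1234
     0x12345678   -> 2  lis 9,0x1234 ; ori 9,9,0x5678
     0x100000000  -> 2  li  9,1 ; sldi 9,9,32  */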
5871
5872 /* Helper for num_insns_constant. Allow constants formed by the
5873 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5874 and handle modes that require multiple gprs. */
5875
5876 static int
5877 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5878 {
5879 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5880 int total = 0;
5881 while (nregs-- > 0)
5882 {
5883 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5884 int insns = num_insns_constant_gpr (low);
5885 if (insns > 2
5886 /* We won't get more than 2 from num_insns_constant_gpr
5887 except when TARGET_POWERPC64 and mode is DImode or
5888 wider, so the register mode must be DImode. */
5889 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5890 insns = 2;
5891 total += insns;
5892 value >>= BITS_PER_WORD;
5893 }
5894 return total;
5895 }
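/* A hedged example of the mask special case above: 0x0003ffffffffffff
   (a run of 1-bits reaching the least significant bit) would cost 4 by
   the plain li/lis/ori/sldi recipe, but it is a valid DImode and-mask,
   so it is counted as 2 (li -1 followed by rldicl). */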
5896
5897 /* Return the number of instructions it takes to form a constant in as
5898 many gprs as are needed for MODE. */
5899
5900 int
5901 num_insns_constant (rtx op, machine_mode mode)
5902 {
5903 HOST_WIDE_INT val;
5904
5905 switch (GET_CODE (op))
5906 {
5907 case CONST_INT:
5908 val = INTVAL (op);
5909 break;
5910
5911 case CONST_WIDE_INT:
5912 {
5913 int insns = 0;
5914 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5915 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5916 DImode);
5917 return insns;
5918 }
5919
5920 case CONST_DOUBLE:
5921 {
5922 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5923
5924 if (mode == SFmode || mode == SDmode)
5925 {
5926 long l;
5927
5928 if (mode == SDmode)
5929 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5930 else
5931 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5932 /* See the first define_split in rs6000.md handling a
5933 const_double_operand. */
5934 val = l;
5935 mode = SImode;
5936 }
5937 else if (mode == DFmode || mode == DDmode)
5938 {
5939 long l[2];
5940
5941 if (mode == DDmode)
5942 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5943 else
5944 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5945
5946 /* See the second (32-bit) and third (64-bit) define_split
5947 in rs6000.md handling a const_double_operand. */
5948 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5949 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5950 mode = DImode;
5951 }
5952 else if (mode == TFmode || mode == TDmode
5953 || mode == KFmode || mode == IFmode)
5954 {
5955 long l[4];
5956 int insns;
5957
5958 if (mode == TDmode)
5959 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5960 else
5961 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5962
5963 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5964 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5965 insns = num_insns_constant_multi (val, DImode);
5966 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5967 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5968 insns += num_insns_constant_multi (val, DImode);
5969 return insns;
5970 }
5971 else
5972 gcc_unreachable ();
5973 }
5974 break;
5975
5976 default:
5977 gcc_unreachable ();
5978 }
5979
5980 return num_insns_constant_multi (val, mode);
5981 }
5982
5983 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5984 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5985 corresponding element of the vector, but for V4SFmode, the
5986 corresponding "float" is interpreted as an SImode integer. */
5987
5988 HOST_WIDE_INT
5989 const_vector_elt_as_int (rtx op, unsigned int elt)
5990 {
5991 rtx tmp;
5992
5993 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5994 gcc_assert (GET_MODE (op) != V2DImode
5995 && GET_MODE (op) != V2DFmode);
5996
5997 tmp = CONST_VECTOR_ELT (op, elt);
5998 if (GET_MODE (op) == V4SFmode)
5999 tmp = gen_lowpart (SImode, tmp);
6000 return INTVAL (tmp);
6001 }
6002
6003 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6004 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6005 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6006 all items are set to the same value and contain COPIES replicas of the
6007 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6008 operand and the others are set to the value of the operand's msb. */
6009
6010 static bool
6011 vspltis_constant (rtx op, unsigned step, unsigned copies)
6012 {
6013 machine_mode mode = GET_MODE (op);
6014 machine_mode inner = GET_MODE_INNER (mode);
6015
6016 unsigned i;
6017 unsigned nunits;
6018 unsigned bitsize;
6019 unsigned mask;
6020
6021 HOST_WIDE_INT val;
6022 HOST_WIDE_INT splat_val;
6023 HOST_WIDE_INT msb_val;
6024
6025 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6026 return false;
6027
6028 nunits = GET_MODE_NUNITS (mode);
6029 bitsize = GET_MODE_BITSIZE (inner);
6030 mask = GET_MODE_MASK (inner);
6031
6032 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6033 splat_val = val;
6034 msb_val = val >= 0 ? 0 : -1;
6035
6036 /* Construct the value to be splatted, if possible. If not, return false. */
6037 for (i = 2; i <= copies; i *= 2)
6038 {
6039 HOST_WIDE_INT small_val;
6040 bitsize /= 2;
6041 small_val = splat_val >> bitsize;
6042 mask >>= bitsize;
6043 if (splat_val != ((HOST_WIDE_INT)
6044 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6045 | (small_val & mask)))
6046 return false;
6047 splat_val = small_val;
6048 }
6049
6050 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6051 if (EASY_VECTOR_15 (splat_val))
6052 ;
6053
6054 /* Also check if we can splat, and then add the result to itself. Do so if
6055 the value is positive, or if the splat instruction is using OP's mode;
6056 for splat_val < 0, the splat and the add should use the same mode. */
6057 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6058 && (splat_val >= 0 || (step == 1 && copies == 1)))
6059 ;
6060
6061 /* Also check if we are loading up the most significant bit, which can be done
6062 by loading up -1 and shifting the value left by -1. */
6063 else if (EASY_VECTOR_MSB (splat_val, inner))
6064 ;
6065
6066 else
6067 return false;
6068
6069 /* Check if VAL is present in every STEP-th element, and the
6070 other elements are filled with its most significant bit. */
6071 for (i = 1; i < nunits; ++i)
6072 {
6073 HOST_WIDE_INT desired_val;
6074 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6075 if ((i & (step - 1)) == 0)
6076 desired_val = val;
6077 else
6078 desired_val = msb_val;
6079
6080 if (desired_val != const_vector_elt_as_int (op, elt))
6081 return false;
6082 }
6083
6084 return true;
6085 }
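/* Hedged examples of the STEP/COPIES scheme above: with step == 1 and
   copies == 1, the V8HImode constant { 5,5,5,5,5,5,5,5 } is a plain
   "vspltish 5". With step == 2, the big-endian V8HImode constant
   { 0,5, 0,5, 0,5, 0,5 } is really "vspltisw 5" reinterpreted: each
   0x00000005 word splits into 0x0000 (the msb fill) and 0x0005. */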
6086
6087 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6088 instruction, filling in the bottom elements with 0 or -1.
6089
6090 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6091 for the number of zeroes to shift in, or negative for the number of 0xff
6092 bytes to shift in.
6093
6094 OP is a CONST_VECTOR. */
6095
6096 int
6097 vspltis_shifted (rtx op)
6098 {
6099 machine_mode mode = GET_MODE (op);
6100 machine_mode inner = GET_MODE_INNER (mode);
6101
6102 unsigned i, j;
6103 unsigned nunits;
6104 unsigned mask;
6105
6106 HOST_WIDE_INT val;
6107
6108 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6109 return false;
6110
6111 /* We need to create pseudo registers to do the shift, so don't recognize
6112 shift vector constants after reload. */
6113 if (!can_create_pseudo_p ())
6114 return false;
6115
6116 nunits = GET_MODE_NUNITS (mode);
6117 mask = GET_MODE_MASK (inner);
6118
6119 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6120
6121 /* Check if the value can really be the operand of a vspltis[bhw]. */
6122 if (EASY_VECTOR_15 (val))
6123 ;
6124
6125 /* Also check if we are loading up the most significant bit which can be done
6126 by loading up -1 and shifting the value left by -1. */
6127 else if (EASY_VECTOR_MSB (val, inner))
6128 ;
6129
6130 else
6131 return 0;
6132
6133 /* Check if VAL is present in every STEP-th element until we find elements
6134 that are 0 or all 1 bits. */
6135 for (i = 1; i < nunits; ++i)
6136 {
6137 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6138 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6139
6140 /* If the value isn't the splat value, check for the remaining elements
6141 being 0/-1. */
6142 if (val != elt_val)
6143 {
6144 if (elt_val == 0)
6145 {
6146 for (j = i+1; j < nunits; ++j)
6147 {
6148 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6149 if (const_vector_elt_as_int (op, elt2) != 0)
6150 return 0;
6151 }
6152
6153 return (nunits - i) * GET_MODE_SIZE (inner);
6154 }
6155
6156 else if ((elt_val & mask) == mask)
6157 {
6158 for (j = i+1; j < nunits; ++j)
6159 {
6160 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6161 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6162 return 0;
6163 }
6164
6165 return -((nunits - i) * GET_MODE_SIZE (inner));
6166 }
6167
6168 else
6169 return 0;
6170 }
6171 }
6172
6173 /* If all elements are equal, we don't need to do VSLDOI. */
6174 return 0;
6175 }
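/* A hedged example for the function above: the big-endian V4SImode
   constant { 5, 0, 0, 0 } splats 5 with vspltisw and then shifts in
   (4 - 1) * 4 == 12 zero bytes with VSLDOI, so the return value is 12;
   { 5, -1, -1, -1 } would instead return -12 (twelve 0xff bytes). */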
6176
6177
6178 /* Return true if OP is of the given MODE and can be synthesized
6179 with a vspltisb, vspltish or vspltisw. */
6180
6181 bool
6182 easy_altivec_constant (rtx op, machine_mode mode)
6183 {
6184 unsigned step, copies;
6185
6186 if (mode == VOIDmode)
6187 mode = GET_MODE (op);
6188 else if (mode != GET_MODE (op))
6189 return false;
6190
6191 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6192 constants. */
6193 if (mode == V2DFmode)
6194 return zero_constant (op, mode);
6195
6196 else if (mode == V2DImode)
6197 {
6198 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6199 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6200 return false;
6201
6202 if (zero_constant (op, mode))
6203 return true;
6204
6205 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6206 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6207 return true;
6208
6209 return false;
6210 }
6211
6212 /* V1TImode is a special container for TImode. Ignore for now. */
6213 else if (mode == V1TImode)
6214 return false;
6215
6216 /* Start with a vspltisw. */
6217 step = GET_MODE_NUNITS (mode) / 4;
6218 copies = 1;
6219
6220 if (vspltis_constant (op, step, copies))
6221 return true;
6222
6223 /* Then try with a vspltish. */
6224 if (step == 1)
6225 copies <<= 1;
6226 else
6227 step >>= 1;
6228
6229 if (vspltis_constant (op, step, copies))
6230 return true;
6231
6232 /* And finally a vspltisb. */
6233 if (step == 1)
6234 copies <<= 1;
6235 else
6236 step >>= 1;
6237
6238 if (vspltis_constant (op, step, copies))
6239 return true;
6240
6241 if (vspltis_shifted (op) != 0)
6242 return true;
6243
6244 return false;
6245 }
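/* A hedged trace of the progression above: for V16QImode the tries are
   step == 4 (vspltisw), step == 2 (vspltish), then step == 1 (vspltisb);
   for V4SImode, step starts at 1, so the same loop widens copies
   instead: 1, then 2, then 4. */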
6246
6247 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6248 result is OP. Abort if it is not possible. */
6249
6250 rtx
6251 gen_easy_altivec_constant (rtx op)
6252 {
6253 machine_mode mode = GET_MODE (op);
6254 int nunits = GET_MODE_NUNITS (mode);
6255 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6256 unsigned step = nunits / 4;
6257 unsigned copies = 1;
6258
6259 /* Start with a vspltisw. */
6260 if (vspltis_constant (op, step, copies))
6261 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6262
6263 /* Then try with a vspltish. */
6264 if (step == 1)
6265 copies <<= 1;
6266 else
6267 step >>= 1;
6268
6269 if (vspltis_constant (op, step, copies))
6270 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6271
6272 /* And finally a vspltisb. */
6273 if (step == 1)
6274 copies <<= 1;
6275 else
6276 step >>= 1;
6277
6278 if (vspltis_constant (op, step, copies))
6279 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6280
6281 gcc_unreachable ();
6282 }
6283
6284 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6285 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6286
6287 Return the number of instructions needed (1 or 2) via the address pointed
6288 to by NUM_INSNS_PTR.
6289
6290 Return the constant that is being split via CONSTANT_PTR. */
6291
6292 bool
6293 xxspltib_constant_p (rtx op,
6294 machine_mode mode,
6295 int *num_insns_ptr,
6296 int *constant_ptr)
6297 {
6298 size_t nunits = GET_MODE_NUNITS (mode);
6299 size_t i;
6300 HOST_WIDE_INT value;
6301 rtx element;
6302
6303 /* Set the returned values to out-of-bounds values. */
6304 *num_insns_ptr = -1;
6305 *constant_ptr = 256;
6306
6307 if (!TARGET_P9_VECTOR)
6308 return false;
6309
6310 if (mode == VOIDmode)
6311 mode = GET_MODE (op);
6312
6313 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6314 return false;
6315
6316 /* Handle (vec_duplicate <constant>). */
6317 if (GET_CODE (op) == VEC_DUPLICATE)
6318 {
6319 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6320 && mode != V2DImode)
6321 return false;
6322
6323 element = XEXP (op, 0);
6324 if (!CONST_INT_P (element))
6325 return false;
6326
6327 value = INTVAL (element);
6328 if (!IN_RANGE (value, -128, 127))
6329 return false;
6330 }
6331
6332 /* Handle (const_vector [...]). */
6333 else if (GET_CODE (op) == CONST_VECTOR)
6334 {
6335 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6336 && mode != V2DImode)
6337 return false;
6338
6339 element = CONST_VECTOR_ELT (op, 0);
6340 if (!CONST_INT_P (element))
6341 return false;
6342
6343 value = INTVAL (element);
6344 if (!IN_RANGE (value, -128, 127))
6345 return false;
6346
6347 for (i = 1; i < nunits; i++)
6348 {
6349 element = CONST_VECTOR_ELT (op, i);
6350 if (!CONST_INT_P (element))
6351 return false;
6352
6353 if (value != INTVAL (element))
6354 return false;
6355 }
6356 }
6357
6358 /* Handle integer constants being loaded into the upper part of the VSX
6359 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6360 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6361 else if (CONST_INT_P (op))
6362 {
6363 if (!SCALAR_INT_MODE_P (mode))
6364 return false;
6365
6366 value = INTVAL (op);
6367 if (!IN_RANGE (value, -128, 127))
6368 return false;
6369
6370 if (!IN_RANGE (value, -1, 0))
6371 {
6372 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6373 return false;
6374
6375 if (EASY_VECTOR_15 (value))
6376 return false;
6377 }
6378 }
6379
6380 else
6381 return false;
6382
6383 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6384 sign extend. Special case 0/-1 to allow getting any VSX register instead
6385 of an Altivec register. */
6386 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6387 && EASY_VECTOR_15 (value))
6388 return false;
6389
6390 /* Return # of instructions and the constant byte for XXSPLTIB. */
6391 if (mode == V16QImode)
6392 *num_insns_ptr = 1;
6393
6394 else if (IN_RANGE (value, -1, 0))
6395 *num_insns_ptr = 1;
6396
6397 else
6398 *num_insns_ptr = 2;
6399
6400 *constant_ptr = (int) value;
6401 return true;
6402 }
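/* Hedged examples for the predicate above: a V16QImode splat of 0x42
   gives *num_insns_ptr == 1 (a lone xxspltib); a V4SImode splat of 0x42
   gives 2 (xxspltib plus a vextsb2w sign extension); a V4SImode splat
   of 5 is rejected so that a single vspltisw can be used instead. */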
6403
6404 const char *
6405 output_vec_const_move (rtx *operands)
6406 {
6407 int shift;
6408 machine_mode mode;
6409 rtx dest, vec;
6410
6411 dest = operands[0];
6412 vec = operands[1];
6413 mode = GET_MODE (dest);
6414
6415 if (TARGET_VSX)
6416 {
6417 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6418 int xxspltib_value = 256;
6419 int num_insns = -1;
6420
6421 if (zero_constant (vec, mode))
6422 {
6423 if (TARGET_P9_VECTOR)
6424 return "xxspltib %x0,0";
6425
6426 else if (dest_vmx_p)
6427 return "vspltisw %0,0";
6428
6429 else
6430 return "xxlxor %x0,%x0,%x0";
6431 }
6432
6433 if (all_ones_constant (vec, mode))
6434 {
6435 if (TARGET_P9_VECTOR)
6436 return "xxspltib %x0,255";
6437
6438 else if (dest_vmx_p)
6439 return "vspltisw %0,-1";
6440
6441 else if (TARGET_P8_VECTOR)
6442 return "xxlorc %x0,%x0,%x0";
6443
6444 else
6445 gcc_unreachable ();
6446 }
6447
6448 if (TARGET_P9_VECTOR
6449 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6450 {
6451 if (num_insns == 1)
6452 {
6453 operands[2] = GEN_INT (xxspltib_value & 0xff);
6454 return "xxspltib %x0,%2";
6455 }
6456
6457 return "#";
6458 }
6459 }
6460
6461 if (TARGET_ALTIVEC)
6462 {
6463 rtx splat_vec;
6464
6465 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6466 if (zero_constant (vec, mode))
6467 return "vspltisw %0,0";
6468
6469 if (all_ones_constant (vec, mode))
6470 return "vspltisw %0,-1";
6471
6472 /* Do we need to construct a value using VSLDOI? */
6473 shift = vspltis_shifted (vec);
6474 if (shift != 0)
6475 return "#";
6476
6477 splat_vec = gen_easy_altivec_constant (vec);
6478 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6479 operands[1] = XEXP (splat_vec, 0);
6480 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6481 return "#";
6482
6483 switch (GET_MODE (splat_vec))
6484 {
6485 case E_V4SImode:
6486 return "vspltisw %0,%1";
6487
6488 case E_V8HImode:
6489 return "vspltish %0,%1";
6490
6491 case E_V16QImode:
6492 return "vspltisb %0,%1";
6493
6494 default:
6495 gcc_unreachable ();
6496 }
6497 }
6498
6499 gcc_unreachable ();
6500 }
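/* Hedged examples of the asm selection above: an all-zero vector emits
   "xxspltib %x0,0" on ISA 3.0, "vspltisw %0,0" for an AltiVec
   destination, and "xxlxor %x0,%x0,%x0" otherwise; a constant that
   needs VSLDOI returns "#" so the insn is split later. */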
6501
6502 /* Initialize vector TARGET to VALS. */
6503
6504 void
6505 rs6000_expand_vector_init (rtx target, rtx vals)
6506 {
6507 machine_mode mode = GET_MODE (target);
6508 machine_mode inner_mode = GET_MODE_INNER (mode);
6509 int n_elts = GET_MODE_NUNITS (mode);
6510 int n_var = 0, one_var = -1;
6511 bool all_same = true, all_const_zero = true;
6512 rtx x, mem;
6513 int i;
6514
6515 for (i = 0; i < n_elts; ++i)
6516 {
6517 x = XVECEXP (vals, 0, i);
6518 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6519 ++n_var, one_var = i;
6520 else if (x != CONST0_RTX (inner_mode))
6521 all_const_zero = false;
6522
6523 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6524 all_same = false;
6525 }
6526
6527 if (n_var == 0)
6528 {
6529 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6530 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6531 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6532 {
6533 /* Zero register. */
6534 emit_move_insn (target, CONST0_RTX (mode));
6535 return;
6536 }
6537 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6538 {
6539 /* Splat immediate. */
6540 emit_insn (gen_rtx_SET (target, const_vec));
6541 return;
6542 }
6543 else
6544 {
6545 /* Load from constant pool. */
6546 emit_move_insn (target, const_vec);
6547 return;
6548 }
6549 }
6550
6551 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6552 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6553 {
6554 rtx op[2];
6555 size_t i;
6556 size_t num_elements = all_same ? 1 : 2;
6557 for (i = 0; i < num_elements; i++)
6558 {
6559 op[i] = XVECEXP (vals, 0, i);
6560 /* Just in case there is a SUBREG with a smaller mode, do a
6561 conversion. */
6562 if (GET_MODE (op[i]) != inner_mode)
6563 {
6564 rtx tmp = gen_reg_rtx (inner_mode);
6565 convert_move (tmp, op[i], 0);
6566 op[i] = tmp;
6567 }
6568 /* Allow load with splat double word. */
6569 else if (MEM_P (op[i]))
6570 {
6571 if (!all_same)
6572 op[i] = force_reg (inner_mode, op[i]);
6573 }
6574 else if (!REG_P (op[i]))
6575 op[i] = force_reg (inner_mode, op[i]);
6576 }
6577
6578 if (all_same)
6579 {
6580 if (mode == V2DFmode)
6581 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6582 else
6583 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6584 }
6585 else
6586 {
6587 if (mode == V2DFmode)
6588 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6589 else
6590 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6591 }
6592 return;
6593 }
6594
6595 /* Special case initializing vector int if we are on 64-bit systems with
6596 direct move or we have the ISA 3.0 instructions. */
6597 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6598 && TARGET_DIRECT_MOVE_64BIT)
6599 {
6600 if (all_same)
6601 {
6602 rtx element0 = XVECEXP (vals, 0, 0);
6603 if (MEM_P (element0))
6604 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6605 else
6606 element0 = force_reg (SImode, element0);
6607
6608 if (TARGET_P9_VECTOR)
6609 emit_insn (gen_vsx_splat_v4si (target, element0));
6610 else
6611 {
6612 rtx tmp = gen_reg_rtx (DImode);
6613 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6614 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6615 }
6616 return;
6617 }
6618 else
6619 {
6620 rtx elements[4];
6621 size_t i;
6622
6623 for (i = 0; i < 4; i++)
6624 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6625
6626 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6627 elements[2], elements[3]));
6628 return;
6629 }
6630 }
6631
6632 /* With single-precision floating point on VSX, we know that internally
6633 single precision is actually represented as a double, so either make 2
6634 V2DF vectors and convert these to single precision, or do one
6635 conversion and splat the result to the other elements. */
6636 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6637 {
6638 if (all_same)
6639 {
6640 rtx element0 = XVECEXP (vals, 0, 0);
6641
6642 if (TARGET_P9_VECTOR)
6643 {
6644 if (MEM_P (element0))
6645 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6646
6647 emit_insn (gen_vsx_splat_v4sf (target, element0));
6648 }
6649
6650 else
6651 {
6652 rtx freg = gen_reg_rtx (V4SFmode);
6653 rtx sreg = force_reg (SFmode, element0);
6654 rtx cvt = (TARGET_XSCVDPSPN
6655 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6656 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6657
6658 emit_insn (cvt);
6659 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6660 const0_rtx));
6661 }
6662 }
6663 else
6664 {
6665 rtx dbl_even = gen_reg_rtx (V2DFmode);
6666 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6667 rtx flt_even = gen_reg_rtx (V4SFmode);
6668 rtx flt_odd = gen_reg_rtx (V4SFmode);
6669 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6670 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6671 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6672 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6673
6674 /* Use VMRGEW if we can instead of doing a permute. */
6675 if (TARGET_P8_VECTOR)
6676 {
6677 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6678 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6679 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6680 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6681 if (BYTES_BIG_ENDIAN)
6682 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6683 else
6684 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6685 }
6686 else
6687 {
6688 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6689 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6690 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6691 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6692 rs6000_expand_extract_even (target, flt_even, flt_odd);
6693 }
6694 }
6695 return;
6696 }
6697
6698 /* Special case initializing vector short/char that are splats if we are on
6699 64-bit systems with direct move. */
6700 if (all_same && TARGET_DIRECT_MOVE_64BIT
6701 && (mode == V16QImode || mode == V8HImode))
6702 {
6703 rtx op0 = XVECEXP (vals, 0, 0);
6704 rtx di_tmp = gen_reg_rtx (DImode);
6705
6706 if (!REG_P (op0))
6707 op0 = force_reg (GET_MODE_INNER (mode), op0);
6708
6709 if (mode == V16QImode)
6710 {
6711 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6712 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6713 return;
6714 }
6715
6716 if (mode == V8HImode)
6717 {
6718 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6719 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6720 return;
6721 }
6722 }
6723
6724 /* Store value to stack temp. Load vector element. Splat. However, splat
6725 of 64-bit items is not supported on Altivec. */
6726 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6727 {
6728 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6729 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6730 XVECEXP (vals, 0, 0));
6731 x = gen_rtx_UNSPEC (VOIDmode,
6732 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6733 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6734 gen_rtvec (2,
6735 gen_rtx_SET (target, mem),
6736 x)));
6737 x = gen_rtx_VEC_SELECT (inner_mode, target,
6738 gen_rtx_PARALLEL (VOIDmode,
6739 gen_rtvec (1, const0_rtx)));
6740 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6741 return;
6742 }
6743
6744 /* One field is non-constant. Load constant then overwrite
6745 varying field. */
6746 if (n_var == 1)
6747 {
6748 rtx copy = copy_rtx (vals);
6749
6750 /* Load constant part of vector, substitute neighboring value for
6751 varying element. */
6752 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6753 rs6000_expand_vector_init (target, copy);
6754
6755 /* Insert variable. */
6756 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6757 return;
6758 }
6759
6760 /* Construct the vector in memory one field at a time
6761 and load the whole vector. */
6762 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6763 for (i = 0; i < n_elts; i++)
6764 emit_move_insn (adjust_address_nv (mem, inner_mode,
6765 i * GET_MODE_SIZE (inner_mode)),
6766 XVECEXP (vals, 0, i));
6767 emit_move_insn (target, mem);
6768 }
6769
6770 /* Set field ELT of TARGET to VAL. */
6771
6772 void
6773 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6774 {
6775 machine_mode mode = GET_MODE (target);
6776 machine_mode inner_mode = GET_MODE_INNER (mode);
6777 rtx reg = gen_reg_rtx (mode);
6778 rtx mask, mem, x;
6779 int width = GET_MODE_SIZE (inner_mode);
6780 int i;
6781
6782 val = force_reg (GET_MODE (val), val);
6783
6784 if (VECTOR_MEM_VSX_P (mode))
6785 {
6786 rtx insn = NULL_RTX;
6787 rtx elt_rtx = GEN_INT (elt);
6788
6789 if (mode == V2DFmode)
6790 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6791
6792 else if (mode == V2DImode)
6793 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6794
6795 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6796 {
6797 if (mode == V4SImode)
6798 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6799 else if (mode == V8HImode)
6800 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6801 else if (mode == V16QImode)
6802 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6803 else if (mode == V4SFmode)
6804 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6805 }
6806
6807 if (insn)
6808 {
6809 emit_insn (insn);
6810 return;
6811 }
6812 }
6813
6814 /* Simplify setting single-element vectors like V1TImode. */
6815 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6816 {
6817 emit_move_insn (target, gen_lowpart (mode, val));
6818 return;
6819 }
6820
6821 /* Load single variable value. */
6822 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6823 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6824 x = gen_rtx_UNSPEC (VOIDmode,
6825 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6826 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6827 gen_rtvec (2,
6828 gen_rtx_SET (reg, mem),
6829 x)));
6830
6831 /* Linear sequence. */
6832 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6833 for (i = 0; i < 16; ++i)
6834 XVECEXP (mask, 0, i) = GEN_INT (i);
6835
6836 /* Set permute mask to insert element into target. */
6837 for (i = 0; i < width; ++i)
6838 XVECEXP (mask, 0, elt*width + i)
6839 = GEN_INT (i + 0x10);
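/* Worked example (illustrative): for a V4SImode TARGET (WIDTH 4) and
   ELT 1, the selector built above is
     { 0,1,2,3, 16,17,18,19, 8,9,10,11, 12,13,14,15 }
   so in the big-endian vperm below, bytes 16..19, i.e. the first
   element of REG (the new value), replace element 1 of TARGET; the
   little-endian paths adjust operands/selector accordingly.  */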
6840 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6841
6842 if (BYTES_BIG_ENDIAN)
6843 x = gen_rtx_UNSPEC (mode,
6844 gen_rtvec (3, target, reg,
6845 force_reg (V16QImode, x)),
6846 UNSPEC_VPERM);
6847 else
6848 {
6849 if (TARGET_P9_VECTOR)
6850 x = gen_rtx_UNSPEC (mode,
6851 gen_rtvec (3, reg, target,
6852 force_reg (V16QImode, x)),
6853 UNSPEC_VPERMR);
6854 else
6855 {
6856 /* Invert selector. We prefer to generate VNAND on P8 so
6857 that future fusion opportunities can kick in, but must
6858 generate VNOR elsewhere. */
6859 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6860 rtx iorx = (TARGET_P8_VECTOR
6861 ? gen_rtx_IOR (V16QImode, notx, notx)
6862 : gen_rtx_AND (V16QImode, notx, notx));
6863 rtx tmp = gen_reg_rtx (V16QImode);
6864 emit_insn (gen_rtx_SET (tmp, iorx));
6865
6866 /* Permute with operands reversed and adjusted selector. */
6867 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6868 UNSPEC_VPERM);
6869 }
6870 }
6871
6872 emit_insn (gen_rtx_SET (target, x));
6873 }
6874
6875 /* Extract field ELT from VEC into TARGET. */
6876
6877 void
6878 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6879 {
6880 machine_mode mode = GET_MODE (vec);
6881 machine_mode inner_mode = GET_MODE_INNER (mode);
6882 rtx mem;
6883
6884 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6885 {
6886 switch (mode)
6887 {
6888 default:
6889 break;
6890 case E_V1TImode:
6891 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
6892 emit_move_insn (target, gen_lowpart (TImode, vec));
6893 break;
6894 case E_V2DFmode:
6895 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6896 return;
6897 case E_V2DImode:
6898 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6899 return;
6900 case E_V4SFmode:
6901 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6902 return;
6903 case E_V16QImode:
6904 if (TARGET_DIRECT_MOVE_64BIT)
6905 {
6906 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6907 return;
6908 }
6909 else
6910 break;
6911 case E_V8HImode:
6912 if (TARGET_DIRECT_MOVE_64BIT)
6913 {
6914 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6915 return;
6916 }
6917 else
6918 break;
6919 case E_V4SImode:
6920 if (TARGET_DIRECT_MOVE_64BIT)
6921 {
6922 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6923 return;
6924 }
6925 break;
6926 }
6927 }
6928 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6929 && TARGET_DIRECT_MOVE_64BIT)
6930 {
6931 if (GET_MODE (elt) != DImode)
6932 {
6933 rtx tmp = gen_reg_rtx (DImode);
6934 convert_move (tmp, elt, 0);
6935 elt = tmp;
6936 }
6937 else if (!REG_P (elt))
6938 elt = force_reg (DImode, elt);
6939
6940 switch (mode)
6941 {
6942 case E_V2DFmode:
6943 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6944 return;
6945
6946 case E_V2DImode:
6947 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6948 return;
6949
6950 case E_V4SFmode:
6951 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6952 return;
6953
6954 case E_V4SImode:
6955 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6956 return;
6957
6958 case E_V8HImode:
6959 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6960 return;
6961
6962 case E_V16QImode:
6963 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6964 return;
6965
6966 default:
6967 gcc_unreachable ();
6968 }
6969 }
6970
6971 gcc_assert (CONST_INT_P (elt));
6972
6973 /* Allocate mode-sized buffer. */
6974 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6975
6976 emit_move_insn (mem, vec);
6977
6978 /* Add offset to field within buffer matching vector element. */
6979 mem = adjust_address_nv (mem, inner_mode,
6980 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
6981
6982 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6983 }
6984
6985 /* Helper function to return the register number of an RTX. */
6986 static inline int
6987 regno_or_subregno (rtx op)
6988 {
6989 if (REG_P (op))
6990 return REGNO (op);
6991 else if (SUBREG_P (op))
6992 return subreg_regno (op);
6993 else
6994 gcc_unreachable ();
6995 }
6996
6997 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6998 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6999 temporary (BASE_TMP) to fix up the address. Return the new memory address
7000 that is valid for reads or writes to a given register (SCALAR_REG). */
7001
7002 rtx
7003 rs6000_adjust_vec_address (rtx scalar_reg,
7004 rtx mem,
7005 rtx element,
7006 rtx base_tmp,
7007 machine_mode scalar_mode)
7008 {
7009 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7010 rtx addr = XEXP (mem, 0);
7011 rtx element_offset;
7012 rtx new_addr;
7013 bool valid_addr_p;
7014
7015 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7016 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7017
7018 /* Calculate what we need to add to the address to get the element
7019 address. */
7020 if (CONST_INT_P (element))
7021 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7022 else
7023 {
7024 int byte_shift = exact_log2 (scalar_size);
7025 gcc_assert (byte_shift >= 0);
7026
7027 if (byte_shift == 0)
7028 element_offset = element;
7029
7030 else
7031 {
7032 if (TARGET_POWERPC64)
7033 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7034 else
7035 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7036
7037 element_offset = base_tmp;
7038 }
7039 }
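/* Illustration: for a variable ELEMENT indexing a V8HImode vector,
   SCALAR_SIZE is 2, so BYTE_SHIFT is 1 and the code above leaves
   ELEMENT << 1 in BASE_TMP as the byte offset.  */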
7040
7041 /* Create the new address pointing to the element within the vector. If we
7042 are adding 0, we don't have to change the address. */
7043 if (element_offset == const0_rtx)
7044 new_addr = addr;
7045
7046 /* A simple indirect address can be converted into a reg + offset
7047 address. */
7048 else if (REG_P (addr) || SUBREG_P (addr))
7049 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7050
7051 /* Optimize D-FORM addresses with a constant offset and a constant element
7052 number, to include the element offset in the address directly. */
7053 else if (GET_CODE (addr) == PLUS)
7054 {
7055 rtx op0 = XEXP (addr, 0);
7056 rtx op1 = XEXP (addr, 1);
7057 rtx insn;
7058
7059 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7060 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7061 {
7062 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7063 rtx offset_rtx = GEN_INT (offset);
7064
7065 if (IN_RANGE (offset, -32768, 32767)
7066 && (scalar_size < 8 || (offset & 0x3) == 0))
7067 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7068 else
7069 {
7070 emit_move_insn (base_tmp, offset_rtx);
7071 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7072 }
7073 }
7074 else
7075 {
7076 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7077 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7078
7079 /* Note, ADDI requires the register being added to be a base
7080 register. If the register was R0, load it up into the temporary
7081 and do the add. */
7082 if (op1_reg_p
7083 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7084 {
7085 insn = gen_add3_insn (base_tmp, op1, element_offset);
7086 gcc_assert (insn != NULL_RTX);
7087 emit_insn (insn);
7088 }
7089
7090 else if (ele_reg_p
7091 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7092 {
7093 insn = gen_add3_insn (base_tmp, element_offset, op1);
7094 gcc_assert (insn != NULL_RTX);
7095 emit_insn (insn);
7096 }
7097
7098 else
7099 {
7100 emit_move_insn (base_tmp, op1);
7101 emit_insn (gen_add2_insn (base_tmp, element_offset));
7102 }
7103
7104 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7105 }
7106 }
7107
7108 else
7109 {
7110 emit_move_insn (base_tmp, addr);
7111 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7112 }
7113
7114 /* If we have a PLUS, we need to see whether the particular register class
7115 allows for D-FORM or X-FORM addressing. */
7116 if (GET_CODE (new_addr) == PLUS)
7117 {
7118 rtx op1 = XEXP (new_addr, 1);
7119 addr_mask_type addr_mask;
7120 int scalar_regno = regno_or_subregno (scalar_reg);
7121
7122 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7123 if (INT_REGNO_P (scalar_regno))
7124 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7125
7126 else if (FP_REGNO_P (scalar_regno))
7127 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7128
7129 else if (ALTIVEC_REGNO_P (scalar_regno))
7130 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7131
7132 else
7133 gcc_unreachable ();
7134
7135 if (REG_P (op1) || SUBREG_P (op1))
7136 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7137 else
7138 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7139 }
7140
7141 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7142 valid_addr_p = true;
7143
7144 else
7145 valid_addr_p = false;
7146
7147 if (!valid_addr_p)
7148 {
7149 emit_move_insn (base_tmp, new_addr);
7150 new_addr = base_tmp;
7151 }
7152
7153 return change_address (mem, scalar_mode, new_addr);
7154 }
7155
7156 /* Split a variable vec_extract operation into the component instructions. */
7157
7158 void
7159 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7160 rtx tmp_altivec)
7161 {
7162 machine_mode mode = GET_MODE (src);
7163 machine_mode scalar_mode = GET_MODE (dest);
7164 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7165 int byte_shift = exact_log2 (scalar_size);
7166
7167 gcc_assert (byte_shift >= 0);
7168
7169 /* If we are given a memory address, optimize to load just the element. We
7170 don't have to adjust the vector element number on little endian
7171 systems. */
7172 if (MEM_P (src))
7173 {
7174 gcc_assert (REG_P (tmp_gpr));
7175 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7176 tmp_gpr, scalar_mode));
7177 return;
7178 }
7179
7180 else if (REG_P (src) || SUBREG_P (src))
7181 {
7182 int bit_shift = byte_shift + 3;
7183 rtx element2;
7184 int dest_regno = regno_or_subregno (dest);
7185 int src_regno = regno_or_subregno (src);
7186 int element_regno = regno_or_subregno (element);
7187
7188 gcc_assert (REG_P (tmp_gpr));
7189
7190 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7191 a general purpose register. */
7192 if (TARGET_P9_VECTOR
7193 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7194 && INT_REGNO_P (dest_regno)
7195 && ALTIVEC_REGNO_P (src_regno)
7196 && INT_REGNO_P (element_regno))
7197 {
7198 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7199 rtx element_si = gen_rtx_REG (SImode, element_regno);
7200
7201 if (mode == V16QImode)
7202 emit_insn (BYTES_BIG_ENDIAN
7203 ? gen_vextublx (dest_si, element_si, src)
7204 : gen_vextubrx (dest_si, element_si, src));
7205
7206 else if (mode == V8HImode)
7207 {
7208 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7209 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7210 emit_insn (BYTES_BIG_ENDIAN
7211 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7212 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7213 }
7214
7216 else
7217 {
7218 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7219 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7220 emit_insn (BYTES_BIG_ENDIAN
7221 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7222 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7223 }
7224
7225 return;
7226 }
7227
7229 gcc_assert (REG_P (tmp_altivec));
7230
7231 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7232 an XOR, otherwise we need to subtract. The shift amount is chosen so
7233 that VSLO will shift the element into the upper position (adding 3
7234 converts a byte shift into a bit shift). */
7235 if (scalar_size == 8)
7236 {
7237 if (!BYTES_BIG_ENDIAN)
7238 {
7239 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7240 element2 = tmp_gpr;
7241 }
7242 else
7243 element2 = element;
7244
7245 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7246 bit. */
7247 emit_insn (gen_rtx_SET (tmp_gpr,
7248 gen_rtx_AND (DImode,
7249 gen_rtx_ASHIFT (DImode,
7250 element2,
7251 GEN_INT (6)),
7252 GEN_INT (64))));
7253 }
7254 else
7255 {
7256 if (!BYTES_BIG_ENDIAN)
7257 {
7258 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7259
7260 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7261 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7262 element2 = tmp_gpr;
7263 }
7264 else
7265 element2 = element;
7266
7267 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7268 }
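/* Illustration: extracting variable element 1 from a V4SImode vector
   on little endian computes 3 - (1 & 3) = 2 above, then shifts it
   left by BIT_SHIFT (5), giving 64, i.e. a VSLO shift of 8 bytes to
   move the element into the upper doubleword.  */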
7269
7270 /* Get the value into the lower byte of the Altivec register where VSLO
7271 expects it. */
7272 if (TARGET_P9_VECTOR)
7273 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7274 else if (can_create_pseudo_p ())
7275 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7276 else
7277 {
7278 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7279 emit_move_insn (tmp_di, tmp_gpr);
7280 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7281 }
7282
7283 /* Do the VSLO to get the value into the final location. */
7284 switch (mode)
7285 {
7286 case E_V2DFmode:
7287 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7288 return;
7289
7290 case E_V2DImode:
7291 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7292 return;
7293
7294 case E_V4SFmode:
7295 {
7296 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7297 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7298 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7299 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7300 tmp_altivec));
7301
7302 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7303 return;
7304 }
7305
7306 case E_V4SImode:
7307 case E_V8HImode:
7308 case E_V16QImode:
7309 {
7310 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7311 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7312 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7313 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7314 tmp_altivec));
7315 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7316 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7317 GEN_INT (64 - (8 * scalar_size))));
7318 return;
7319 }
7320
7321 default:
7322 gcc_unreachable ();
7323 }
7324
7325 return;
7326 }
7327 else
7328 gcc_unreachable ();
7329 }
7330
7331 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7332 selects whether the alignment is ABI-mandated, optional, or
7333 both ABI-mandated and optional alignment. */
7334
7335 unsigned int
7336 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7337 {
7338 if (how != align_opt)
7339 {
7340 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7341 align = 128;
7342 }
7343
7344 if (how != align_abi)
7345 {
7346 if (TREE_CODE (type) == ARRAY_TYPE
7347 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7348 {
7349 if (align < BITS_PER_WORD)
7350 align = BITS_PER_WORD;
7351 }
7352 }
7353
7354 return align;
7355 }
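/* Illustration: unless HOW is align_opt, a V4SI field is raised to
   128-bit alignment (the ABI-mandated part); unless HOW is align_abi,
   a char array is raised to word alignment (the optional part).  */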
7356
7357 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7358 instructions simply ignore the low bits; VSX memory instructions
7359 are aligned to 4 or 8 bytes. */
7360
7361 static bool
7362 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7363 {
7364 return (STRICT_ALIGNMENT
7365 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7366 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7367 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7368 && (int) align < VECTOR_ALIGN (mode)))));
7369 }
7370
7371 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7372
7373 bool
7374 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7375 {
7376 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7377 {
7378 if (computed != 128)
7379 {
7380 static bool warned;
7381 if (!warned && warn_psabi)
7382 {
7383 warned = true;
7384 inform (input_location,
7385 "the layout of aggregates containing vectors with"
7386 " %d-byte alignment has changed in GCC 5",
7387 computed / BITS_PER_UNIT);
7388 }
7389 }
7390 /* In current GCC there is no special case. */
7391 return false;
7392 }
7393
7394 return false;
7395 }
7396
7397 /* AIX increases natural record alignment to doubleword if the first
7398 field is an FP double while the FP fields remain word aligned. */
7399
7400 unsigned int
7401 rs6000_special_round_type_align (tree type, unsigned int computed,
7402 unsigned int specified)
7403 {
7404 unsigned int align = MAX (computed, specified);
7405 tree field = TYPE_FIELDS (type);
7406
7407 /* Skip all non-field decls. */
7408 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7409 field = DECL_CHAIN (field);
7410
7411 if (field != NULL && field != type)
7412 {
7413 type = TREE_TYPE (field);
7414 while (TREE_CODE (type) == ARRAY_TYPE)
7415 type = TREE_TYPE (type);
7416
7417 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7418 align = MAX (align, 64);
7419 }
7420
7421 return align;
7422 }
7423
7424 /* Darwin increases record alignment to the natural alignment of
7425 the first field. */
7426
7427 unsigned int
7428 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7429 unsigned int specified)
7430 {
7431 unsigned int align = MAX (computed, specified);
7432
7433 if (TYPE_PACKED (type))
7434 return align;
7435
7436 /* Find the first field, looking down into aggregates. */
7437 do {
7438 tree field = TYPE_FIELDS (type);
7439 /* Skip all non-field decls. */
7440 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7441 field = DECL_CHAIN (field);
7442 if (! field)
7443 break;
7444 /* A packed field does not contribute any extra alignment. */
7445 if (DECL_PACKED (field))
7446 return align;
7447 type = TREE_TYPE (field);
7448 while (TREE_CODE (type) == ARRAY_TYPE)
7449 type = TREE_TYPE (type);
7450 } while (AGGREGATE_TYPE_P (type));
7451
7452 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7453 align = MAX (align, TYPE_ALIGN (type));
7454
7455 return align;
7456 }
7457
7458 /* Return 1 for an operand in small memory on V.4/eabi. */
7459
7460 int
7461 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7462 machine_mode mode ATTRIBUTE_UNUSED)
7463 {
7464 #if TARGET_ELF
7465 rtx sym_ref;
7466
7467 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7468 return 0;
7469
7470 if (DEFAULT_ABI != ABI_V4)
7471 return 0;
7472
7473 if (GET_CODE (op) == SYMBOL_REF)
7474 sym_ref = op;
7475
7476 else if (GET_CODE (op) != CONST
7477 || GET_CODE (XEXP (op, 0)) != PLUS
7478 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7479 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7480 return 0;
7481
7482 else
7483 {
7484 rtx sum = XEXP (op, 0);
7485 HOST_WIDE_INT summand;
7486
7487 /* We have to be careful here, because it is the referenced address
7488 that must be 32k from _SDA_BASE_, not just the symbol. */
7489 summand = INTVAL (XEXP (sum, 1));
7490 if (summand < 0 || summand > g_switch_value)
7491 return 0;
7492
7493 sym_ref = XEXP (sum, 0);
7494 }
7495
7496 return SYMBOL_REF_SMALL_P (sym_ref);
7497 #else
7498 return 0;
7499 #endif
7500 }
7501
7502 /* Return true if either operand is a general purpose register. */
7503
7504 bool
7505 gpr_or_gpr_p (rtx op0, rtx op1)
7506 {
7507 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7508 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7509 }
7510
7511 /* Return true if this is a move direct operation between GPR registers and
7512 floating point/VSX registers. */
7513
7514 bool
7515 direct_move_p (rtx op0, rtx op1)
7516 {
7517 int regno0, regno1;
7518
7519 if (!REG_P (op0) || !REG_P (op1))
7520 return false;
7521
7522 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7523 return false;
7524
7525 regno0 = REGNO (op0);
7526 regno1 = REGNO (op1);
7527 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7528 return false;
7529
7530 if (INT_REGNO_P (regno0))
7531 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7532
7533 else if (INT_REGNO_P (regno1))
7534 {
7535 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7536 return true;
7537
7538 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7539 return true;
7540 }
7541
7542 return false;
7543 }
7544
7545 /* Return true if the OFFSET is valid for the quad address instructions that
7546 use d-form (register + offset) addressing. */
7547
7548 static inline bool
7549 quad_address_offset_p (HOST_WIDE_INT offset)
7550 {
7551 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7552 }
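/* Illustration: the test above accepts offsets such as 0, 16 and
   32752, but rejects 8 (not a multiple of 16) and 32768 (does not
   fit in the signed 16-bit displacement field).  */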
7553
7554 /* Return true if the ADDR is an acceptable address for a quad memory
7555 operation of mode MODE (either LQ/STQ for general purpose registers, or
7556 LXV/STXV for vector registers under ISA 3.0). STRICT is true if the
7557 address must satisfy the strict constraints on the base register that
7558 apply after register allocation. */
7559
7560 bool
7561 quad_address_p (rtx addr, machine_mode mode, bool strict)
7562 {
7563 rtx op0, op1;
7564
7565 if (GET_MODE_SIZE (mode) != 16)
7566 return false;
7567
7568 if (legitimate_indirect_address_p (addr, strict))
7569 return true;
7570
7571 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7572 return false;
7573
7574 if (GET_CODE (addr) != PLUS)
7575 return false;
7576
7577 op0 = XEXP (addr, 0);
7578 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7579 return false;
7580
7581 op1 = XEXP (addr, 1);
7582 if (!CONST_INT_P (op1))
7583 return false;
7584
7585 return quad_address_offset_p (INTVAL (op1));
7586 }
7587
7588 /* Return true if this is a load or store quad operation. This function does
7589 not handle the atomic quad memory instructions. */
7590
7591 bool
7592 quad_load_store_p (rtx op0, rtx op1)
7593 {
7594 bool ret;
7595
7596 if (!TARGET_QUAD_MEMORY)
7597 ret = false;
7598
7599 else if (REG_P (op0) && MEM_P (op1))
7600 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7601 && quad_memory_operand (op1, GET_MODE (op1))
7602 && !reg_overlap_mentioned_p (op0, op1));
7603
7604 else if (MEM_P (op0) && REG_P (op1))
7605 ret = (quad_memory_operand (op0, GET_MODE (op0))
7606 && quad_int_reg_operand (op1, GET_MODE (op1)));
7607
7608 else
7609 ret = false;
7610
7611 if (TARGET_DEBUG_ADDR)
7612 {
7613 fprintf (stderr, "\n========== quad_load_store_p, return %s\n",
7614 ret ? "true" : "false");
7615 debug_rtx (gen_rtx_SET (op0, op1));
7616 }
7617
7618 return ret;
7619 }
7620
7621 /* Given an address, return a constant offset term if one exists. */
7622
7623 static rtx
7624 address_offset (rtx op)
7625 {
7626 if (GET_CODE (op) == PRE_INC
7627 || GET_CODE (op) == PRE_DEC)
7628 op = XEXP (op, 0);
7629 else if (GET_CODE (op) == PRE_MODIFY
7630 || GET_CODE (op) == LO_SUM)
7631 op = XEXP (op, 1);
7632
7633 if (GET_CODE (op) == CONST)
7634 op = XEXP (op, 0);
7635
7636 if (GET_CODE (op) == PLUS)
7637 op = XEXP (op, 1);
7638
7639 if (CONST_INT_P (op))
7640 return op;
7641
7642 return NULL_RTX;
7643 }
7644
7645 /* Return true if the MEM operand is a memory operand suitable for use
7646 with a (full width, possibly multiple) gpr load/store. On
7647 powerpc64 this means the offset must be divisible by 4.
7648 Implements 'Y' constraint.
7649
7650 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7651 a constraint function we know the operand has satisfied a suitable
7652 memory predicate. Also accept some odd rtl generated by reload
7653 (see rs6000_legitimize_reload_address for various forms). It is
7654 important that reload rtl be accepted by appropriate constraints
7655 but not by the operand predicate.
7656
7657 Offsetting a lo_sum should not be allowed, except where we know by
7658 alignment that a 32k boundary is not crossed, but see the ???
7659 comment in rs6000_legitimize_reload_address. Note that by
7660 "offsetting" here we mean a further offset to access parts of the
7661 MEM. It's fine to have a lo_sum where the inner address is offset
7662 from a sym, since the same sym+offset will appear in the high part
7663 of the address calculation. */
7664
7665 bool
7666 mem_operand_gpr (rtx op, machine_mode mode)
7667 {
7668 unsigned HOST_WIDE_INT offset;
7669 int extra;
7670 rtx addr = XEXP (op, 0);
7671
7672 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7673 if (TARGET_UPDATE
7674 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7675 && mode_supports_pre_incdec_p (mode)
7676 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7677 return true;
7678
7679 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7680 if (!rs6000_offsettable_memref_p (op, mode, false))
7681 return false;
7682
7683 op = address_offset (addr);
7684 if (op == NULL_RTX)
7685 return true;
7686
7687 offset = INTVAL (op);
7688 if (TARGET_POWERPC64 && (offset & 3) != 0)
7689 return false;
7690
7691 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7692 if (extra < 0)
7693 extra = 0;
7694
7695 if (GET_CODE (addr) == LO_SUM)
7696 /* For lo_sum addresses, we must allow any offset except one that
7697 causes a wrap, so test only the low 16 bits. */
7698 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7699
7700 return offset + 0x8000 < 0x10000u - extra;
7701 }
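/* Illustration: on powerpc64 the checks above reject a DImode access
   at r3+2, since no DS-form ld/std exists for an offset that is not
   a multiple of 4, but accept r3+32764.  */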
7702
7703 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7704 enforce an offset divisible by 4 even for 32-bit. */
7705
7706 bool
7707 mem_operand_ds_form (rtx op, machine_mode mode)
7708 {
7709 unsigned HOST_WIDE_INT offset;
7710 int extra;
7711 rtx addr = XEXP (op, 0);
7712
7713 if (!offsettable_address_p (false, mode, addr))
7714 return false;
7715
7716 op = address_offset (addr);
7717 if (op == NULL_RTX)
7718 return true;
7719
7720 offset = INTVAL (op);
7721 if ((offset & 3) != 0)
7722 return false;
7723
7724 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7725 if (extra < 0)
7726 extra = 0;
7727
7728 if (GET_CODE (addr) == LO_SUM)
7729 /* For lo_sum addresses, we must allow any offset except one that
7730 causes a wrap, so test only the low 16 bits. */
7731 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7732
7733 return offset + 0x8000 < 0x10000u - extra;
7734 }
7735 \f
7736 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7737
7738 static bool
7739 reg_offset_addressing_ok_p (machine_mode mode)
7740 {
7741 switch (mode)
7742 {
7743 case E_V16QImode:
7744 case E_V8HImode:
7745 case E_V4SFmode:
7746 case E_V4SImode:
7747 case E_V2DFmode:
7748 case E_V2DImode:
7749 case E_V1TImode:
7750 case E_TImode:
7751 case E_TFmode:
7752 case E_KFmode:
7753 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7754 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7755 a vector mode, if we want to use the VSX registers to move it around,
7756 we need to restrict ourselves to reg+reg addressing. Similarly for
7757 IEEE 128-bit floating point that is passed in a single vector
7758 register. */
7759 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7760 return mode_supports_dq_form (mode);
7761 break;
7762
7763 case E_SDmode:
7764 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7765 addressing for the LFIWZX and STFIWX instructions. */
7766 if (TARGET_NO_SDMODE_STACK)
7767 return false;
7768 break;
7769
7770 default:
7771 break;
7772 }
7773
7774 return true;
7775 }
7776
7777 static bool
7778 virtual_stack_registers_memory_p (rtx op)
7779 {
7780 int regnum;
7781
7782 if (GET_CODE (op) == REG)
7783 regnum = REGNO (op);
7784
7785 else if (GET_CODE (op) == PLUS
7786 && GET_CODE (XEXP (op, 0)) == REG
7787 && GET_CODE (XEXP (op, 1)) == CONST_INT)
7788 regnum = REGNO (XEXP (op, 0));
7789
7790 else
7791 return false;
7792
7793 return (regnum >= FIRST_VIRTUAL_REGISTER
7794 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7795 }
7796
7797 /* Return true if a MODE sized memory accesses to OP plus OFFSET
7798 is known to not straddle a 32k boundary. This function is used
7799 to determine whether -mcmodel=medium code can use TOC pointer
7800 relative addressing for OP. This means the alignment of the TOC
7801 pointer must also be taken into account, and unfortunately that is
7802 only 8 bytes. */
7803
7804 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7805 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7806 #endif
7807
7808 static bool
7809 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7810 machine_mode mode)
7811 {
7812 tree decl;
7813 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7814
7815 if (GET_CODE (op) != SYMBOL_REF)
7816 return false;
7817
7818 /* ISA 3.0 vector d-form addressing is restricted; don't allow
7819 SYMBOL_REF. */
7820 if (mode_supports_dq_form (mode))
7821 return false;
7822
7823 dsize = GET_MODE_SIZE (mode);
7824 decl = SYMBOL_REF_DECL (op);
7825 if (!decl)
7826 {
7827 if (dsize == 0)
7828 return false;
7829
7830 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7831 replacing memory addresses with an anchor plus offset. We
7832 could find the decl by rummaging around in the block->objects
7833 VEC for the given offset but that seems like too much work. */
7834 dalign = BITS_PER_UNIT;
7835 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7836 && SYMBOL_REF_ANCHOR_P (op)
7837 && SYMBOL_REF_BLOCK (op) != NULL)
7838 {
7839 struct object_block *block = SYMBOL_REF_BLOCK (op);
7840
7841 dalign = block->alignment;
7842 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7843 }
7844 else if (CONSTANT_POOL_ADDRESS_P (op))
7845 {
7846 /* It would be nice to have get_pool_align().. */
7847 machine_mode cmode = get_pool_mode (op);
7848
7849 dalign = GET_MODE_ALIGNMENT (cmode);
7850 }
7851 }
7852 else if (DECL_P (decl))
7853 {
7854 dalign = DECL_ALIGN (decl);
7855
7856 if (dsize == 0)
7857 {
7858 /* Allow BLKmode when the entire object is known to not
7859 cross a 32k boundary. */
7860 if (!DECL_SIZE_UNIT (decl))
7861 return false;
7862
7863 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7864 return false;
7865
7866 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7867 if (dsize > 32768)
7868 return false;
7869
7870 dalign /= BITS_PER_UNIT;
7871 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7872 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7873 return dalign >= dsize;
7874 }
7875 }
7876 else
7877 gcc_unreachable ();
7878
7879 /* Find how many bits of the alignment we know for this access. */
7880 dalign /= BITS_PER_UNIT;
7881 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7882 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7883 mask = dalign - 1;
7884 lsb = offset & -offset;
7885 mask &= lsb - 1;
7886 dalign = mask + 1;
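/* Worked example (illustrative): with a decl alignment of 8 bytes
   and OFFSET 20, mask is 7 and lsb is 4 (the lowest set bit of 20),
   so mask becomes 3 and the known alignment of the access drops to
   4 bytes.  */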
7887
7888 return dalign >= dsize;
7889 }
7890
7891 static bool
7892 constant_pool_expr_p (rtx op)
7893 {
7894 rtx base, offset;
7895
7896 split_const (op, &base, &offset);
7897 return (GET_CODE (base) == SYMBOL_REF
7898 && CONSTANT_POOL_ADDRESS_P (base)
7899 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7900 }
7901
7902 /* These are only used to pass through from print_operand/print_operand_address
7903 to rs6000_output_addr_const_extra over the intervening function
7904 output_addr_const which is not target code. */
7905 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7906
7907 /* Return true if OP is a toc pointer relative address (the output
7908 of create_TOC_reference). If STRICT, do not match non-split
7909 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7910 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7911 TOCREL_OFFSET_RET respectively. */
7912
7913 bool
7914 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7915 const_rtx *tocrel_offset_ret)
7916 {
7917 if (!TARGET_TOC)
7918 return false;
7919
7920 if (TARGET_CMODEL != CMODEL_SMALL)
7921 {
7922 /* When strict ensure we have everything tidy. */
7923 if (strict
7924 && !(GET_CODE (op) == LO_SUM
7925 && REG_P (XEXP (op, 0))
7926 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7927 return false;
7928
7929 /* When not strict, allow non-split TOC addresses and also allow
7930 (lo_sum (high ..)) TOC addresses created during reload. */
7931 if (GET_CODE (op) == LO_SUM)
7932 op = XEXP (op, 1);
7933 }
7934
7935 const_rtx tocrel_base = op;
7936 const_rtx tocrel_offset = const0_rtx;
7937
7938 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7939 {
7940 tocrel_base = XEXP (op, 0);
7941 tocrel_offset = XEXP (op, 1);
7942 }
7943
7944 if (tocrel_base_ret)
7945 *tocrel_base_ret = tocrel_base;
7946 if (tocrel_offset_ret)
7947 *tocrel_offset_ret = tocrel_offset;
7948
7949 return (GET_CODE (tocrel_base) == UNSPEC
7950 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7951 && REG_P (XVECEXP (tocrel_base, 0, 1))
7952 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7953 }
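/* Illustration: a typical -mcmodel=small TOC reference matched above
   has the form
     (plus (unspec [(symbol_ref ...) (reg 2)] UNSPEC_TOCREL)
	   (const_int 8))
   where (reg 2) is the TOC pointer on typical configurations; the
   unspec becomes *TOCREL_BASE_RET and the const_int
   *TOCREL_OFFSET_RET.  */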
7954
7955 /* Return true if X is a constant pool address, and also for cmodel=medium
7956 if X is a toc-relative address known to be offsettable within MODE. */
7957
7958 bool
7959 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7960 bool strict)
7961 {
7962 const_rtx tocrel_base, tocrel_offset;
7963 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7964 && (TARGET_CMODEL != CMODEL_MEDIUM
7965 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7966 || mode == QImode
7967 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7968 INTVAL (tocrel_offset), mode)));
7969 }
7970
7971 static bool
7972 legitimate_small_data_p (machine_mode mode, rtx x)
7973 {
7974 return (DEFAULT_ABI == ABI_V4
7975 && !flag_pic && !TARGET_TOC
7976 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
7977 && small_data_operand (x, mode));
7978 }
7979
7980 bool
7981 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7982 bool strict, bool worst_case)
7983 {
7984 unsigned HOST_WIDE_INT offset;
7985 unsigned int extra;
7986
7987 if (GET_CODE (x) != PLUS)
7988 return false;
7989 if (!REG_P (XEXP (x, 0)))
7990 return false;
7991 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7992 return false;
7993 if (mode_supports_dq_form (mode))
7994 return quad_address_p (x, mode, strict);
7995 if (!reg_offset_addressing_ok_p (mode))
7996 return virtual_stack_registers_memory_p (x);
7997 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7998 return true;
7999 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8000 return false;
8001
8002 offset = INTVAL (XEXP (x, 1));
8003 extra = 0;
8004 switch (mode)
8005 {
8006 case E_DFmode:
8007 case E_DDmode:
8008 case E_DImode:
8009 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8010 addressing. */
8011 if (VECTOR_MEM_VSX_P (mode))
8012 return false;
8013
8014 if (!worst_case)
8015 break;
8016 if (!TARGET_POWERPC64)
8017 extra = 4;
8018 else if (offset & 3)
8019 return false;
8020 break;
8021
8022 case E_TFmode:
8023 case E_IFmode:
8024 case E_KFmode:
8025 case E_TDmode:
8026 case E_TImode:
8027 case E_PTImode:
8028 extra = 8;
8029 if (!worst_case)
8030 break;
8031 if (!TARGET_POWERPC64)
8032 extra = 12;
8033 else if (offset & 3)
8034 return false;
8035 break;
8036
8037 default:
8038 break;
8039 }
8040
8041 offset += 0x8000;
8042 return offset < 0x10000 - extra;
8043 }
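/* Illustration: for TImode (EXTRA 8) an offset of 32752 is accepted
   (32752 + 0x8000 = 0xfff0 < 0xfff8), but 32760 is rejected because
   the second doubleword of the access would need a displacement past
   the 16-bit range.  */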
8044
8045 bool
8046 legitimate_indexed_address_p (rtx x, int strict)
8047 {
8048 rtx op0, op1;
8049
8050 if (GET_CODE (x) != PLUS)
8051 return false;
8052
8053 op0 = XEXP (x, 0);
8054 op1 = XEXP (x, 1);
8055
8056 return (REG_P (op0) && REG_P (op1)
8057 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8058 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8059 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8060 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8061 }
8062
8063 bool
8064 avoiding_indexed_address_p (machine_mode mode)
8065 {
8066 /* Avoid indexed addressing for modes that have non-indexed
8067 load/store instruction forms. */
8068 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8069 }
8070
8071 bool
8072 legitimate_indirect_address_p (rtx x, int strict)
8073 {
8074 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8075 }
8076
8077 bool
8078 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8079 {
8080 if (!TARGET_MACHO || !flag_pic
8081 || mode != SImode || GET_CODE (x) != MEM)
8082 return false;
8083 x = XEXP (x, 0);
8084
8085 if (GET_CODE (x) != LO_SUM)
8086 return false;
8087 if (GET_CODE (XEXP (x, 0)) != REG)
8088 return false;
8089 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8090 return false;
8091 x = XEXP (x, 1);
8092
8093 return CONSTANT_P (x);
8094 }
8095
8096 static bool
8097 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8098 {
8099 if (GET_CODE (x) != LO_SUM)
8100 return false;
8101 if (GET_CODE (XEXP (x, 0)) != REG)
8102 return false;
8103 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8104 return false;
8105 /* quad word addresses are restricted, and we can't use LO_SUM. */
8106 if (mode_supports_dq_form (mode))
8107 return false;
8108 x = XEXP (x, 1);
8109
8110 if (TARGET_ELF || TARGET_MACHO)
8111 {
8112 bool large_toc_ok;
8113
8114 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8115 return false;
8116 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as it usually calls
8117 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8118 recognizes some LO_SUM addresses as valid although this
8119 function says the opposite. In most cases LRA can generate
8120 correct code for address reloads through various
8121 transformations; it only fails on some LO_SUM cases. So we need
8122 code here, analogous to that in rs6000_legitimize_reload_address
8123 for LO_SUM, saying that some addresses are still valid. */
8124 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8125 && small_toc_ref (x, VOIDmode));
8126 if (TARGET_TOC && ! large_toc_ok)
8127 return false;
8128 if (GET_MODE_NUNITS (mode) != 1)
8129 return false;
8130 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8131 && !(/* ??? Assume floating point reg based on mode? */
8132 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8133 return false;
8134
8135 return CONSTANT_P (x) || large_toc_ok;
8136 }
8137
8138 return false;
8139 }
8140
8141
8142 /* Try machine-dependent ways of modifying an illegitimate address
8143 to be legitimate. If we find one, return the new, valid address.
8144 This is used from only one place: `memory_address' in explow.c.
8145
8146 OLDX is the address as it was before break_out_memory_refs was
8147 called. In some cases it is useful to look at this to decide what
8148 needs to be done.
8149
8150 It is always safe for this function to do nothing. It exists to
8151 recognize opportunities to optimize the output.
8152
8153 On RS/6000, first check for the sum of a register with a constant
8154 integer that is out of range. If so, generate code to add the
8155 constant with the low-order 16 bits masked to the register and force
8156 this result into another register (this can be done with `cau').
8157 Then generate an address of REG+(CONST&0xffff), allowing for the
8158 possibility of bit 16 being a one.
8159
8160 Then check for the sum of a register and something not constant, try to
8161 load the other things into a register and return the sum. */
8162
8163 static rtx
8164 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8165 machine_mode mode)
8166 {
8167 unsigned int extra;
8168
8169 if (!reg_offset_addressing_ok_p (mode)
8170 || mode_supports_dq_form (mode))
8171 {
8172 if (virtual_stack_registers_memory_p (x))
8173 return x;
8174
8175 /* In theory we should not be seeing addresses of the form reg+0,
8176 but just in case it is generated, optimize it away. */
8177 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8178 return force_reg (Pmode, XEXP (x, 0));
8179
8180 /* For TImode with load/store quad, restrict addresses to just a single
8181 pointer, so it works with both GPRs and VSX registers. */
8182 /* Make sure both operands are registers. */
8183 else if (GET_CODE (x) == PLUS
8184 && (mode != TImode || !TARGET_VSX))
8185 return gen_rtx_PLUS (Pmode,
8186 force_reg (Pmode, XEXP (x, 0)),
8187 force_reg (Pmode, XEXP (x, 1)));
8188 else
8189 return force_reg (Pmode, x);
8190 }
8191 if (GET_CODE (x) == SYMBOL_REF)
8192 {
8193 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8194 if (model != 0)
8195 return rs6000_legitimize_tls_address (x, model);
8196 }
8197
8198 extra = 0;
8199 switch (mode)
8200 {
8201 case E_TFmode:
8202 case E_TDmode:
8203 case E_TImode:
8204 case E_PTImode:
8205 case E_IFmode:
8206 case E_KFmode:
8207 /* As in legitimate_offset_address_p we do not assume
8208 worst-case. The mode here is just a hint as to the registers
8209 used. A TImode is usually in gprs, but may actually be in
8210 fprs. Leave worst-case scenario for reload to handle via
8211 insn constraints. PTImode is only GPRs. */
8212 extra = 8;
8213 break;
8214 default:
8215 break;
8216 }
8217
8218 if (GET_CODE (x) == PLUS
8219 && GET_CODE (XEXP (x, 0)) == REG
8220 && GET_CODE (XEXP (x, 1)) == CONST_INT
8221 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8222 >= 0x10000 - extra))
8223 {
8224 HOST_WIDE_INT high_int, low_int;
8225 rtx sum;
8226 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8227 if (low_int >= 0x8000 - extra)
8228 low_int = 0;
8229 high_int = INTVAL (XEXP (x, 1)) - low_int;
8230 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8231 GEN_INT (high_int)), 0);
8232 return plus_constant (Pmode, sum, low_int);
8233 }
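/* Worked example (illustrative): reg + 0x12345 splits above into
   high_int 0x10000 and low_int 0x2345, while reg + 0x18000 sign
   extends to low_int -0x8000 and high_int 0x20000, so the result is
   (reg + 0x20000) - 0x8000.  */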
8234 else if (GET_CODE (x) == PLUS
8235 && GET_CODE (XEXP (x, 0)) == REG
8236 && GET_CODE (XEXP (x, 1)) != CONST_INT
8237 && GET_MODE_NUNITS (mode) == 1
8238 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8239 || (/* ??? Assume floating point reg based on mode? */
8240 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8241 && !avoiding_indexed_address_p (mode))
8242 {
8243 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8244 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8245 }
8246 else if ((TARGET_ELF
8247 #if TARGET_MACHO
8248 || !MACHO_DYNAMIC_NO_PIC_P
8249 #endif
8250 )
8251 && TARGET_32BIT
8252 && TARGET_NO_TOC
8253 && ! flag_pic
8254 && GET_CODE (x) != CONST_INT
8255 && GET_CODE (x) != CONST_WIDE_INT
8256 && GET_CODE (x) != CONST_DOUBLE
8257 && CONSTANT_P (x)
8258 && GET_MODE_NUNITS (mode) == 1
8259 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8260 || (/* ??? Assume floating point reg based on mode? */
8261 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8262 {
8263 rtx reg = gen_reg_rtx (Pmode);
8264 if (TARGET_ELF)
8265 emit_insn (gen_elf_high (reg, x));
8266 else
8267 emit_insn (gen_macho_high (reg, x));
8268 return gen_rtx_LO_SUM (Pmode, reg, x);
8269 }
8270 else if (TARGET_TOC
8271 && GET_CODE (x) == SYMBOL_REF
8272 && constant_pool_expr_p (x)
8273 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8274 return create_TOC_reference (x, NULL_RTX);
8275 else
8276 return x;
8277 }
8278
8279 /* Debug version of rs6000_legitimize_address. */
8280 static rtx
8281 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8282 {
8283 rtx ret;
8284 rtx_insn *insns;
8285
8286 start_sequence ();
8287 ret = rs6000_legitimize_address (x, oldx, mode);
8288 insns = get_insns ();
8289 end_sequence ();
8290
8291 if (ret != x)
8292 {
8293 fprintf (stderr,
8294 "\nrs6000_legitimize_address: mode %s, old code %s, "
8295 "new code %s, modified\n",
8296 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8297 GET_RTX_NAME (GET_CODE (ret)));
8298
8299 fprintf (stderr, "Original address:\n");
8300 debug_rtx (x);
8301
8302 fprintf (stderr, "oldx:\n");
8303 debug_rtx (oldx);
8304
8305 fprintf (stderr, "New address:\n");
8306 debug_rtx (ret);
8307
8308 if (insns)
8309 {
8310 fprintf (stderr, "Insns added:\n");
8311 debug_rtx_list (insns, 20);
8312 }
8313 }
8314 else
8315 {
8316 fprintf (stderr,
8317 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8318 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8319
8320 debug_rtx (x);
8321 }
8322
8323 if (insns)
8324 emit_insn (insns);
8325
8326 return ret;
8327 }
8328
8329 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8330 We need to emit DTP-relative relocations. */
8331
8332 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8333 static void
8334 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8335 {
8336 switch (size)
8337 {
8338 case 4:
8339 fputs ("\t.long\t", file);
8340 break;
8341 case 8:
8342 fputs (DOUBLE_INT_ASM_OP, file);
8343 break;
8344 default:
8345 gcc_unreachable ();
8346 }
8347 output_addr_const (file, x);
8348 if (TARGET_ELF)
8349 fputs ("@dtprel+0x8000", file);
8350 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8351 {
8352 switch (SYMBOL_REF_TLS_MODEL (x))
8353 {
8354 case 0:
8355 break;
8356 case TLS_MODEL_LOCAL_EXEC:
8357 fputs ("@le", file);
8358 break;
8359 case TLS_MODEL_INITIAL_EXEC:
8360 fputs ("@ie", file);
8361 break;
8362 case TLS_MODEL_GLOBAL_DYNAMIC:
8363 case TLS_MODEL_LOCAL_DYNAMIC:
8364 fputs ("@m", file);
8365 break;
8366 default:
8367 gcc_unreachable ();
8368 }
8369 }
8370 }
8371
8372 /* Return true if X is a symbol that refers to real (rather than emulated)
8373 TLS. */
8374
8375 static bool
8376 rs6000_real_tls_symbol_ref_p (rtx x)
8377 {
8378 return (GET_CODE (x) == SYMBOL_REF
8379 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8380 }
8381
8382 /* In the name of slightly smaller debug output, and to cater to
8383 general assembler lossage, recognize various UNSPEC sequences
8384 and turn them back into a direct symbol reference. */
8385
8386 static rtx
8387 rs6000_delegitimize_address (rtx orig_x)
8388 {
8389 rtx x, y, offset;
8390
8391 orig_x = delegitimize_mem_from_attrs (orig_x);
8392 x = orig_x;
8393 if (MEM_P (x))
8394 x = XEXP (x, 0);
8395
8396 y = x;
8397 if (TARGET_CMODEL != CMODEL_SMALL
8398 && GET_CODE (y) == LO_SUM)
8399 y = XEXP (y, 1);
8400
8401 offset = NULL_RTX;
8402 if (GET_CODE (y) == PLUS
8403 && GET_MODE (y) == Pmode
8404 && CONST_INT_P (XEXP (y, 1)))
8405 {
8406 offset = XEXP (y, 1);
8407 y = XEXP (y, 0);
8408 }
8409
8410 if (GET_CODE (y) == UNSPEC
8411 && XINT (y, 1) == UNSPEC_TOCREL)
8412 {
8413 y = XVECEXP (y, 0, 0);
8414
8415 #ifdef HAVE_AS_TLS
8416 /* Do not associate thread-local symbols with the original
8417 constant pool symbol. */
8418 if (TARGET_XCOFF
8419 && GET_CODE (y) == SYMBOL_REF
8420 && CONSTANT_POOL_ADDRESS_P (y)
8421 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8422 return orig_x;
8423 #endif
8424
8425 if (offset != NULL_RTX)
8426 y = gen_rtx_PLUS (Pmode, y, offset);
8427 if (!MEM_P (orig_x))
8428 return y;
8429 else
8430 return replace_equiv_address_nv (orig_x, y);
8431 }
8432
8433 if (TARGET_MACHO
8434 && GET_CODE (orig_x) == LO_SUM
8435 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8436 {
8437 y = XEXP (XEXP (orig_x, 1), 0);
8438 if (GET_CODE (y) == UNSPEC
8439 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8440 return XVECEXP (y, 0, 0);
8441 }
8442
8443 return orig_x;
8444 }
8445
8446 /* Return true if X shouldn't be emitted into the debug info.
8447 The linker doesn't like .toc section references from
8448 .debug_* sections, so reject .toc section symbols. */
8449
8450 static bool
8451 rs6000_const_not_ok_for_debug_p (rtx x)
8452 {
8453 if (GET_CODE (x) == UNSPEC)
8454 return true;
8455 if (GET_CODE (x) == SYMBOL_REF
8456 && CONSTANT_POOL_ADDRESS_P (x))
8457 {
8458 rtx c = get_pool_constant (x);
8459 machine_mode cmode = get_pool_mode (x);
8460 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8461 return true;
8462 }
8463
8464 return false;
8465 }
8466
8467 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8468
8469 static bool
8470 rs6000_legitimate_combined_insn (rtx_insn *insn)
8471 {
8472 int icode = INSN_CODE (insn);
8473
8474 /* Reject creating doloop insns. Combine should not be allowed
8475 to create these for a number of reasons:
8476 1) In a nested loop, if combine creates one of these in an
8477 outer loop and the register allocator happens to allocate ctr
8478 to the outer loop insn, then the inner loop can't use ctr.
8479 Inner loops ought to be more highly optimized.
8480 2) Combine often wants to create one of these from what was
8481 originally a three insn sequence, first combining the three
8482 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8483 allocated ctr, the splitter takes us back to the three insn
8484 sequence. It's better to stop combine at the two insn
8485 sequence.
8486 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8487 insns, the register allocator sometimes uses floating point
8488 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8489 jump insn and output reloads are not implemented for jumps,
8490 the ctrsi/ctrdi splitters need to handle all possible cases.
8491 That's a pain, and it gets to be seriously difficult when a
8492 splitter that runs after reload needs memory to transfer from
8493 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8494 for the difficult case. It's better to not create problems
8495 in the first place. */
8496 if (icode != CODE_FOR_nothing
8497 && (icode == CODE_FOR_bdz_si
8498 || icode == CODE_FOR_bdz_di
8499 || icode == CODE_FOR_bdnz_si
8500 || icode == CODE_FOR_bdnz_di
8501 || icode == CODE_FOR_bdztf_si
8502 || icode == CODE_FOR_bdztf_di
8503 || icode == CODE_FOR_bdnztf_si
8504 || icode == CODE_FOR_bdnztf_di))
8505 return false;
8506
8507 return true;
8508 }
8509
8510 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8511
8512 static GTY(()) rtx rs6000_tls_symbol;
8513 static rtx
8514 rs6000_tls_get_addr (void)
8515 {
8516 if (!rs6000_tls_symbol)
8517 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8518
8519 return rs6000_tls_symbol;
8520 }
8521
8522 /* Construct the SYMBOL_REF for TLS GOT references. */
8523
8524 static GTY(()) rtx rs6000_got_symbol;
8525 static rtx
8526 rs6000_got_sym (void)
8527 {
8528 if (!rs6000_got_symbol)
8529 {
8530 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8531 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8532 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8533 }
8534
8535 return rs6000_got_symbol;
8536 }
8537
8538 /* AIX Thread-Local Address support. */
8539
8540 static rtx
8541 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8542 {
8543 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8544 const char *name;
8545 char *tlsname;
8546
8547 name = XSTR (addr, 0);
8548 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8549 or the symbol will be in TLS private data section. */
8550 if (name[strlen (name) - 1] != ']'
8551 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8552 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8553 {
8554 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8555 strcpy (tlsname, name);
8556 strcat (tlsname,
8557 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8558 tlsaddr = copy_rtx (addr);
8559 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8560 }
8561 else
8562 tlsaddr = addr;
8563
8564 /* Place addr into TOC constant pool. */
8565 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8566
8567 /* Output the TOC entry and create the MEM referencing the value. */
8568 if (constant_pool_expr_p (XEXP (sym, 0))
8569 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8570 {
8571 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8572 mem = gen_const_mem (Pmode, tocref);
8573 set_mem_alias_set (mem, get_TOC_alias_set ());
8574 }
8575 else
8576 return sym;
8577
8578 /* Use global-dynamic for local-dynamic. */
8579 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8580 || model == TLS_MODEL_LOCAL_DYNAMIC)
8581 {
8582 /* Create new TOC reference for @m symbol. */
8583 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8584 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8585 strcpy (tlsname, "*LCM");
8586 strcat (tlsname, name + 3);
8587 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8588 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8589 tocref = create_TOC_reference (modaddr, NULL_RTX);
8590 rtx modmem = gen_const_mem (Pmode, tocref);
8591 set_mem_alias_set (modmem, get_TOC_alias_set ());
8592
8593 rtx modreg = gen_reg_rtx (Pmode);
8594 emit_insn (gen_rtx_SET (modreg, modmem));
8595
8596 tmpreg = gen_reg_rtx (Pmode);
8597 emit_insn (gen_rtx_SET (tmpreg, mem));
8598
8599 dest = gen_reg_rtx (Pmode);
8600 if (TARGET_32BIT)
8601 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8602 else
8603 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8604 return dest;
8605 }
8606 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8607 else if (TARGET_32BIT)
8608 {
8609 tlsreg = gen_reg_rtx (SImode);
8610 emit_insn (gen_tls_get_tpointer (tlsreg));
8611 }
8612 else
8613 tlsreg = gen_rtx_REG (DImode, 13);
8614
8615 /* Load the TOC value into temporary register. */
8616 tmpreg = gen_reg_rtx (Pmode);
8617 emit_insn (gen_rtx_SET (tmpreg, mem));
8618 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8619 gen_rtx_MINUS (Pmode, addr, tlsreg));
8620
8621 /* Add TOC symbol value to TLS pointer. */
8622 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8623
8624 return dest;
8625 }
8626
8627 /* Mess with a call, to make it look like the tls_gdld insns when
8628 !TARGET_TLS_MARKERS. These insns have an extra unspec to
8629 differentiate them from standard calls, because they need to emit
8630 the arg setup insns as well as the actual call. That keeps the
8631 arg setup insns immediately adjacent to the branch and link. */
8632
8633 static void
8634 edit_tls_call_insn (rtx arg)
8635 {
8636 rtx call_insn = last_call_insn ();
8637 if (!TARGET_TLS_MARKERS)
8638 {
8639 rtx patt = PATTERN (call_insn);
8640 gcc_assert (GET_CODE (patt) == PARALLEL);
8641 rtvec orig = XVEC (patt, 0);
8642 rtvec v = rtvec_alloc (GET_NUM_ELEM (orig) + 1);
8643 gcc_assert (GET_NUM_ELEM (orig) > 0);
8644 /* The (set (..) (call (mem ..))). */
8645 RTVEC_ELT (v, 0) = RTVEC_ELT (orig, 0);
8646 /* The extra unspec. */
8647 RTVEC_ELT (v, 1) = arg;
8648 /* All other assorted call pattern pieces. */
8649 for (int i = 1; i < GET_NUM_ELEM (orig); i++)
8650 RTVEC_ELT (v, i + 1) = RTVEC_ELT (orig, i);
8651 XVEC (patt, 0) = v;
8652 }
8653 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8654 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8655 pic_offset_table_rtx);
8656 }
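
/* Sketch of the !TARGET_TLS_MARKERS rewrite above (illustrative RTL):
   a call pattern such as

	(parallel [(set (reg:DI 3)
			(call (mem:SI (symbol_ref "__tls_get_addr")) ...))
		   (clobber (reg:DI LR_REGNO))])

   has the arg unspec spliced in as element 1:

	(parallel [(set (reg:DI 3) (call ...))
		   (unspec:DI [(symbol_ref "x") (reg:DI 2)] UNSPEC_TLSGD)
		   (clobber (reg:DI LR_REGNO))])

   so it matches the tls_gdld patterns rather than a plain call.  */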
8657
8658 /* Passes the tls arg value for global dynamic and local dynamic
8659 emit_library_call_value in rs6000_legitimize_tls_address to
8660 rs6000_call_aix and rs6000_call_sysv. This is used to emit the
8661 marker relocs put on __tls_get_addr calls. */
8662 static rtx global_tlsarg;
8663
8664 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8665 this (thread-local) address. */
8666
8667 static rtx
8668 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8669 {
8670 rtx dest, insn;
8671
8672 if (TARGET_XCOFF)
8673 return rs6000_legitimize_tls_address_aix (addr, model);
8674
8675 dest = gen_reg_rtx (Pmode);
8676 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8677 {
8678 rtx tlsreg;
8679
8680 if (TARGET_64BIT)
8681 {
8682 tlsreg = gen_rtx_REG (Pmode, 13);
8683 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8684 }
8685 else
8686 {
8687 tlsreg = gen_rtx_REG (Pmode, 2);
8688 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8689 }
8690 emit_insn (insn);
8691 }
8692 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8693 {
8694 rtx tlsreg, tmp;
8695
8696 tmp = gen_reg_rtx (Pmode);
8697 if (TARGET_64BIT)
8698 {
8699 tlsreg = gen_rtx_REG (Pmode, 13);
8700 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8701 }
8702 else
8703 {
8704 tlsreg = gen_rtx_REG (Pmode, 2);
8705 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8706 }
8707 emit_insn (insn);
8708 if (TARGET_64BIT)
8709 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8710 else
8711 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8712 emit_insn (insn);
8713 }
8714 else
8715 {
8716 rtx got, tga, tmp1, tmp2;
8717
8718 /* We currently use relocations like @got@tlsgd for tls, which
8719 means the linker will handle allocation of tls entries, placing
8720 them in the .got section. So use a pointer to the .got section,
8721 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8722 or to secondary GOT sections used by 32-bit -fPIC. */
8723 if (TARGET_64BIT)
8724 got = gen_rtx_REG (Pmode, 2);
8725 else
8726 {
8727 if (flag_pic == 1)
8728 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8729 else
8730 {
8731 rtx gsym = rs6000_got_sym ();
8732 got = gen_reg_rtx (Pmode);
8733 if (flag_pic == 0)
8734 rs6000_emit_move (got, gsym, Pmode);
8735 else
8736 {
8737 rtx mem, lab;
8738
8739 tmp1 = gen_reg_rtx (Pmode);
8740 tmp2 = gen_reg_rtx (Pmode);
8741 mem = gen_const_mem (Pmode, tmp1);
8742 lab = gen_label_rtx ();
8743 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8744 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8745 if (TARGET_LINK_STACK)
8746 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8747 emit_move_insn (tmp2, mem);
8748 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8749 set_unique_reg_note (last, REG_EQUAL, gsym);
8750 }
8751 }
8752 }
8753
8754 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8755 {
8756 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8757 UNSPEC_TLSGD);
8758 global_tlsarg = arg;
8759 rtx argreg = const0_rtx;
8760 if (TARGET_TLS_MARKERS)
8761 {
8762 argreg = gen_rtx_REG (Pmode, 3);
8763 emit_insn (gen_rtx_SET (argreg, arg));
8764 }
8765
8766 tga = rs6000_tls_get_addr ();
8767 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8768 argreg, Pmode);
8769 global_tlsarg = NULL_RTX;
8770
8771 edit_tls_call_insn (arg);
8772 }
8773 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8774 {
8775 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got),
8776 UNSPEC_TLSLD);
8777 global_tlsarg = arg;
8778 rtx argreg = const0_rtx;
8779 if (TARGET_TLS_MARKERS)
8780 {
8781 argreg = gen_rtx_REG (Pmode, 3);
8782 emit_insn (gen_rtx_SET (argreg, arg));
8783 }
8784
8785 tga = rs6000_tls_get_addr ();
8786 tmp1 = gen_reg_rtx (Pmode);
8787 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8788 argreg, Pmode);
8789 global_tlsarg = NULL_RTX;
8790
8791 edit_tls_call_insn (arg);
8792
8793 if (rs6000_tls_size == 16)
8794 {
8795 if (TARGET_64BIT)
8796 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8797 else
8798 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8799 }
8800 else if (rs6000_tls_size == 32)
8801 {
8802 tmp2 = gen_reg_rtx (Pmode);
8803 if (TARGET_64BIT)
8804 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8805 else
8806 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8807 emit_insn (insn);
8808 if (TARGET_64BIT)
8809 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8810 else
8811 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8812 }
8813 else
8814 {
8815 tmp2 = gen_reg_rtx (Pmode);
8816 if (TARGET_64BIT)
8817 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8818 else
8819 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8820 emit_insn (insn);
8821 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8822 }
8823 emit_insn (insn);
8824 }
8825 else
8826 {
8827 /* IE, or 64-bit offset LE. */
8828 tmp2 = gen_reg_rtx (Pmode);
8829 if (TARGET_64BIT)
8830 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8831 else
8832 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8833 emit_insn (insn);
8834 if (TARGET_64BIT)
8835 insn = gen_tls_tls_64 (dest, tmp2, addr);
8836 else
8837 insn = gen_tls_tls_32 (dest, tmp2, addr);
8838 emit_insn (insn);
8839 }
8840 }
8841
8842 return dest;
8843 }
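
/* For reference (an illustrative sketch), the global-dynamic case
   above expands on 64-bit ELF with TLS markers to roughly

	addi r3,r2,x@got@tlsgd		# set up the argument
	bl   __tls_get_addr(x@tlsgd)	# marker reloc on the call
	nop

   and the initial-exec case to a GOT load of the TP offset,
   ld rX,x@got@tprel(r2), followed by an add with r13.  */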
8844
8845 /* Only create the global variable for the stack protect guard if we are using
8846 the global flavor of that guard. */
8847 static tree
8848 rs6000_init_stack_protect_guard (void)
8849 {
8850 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8851 return default_stack_protect_guard ();
8852
8853 return NULL_TREE;
8854 }
8855
8856 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8857
8858 static bool
8859 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8860 {
8861 if (GET_CODE (x) == HIGH
8862 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8863 return true;
8864
8865 /* A TLS symbol in the TOC cannot contain a sum. */
8866 if (GET_CODE (x) == CONST
8867 && GET_CODE (XEXP (x, 0)) == PLUS
8868 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8869 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8870 return true;
8871
8872 /* Do not place an ELF TLS symbol in the constant pool. */
8873 return TARGET_ELF && tls_referenced_p (x);
8874 }
8875
8876 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8877 that we have put in the TOC, or, for cmodel=medium, if the SYMBOL_REF
8878 can be addressed relative to the TOC pointer. */
8879
8880 static bool
8881 use_toc_relative_ref (rtx sym, machine_mode mode)
8882 {
8883 return ((constant_pool_expr_p (sym)
8884 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8885 get_pool_mode (sym)))
8886 || (TARGET_CMODEL == CMODEL_MEDIUM
8887 && SYMBOL_REF_LOCAL_P (sym)
8888 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8889 }
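
/* For example (illustrative), with -mcmodel=medium a local symbol
   accepted above is addressed straight off the TOC pointer,

	addis r9,r2,sym@toc@ha
	addi  r9,r9,sym@toc@l	# or fold @toc@l into the load/store

   with no TOC indirection slot needed.  */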
8890
8891 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8892 replace the input X, or the original X if no replacement is called for.
8893 The output parameter *WIN is 1 if the calling macro should goto WIN,
8894 0 if it should not.
8895
8896 For RS/6000, we wish to handle large displacements off a base
8897 register by splitting the addend across an addis and the mem insn.
8898 This cuts the number of extra insns needed from 3 to 1.
8899
8900 On Darwin, we use this to generate code for floating point constants.
8901 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8902 The Darwin code is inside #if TARGET_MACHO because only then are the
8903 machopic_* functions defined. */
8904 static rtx
8905 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8906 int opnum, int type,
8907 int ind_levels ATTRIBUTE_UNUSED, int *win)
8908 {
8909 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8910 bool quad_offset_p = mode_supports_dq_form (mode);
8911
8912 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8913 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8914 if (reg_offset_p
8915 && opnum == 1
8916 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8917 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8918 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8919 && TARGET_P9_VECTOR)
8920 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8921 && TARGET_P9_VECTOR)))
8922 reg_offset_p = false;
8923
8924 /* We must recognize output that we have already generated ourselves. */
8925 if (GET_CODE (x) == PLUS
8926 && GET_CODE (XEXP (x, 0)) == PLUS
8927 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8928 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8929 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8930 {
8931 if (TARGET_DEBUG_ADDR)
8932 {
8933 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8934 debug_rtx (x);
8935 }
8936 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8937 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8938 opnum, (enum reload_type) type);
8939 *win = 1;
8940 return x;
8941 }
8942
8943 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8944 if (GET_CODE (x) == LO_SUM
8945 && GET_CODE (XEXP (x, 0)) == HIGH)
8946 {
8947 if (TARGET_DEBUG_ADDR)
8948 {
8949 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8950 debug_rtx (x);
8951 }
8952 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8953 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8954 opnum, (enum reload_type) type);
8955 *win = 1;
8956 return x;
8957 }
8958
8959 #if TARGET_MACHO
8960 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8961 && GET_CODE (x) == LO_SUM
8962 && GET_CODE (XEXP (x, 0)) == PLUS
8963 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8964 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8965 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8966 && machopic_operand_p (XEXP (x, 1)))
8967 {
8968 /* Result of previous invocation of this function on Darwin
8969 floating point constant. */
8970 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8971 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8972 opnum, (enum reload_type) type);
8973 *win = 1;
8974 return x;
8975 }
8976 #endif
8977
8978 if (TARGET_CMODEL != CMODEL_SMALL
8979 && reg_offset_p
8980 && !quad_offset_p
8981 && small_toc_ref (x, VOIDmode))
8982 {
8983 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8984 x = gen_rtx_LO_SUM (Pmode, hi, x);
8985 if (TARGET_DEBUG_ADDR)
8986 {
8987 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8988 debug_rtx (x);
8989 }
8990 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8991 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8992 opnum, (enum reload_type) type);
8993 *win = 1;
8994 return x;
8995 }
8996
8997 if (GET_CODE (x) == PLUS
8998 && REG_P (XEXP (x, 0))
8999 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9000 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9001 && CONST_INT_P (XEXP (x, 1))
9002 && reg_offset_p
9003 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9004 {
9005 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9006 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9007 HOST_WIDE_INT high
9008 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9009
9010 /* Check for 32-bit overflow or quad addresses with one of the
9011 four least significant bits set. */
9012 if (high + low != val
9013 || (quad_offset_p && (low & 0xf)))
9014 {
9015 *win = 0;
9016 return x;
9017 }
9018
9019 /* Reload the high part into a base reg; leave the low part
9020 in the mem directly. */
9021
9022 x = gen_rtx_PLUS (GET_MODE (x),
9023 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9024 GEN_INT (high)),
9025 GEN_INT (low));
9026
9027 if (TARGET_DEBUG_ADDR)
9028 {
9029 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9030 debug_rtx (x);
9031 }
9032 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9033 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9034 opnum, (enum reload_type) type);
9035 *win = 1;
9036 return x;
9037 }
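
/* A worked example of the split above (illustrative): for val =
   0x12348765, low = ((0x8765 ^ 0x8000) - 0x8000) = -0x789b and
   high = val - low = 0x12350000, so reload materializes the high
   part as addis tmp,base,0x1235 while the in-range displacement
   -0x789b stays in the mem; high + low still equals val.  */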
9038
9039 if (GET_CODE (x) == SYMBOL_REF
9040 && reg_offset_p
9041 && !quad_offset_p
9042 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9043 #if TARGET_MACHO
9044 && DEFAULT_ABI == ABI_DARWIN
9045 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9046 && machopic_symbol_defined_p (x)
9047 #else
9048 && DEFAULT_ABI == ABI_V4
9049 && !flag_pic
9050 #endif
9051 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9052 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9053 without fprs.
9054 ??? Assume floating point reg based on mode? This assumption is
9055 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9056 where reload ends up doing a DFmode load of a constant from
9057 mem using two gprs. Unfortunately, at this point reload
9058 hasn't yet selected regs so poking around in reload data
9059 won't help and even if we could figure out the regs reliably,
9060 we'd still want to allow this transformation when the mem is
9061 naturally aligned. Since we say the address is good here, we
9062 can't disable offsets from LO_SUMs in mem_operand_gpr.
9063 FIXME: Allow offset from lo_sum for other modes too, when
9064 mem is sufficiently aligned.
9065
9066 Also disallow this if the type can go in VMX/Altivec registers, since
9067 those registers do not have d-form (reg+offset) address modes. */
9068 && !reg_addr[mode].scalar_in_vmx_p
9069 && mode != TFmode
9070 && mode != TDmode
9071 && mode != IFmode
9072 && mode != KFmode
9073 && (mode != TImode || !TARGET_VSX)
9074 && mode != PTImode
9075 && (mode != DImode || TARGET_POWERPC64)
9076 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9077 || TARGET_HARD_FLOAT))
9078 {
9079 #if TARGET_MACHO
9080 if (flag_pic)
9081 {
9082 rtx offset = machopic_gen_offset (x);
9083 x = gen_rtx_LO_SUM (GET_MODE (x),
9084 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9085 gen_rtx_HIGH (Pmode, offset)), offset);
9086 }
9087 else
9088 #endif
9089 x = gen_rtx_LO_SUM (GET_MODE (x),
9090 gen_rtx_HIGH (Pmode, x), x);
9091
9092 if (TARGET_DEBUG_ADDR)
9093 {
9094 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9095 debug_rtx (x);
9096 }
9097 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9098 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9099 opnum, (enum reload_type) type);
9100 *win = 1;
9101 return x;
9102 }
9103
9104 /* Reload an offset address wrapped by an AND that represents the
9105 masking of the lower bits. Strip the outer AND and let reload
9106 convert the offset address into an indirect address. For VSX,
9107 force reload to create the address with an AND in a separate
9108 register, because we can't guarantee an altivec register will
9109 be used. */
9110 if (VECTOR_MEM_ALTIVEC_P (mode)
9111 && GET_CODE (x) == AND
9112 && GET_CODE (XEXP (x, 0)) == PLUS
9113 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9114 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9115 && GET_CODE (XEXP (x, 1)) == CONST_INT
9116 && INTVAL (XEXP (x, 1)) == -16)
9117 {
9118 x = XEXP (x, 0);
9119 *win = 1;
9120 return x;
9121 }
9122
9123 if (TARGET_TOC
9124 && reg_offset_p
9125 && !quad_offset_p
9126 && GET_CODE (x) == SYMBOL_REF
9127 && use_toc_relative_ref (x, mode))
9128 {
9129 x = create_TOC_reference (x, NULL_RTX);
9130 if (TARGET_CMODEL != CMODEL_SMALL)
9131 {
9132 if (TARGET_DEBUG_ADDR)
9133 {
9134 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9135 debug_rtx (x);
9136 }
9137 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9138 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9139 opnum, (enum reload_type) type);
9140 }
9141 *win = 1;
9142 return x;
9143 }
9144 *win = 0;
9145 return x;
9146 }
9147
9148 /* Debug version of rs6000_legitimize_reload_address. */
9149 static rtx
9150 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9151 int opnum, int type,
9152 int ind_levels, int *win)
9153 {
9154 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9155 ind_levels, win);
9156 fprintf (stderr,
9157 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9158 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9159 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9160 debug_rtx (x);
9161
9162 if (x == ret)
9163 fprintf (stderr, "Same address returned\n");
9164 else if (!ret)
9165 fprintf (stderr, "NULL returned\n");
9166 else
9167 {
9168 fprintf (stderr, "New address:\n");
9169 debug_rtx (ret);
9170 }
9171
9172 return ret;
9173 }
9174
9175 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9176 that is a valid memory address for an instruction.
9177 The MODE argument is the machine mode for the MEM expression
9178 that wants to use this address.
9179
9180 On the RS/6000, there are four valid forms of address: a SYMBOL_REF
9181 that refers to a constant pool entry of an address (or the sum of it
9182 plus a constant), a short (16-bit signed) constant plus a register,
9183 the sum of two registers, or a register indirect, possibly with an
9184 auto-increment. For DFmode, DDmode and DImode with a constant plus
9185 register, we must ensure that both words are addressable, or that we
9186 are on PowerPC64 with a word-aligned offset.
9187
9188 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9189 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9190 because adjacent memory cells are accessed by adding word-sized offsets
9191 during assembly output. */
9192 static bool
9193 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9194 {
9195 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9196 bool quad_offset_p = mode_supports_dq_form (mode);
9197
9198 /* If this is an unaligned lvx/stvx type address, discard the outer AND. */
9199 if (VECTOR_MEM_ALTIVEC_P (mode)
9200 && GET_CODE (x) == AND
9201 && GET_CODE (XEXP (x, 1)) == CONST_INT
9202 && INTVAL (XEXP (x, 1)) == -16)
9203 x = XEXP (x, 0);
9204
9205 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9206 return 0;
9207 if (legitimate_indirect_address_p (x, reg_ok_strict))
9208 return 1;
9209 if (TARGET_UPDATE
9210 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9211 && mode_supports_pre_incdec_p (mode)
9212 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9213 return 1;
9214 /* Handle restricted vector d-form offsets in ISA 3.0. */
9215 if (quad_offset_p)
9216 {
9217 if (quad_address_p (x, mode, reg_ok_strict))
9218 return 1;
9219 }
9220 else if (virtual_stack_registers_memory_p (x))
9221 return 1;
9222
9223 else if (reg_offset_p)
9224 {
9225 if (legitimate_small_data_p (mode, x))
9226 return 1;
9227 if (legitimate_constant_pool_address_p (x, mode,
9228 reg_ok_strict || lra_in_progress))
9229 return 1;
9230 }
9231
9232 /* For TImode, if we have TImode in VSX registers, only allow register
9233 indirect addresses. This will allow the values to go in either GPRs
9234 or VSX registers without reloading. The vector types would tend to
9235 go into VSX registers, so we allow REG+REG, while TImode seems
9236 somewhat split, in that some uses are GPR based, and some VSX based. */
9237 /* FIXME: We could loosen this by changing the following to
9238 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9239 but currently we cannot allow REG+REG addressing for TImode. See
9240 PR72827 for complete details on how this ends up hoodwinking DSE. */
9241 if (mode == TImode && TARGET_VSX)
9242 return 0;
9243 /* If not REG_OK_STRICT (before reload), allow any stack offset.  */
9244 if (! reg_ok_strict
9245 && reg_offset_p
9246 && GET_CODE (x) == PLUS
9247 && GET_CODE (XEXP (x, 0)) == REG
9248 && (XEXP (x, 0) == virtual_stack_vars_rtx
9249 || XEXP (x, 0) == arg_pointer_rtx)
9250 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9251 return 1;
9252 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9253 return 1;
9254 if (!FLOAT128_2REG_P (mode)
9255 && (TARGET_HARD_FLOAT
9256 || TARGET_POWERPC64
9257 || (mode != DFmode && mode != DDmode))
9258 && (TARGET_POWERPC64 || mode != DImode)
9259 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9260 && mode != PTImode
9261 && !avoiding_indexed_address_p (mode)
9262 && legitimate_indexed_address_p (x, reg_ok_strict))
9263 return 1;
9264 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9265 && mode_supports_pre_modify_p (mode)
9266 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9267 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9268 reg_ok_strict, false)
9269 || (!avoiding_indexed_address_p (mode)
9270 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9271 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9272 return 1;
9273 if (reg_offset_p && !quad_offset_p
9274 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9275 return 1;
9276 return 0;
9277 }
9278
9279 /* Debug version of rs6000_legitimate_address_p. */
9280 static bool
9281 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9282 bool reg_ok_strict)
9283 {
9284 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9285 fprintf (stderr,
9286 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9287 "strict = %d, reload = %s, code = %s\n",
9288 ret ? "true" : "false",
9289 GET_MODE_NAME (mode),
9290 reg_ok_strict,
9291 (reload_completed ? "after" : "before"),
9292 GET_RTX_NAME (GET_CODE (x)));
9293 debug_rtx (x);
9294
9295 return ret;
9296 }
9297
9298 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9299
9300 static bool
9301 rs6000_mode_dependent_address_p (const_rtx addr,
9302 addr_space_t as ATTRIBUTE_UNUSED)
9303 {
9304 return rs6000_mode_dependent_address_ptr (addr);
9305 }
9306
9307 /* Go to LABEL if ADDR (a legitimate address expression)
9308 has an effect that depends on the machine mode it is used for.
9309
9310 On the RS/6000 this is true of all integral offsets (since AltiVec
9311 and VSX modes don't allow them) and of pre-increment or decrement.
9312
9313 ??? Except that due to conceptual problems in offsettable_address_p
9314 we can't really report the problems of integral offsets. So leave
9315 this assuming that the adjustable offset must be valid for the
9316 sub-words of a TFmode operand, which is what we had before. */
9317
9318 static bool
9319 rs6000_mode_dependent_address (const_rtx addr)
9320 {
9321 switch (GET_CODE (addr))
9322 {
9323 case PLUS:
9324 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9325 is considered a legitimate address before reload, so there
9326 are no offset restrictions in that case. Note that this
9327 condition is safe in strict mode because any address involving
9328 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9329 been rejected as illegitimate. */
9330 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9331 && XEXP (addr, 0) != arg_pointer_rtx
9332 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9333 {
9334 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9335 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9336 }
9337 break;
9338
9339 case LO_SUM:
9340 /* Anything in the constant pool is sufficiently aligned that
9341 all bytes have the same high part address. */
9342 return !legitimate_constant_pool_address_p (addr, QImode, false);
9343
9344 /* Auto-increment cases are now treated generically in recog.c. */
9345 case PRE_MODIFY:
9346 return TARGET_UPDATE;
9347
9348 /* AND is only allowed in Altivec loads. */
9349 case AND:
9350 return true;
9351
9352 default:
9353 break;
9354 }
9355
9356 return false;
9357 }
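
/* Worked example (illustrative): on 32-bit, (plus (reg) (const_int
   32760)) is mode-dependent, since the last word of a 16-byte TFmode
   access would land at offset 32772, outside the signed 16-bit
   displacement range, although the address is fine for SImode.
   The check above: 32760 + 0x8000 = 65528 >= 0x10000 - 12 = 65524.  */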
9358
9359 /* Debug version of rs6000_mode_dependent_address. */
9360 static bool
9361 rs6000_debug_mode_dependent_address (const_rtx addr)
9362 {
9363 bool ret = rs6000_mode_dependent_address (addr);
9364
9365 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9366 ret ? "true" : "false");
9367 debug_rtx (addr);
9368
9369 return ret;
9370 }
9371
9372 /* Implement FIND_BASE_TERM. */
9373
9374 rtx
9375 rs6000_find_base_term (rtx op)
9376 {
9377 rtx base;
9378
9379 base = op;
9380 if (GET_CODE (base) == CONST)
9381 base = XEXP (base, 0);
9382 if (GET_CODE (base) == PLUS)
9383 base = XEXP (base, 0);
9384 if (GET_CODE (base) == UNSPEC)
9385 switch (XINT (base, 1))
9386 {
9387 case UNSPEC_TOCREL:
9388 case UNSPEC_MACHOPIC_OFFSET:
9389 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9390 for aliasing purposes. */
9391 return XVECEXP (base, 0, 0);
9392 }
9393
9394 return op;
9395 }
9396
9397 /* More elaborate version of recog's offsettable_memref_p predicate
9398 that works around the ??? note of rs6000_mode_dependent_address.
9399 In particular it accepts
9400
9401 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9402
9403 in 32-bit mode, which the recog predicate rejects. */
9404
9405 static bool
9406 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9407 {
9408 bool worst_case;
9409
9410 if (!MEM_P (op))
9411 return false;
9412
9413 /* First mimic offsettable_memref_p. */
9414 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9415 return true;
9416
9417 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9418 the latter predicate knows nothing about the mode of the memory
9419 reference and, therefore, assumes that it is the largest supported
9420 mode (TFmode). As a consequence, legitimate offsettable memory
9421 references are rejected. rs6000_legitimate_offset_address_p contains
9422 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9423 at least with a little bit of help here given that we know the
9424 actual registers used. */
9425 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9426 || GET_MODE_SIZE (reg_mode) == 4);
9427 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9428 strict, worst_case);
9429 }
9430
9431 /* Determine the reassociation width to be used in reassociate_bb.
9432 This takes into account how many parallel operations we
9433 can actually do of a given type, and also the latency.
9434 P8:
9435 int add/sub 6/cycle
9436 mul 2/cycle
9437 vect add/sub/mul 2/cycle
9438 fp add/sub/mul 2/cycle
9439 dfp 1/cycle
9440 */
9441
9442 static int
9443 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9444 machine_mode mode)
9445 {
9446 switch (rs6000_tune)
9447 {
9448 case PROCESSOR_POWER8:
9449 case PROCESSOR_POWER9:
9450 if (DECIMAL_FLOAT_MODE_P (mode))
9451 return 1;
9452 if (VECTOR_MODE_P (mode))
9453 return 4;
9454 if (INTEGRAL_MODE_P (mode))
9455 return 1;
9456 if (FLOAT_MODE_P (mode))
9457 return 4;
9458 break;
9459 default:
9460 break;
9461 }
9462 return 1;
9463 }
9464
9465 /* Change register usage conditional on target flags. */
9466 static void
9467 rs6000_conditional_register_usage (void)
9468 {
9469 int i;
9470
9471 if (TARGET_DEBUG_TARGET)
9472 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9473
9474 /* Set MQ register fixed (already call_used) so that it will not be
9475 allocated. */
9476 fixed_regs[64] = 1;
9477
9478 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9479 if (TARGET_64BIT)
9480 fixed_regs[13] = call_used_regs[13]
9481 = call_really_used_regs[13] = 1;
9482
9483 /* Conditionally disable FPRs. */
9484 if (TARGET_SOFT_FLOAT)
9485 for (i = 32; i < 64; i++)
9486 fixed_regs[i] = call_used_regs[i]
9487 = call_really_used_regs[i] = 1;
9488
9489 /* The TOC register is not killed across calls in a way that is
9490 visible to the compiler. */
9491 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9492 call_really_used_regs[2] = 0;
9493
9494 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9495 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9496
9497 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9498 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9499 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9500 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9501
9502 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9503 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9504 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9505 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9506
9507 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9508 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9509 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9510
9511 if (!TARGET_ALTIVEC && !TARGET_VSX)
9512 {
9513 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9514 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9515 call_really_used_regs[VRSAVE_REGNO] = 1;
9516 }
9517
9518 if (TARGET_ALTIVEC || TARGET_VSX)
9519 global_regs[VSCR_REGNO] = 1;
9520
9521 if (TARGET_ALTIVEC_ABI)
9522 {
9523 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9524 call_used_regs[i] = call_really_used_regs[i] = 1;
9525
9526 /* AIX reserves VR20:31 in non-extended ABI mode. */
9527 if (TARGET_XCOFF)
9528 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9529 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9530 }
9531 }
9532
9533 \f
9534 /* Output insns to set DEST equal to the constant SOURCE as a series of
9535 lis, ori and shl instructions and return TRUE. */
9536
9537 bool
9538 rs6000_emit_set_const (rtx dest, rtx source)
9539 {
9540 machine_mode mode = GET_MODE (dest);
9541 rtx temp, set;
9542 rtx_insn *insn;
9543 HOST_WIDE_INT c;
9544
9545 gcc_checking_assert (CONST_INT_P (source));
9546 c = INTVAL (source);
9547 switch (mode)
9548 {
9549 case E_QImode:
9550 case E_HImode:
9551 emit_insn (gen_rtx_SET (dest, source));
9552 return true;
9553
9554 case E_SImode:
9555 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9556
9557 emit_insn (gen_rtx_SET (copy_rtx (temp),
9558 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9559 emit_insn (gen_rtx_SET (dest,
9560 gen_rtx_IOR (SImode, copy_rtx (temp),
9561 GEN_INT (c & 0xffff))));
9562 break;
9563
9564 case E_DImode:
9565 if (!TARGET_POWERPC64)
9566 {
9567 rtx hi, lo;
9568
9569 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9570 DImode);
9571 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9572 DImode);
9573 emit_move_insn (hi, GEN_INT (c >> 32));
9574 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9575 emit_move_insn (lo, GEN_INT (c));
9576 }
9577 else
9578 rs6000_emit_set_long_const (dest, c);
9579 break;
9580
9581 default:
9582 gcc_unreachable ();
9583 }
9584
9585 insn = get_last_insn ();
9586 set = single_set (insn);
9587 if (! CONSTANT_P (SET_SRC (set)))
9588 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9589
9590 return true;
9591 }
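
/* For example (illustrative), a 32-bit DImode set of
   0x123456789abcdef0 above becomes two SImode moves, high word
   0x12345678 and sign-adjusted low word 0x9abcdef0, each of which
   may itself need a lis/ori pair, so up to four insns in total.  */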
9592
9593 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9594 Output insns to set DEST equal to the constant C as a series of
9595 lis, ori and shl instructions. */
9596
9597 static void
9598 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9599 {
9600 rtx temp;
9601 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9602
9603 ud1 = c & 0xffff;
9604 c = c >> 16;
9605 ud2 = c & 0xffff;
9606 c = c >> 16;
9607 ud3 = c & 0xffff;
9608 c = c >> 16;
9609 ud4 = c & 0xffff;
9610
9611 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9612 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9613 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9614
9615 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9616 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9617 {
9618 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9619
9620 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9621 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9622 if (ud1 != 0)
9623 emit_move_insn (dest,
9624 gen_rtx_IOR (DImode, copy_rtx (temp),
9625 GEN_INT (ud1)));
9626 }
9627 else if (ud3 == 0 && ud4 == 0)
9628 {
9629 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9630
9631 gcc_assert (ud2 & 0x8000);
9632 emit_move_insn (copy_rtx (temp),
9633 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9634 if (ud1 != 0)
9635 emit_move_insn (copy_rtx (temp),
9636 gen_rtx_IOR (DImode, copy_rtx (temp),
9637 GEN_INT (ud1)));
9638 emit_move_insn (dest,
9639 gen_rtx_ZERO_EXTEND (DImode,
9640 gen_lowpart (SImode,
9641 copy_rtx (temp))));
9642 }
9643 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9644 || (ud4 == 0 && ! (ud3 & 0x8000)))
9645 {
9646 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9647
9648 emit_move_insn (copy_rtx (temp),
9649 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9650 if (ud2 != 0)
9651 emit_move_insn (copy_rtx (temp),
9652 gen_rtx_IOR (DImode, copy_rtx (temp),
9653 GEN_INT (ud2)));
9654 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9655 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9656 GEN_INT (16)));
9657 if (ud1 != 0)
9658 emit_move_insn (dest,
9659 gen_rtx_IOR (DImode, copy_rtx (temp),
9660 GEN_INT (ud1)));
9661 }
9662 else
9663 {
9664 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9665
9666 emit_move_insn (copy_rtx (temp),
9667 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9668 if (ud3 != 0)
9669 emit_move_insn (copy_rtx (temp),
9670 gen_rtx_IOR (DImode, copy_rtx (temp),
9671 GEN_INT (ud3)));
9672
9673 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9674 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9675 GEN_INT (32)));
9676 if (ud2 != 0)
9677 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9678 gen_rtx_IOR (DImode, copy_rtx (temp),
9679 GEN_INT (ud2 << 16)));
9680 if (ud1 != 0)
9681 emit_move_insn (dest,
9682 gen_rtx_IOR (DImode, copy_rtx (temp),
9683 GEN_INT (ud1)));
9684 }
9685 }
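
/* Worked example (illustrative): for c = 0x123456789abcdef0 the
   general case above emits

	lis  tmp,0x1234		# tmp = 0x12340000
	ori  tmp,tmp,0x5678	# tmp = 0x12345678
	sldi tmp,tmp,32		# tmp = 0x1234567800000000
	oris tmp,tmp,0x9abc	# OR in ud2 << 16
	ori  dest,tmp,0xdef0	# OR in ud1

   i.e. five insns for a full 64-bit constant.  */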
9686
9687 /* Helper for the following function. Get rid of [r+r] memory refs
9688 in cases where they won't work (TImode, TFmode, TDmode, PTImode). */
9689
9690 static void
9691 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9692 {
9693 if (GET_CODE (operands[0]) == MEM
9694 && GET_CODE (XEXP (operands[0], 0)) != REG
9695 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9696 GET_MODE (operands[0]), false))
9697 operands[0]
9698 = replace_equiv_address (operands[0],
9699 copy_addr_to_reg (XEXP (operands[0], 0)));
9700
9701 if (GET_CODE (operands[1]) == MEM
9702 && GET_CODE (XEXP (operands[1], 0)) != REG
9703 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9704 GET_MODE (operands[1]), false))
9705 operands[1]
9706 = replace_equiv_address (operands[1],
9707 copy_addr_to_reg (XEXP (operands[1], 0)));
9708 }
9709
9710 /* Generate a vector of constants to permute MODE for a little-endian
9711 storage operation by swapping the two halves of a vector. */
9712 static rtvec
9713 rs6000_const_vec (machine_mode mode)
9714 {
9715 int i, subparts;
9716 rtvec v;
9717
9718 switch (mode)
9719 {
9720 case E_V1TImode:
9721 subparts = 1;
9722 break;
9723 case E_V2DFmode:
9724 case E_V2DImode:
9725 subparts = 2;
9726 break;
9727 case E_V4SFmode:
9728 case E_V4SImode:
9729 subparts = 4;
9730 break;
9731 case E_V8HImode:
9732 subparts = 8;
9733 break;
9734 case E_V16QImode:
9735 subparts = 16;
9736 break;
9737 default:
9738 gcc_unreachable ();
9739 }
9740
9741 v = rtvec_alloc (subparts);
9742
9743 for (i = 0; i < subparts / 2; ++i)
9744 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9745 for (i = subparts / 2; i < subparts; ++i)
9746 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9747
9748 return v;
9749 }
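
/* For example, for V4SImode the vector built above is {2, 3, 0, 1}
   and for V2DImode it is {1, 0}: each permutation selects the other
   64-bit half first, i.e. a doubleword swap.  */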
9750
9751 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9752 store operation. */
9753 void
9754 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9755 {
9756 /* Scalar permutations are easier to express in integer modes rather than
9757 floating-point modes, so cast them here. We use V1TImode instead
9758 of TImode to ensure that the values don't go through GPRs. */
9759 if (FLOAT128_VECTOR_P (mode))
9760 {
9761 dest = gen_lowpart (V1TImode, dest);
9762 source = gen_lowpart (V1TImode, source);
9763 mode = V1TImode;
9764 }
9765
9766 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9767 scalar. */
9768 if (mode == TImode || mode == V1TImode)
9769 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9770 GEN_INT (64))));
9771 else
9772 {
9773 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9774 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9775 }
9776 }
9777
9778 /* Emit a little-endian load from vector memory location SOURCE to VSX
9779 register DEST in mode MODE. The load is done with two permuting
9780 insns that represent an lxvd2x and an xxpermdi. */
9781 void
9782 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9783 {
9784 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9785 V1TImode). */
9786 if (mode == TImode || mode == V1TImode)
9787 {
9788 mode = V2DImode;
9789 dest = gen_lowpart (V2DImode, dest);
9790 source = adjust_address (source, V2DImode, 0);
9791 }
9792
9793 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9794 rs6000_emit_le_vsx_permute (tmp, source, mode);
9795 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9796 }
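
/* For example (illustrative), a little-endian V4SImode load becomes

	lxvd2x   vX,0,rA	# doublewords arrive swapped
	xxpermdi vX,vX,vX,2	# swap them back into LE element order

   which is what the two permuting SETs above stand for.  */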
9797
9798 /* Emit a little-endian store to vector memory location DEST from VSX
9799 register SOURCE in mode MODE. The store is done with two permuting
9800 insns that represent an xxpermdi and an stxvd2x. */
9801 void
9802 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9803 {
9804 /* This should never be called during or after LRA, because it does
9805 not re-permute the source register. It is intended only for use
9806 during expand. */
9807 gcc_assert (!lra_in_progress && !reload_completed);
9808
9809 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9810 V1TImode). */
9811 if (mode == TImode || mode == V1TImode)
9812 {
9813 mode = V2DImode;
9814 dest = adjust_address (dest, V2DImode, 0);
9815 source = gen_lowpart (V2DImode, source);
9816 }
9817
9818 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9819 rs6000_emit_le_vsx_permute (tmp, source, mode);
9820 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9821 }
9822
9823 /* Emit a sequence representing a little-endian VSX load or store,
9824 moving data from SOURCE to DEST in mode MODE. This is done
9825 separately from rs6000_emit_move to ensure it is called only
9826 during expand. LE VSX loads and stores introduced later are
9827 handled with a split. The expand-time RTL generation allows
9828 us to optimize away redundant pairs of register-permutes. */
9829 void
9830 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9831 {
9832 gcc_assert (!BYTES_BIG_ENDIAN
9833 && VECTOR_MEM_VSX_P (mode)
9834 && !TARGET_P9_VECTOR
9835 && !gpr_or_gpr_p (dest, source)
9836 && (MEM_P (source) ^ MEM_P (dest)));
9837
9838 if (MEM_P (source))
9839 {
9840 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
9841 rs6000_emit_le_vsx_load (dest, source, mode);
9842 }
9843 else
9844 {
9845 if (!REG_P (source))
9846 source = force_reg (mode, source);
9847 rs6000_emit_le_vsx_store (dest, source, mode);
9848 }
9849 }
9850
9851 /* Return whether an SFmode or SImode move can be done without converting
9852 one mode to another. This arises when we have:
9853
9854 (SUBREG:SF (REG:SI ...))
9855 (SUBREG:SI (REG:SF ...))
9856
9857 and one of the values is in a floating point/vector register, where SFmode
9858 scalars are stored in DFmode format. */
9859
9860 bool
9861 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9862 {
9863 if (TARGET_ALLOW_SF_SUBREG)
9864 return true;
9865
9866 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9867 return true;
9868
9869 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9870 return true;
9871
9872 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9873 if (SUBREG_P (dest))
9874 {
9875 rtx dest_subreg = SUBREG_REG (dest);
9876 rtx src_subreg = SUBREG_REG (src);
9877 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9878 }
9879
9880 return false;
9881 }
9882
9883
9884 /* Helper function to change moves with:
9885
9886 (SUBREG:SF (REG:SI)) and
9887 (SUBREG:SI (REG:SF))
9888
9889 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9890 values are stored as DFmode values in the VSX registers. We need to convert
9891 the bits before we can use a direct move or operate on the bits in the
9892 vector register as an integer type.
9893
9894 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9895
9896 static bool
9897 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9898 {
9899 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
9900 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9901 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9902 {
9903 rtx inner_source = SUBREG_REG (source);
9904 machine_mode inner_mode = GET_MODE (inner_source);
9905
9906 if (mode == SImode && inner_mode == SFmode)
9907 {
9908 emit_insn (gen_movsi_from_sf (dest, inner_source));
9909 return true;
9910 }
9911
9912 if (mode == SFmode && inner_mode == SImode)
9913 {
9914 emit_insn (gen_movsf_from_si (dest, inner_source));
9915 return true;
9916 }
9917 }
9918
9919 return false;
9920 }
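
/* For example (illustrative), (set (reg:SI 9) (subreg:SI (reg:SF 33) 0))
   cannot be a plain direct move, because the SFmode value lives in the
   VSX register in DFmode format; movsi_from_sf first converts the bits
   (e.g. with xscvdpspn) before moving the 32-bit pattern to the GPR.  */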
9921
9922 /* Emit a move from SOURCE to DEST in mode MODE. */
9923 void
9924 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9925 {
9926 rtx operands[2];
9927 operands[0] = dest;
9928 operands[1] = source;
9929
9930 if (TARGET_DEBUG_ADDR)
9931 {
9932 fprintf (stderr,
9933 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9934 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9935 GET_MODE_NAME (mode),
9936 lra_in_progress,
9937 reload_completed,
9938 can_create_pseudo_p ());
9939 debug_rtx (dest);
9940 fprintf (stderr, "source:\n");
9941 debug_rtx (source);
9942 }
9943
9944 /* Check that we get CONST_WIDE_INT only when we should. */
9945 if (CONST_WIDE_INT_P (operands[1])
9946 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9947 gcc_unreachable ();
9948
9949 #ifdef HAVE_AS_GNU_ATTRIBUTE
9950 /* If we use a long double type, set the flags in .gnu_attribute that say
9951 what the long double type is. This is to allow the linker's warning
9952 message for the wrong long double to be useful, even if the function does
9953 not do a call (for example, doing a 128-bit add on power9 if the long
9954 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
9955 are used when they are not the default long double type. */
9956 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9957 {
9958 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9959 rs6000_passes_float = rs6000_passes_long_double = true;
9960
9961 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9962 rs6000_passes_float = rs6000_passes_long_double = true;
9963 }
9964 #endif
9965
9966 /* See if we need to special case SImode/SFmode SUBREG moves. */
9967 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9968 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9969 return;
9970
9971 /* Check if GCC is setting up a block move that will end up using FP
9972 registers as temporaries. We must make sure this is acceptable. */
9973 if (GET_CODE (operands[0]) == MEM
9974 && GET_CODE (operands[1]) == MEM
9975 && mode == DImode
9976 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9977 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9978 && ! (rs6000_slow_unaligned_access (SImode,
9979 (MEM_ALIGN (operands[0]) > 32
9980 ? 32 : MEM_ALIGN (operands[0])))
9981 || rs6000_slow_unaligned_access (SImode,
9982 (MEM_ALIGN (operands[1]) > 32
9983 ? 32 : MEM_ALIGN (operands[1]))))
9984 && ! MEM_VOLATILE_P (operands [0])
9985 && ! MEM_VOLATILE_P (operands [1]))
9986 {
9987 emit_move_insn (adjust_address (operands[0], SImode, 0),
9988 adjust_address (operands[1], SImode, 0));
9989 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9990 adjust_address (copy_rtx (operands[1]), SImode, 4));
9991 return;
9992 }
9993
9994 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
9995 && !gpc_reg_operand (operands[1], mode))
9996 operands[1] = force_reg (mode, operands[1]);
9997
9998 /* Recognize the case where operand[1] is a reference to thread-local
9999 data and load its address to a register. */
10000 if (tls_referenced_p (operands[1]))
10001 {
10002 enum tls_model model;
10003 rtx tmp = operands[1];
10004 rtx addend = NULL;
10005
10006 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10007 {
10008 addend = XEXP (XEXP (tmp, 0), 1);
10009 tmp = XEXP (XEXP (tmp, 0), 0);
10010 }
10011
10012 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10013 model = SYMBOL_REF_TLS_MODEL (tmp);
10014 gcc_assert (model != 0);
10015
10016 tmp = rs6000_legitimize_tls_address (tmp, model);
10017 if (addend)
10018 {
10019 tmp = gen_rtx_PLUS (mode, tmp, addend);
10020 tmp = force_operand (tmp, operands[0]);
10021 }
10022 operands[1] = tmp;
10023 }
10024
10025 /* 128-bit constant floating-point values on Darwin should really be loaded
10026 as two parts. However, this premature splitting is a problem when DFmode
10027 values can go into Altivec registers. */
10028 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
10029 && !reg_addr[DFmode].scalar_in_vmx_p)
10030 {
10031 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10032 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10033 DFmode);
10034 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10035 GET_MODE_SIZE (DFmode)),
10036 simplify_gen_subreg (DFmode, operands[1], mode,
10037 GET_MODE_SIZE (DFmode)),
10038 DFmode);
10039 return;
10040 }
10041
10042 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10043 p1:SD) if p1 is not of floating point class and p0 is spilled, as
10044 we can have no analogous movsd_store for this. */
10045 if (lra_in_progress && mode == DDmode
10046 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10047 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10048 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10049 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10050 {
10051 enum reg_class cl;
10052 int regno = REGNO (SUBREG_REG (operands[1]));
10053
10054 if (regno >= FIRST_PSEUDO_REGISTER)
10055 {
10056 cl = reg_preferred_class (regno);
10057 regno = reg_renumber[regno];
10058 if (regno < 0)
10059 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10060 }
10061 if (regno >= 0 && ! FP_REGNO_P (regno))
10062 {
10063 mode = SDmode;
10064 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10065 operands[1] = SUBREG_REG (operands[1]);
10066 }
10067 }
10068 if (lra_in_progress
10069 && mode == SDmode
10070 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10071 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10072 && (REG_P (operands[1])
10073 || (GET_CODE (operands[1]) == SUBREG
10074 && REG_P (SUBREG_REG (operands[1])))))
10075 {
10076 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10077 ? SUBREG_REG (operands[1]) : operands[1]);
10078 enum reg_class cl;
10079
10080 if (regno >= FIRST_PSEUDO_REGISTER)
10081 {
10082 cl = reg_preferred_class (regno);
10083 gcc_assert (cl != NO_REGS);
10084 regno = reg_renumber[regno];
10085 if (regno < 0)
10086 regno = ira_class_hard_regs[cl][0];
10087 }
10088 if (FP_REGNO_P (regno))
10089 {
10090 if (GET_MODE (operands[0]) != DDmode)
10091 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10092 emit_insn (gen_movsd_store (operands[0], operands[1]));
10093 }
10094 else if (INT_REGNO_P (regno))
10095 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10096 else
10097 gcc_unreachable ();
10098 return;
10099 }
10100 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10101 p1:DD)) if p0 is not of floating point class and p1 is spilled, as
10102 we can have no analogous movsd_load for this. */
10103 if (lra_in_progress && mode == DDmode
10104 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10105 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10106 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10107 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10108 {
10109 enum reg_class cl;
10110 int regno = REGNO (SUBREG_REG (operands[0]));
10111
10112 if (regno >= FIRST_PSEUDO_REGISTER)
10113 {
10114 cl = reg_preferred_class (regno);
10115 regno = reg_renumber[regno];
10116 if (regno < 0)
10117 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10118 }
10119 if (regno >= 0 && ! FP_REGNO_P (regno))
10120 {
10121 mode = SDmode;
10122 operands[0] = SUBREG_REG (operands[0]);
10123 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10124 }
10125 }
10126 if (lra_in_progress
10127 && mode == SDmode
10128 && (REG_P (operands[0])
10129 || (GET_CODE (operands[0]) == SUBREG
10130 && REG_P (SUBREG_REG (operands[0]))))
10131 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10132 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10133 {
10134 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10135 ? SUBREG_REG (operands[0]) : operands[0]);
10136 enum reg_class cl;
10137
10138 if (regno >= FIRST_PSEUDO_REGISTER)
10139 {
10140 cl = reg_preferred_class (regno);
10141 gcc_assert (cl != NO_REGS);
10142 regno = reg_renumber[regno];
10143 if (regno < 0)
10144 regno = ira_class_hard_regs[cl][0];
10145 }
10146 if (FP_REGNO_P (regno))
10147 {
10148 if (GET_MODE (operands[1]) != DDmode)
10149 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10150 emit_insn (gen_movsd_load (operands[0], operands[1]));
10151 }
10152 else if (INT_REGNO_P (regno))
10153 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10154 else
10155 gcc_unreachable ();
10156 return;
10157 }
10158
10159 /* FIXME: In the long term, this switch statement should go away
10160 and be replaced by a sequence of tests based on things like
10161 mode == Pmode. */
10162 switch (mode)
10163 {
10164 case E_HImode:
10165 case E_QImode:
10166 if (CONSTANT_P (operands[1])
10167 && GET_CODE (operands[1]) != CONST_INT)
10168 operands[1] = force_const_mem (mode, operands[1]);
10169 break;
10170
10171 case E_TFmode:
10172 case E_TDmode:
10173 case E_IFmode:
10174 case E_KFmode:
10175 if (FLOAT128_2REG_P (mode))
10176 rs6000_eliminate_indexed_memrefs (operands);
10177 /* fall through */
10178
10179 case E_DFmode:
10180 case E_DDmode:
10181 case E_SFmode:
10182 case E_SDmode:
10183 if (CONSTANT_P (operands[1])
10184 && ! easy_fp_constant (operands[1], mode))
10185 operands[1] = force_const_mem (mode, operands[1]);
10186 break;
10187
10188 case E_V16QImode:
10189 case E_V8HImode:
10190 case E_V4SFmode:
10191 case E_V4SImode:
10192 case E_V2DFmode:
10193 case E_V2DImode:
10194 case E_V1TImode:
10195 if (CONSTANT_P (operands[1])
10196 && !easy_vector_constant (operands[1], mode))
10197 operands[1] = force_const_mem (mode, operands[1]);
10198 break;
10199
10200 case E_SImode:
10201 case E_DImode:
10202 /* Use the default pattern for the address of ELF small data.  */
10203 if (TARGET_ELF
10204 && mode == Pmode
10205 && DEFAULT_ABI == ABI_V4
10206 && (GET_CODE (operands[1]) == SYMBOL_REF
10207 || GET_CODE (operands[1]) == CONST)
10208 && small_data_operand (operands[1], mode))
10209 {
10210 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10211 return;
10212 }
10213
10214 if (DEFAULT_ABI == ABI_V4
10215 && mode == Pmode && mode == SImode
10216 && flag_pic == 1 && got_operand (operands[1], mode))
10217 {
10218 emit_insn (gen_movsi_got (operands[0], operands[1]));
10219 return;
10220 }
10221
10222 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10223 && TARGET_NO_TOC
10224 && ! flag_pic
10225 && mode == Pmode
10226 && CONSTANT_P (operands[1])
10227 && GET_CODE (operands[1]) != HIGH
10228 && GET_CODE (operands[1]) != CONST_INT)
10229 {
10230 rtx target = (!can_create_pseudo_p ()
10231 ? operands[0]
10232 : gen_reg_rtx (mode));
10233
10234 /* If this is a function address on -mcall-aixdesc,
10235 convert it to the address of the descriptor. */
10236 if (DEFAULT_ABI == ABI_AIX
10237 && GET_CODE (operands[1]) == SYMBOL_REF
10238 && XSTR (operands[1], 0)[0] == '.')
10239 {
10240 const char *name = XSTR (operands[1], 0);
10241 rtx new_ref;
10242 while (*name == '.')
10243 name++;
10244 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10245 CONSTANT_POOL_ADDRESS_P (new_ref)
10246 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10247 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10248 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10249 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10250 operands[1] = new_ref;
10251 }
10252
10253 if (DEFAULT_ABI == ABI_DARWIN)
10254 {
10255 #if TARGET_MACHO
10256 if (MACHO_DYNAMIC_NO_PIC_P)
10257 {
10258 /* Take care of any required data indirection. */
10259 operands[1] = rs6000_machopic_legitimize_pic_address (
10260 operands[1], mode, operands[0]);
10261 if (operands[0] != operands[1])
10262 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10263 return;
10264 }
10265 #endif
10266 emit_insn (gen_macho_high (target, operands[1]));
10267 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10268 return;
10269 }
10270
10271 emit_insn (gen_elf_high (target, operands[1]));
10272 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10273 return;
10274 }
10275
10276 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10277 and we have put it in the TOC, we just need to make a TOC-relative
10278 reference to it. */
10279 if (TARGET_TOC
10280 && GET_CODE (operands[1]) == SYMBOL_REF
10281 && use_toc_relative_ref (operands[1], mode))
10282 operands[1] = create_TOC_reference (operands[1], operands[0]);
10283 else if (mode == Pmode
10284 && CONSTANT_P (operands[1])
10285 && GET_CODE (operands[1]) != HIGH
10286 && ((REG_P (operands[0])
10287 && FP_REGNO_P (REGNO (operands[0])))
10288 || !CONST_INT_P (operands[1])
10289 || (num_insns_constant (operands[1], mode)
10290 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10291 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10292 && (TARGET_CMODEL == CMODEL_SMALL
10293 || can_create_pseudo_p ()
10294 || (REG_P (operands[0])
10295 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10296 {
10297
10298 #if TARGET_MACHO
10299 /* Darwin uses a special PIC legitimizer. */
10300 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10301 {
10302 operands[1] =
10303 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10304 operands[0]);
10305 if (operands[0] != operands[1])
10306 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10307 return;
10308 }
10309 #endif
10310
10311 /* If we are to limit the number of things we put in the TOC and
10312 this is a symbol plus a constant we can add in one insn,
10313 just put the symbol in the TOC and add the constant. */
10314 if (GET_CODE (operands[1]) == CONST
10315 && TARGET_NO_SUM_IN_TOC
10316 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10317 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10318 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10319 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10320 && ! side_effects_p (operands[0]))
10321 {
10322 rtx sym =
10323 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10324 rtx other = XEXP (XEXP (operands[1], 0), 1);
10325
10326 sym = force_reg (mode, sym);
10327 emit_insn (gen_add3_insn (operands[0], sym, other));
10328 return;
10329 }
10330
10331 operands[1] = force_const_mem (mode, operands[1]);
10332
10333 if (TARGET_TOC
10334 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10335 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10336 {
10337 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10338 operands[0]);
10339 operands[1] = gen_const_mem (mode, tocref);
10340 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10341 }
10342 }
10343 break;
10344
10345 case E_TImode:
10346 if (!VECTOR_MEM_VSX_P (TImode))
10347 rs6000_eliminate_indexed_memrefs (operands);
10348 break;
10349
10350 case E_PTImode:
10351 rs6000_eliminate_indexed_memrefs (operands);
10352 break;
10353
10354 default:
10355 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10356 }
10357
10358 /* Above, we may have called force_const_mem which may have returned
10359 an invalid address. If we can, fix this up; otherwise, reload will
10360 have to deal with it. */
10361 if (GET_CODE (operands[1]) == MEM)
10362 operands[1] = validize_mem (operands[1]);
10363
10364 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10365 }
10366 \f
10367 /* Nonzero if we can use a floating-point register to pass this arg. */
10368 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10369 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10370 && (CUM)->fregno <= FP_ARG_MAX_REG \
10371 && TARGET_HARD_FLOAT)
10372
10373 /* Nonzero if we can use an AltiVec register to pass this arg. */
10374 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10375 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10376 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10377 && TARGET_ALTIVEC_ABI \
10378 && (NAMED))
10379
10380 /* Walk down the type tree of TYPE counting consecutive base elements.
10381 If *MODEP is VOIDmode, then set it to the first valid floating point
10382 or vector type. If a non-floating point or vector type is found, or
10383 if a floating point or vector type that doesn't match a non-VOIDmode
10384 *MODEP is found, then return -1, otherwise return the count in the
10385 sub-tree. */
10386
10387 static int
10388 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10389 {
10390 machine_mode mode;
10391 HOST_WIDE_INT size;
10392
10393 switch (TREE_CODE (type))
10394 {
10395 case REAL_TYPE:
10396 mode = TYPE_MODE (type);
10397 if (!SCALAR_FLOAT_MODE_P (mode))
10398 return -1;
10399
10400 if (*modep == VOIDmode)
10401 *modep = mode;
10402
10403 if (*modep == mode)
10404 return 1;
10405
10406 break;
10407
10408 case COMPLEX_TYPE:
10409 mode = TYPE_MODE (TREE_TYPE (type));
10410 if (!SCALAR_FLOAT_MODE_P (mode))
10411 return -1;
10412
10413 if (*modep == VOIDmode)
10414 *modep = mode;
10415
10416 if (*modep == mode)
10417 return 2;
10418
10419 break;
10420
10421 case VECTOR_TYPE:
10422 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10423 return -1;
10424
10425 /* Use V4SImode as representative of all 128-bit vector types. */
10426 size = int_size_in_bytes (type);
10427 switch (size)
10428 {
10429 case 16:
10430 mode = V4SImode;
10431 break;
10432 default:
10433 return -1;
10434 }
10435
10436 if (*modep == VOIDmode)
10437 *modep = mode;
10438
10439 /* Vector modes are considered to be opaque: two vectors are
10440 equivalent for the purposes of being homogeneous aggregates
10441 if they are the same size. */
10442 if (*modep == mode)
10443 return 1;
10444
10445 break;
10446
10447 case ARRAY_TYPE:
10448 {
10449 int count;
10450 tree index = TYPE_DOMAIN (type);
10451
10452 /* Can't handle incomplete types or sizes that are not
10453 fixed. */
10454 if (!COMPLETE_TYPE_P (type)
10455 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10456 return -1;
10457
10458 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10459 if (count == -1
10460 || !index
10461 || !TYPE_MAX_VALUE (index)
10462 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10463 || !TYPE_MIN_VALUE (index)
10464 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10465 || count < 0)
10466 return -1;
10467
10468 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10469 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10470
10471 /* There must be no padding. */
10472 if (wi::to_wide (TYPE_SIZE (type))
10473 != count * GET_MODE_BITSIZE (*modep))
10474 return -1;
10475
10476 return count;
10477 }
10478
10479 case RECORD_TYPE:
10480 {
10481 int count = 0;
10482 int sub_count;
10483 tree field;
10484
10485 /* Can't handle incomplete types or sizes that are not
10486 fixed. */
10487 if (!COMPLETE_TYPE_P (type)
10488 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10489 return -1;
10490
10491 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10492 {
10493 if (TREE_CODE (field) != FIELD_DECL)
10494 continue;
10495
10496 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10497 if (sub_count < 0)
10498 return -1;
10499 count += sub_count;
10500 }
10501
10502 /* There must be no padding. */
10503 if (wi::to_wide (TYPE_SIZE (type))
10504 != count * GET_MODE_BITSIZE (*modep))
10505 return -1;
10506
10507 return count;
10508 }
10509
10510 case UNION_TYPE:
10511 case QUAL_UNION_TYPE:
10512 {
10513 /* These aren't very interesting except in a degenerate case. */
10514 int count = 0;
10515 int sub_count;
10516 tree field;
10517
10518 /* Can't handle incomplete types or sizes that are not
10519 fixed. */
10520 if (!COMPLETE_TYPE_P (type)
10521 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10522 return -1;
10523
10524 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10525 {
10526 if (TREE_CODE (field) != FIELD_DECL)
10527 continue;
10528
10529 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10530 if (sub_count < 0)
10531 return -1;
10532 count = count > sub_count ? count : sub_count;
10533 }
10534
10535 /* There must be no padding. */
10536 if (wi::to_wide (TYPE_SIZE (type))
10537 != count * GET_MODE_BITSIZE (*modep))
10538 return -1;
10539
10540 return count;
10541 }
10542
10543 default:
10544 break;
10545 }
10546
10547 return -1;
10548 }
10549
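/* Illustrative examples (hypothetical types, not used by the compiler).
   Starting with *MODEP == VOIDmode, this function would return:

     struct a { double x, y; };           2, with *MODEP == DFmode
     struct b { _Complex double x; };     2, with *MODEP == DFmode
     struct c { vector int v[2]; };       2, with *MODEP == V4SImode
     struct d { double x; float y; };    -1  (mixed element modes)  */
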
10550 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10551 float or vector aggregate that shall be passed in FP/vector registers
10552 according to the ELFv2 ABI, return the homogeneous element mode in
10553 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10554
10555 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10556
10557 static bool
10558 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10559 machine_mode *elt_mode,
10560 int *n_elts)
10561 {
10562 /* Note that we do not accept complex types at the top level as
10563 homogeneous aggregates; these types are handled via the
10564 targetm.calls.split_complex_arg mechanism. Complex types
10565 can be elements of homogeneous aggregates, however. */
10566 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10567 && AGGREGATE_TYPE_P (type))
10568 {
10569 machine_mode field_mode = VOIDmode;
10570 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10571
10572 if (field_count > 0)
10573 {
10574 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10575 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10576
10577 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10578 up to AGGR_ARG_NUM_REG registers. */
10579 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10580 {
10581 if (elt_mode)
10582 *elt_mode = field_mode;
10583 if (n_elts)
10584 *n_elts = field_count;
10585 return true;
10586 }
10587 }
10588 }
10589
10590 if (elt_mode)
10591 *elt_mode = mode;
10592 if (n_elts)
10593 *n_elts = 1;
10594 return false;
10595 }
10596
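/* For example (hypothetical type): under ELFv2 with hard float,
   struct { double x[4]; } yields *ELT_MODE == DFmode, *N_ELTS == 4 and a
   true return, so the aggregate is eligible for FP registers; on other
   ABIs the same type takes the fallback path, returning false with
   *ELT_MODE set to the type's own mode and *N_ELTS == 1.  */
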
10597 /* Return a nonzero value to indicate that the function value should be
10598 returned in memory, just as large structures always are. TYPE will be
10599 the data type of the value, and FNTYPE will be the type of the
10600 function doing the returning, or @code{NULL} for libcalls.
10601
10602 The AIX ABI for the RS/6000 specifies that all structures are
10603 returned in memory. The Darwin ABI does the same.
10604
10605 For the Darwin 64 Bit ABI, a function result can be returned in
10606 registers or in memory, depending on the size of the return data
10607 type. If it is returned in registers, the value occupies the same
10608 registers as it would if it were the first and only function
10609 argument. Otherwise, the function places its result in memory at
10610 the location pointed to by GPR3.
10611
10612 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10613 but a draft put them in memory, and GCC used to implement the draft
10614 instead of the final standard. Therefore, aix_struct_return
10615 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10616 compatibility can change DRAFT_V4_STRUCT_RET to override the
10617 default, and -m switches get the final word. See
10618 rs6000_option_override_internal for more details.
10619
10620 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10621 long double support is enabled. These values are returned in memory.
10622
10623 int_size_in_bytes returns -1 for variable size objects, which go in
10624 memory always. The cast to unsigned makes -1 > 8. */
10625
10626 static bool
10627 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10628 {
10629 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10630 if (TARGET_MACHO
10631 && rs6000_darwin64_abi
10632 && TREE_CODE (type) == RECORD_TYPE
10633 && int_size_in_bytes (type) > 0)
10634 {
10635 CUMULATIVE_ARGS valcum;
10636 rtx valret;
10637
10638 valcum.words = 0;
10639 valcum.fregno = FP_ARG_MIN_REG;
10640 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10641 /* Do a trial code generation as if this were going to be passed
10642 as an argument; if any part goes in memory, we return NULL. */
10643 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10644 if (valret)
10645 return false;
10646 /* Otherwise fall through to more conventional ABI rules. */
10647 }
10648
10649 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers.  */
10650 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10651 NULL, NULL))
10652 return false;
10653
10654 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers.  */
10655 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10656 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10657 return false;
10658
10659 if (AGGREGATE_TYPE_P (type)
10660 && (aix_struct_return
10661 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10662 return true;
10663
10664 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10665 modes only exist for GCC vector types if -maltivec. */
10666 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10667 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10668 return false;
10669
10670 /* Return synthetic vectors in memory. */
10671 if (TREE_CODE (type) == VECTOR_TYPE
10672 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10673 {
10674 static bool warned_for_return_big_vectors = false;
10675 if (!warned_for_return_big_vectors)
10676 {
10677 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10678 "non-standard ABI extension with no compatibility "
10679 "guarantee");
10680 warned_for_return_big_vectors = true;
10681 }
10682 return true;
10683 }
10684
10685 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10686 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10687 return true;
10688
10689 return false;
10690 }
10691
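/* Illustrative consequences (hypothetical types): struct { double x, y; }
   is a homogeneous aggregate, so under ELFv2 it is returned in registers
   (false here), while under AIX aix_struct_return sends it to memory
   (true).  struct { int x, y; } is 8 bytes, so it stays in registers
   under ELFv2, and under SVR4 when aix_struct_return is clear.  */
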
10692 /* Specify whether values returned in registers should be at the most
10693 significant end of a register. We want aggregates returned by
10694 value to match the way aggregates are passed to functions. */
10695
10696 static bool
10697 rs6000_return_in_msb (const_tree valtype)
10698 {
10699 return (DEFAULT_ABI == ABI_ELFv2
10700 && BYTES_BIG_ENDIAN
10701 && AGGREGATE_TYPE_P (valtype)
10702 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10703 == PAD_UPWARD));
10704 }
10705
10706 #ifdef HAVE_AS_GNU_ATTRIBUTE
10707 /* Return TRUE if a call to function FNDECL may be one that
10708 potentially affects the function calling ABI of the object file. */
10709
10710 static bool
10711 call_ABI_of_interest (tree fndecl)
10712 {
10713 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10714 {
10715 struct cgraph_node *c_node;
10716
10717 /* Libcalls are always interesting. */
10718 if (fndecl == NULL_TREE)
10719 return true;
10720
10721 /* Any call to an external function is interesting. */
10722 if (DECL_EXTERNAL (fndecl))
10723 return true;
10724
10725 /* Interesting functions that we are emitting in this object file. */
10726 c_node = cgraph_node::get (fndecl);
10727 c_node = c_node->ultimate_alias_target ();
10728 return !c_node->only_called_directly_p ();
10729 }
10730 return false;
10731 }
10732 #endif
10733
10734 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10735 for a call to a function whose data type is FNTYPE.
10736 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
10737
10738 For incoming args we set the number of arguments in the prototype large
10739 so we never return a PARALLEL. */
10740
10741 void
10742 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10743 rtx libname ATTRIBUTE_UNUSED, int incoming,
10744 int libcall, int n_named_args,
10745 tree fndecl,
10746 machine_mode return_mode ATTRIBUTE_UNUSED)
10747 {
10748 static CUMULATIVE_ARGS zero_cumulative;
10749
10750 *cum = zero_cumulative;
10751 cum->words = 0;
10752 cum->fregno = FP_ARG_MIN_REG;
10753 cum->vregno = ALTIVEC_ARG_MIN_REG;
10754 cum->prototype = (fntype && prototype_p (fntype));
10755 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10756 ? CALL_LIBCALL : CALL_NORMAL);
10757 cum->sysv_gregno = GP_ARG_MIN_REG;
10758 cum->stdarg = stdarg_p (fntype);
10759 cum->libcall = libcall;
10760
10761 cum->nargs_prototype = 0;
10762 if (incoming || cum->prototype)
10763 cum->nargs_prototype = n_named_args;
10764
10765 /* Check for a longcall attribute. */
10766 if ((!fntype && rs6000_default_long_calls)
10767 || (fntype
10768 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10769 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10770 cum->call_cookie |= CALL_LONG;
10771 else if (DEFAULT_ABI != ABI_DARWIN)
10772 {
10773 bool is_local = (fndecl
10774 && !DECL_EXTERNAL (fndecl)
10775 && !DECL_WEAK (fndecl)
10776 && (*targetm.binds_local_p) (fndecl));
10777 if (is_local)
10778 ;
10779 else if (flag_plt)
10780 {
10781 if (fntype
10782 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10783 cum->call_cookie |= CALL_LONG;
10784 }
10785 else
10786 {
10787 if (!(fntype
10788 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10789 cum->call_cookie |= CALL_LONG;
10790 }
10791 }
10792
10793 if (TARGET_DEBUG_ARG)
10794 {
10795 fprintf (stderr, "\ninit_cumulative_args:");
10796 if (fntype)
10797 {
10798 tree ret_type = TREE_TYPE (fntype);
10799 fprintf (stderr, " ret code = %s,",
10800 get_tree_code_name (TREE_CODE (ret_type)));
10801 }
10802
10803 if (cum->call_cookie & CALL_LONG)
10804 fprintf (stderr, " longcall,");
10805
10806 fprintf (stderr, " proto = %d, nargs = %d\n",
10807 cum->prototype, cum->nargs_prototype);
10808 }
10809
10810 #ifdef HAVE_AS_GNU_ATTRIBUTE
10811 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10812 {
10813 cum->escapes = call_ABI_of_interest (fndecl);
10814 if (cum->escapes)
10815 {
10816 tree return_type;
10817
10818 if (fntype)
10819 {
10820 return_type = TREE_TYPE (fntype);
10821 return_mode = TYPE_MODE (return_type);
10822 }
10823 else
10824 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10825
10826 if (return_type != NULL)
10827 {
10828 if (TREE_CODE (return_type) == RECORD_TYPE
10829 && TYPE_TRANSPARENT_AGGR (return_type))
10830 {
10831 return_type = TREE_TYPE (first_field (return_type));
10832 return_mode = TYPE_MODE (return_type);
10833 }
10834 if (AGGREGATE_TYPE_P (return_type)
10835 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10836 <= 8))
10837 rs6000_returns_struct = true;
10838 }
10839 if (SCALAR_FLOAT_MODE_P (return_mode))
10840 {
10841 rs6000_passes_float = true;
10842 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10843 && (FLOAT128_IBM_P (return_mode)
10844 || FLOAT128_IEEE_P (return_mode)
10845 || (return_type != NULL
10846 && (TYPE_MAIN_VARIANT (return_type)
10847 == long_double_type_node))))
10848 rs6000_passes_long_double = true;
10849
10850 /* Note if we pass or return an IEEE 128-bit type. We changed
10851 the mangling for these types, and we may need to make an alias
10852 with the old mangling. */
10853 if (FLOAT128_IEEE_P (return_mode))
10854 rs6000_passes_ieee128 = true;
10855 }
10856 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10857 rs6000_passes_vector = true;
10858 }
10859 }
10860 #endif
10861
10862 if (fntype
10863 && !TARGET_ALTIVEC
10864 && TARGET_ALTIVEC_ABI
10865 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10866 {
10867 error ("cannot return value in vector register because"
10868 " altivec instructions are disabled, use %qs"
10869 " to enable them", "-maltivec");
10870 }
10871 }
10872 \f
10873 /* The mode the ABI uses for a word. This is not the same as word_mode
10874 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10875
10876 static scalar_int_mode
10877 rs6000_abi_word_mode (void)
10878 {
10879 return TARGET_32BIT ? SImode : DImode;
10880 }
10881
10882 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10883 static char *
10884 rs6000_offload_options (void)
10885 {
10886 if (TARGET_64BIT)
10887 return xstrdup ("-foffload-abi=lp64");
10888 else
10889 return xstrdup ("-foffload-abi=ilp32");
10890 }
10891
10892 /* On rs6000, function arguments are promoted, as are function return
10893 values. */
10894
10895 static machine_mode
10896 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10897 machine_mode mode,
10898 int *punsignedp ATTRIBUTE_UNUSED,
10899 const_tree, int)
10900 {
10901 PROMOTE_MODE (mode, *punsignedp, type);
10902
10903 return mode;
10904 }
10905
10906 /* Return true if TYPE must be passed on the stack and not in registers. */
10907
10908 static bool
10909 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10910 {
10911 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10912 return must_pass_in_stack_var_size (mode, type);
10913 else
10914 return must_pass_in_stack_var_size_or_pad (mode, type);
10915 }
10916
10917 static inline bool
10918 is_complex_IBM_long_double (machine_mode mode)
10919 {
10920 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10921 }
10922
10923 /* Whether ABI_V4 passes MODE args to a function in floating point
10924 registers. */
10925
10926 static bool
10927 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10928 {
10929 if (!TARGET_HARD_FLOAT)
10930 return false;
10931 if (mode == DFmode)
10932 return true;
10933 if (mode == SFmode && named)
10934 return true;
10935 /* ABI_V4 passes complex IBM long double in 8 gprs.
10936 Stupid, but we can't change the ABI now. */
10937 if (is_complex_IBM_long_double (mode))
10938 return false;
10939 if (FLOAT128_2REG_P (mode))
10940 return true;
10941 if (DECIMAL_FLOAT_MODE_P (mode))
10942 return true;
10943 return false;
10944 }
10945
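/* For example, under ABI_V4 with hard float: a named double (DFmode)
   goes in an FPR; an unnamed float (SFmode) does not, since SFmode
   requires NAMED; and complex IBM long double returns false here, so it
   is passed in GPRs as noted above.  */
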
10946 /* Implement TARGET_FUNCTION_ARG_PADDING.
10947
10948 For the AIX ABI structs are always stored left shifted in their
10949 argument slot. */
10950
10951 static pad_direction
10952 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10953 {
10954 #ifndef AGGREGATE_PADDING_FIXED
10955 #define AGGREGATE_PADDING_FIXED 0
10956 #endif
10957 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10958 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10959 #endif
10960
10961 if (!AGGREGATE_PADDING_FIXED)
10962 {
10963 /* GCC used to pass structures of the same size as integer types as
10964 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10965 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10966 passed padded downward, except that -mstrict-align further
10967 muddied the water in that multi-component structures of 2 and 4
10968 bytes in size were passed padded upward.
10969
10970 The following arranges for best compatibility with previous
10971 versions of gcc, but removes the -mstrict-align dependency. */
10972 if (BYTES_BIG_ENDIAN)
10973 {
10974 HOST_WIDE_INT size = 0;
10975
10976 if (mode == BLKmode)
10977 {
10978 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10979 size = int_size_in_bytes (type);
10980 }
10981 else
10982 size = GET_MODE_SIZE (mode);
10983
10984 if (size == 1 || size == 2 || size == 4)
10985 return PAD_DOWNWARD;
10986 }
10987 return PAD_UPWARD;
10988 }
10989
10990 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10991 {
10992 if (type != 0 && AGGREGATE_TYPE_P (type))
10993 return PAD_UPWARD;
10994 }
10995
10996 /* Fall back to the default. */
10997 return default_function_arg_padding (mode, type);
10998 }
10999
11000 /* If defined, a C expression that gives the alignment boundary, in bits,
11001 of an argument with the specified mode and type. If it is not defined,
11002 PARM_BOUNDARY is used for all arguments.
11003
11004 V.4 wants long longs and doubles to be double word aligned. Just
11005 testing the mode size is a boneheaded way to do this as it means
11006 that other types such as complex int are also double word aligned.
11007 However, we're stuck with this because changing the ABI might break
11008 existing library interfaces.
11009
11010 Quadword align Altivec/VSX vectors.
11011 Quadword align large synthetic vector types. */
11012
11013 static unsigned int
11014 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11015 {
11016 machine_mode elt_mode;
11017 int n_elts;
11018
11019 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11020
11021 if (DEFAULT_ABI == ABI_V4
11022 && (GET_MODE_SIZE (mode) == 8
11023 || (TARGET_HARD_FLOAT
11024 && !is_complex_IBM_long_double (mode)
11025 && FLOAT128_2REG_P (mode))))
11026 return 64;
11027 else if (FLOAT128_VECTOR_P (mode))
11028 return 128;
11029 else if (type && TREE_CODE (type) == VECTOR_TYPE
11030 && int_size_in_bytes (type) >= 8
11031 && int_size_in_bytes (type) < 16)
11032 return 64;
11033 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11034 || (type && TREE_CODE (type) == VECTOR_TYPE
11035 && int_size_in_bytes (type) >= 16))
11036 return 128;
11037
11038 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11039 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11040 -mcompat-align-parm is used. */
11041 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11042 || DEFAULT_ABI == ABI_ELFv2)
11043 && type && TYPE_ALIGN (type) > 64)
11044 {
11045 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11046 or homogeneous float/vector aggregates here. We already handled
11047 vector aggregates above, but still need to check for float here. */
11048 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11049 && !SCALAR_FLOAT_MODE_P (elt_mode));
11050
11051 /* We used to check for BLKmode instead of the above aggregate type
11052 check. Warn when this results in any difference to the ABI. */
11053 if (aggregate_p != (mode == BLKmode))
11054 {
11055 static bool warned;
11056 if (!warned && warn_psabi)
11057 {
11058 warned = true;
11059 inform (input_location,
11060 "the ABI of passing aggregates with %d-byte alignment"
11061 " has changed in GCC 5",
11062 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11063 }
11064 }
11065
11066 if (aggregate_p)
11067 return 128;
11068 }
11069
11070 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11071 implement the "aggregate type" check as a BLKmode check here; this
11072 means certain aggregate types are in fact not aligned. */
11073 if (TARGET_MACHO && rs6000_darwin64_abi
11074 && mode == BLKmode
11075 && type && TYPE_ALIGN (type) > 64)
11076 return 128;
11077
11078 return PARM_BOUNDARY;
11079 }
11080
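/* Worked examples (illustrative): under ABI_V4 a DImode or DFmode
   argument (GET_MODE_SIZE == 8) gets 64-bit alignment; a V4SImode vector
   gets 128; and under ELFv2 a hypothetical struct declared with
   __attribute__ ((aligned (16))) that is not a homogeneous float/vector
   aggregate also gets 128.  Anything else falls back to PARM_BOUNDARY.  */
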
11081 /* The offset in words to the start of the parameter save area. */
11082
11083 static unsigned int
11084 rs6000_parm_offset (void)
11085 {
11086 return (DEFAULT_ABI == ABI_V4 ? 2
11087 : DEFAULT_ABI == ABI_ELFv2 ? 4
11088 : 6);
11089 }
11090
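/* In bytes: 8 for ABI_V4 (32-bit back chain and LR save), 32 for ELFv2
   (back chain, CR save, LR save and TOC save doublewords), and 48 for
   AIX (the same four plus two reserved doublewords).  */
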
11091 /* For a function parm of MODE and TYPE, return the starting word in
11092 the parameter area. NWORDS of the parameter area are already used. */
11093
11094 static unsigned int
11095 rs6000_parm_start (machine_mode mode, const_tree type,
11096 unsigned int nwords)
11097 {
11098 unsigned int align;
11099
11100 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11101 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11102 }
11103
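/* Worked example (illustrative): under ELFv2 (offset 4 words) with
   NWORDS == 3 and a 128-bit boundary, ALIGN == 128/64 - 1 == 1 and the
   result is 3 + (-(4 + 3) & 1) == 4, pushing the argument up one word so
   that its address in the save area is 16-byte aligned.  */
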
11104 /* Compute the size (in words) of a function argument. */
11105
11106 static unsigned long
11107 rs6000_arg_size (machine_mode mode, const_tree type)
11108 {
11109 unsigned long size;
11110
11111 if (mode != BLKmode)
11112 size = GET_MODE_SIZE (mode);
11113 else
11114 size = int_size_in_bytes (type);
11115
11116 if (TARGET_32BIT)
11117 return (size + 3) >> 2;
11118 else
11119 return (size + 7) >> 3;
11120 }
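
/* Worked example (illustrative): a 9-byte BLKmode struct takes
   (9 + 3) >> 2 == 3 words under -m32 and (9 + 7) >> 3 == 2 words under
   -m64; sizes are always rounded up to whole parameter words.  */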
11121 \f
11122 /* Use this to flush pending int fields. */
11123
11124 static void
11125 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11126 HOST_WIDE_INT bitpos, int final)
11127 {
11128 unsigned int startbit, endbit;
11129 int intregs, intoffset;
11130
11131 /* Handle the situation where a float takes up the first half
11132 of a GPR and the other half is empty (typically due to
11133 alignment restrictions). We can detect this from an 8-byte-aligned
11134 int field, or by seeing that this is the final flush for this
11135 argument. Count the word and continue. */
11136 if (cum->floats_in_gpr == 1
11137 && (cum->intoffset % 64 == 0
11138 || (cum->intoffset == -1 && final)))
11139 {
11140 cum->words++;
11141 cum->floats_in_gpr = 0;
11142 }
11143
11144 if (cum->intoffset == -1)
11145 return;
11146
11147 intoffset = cum->intoffset;
11148 cum->intoffset = -1;
11149 cum->floats_in_gpr = 0;
11150
11151 if (intoffset % BITS_PER_WORD != 0)
11152 {
11153 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11154 if (!int_mode_for_size (bits, 0).exists ())
11155 {
11156 /* We couldn't find an appropriate mode, which happens,
11157 e.g., in packed structs when there are 3 bytes to load.
11158 Move intoffset back to the beginning of the word in this
11159 case. */
11160 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11161 }
11162 }
11163
11164 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11165 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11166 intregs = (endbit - startbit) / BITS_PER_WORD;
11167 cum->words += intregs;
11168 /* words should be unsigned. */
11169 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11170 {
11171 int pad = (endbit/BITS_PER_WORD) - cum->words;
11172 cum->words += pad;
11173 }
11174 }
11175
11176 /* The darwin64 ABI calls for us to recurse down through structs,
11177 looking for elements passed in registers. Unfortunately, we have
11178 to track the int register count here as well because of misalignments
11179 in powerpc alignment mode. */
11180
11181 static void
11182 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11183 const_tree type,
11184 HOST_WIDE_INT startbitpos)
11185 {
11186 tree f;
11187
11188 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11189 if (TREE_CODE (f) == FIELD_DECL)
11190 {
11191 HOST_WIDE_INT bitpos = startbitpos;
11192 tree ftype = TREE_TYPE (f);
11193 machine_mode mode;
11194 if (ftype == error_mark_node)
11195 continue;
11196 mode = TYPE_MODE (ftype);
11197
11198 if (DECL_SIZE (f) != 0
11199 && tree_fits_uhwi_p (bit_position (f)))
11200 bitpos += int_bit_position (f);
11201
11202 /* ??? FIXME: else assume zero offset. */
11203
11204 if (TREE_CODE (ftype) == RECORD_TYPE)
11205 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11206 else if (USE_FP_FOR_ARG_P (cum, mode))
11207 {
11208 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11209 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11210 cum->fregno += n_fpregs;
11211 /* Single-precision floats present a special problem for
11212 us, because they are smaller than an 8-byte GPR, and so
11213 the structure-packing rules combined with the standard
11214 varargs behavior mean that we want to pack float/float
11215 and float/int combinations into a single register's
11216 space. This is complicated by the arg advance flushing,
11217 which works on arbitrarily large groups of int-type
11218 fields. */
11219 if (mode == SFmode)
11220 {
11221 if (cum->floats_in_gpr == 1)
11222 {
11223 /* Two floats in a word; count the word and reset
11224 the float count. */
11225 cum->words++;
11226 cum->floats_in_gpr = 0;
11227 }
11228 else if (bitpos % 64 == 0)
11229 {
11230 /* A float at the beginning of an 8-byte word;
11231 count it and put off adjusting cum->words until
11232 we see if an arg advance flush is going to do it
11233 for us. */
11234 cum->floats_in_gpr++;
11235 }
11236 else
11237 {
11238 /* The float is at the end of a word, preceded
11239 by integer fields, so the arg advance flush
11240 just above has already set cum->words and
11241 everything is taken care of. */
11242 }
11243 }
11244 else
11245 cum->words += n_fpregs;
11246 }
11247 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11248 {
11249 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11250 cum->vregno++;
11251 cum->words += 2;
11252 }
11253 else if (cum->intoffset == -1)
11254 cum->intoffset = bitpos;
11255 }
11256 }
11257
11258 /* Check for an item that needs to be considered specially under the Darwin
11259 64-bit ABI. These are record types where the mode is BLK or the structure is
11260 8 bytes in size. */
11261 static int
11262 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11263 {
11264 return rs6000_darwin64_abi
11265 && ((mode == BLKmode
11266 && TREE_CODE (type) == RECORD_TYPE
11267 && int_size_in_bytes (type) > 0)
11268 || (type && TREE_CODE (type) == RECORD_TYPE
11269 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11270 }
11271
11272 /* Update the data in CUM to advance over an argument
11273 of mode MODE and data type TYPE.
11274 (TYPE is null for libcalls where that information may not be available.)
11275
11276 Note that for args passed by reference, function_arg will be called
11277 with MODE and TYPE set to that of the pointer to the arg, not the arg
11278 itself. */
11279
11280 static void
11281 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11282 const_tree type, bool named, int depth)
11283 {
11284 machine_mode elt_mode;
11285 int n_elts;
11286
11287 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11288
11289 /* Only tick off an argument if we're not recursing. */
11290 if (depth == 0)
11291 cum->nargs_prototype--;
11292
11293 #ifdef HAVE_AS_GNU_ATTRIBUTE
11294 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11295 && cum->escapes)
11296 {
11297 if (SCALAR_FLOAT_MODE_P (mode))
11298 {
11299 rs6000_passes_float = true;
11300 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11301 && (FLOAT128_IBM_P (mode)
11302 || FLOAT128_IEEE_P (mode)
11303 || (type != NULL
11304 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11305 rs6000_passes_long_double = true;
11306
11307 /* Note if we pass or return an IEEE 128-bit type. We changed the
11308 mangling for these types, and we may need to make an alias with
11309 the old mangling. */
11310 if (FLOAT128_IEEE_P (mode))
11311 rs6000_passes_ieee128 = true;
11312 }
11313 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11314 rs6000_passes_vector = true;
11315 }
11316 #endif
11317
11318 if (TARGET_ALTIVEC_ABI
11319 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11320 || (type && TREE_CODE (type) == VECTOR_TYPE
11321 && int_size_in_bytes (type) == 16)))
11322 {
11323 bool stack = false;
11324
11325 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11326 {
11327 cum->vregno += n_elts;
11328
11329 if (!TARGET_ALTIVEC)
11330 error ("cannot pass argument in vector register because"
11331 " altivec instructions are disabled, use %qs"
11332 " to enable them", "-maltivec");
11333
11334 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11335 even if it is going to be passed in a vector register.
11336 Darwin does the same for variable-argument functions. */
11337 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11338 && TARGET_64BIT)
11339 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11340 stack = true;
11341 }
11342 else
11343 stack = true;
11344
11345 if (stack)
11346 {
11347 int align;
11348
11349 /* Vector parameters must be 16-byte aligned. In 32-bit
11350 mode this means we need to take into account the offset
11351 to the parameter save area. In 64-bit mode, they just
11352 have to start on an even word, since the parameter save
11353 area is 16-byte aligned. */
11354 if (TARGET_32BIT)
11355 align = -(rs6000_parm_offset () + cum->words) & 3;
11356 else
11357 align = cum->words & 1;
11358 cum->words += align + rs6000_arg_size (mode, type);
11359
11360 if (TARGET_DEBUG_ARG)
11361 {
11362 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11363 cum->words, align);
11364 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11365 cum->nargs_prototype, cum->prototype,
11366 GET_MODE_NAME (mode));
11367 }
11368 }
11369 }
11370 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11371 {
11372 int size = int_size_in_bytes (type);
11373 /* Variable sized types have size == -1 and are
11374 treated as if consisting entirely of ints.
11375 Pad to a 16-byte boundary if needed. */
11376 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11377 && (cum->words % 2) != 0)
11378 cum->words++;
11379 /* For varargs, we can just go up by the size of the struct. */
11380 if (!named)
11381 cum->words += (size + 7) / 8;
11382 else
11383 {
11384 /* It is tempting to say int register count just goes up by
11385 sizeof(type)/8, but this is wrong in a case such as
11386 { int; double; int; } [powerpc alignment]. We have to
11387 grovel through the fields for these too. */
11388 cum->intoffset = 0;
11389 cum->floats_in_gpr = 0;
11390 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11391 rs6000_darwin64_record_arg_advance_flush (cum,
11392 size * BITS_PER_UNIT, 1);
11393 }
11394 if (TARGET_DEBUG_ARG)
11395 {
11396 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11397 cum->words, TYPE_ALIGN (type), size);
11398 fprintf (stderr,
11399 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11400 cum->nargs_prototype, cum->prototype,
11401 GET_MODE_NAME (mode));
11402 }
11403 }
11404 else if (DEFAULT_ABI == ABI_V4)
11405 {
11406 if (abi_v4_pass_in_fpr (mode, named))
11407 {
11408 /* _Decimal128 must use an even/odd register pair. This assumes
11409 that the register number is odd when fregno is odd. */
11410 if (mode == TDmode && (cum->fregno % 2) == 1)
11411 cum->fregno++;
11412
11413 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11414 <= FP_ARG_V4_MAX_REG)
11415 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11416 else
11417 {
11418 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11419 if (mode == DFmode || FLOAT128_IBM_P (mode)
11420 || mode == DDmode || mode == TDmode)
11421 cum->words += cum->words & 1;
11422 cum->words += rs6000_arg_size (mode, type);
11423 }
11424 }
11425 else
11426 {
11427 int n_words = rs6000_arg_size (mode, type);
11428 int gregno = cum->sysv_gregno;
11429
11430 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10),
11431 as is any other 2-word item such as complex int, due to a
11432 historical mistake. */
11433 if (n_words == 2)
11434 gregno += (1 - gregno) & 1;
11435
11436 /* Multi-reg args are not split between registers and stack. */
11437 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11438 {
11439 /* Long long is aligned on the stack, as are other 2-word
11440 items such as complex int, due to a historical mistake. */
11441 if (n_words == 2)
11442 cum->words += cum->words & 1;
11443 cum->words += n_words;
11444 }
11445
11446 /* Note: we keep accumulating gregno even after we have started
11447 spilling to the stack; a value past GP_ARG_MAX_REG tells
11448 expand_builtin_saveregs that spilling has begun. */
11449 cum->sysv_gregno = gregno + n_words;
11450 }
11451
11452 if (TARGET_DEBUG_ARG)
11453 {
11454 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11455 cum->words, cum->fregno);
11456 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11457 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11458 fprintf (stderr, "mode = %4s, named = %d\n",
11459 GET_MODE_NAME (mode), named);
11460 }
11461 }
11462 else
11463 {
11464 int n_words = rs6000_arg_size (mode, type);
11465 int start_words = cum->words;
11466 int align_words = rs6000_parm_start (mode, type, start_words);
11467
11468 cum->words = align_words + n_words;
11469
11470 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11471 {
11472 /* _Decimal128 must be passed in an even/odd float register pair.
11473 This assumes that the register number is odd when fregno is
11474 odd. */
11475 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11476 cum->fregno++;
11477 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11478 }
11479
11480 if (TARGET_DEBUG_ARG)
11481 {
11482 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11483 cum->words, cum->fregno);
11484 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11485 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11486 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11487 named, align_words - start_words, depth);
11488 }
11489 }
11490 }
11491
11492 static void
11493 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11494 const_tree type, bool named)
11495 {
11496 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11497 0);
11498 }
11499
11500 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11501 structure between cum->intoffset and bitpos to integer registers. */
11502
11503 static void
11504 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11505 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11506 {
11507 machine_mode mode;
11508 unsigned int regno;
11509 unsigned int startbit, endbit;
11510 int this_regno, intregs, intoffset;
11511 rtx reg;
11512
11513 if (cum->intoffset == -1)
11514 return;
11515
11516 intoffset = cum->intoffset;
11517 cum->intoffset = -1;
11518
11519 /* If this is the trailing part of a word, try to load only that
11520 much into the register. Otherwise load the whole register. Note
11521 that in the latter case we may pick up unwanted bits. It's not a
11522 problem at the moment, but we may wish to revisit this. */
11523
11524 if (intoffset % BITS_PER_WORD != 0)
11525 {
11526 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11527 if (!int_mode_for_size (bits, 0).exists (&mode))
11528 {
11529 /* We couldn't find an appropriate mode, which happens,
11530 e.g., in packed structs when there are 3 bytes to load.
11531 Move intoffset back to the beginning of the word in this
11532 case. */
11533 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11534 mode = word_mode;
11535 }
11536 }
11537 else
11538 mode = word_mode;
11539
11540 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11541 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11542 intregs = (endbit - startbit) / BITS_PER_WORD;
11543 this_regno = cum->words + intoffset / BITS_PER_WORD;
11544
11545 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11546 cum->use_stack = 1;
11547
11548 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11549 if (intregs <= 0)
11550 return;
11551
11552 intoffset /= BITS_PER_UNIT;
11553 do
11554 {
11555 regno = GP_ARG_MIN_REG + this_regno;
11556 reg = gen_rtx_REG (mode, regno);
11557 rvec[(*k)++] =
11558 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11559
11560 this_regno += 1;
11561 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11562 mode = word_mode;
11563 intregs -= 1;
11564 }
11565 while (intregs > 0);
11566 }
11567
11568 /* Recursive workhorse for the following. */
11569
11570 static void
11571 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11572 HOST_WIDE_INT startbitpos, rtx rvec[],
11573 int *k)
11574 {
11575 tree f;
11576
11577 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11578 if (TREE_CODE (f) == FIELD_DECL)
11579 {
11580 HOST_WIDE_INT bitpos = startbitpos;
11581 tree ftype = TREE_TYPE (f);
11582 machine_mode mode;
11583 if (ftype == error_mark_node)
11584 continue;
11585 mode = TYPE_MODE (ftype);
11586
11587 if (DECL_SIZE (f) != 0
11588 && tree_fits_uhwi_p (bit_position (f)))
11589 bitpos += int_bit_position (f);
11590
11591 /* ??? FIXME: else assume zero offset. */
11592
11593 if (TREE_CODE (ftype) == RECORD_TYPE)
11594 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11595 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11596 {
11597 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11598 #if 0
11599 switch (mode)
11600 {
11601 case E_SCmode: mode = SFmode; break;
11602 case E_DCmode: mode = DFmode; break;
11603 case E_TCmode: mode = TFmode; break;
11604 default: break;
11605 }
11606 #endif
11607 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11608 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11609 {
11610 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11611 && (mode == TFmode || mode == TDmode));
11612 /* Long double or _Decimal128 split over regs and memory. */
11613 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11614 cum->use_stack = 1;
11615 }
11616 rvec[(*k)++]
11617 = gen_rtx_EXPR_LIST (VOIDmode,
11618 gen_rtx_REG (mode, cum->fregno++),
11619 GEN_INT (bitpos / BITS_PER_UNIT));
11620 if (FLOAT128_2REG_P (mode))
11621 cum->fregno++;
11622 }
11623 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11624 {
11625 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11626 rvec[(*k)++]
11627 = gen_rtx_EXPR_LIST (VOIDmode,
11628 gen_rtx_REG (mode, cum->vregno++),
11629 GEN_INT (bitpos / BITS_PER_UNIT));
11630 }
11631 else if (cum->intoffset == -1)
11632 cum->intoffset = bitpos;
11633 }
11634 }
11635
11636 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11637 the register(s) to be used for each field and subfield of a struct
11638 being passed by value, along with the offset of where the
11639 register's value may be found in the block. FP fields go in FP
11640 register, vector fields go in vector registers, and everything
11641 else goes in int registers, packed as in memory.
11642
11643 This code is also used for function return values. RETVAL indicates
11644 whether this is the case.
11645
11646 Much of this is taken from the SPARC V9 port, which has a similar
11647 calling convention. */
11648
11649 static rtx
11650 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11651 bool named, bool retval)
11652 {
11653 rtx rvec[FIRST_PSEUDO_REGISTER];
11654 int k = 1, kbase = 1;
11655 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11656 /* This is a copy; modifications are not visible to our caller. */
11657 CUMULATIVE_ARGS copy_cum = *orig_cum;
11658 CUMULATIVE_ARGS *cum = &copy_cum;
11659
11660 /* Pad to a 16-byte boundary if needed. */
11661 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11662 && (cum->words % 2) != 0)
11663 cum->words++;
11664
11665 cum->intoffset = 0;
11666 cum->use_stack = 0;
11667 cum->named = named;
11668
11669 /* Put entries into rvec[] for individual FP and vector fields, and
11670 for the chunks of memory that go in int regs. Note we start at
11671 element 1; 0 is reserved for an indication of using memory, and
11672 may or may not be filled in below. */
11673 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11674 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11675
11676 /* If any part of the struct went on the stack put all of it there.
11677 This hack is because the generic code for
11678 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11679 parts of the struct are not at the beginning. */
11680 if (cum->use_stack)
11681 {
11682 if (retval)
11683 return NULL_RTX; /* doesn't go in registers at all */
11684 kbase = 0;
11685 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11686 }
11687 if (k > 1 || cum->use_stack)
11688 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11689 else
11690 return NULL_RTX;
11691 }
11692
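/* For instance (illustrative): a named struct { int i, j; double d; }
   argument produces one EXPR_LIST loading i and j together from a single
   GPR word and a second placing d in the next free FPR, each tagged with
   the byte offset of its piece within the struct.  */
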
11693 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11694
11695 static rtx
11696 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11697 int align_words)
11698 {
11699 int n_units;
11700 int i, k;
11701 rtx rvec[GP_ARG_NUM_REG + 1];
11702
11703 if (align_words >= GP_ARG_NUM_REG)
11704 return NULL_RTX;
11705
11706 n_units = rs6000_arg_size (mode, type);
11707
11708 /* Optimize the simple case where the arg fits in one gpr, except in
11709 the case of BLKmode due to assign_parms assuming that registers are
11710 BITS_PER_WORD wide. */
11711 if (n_units == 0
11712 || (n_units == 1 && mode != BLKmode))
11713 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11714
11715 k = 0;
11716 if (align_words + n_units > GP_ARG_NUM_REG)
11717 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11718 using a magic NULL_RTX component.
11719 This is not strictly correct. Only some of the arg belongs in
11720 memory, not all of it. However, the normal scheme using
11721 function_arg_partial_nregs can result in unusual subregs, eg.
11722 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11723 store the whole arg to memory is often more efficient than code
11724 to store pieces, and we know that space is available in the right
11725 place for the whole arg. */
11726 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11727
11728 i = 0;
11729 do
11730 {
11731 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11732 rtx off = GEN_INT (i++ * 4);
11733 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11734 }
11735 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11736
11737 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11738 }
11739
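/* For instance (illustrative): an 8-byte BLKmode struct with
   ALIGN_WORDS == 7 needs two SImode words but only r10 is left, so the
   result is roughly

     (parallel:BLK [(expr_list (nil) (const_int 0))
                    (expr_list (reg:SI 10) (const_int 0))])

   where the leading nil element records that part of the argument also
   lives in memory, and reg 10 (r10) carries the first four bytes.  */
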
11740 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11741 but must also be copied into the parameter save area starting at
11742 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11743 to the GPRs and/or memory. Return the number of elements used. */
11744
11745 static int
11746 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11747 int align_words, rtx *rvec)
11748 {
11749 int k = 0;
11750
11751 if (align_words < GP_ARG_NUM_REG)
11752 {
11753 int n_words = rs6000_arg_size (mode, type);
11754
11755 if (align_words + n_words > GP_ARG_NUM_REG
11756 || mode == BLKmode
11757 || (TARGET_32BIT && TARGET_POWERPC64))
11758 {
11759 /* If this is partially on the stack, then we only
11760 include the portion actually in registers here. */
11761 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11762 int i = 0;
11763
11764 if (align_words + n_words > GP_ARG_NUM_REG)
11765 {
11766 /* Not all of the arg fits in gprs. Say that it goes in memory
11767 too, using a magic NULL_RTX component. Also see comment in
11768 rs6000_mixed_function_arg for why the normal
11769 function_arg_partial_nregs scheme doesn't work in this case. */
11770 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11771 }
11772
11773 do
11774 {
11775 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11776 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11777 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11778 }
11779 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11780 }
11781 else
11782 {
11783 /* The whole arg fits in gprs. */
11784 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11785 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11786 }
11787 }
11788 else
11789 {
11790 /* It's entirely in memory. */
11791 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11792 }
11793
11794 return k;
11795 }
11796
11797 /* RVEC is a vector of K components of an argument of mode MODE.
11798 Construct the final function_arg return value from it. */
11799
11800 static rtx
11801 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11802 {
11803 gcc_assert (k >= 1);
11804
11805 /* Avoid returning a PARALLEL in the trivial cases. */
11806 if (k == 1)
11807 {
11808 if (XEXP (rvec[0], 0) == NULL_RTX)
11809 return NULL_RTX;
11810
11811 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11812 return XEXP (rvec[0], 0);
11813 }
11814
11815 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11816 }
11817
11818 /* Determine where to put an argument to a function.
11819 Value is zero to push the argument on the stack,
11820 or a hard register in which to store the argument.
11821
11822 MODE is the argument's machine mode.
11823 TYPE is the data type of the argument (as a tree).
11824 This is null for libcalls where that information may
11825 not be available.
11826 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11827 the preceding args and about the function being called. It is
11828 not modified in this routine.
11829 NAMED is nonzero if this argument is a named parameter
11830 (otherwise it is an extra parameter matching an ellipsis).
11831
11832 On RS/6000 the first eight words of non-FP are normally in registers
11833 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11834 Under V.4, the first 8 FP args are in registers.
11835
11836 If this is floating-point and no prototype is specified, we use
11837 both an FP and integer register (or possibly FP reg and stack). Library
11838 functions (when CALL_LIBCALL is set) always have the proper types for args,
11839 so we can pass the FP value just in one register. emit_library_function
11840 doesn't support PARALLEL anyway.
11841
11842 Note that for args passed by reference, function_arg will be called
11843 with MODE and TYPE set to that of the pointer to the arg, not the arg
11844 itself. */
11845
11846 static rtx
11847 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11848 const_tree type, bool named)
11849 {
11850 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11851 enum rs6000_abi abi = DEFAULT_ABI;
11852 machine_mode elt_mode;
11853 int n_elts;
11854
11855 /* Return a marker to indicate whether we need to set or clear the CR1
11856 bit that V.4 uses to say fp args were passed in registers.
11857 Assume that we don't need the marker for software floating point,
11858 or compiler-generated library calls. */
11859 if (mode == VOIDmode)
11860 {
11861 if (abi == ABI_V4
11862 && (cum->call_cookie & CALL_LIBCALL) == 0
11863 && (cum->stdarg
11864 || (cum->nargs_prototype < 0
11865 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11866 && TARGET_HARD_FLOAT)
11867 return GEN_INT (cum->call_cookie
11868 | ((cum->fregno == FP_ARG_MIN_REG)
11869 ? CALL_V4_SET_FP_ARGS
11870 : CALL_V4_CLEAR_FP_ARGS));
11871
11872 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11873 }
11874
11875 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11876
11877 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11878 {
11879 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11880 if (rslt != NULL_RTX)
11881 return rslt;
11882 /* Else fall through to usual handling. */
11883 }
11884
11885 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11886 {
11887 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11888 rtx r, off;
11889 int i, k = 0;
11890
11891 /* Do we also need to pass this argument in the parameter save area?
11892 Library support functions for IEEE 128-bit are assumed to not need the
11893 value passed both in GPRs and in vector registers. */
11894 if (TARGET_64BIT && !cum->prototype
11895 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11896 {
11897 int align_words = ROUND_UP (cum->words, 2);
11898 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11899 }
11900
11901 /* Describe where this argument goes in the vector registers. */
11902 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11903 {
11904 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11905 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11906 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11907 }
11908
11909 return rs6000_finish_function_arg (mode, rvec, k);
11910 }
11911 else if (TARGET_ALTIVEC_ABI
11912 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11913 || (type && TREE_CODE (type) == VECTOR_TYPE
11914 && int_size_in_bytes (type) == 16)))
11915 {
11916 if (named || abi == ABI_V4)
11917 return NULL_RTX;
11918 else
11919 {
11920 /* Vector parameters to varargs functions under AIX or Darwin
11921 get passed in memory and possibly also in GPRs. */
11922 int align, align_words, n_words;
11923 machine_mode part_mode;
11924
11925 /* Vector parameters must be 16-byte aligned. In 32-bit
11926 mode this means we need to take into account the offset
11927 to the parameter save area. In 64-bit mode, they just
11928 have to start on an even word, since the parameter save
11929 area is 16-byte aligned. */
11930 if (TARGET_32BIT)
11931 align = -(rs6000_parm_offset () + cum->words) & 3;
11932 else
11933 align = cum->words & 1;
11934 align_words = cum->words + align;
11935
11936 /* Out of registers? Memory, then. */
11937 if (align_words >= GP_ARG_NUM_REG)
11938 return NULL_RTX;
11939
11940 if (TARGET_32BIT && TARGET_POWERPC64)
11941 return rs6000_mixed_function_arg (mode, type, align_words);
11942
11943 /* The vector value goes in GPRs. Only the part of the
11944 value in GPRs is reported here. */
11945 part_mode = mode;
11946 n_words = rs6000_arg_size (mode, type);
11947 if (align_words + n_words > GP_ARG_NUM_REG)
11948 /* Fortunately, there are only two possibilities, the value
11949 is either wholly in GPRs or half in GPRs and half not. */
11950 part_mode = DImode;
11951
11952 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11953 }
11954 }
11955
11956 else if (abi == ABI_V4)
11957 {
11958 if (abi_v4_pass_in_fpr (mode, named))
11959 {
11960 /* _Decimal128 must use an even/odd register pair. This assumes
11961 that the register number is odd when fregno is odd. */
11962 if (mode == TDmode && (cum->fregno % 2) == 1)
11963 cum->fregno++;
11964
11965 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11966 <= FP_ARG_V4_MAX_REG)
11967 return gen_rtx_REG (mode, cum->fregno);
11968 else
11969 return NULL_RTX;
11970 }
11971 else
11972 {
11973 int n_words = rs6000_arg_size (mode, type);
11974 int gregno = cum->sysv_gregno;
11975
11976 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10),
11977 as is any other 2-word item such as complex int, due to a
11978 historical mistake. */
11979 if (n_words == 2)
11980 gregno += (1 - gregno) & 1;
11981
11982 /* Multi-reg args are not split between registers and stack. */
11983 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11984 return NULL_RTX;
11985
11986 if (TARGET_32BIT && TARGET_POWERPC64)
11987 return rs6000_mixed_function_arg (mode, type,
11988 gregno - GP_ARG_MIN_REG);
11989 return gen_rtx_REG (mode, gregno);
11990 }
11991 }
11992 else
11993 {
11994 int align_words = rs6000_parm_start (mode, type, cum->words);
11995
11996 /* _Decimal128 must be passed in an even/odd float register pair.
11997 This assumes that the register number is odd when fregno is odd. */
11998 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11999 cum->fregno++;
12000
12001 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12002 && !(TARGET_AIX && !TARGET_ELF && AGGREGATE_TYPE_P (type)))
12003 {
12004 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12005 rtx r, off;
12006 int i, k = 0;
12007 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12008 int fpr_words;
12009
12010 /* Do we also need to pass this argument in the parameter
12011 save area? */
12012 if (type && (cum->nargs_prototype <= 0
12013 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12014 && TARGET_XL_COMPAT
12015 && align_words >= GP_ARG_NUM_REG)))
12016 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12017
12018 /* Describe where this argument goes in the fprs. */
12019 for (i = 0; i < n_elts
12020 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12021 {
12022 /* Check if the argument is split over registers and memory.
12023 This can only ever happen for long double or _Decimal128;
12024 complex types are handled via split_complex_arg. */
12025 machine_mode fmode = elt_mode;
12026 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12027 {
12028 gcc_assert (FLOAT128_2REG_P (fmode));
12029 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12030 }
12031
12032 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12033 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12034 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12035 }
12036
12037 /* If there were not enough FPRs to hold the argument, the rest
12038 usually goes into memory. However, if the current position
12039 is still within the register parameter area, a portion may
12040 actually have to go into GPRs.
12041
12042 Note that it may happen that the portion of the argument
12043 passed in the first "half" of the first GPR was already
12044 passed in the last FPR as well.
12045
12046 For unnamed arguments, we already set up GPRs to cover the
12047 whole argument in rs6000_psave_function_arg, so there is
12048 nothing further to do at this point. */
12049 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12050 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12051 && cum->nargs_prototype > 0)
12052 {
12053 static bool warned;
12054
12055 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12056 int n_words = rs6000_arg_size (mode, type);
12057
12058 align_words += fpr_words;
12059 n_words -= fpr_words;
12060
12061 do
12062 {
12063 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12064 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12065 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12066 }
12067 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12068
12069 if (!warned && warn_psabi)
12070 {
12071 warned = true;
12072 inform (input_location,
12073 "the ABI of passing homogeneous float aggregates"
12074 " has changed in GCC 5");
12075 }
12076 }
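	  /* A sketch of the loop above (illustrative, not from the ABI
	     document): each iteration emits one EXPR_LIST pairing GPR
	     (GP_ARG_MIN_REG + align_words) with the byte offset where the
	     FPR copies left off, stopping when either the value or the
	     GP_ARG_NUM_REG-register parameter area is exhausted.  */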
12077
12078 return rs6000_finish_function_arg (mode, rvec, k);
12079 }
12080 else if (align_words < GP_ARG_NUM_REG)
12081 {
12082 if (TARGET_32BIT && TARGET_POWERPC64)
12083 return rs6000_mixed_function_arg (mode, type, align_words);
12084
12085 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12086 }
12087 else
12088 return NULL_RTX;
12089 }
12090 }
12091 \f
12092 /* For an arg passed partly in registers and partly in memory, this is
12093 the number of bytes passed in registers. For args passed entirely in
12094 registers or entirely in memory, zero. When an arg is described by a
12095 PARALLEL, perhaps using more than one register type, this function
12096 returns the number of bytes used by the first element of the PARALLEL. */
12097
12098 static int
12099 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12100 tree type, bool named)
12101 {
12102 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12103 bool passed_in_gprs = true;
12104 int ret = 0;
12105 int align_words;
12106 machine_mode elt_mode;
12107 int n_elts;
12108
12109 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12110
12111 if (DEFAULT_ABI == ABI_V4)
12112 return 0;
12113
12114 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12115 {
12116 /* If we are passing this arg in the fixed parameter save area (gprs or
12117 memory) as well as VRs, we do not use the partial bytes mechanism;
12118 instead, rs6000_function_arg will return a PARALLEL including a memory
12119 element as necessary. Library support functions for IEEE 128-bit are
12120 assumed to not need the value passed both in GPRs and in vector
12121 registers. */
12122 if (TARGET_64BIT && !cum->prototype
12123 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12124 return 0;
12125
12126 /* Otherwise, we pass in VRs only. Check for partial copies. */
12127 passed_in_gprs = false;
12128 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12129 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12130 }
12131
12132 /* In this complicated case we just disable the partial_nregs code. */
12133 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12134 return 0;
12135
12136 align_words = rs6000_parm_start (mode, type, cum->words);
12137
12138 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12139 && !(TARGET_AIX && !TARGET_ELF && AGGREGATE_TYPE_P (type)))
12140 {
12141 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12142
12143 /* If we are passing this arg in the fixed parameter save area
12144 (gprs or memory) as well as FPRs, we do not use the partial
12145 bytes mechanism; instead, rs6000_function_arg will return a
12146 PARALLEL including a memory element as necessary. */
12147 if (type
12148 && (cum->nargs_prototype <= 0
12149 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12150 && TARGET_XL_COMPAT
12151 && align_words >= GP_ARG_NUM_REG)))
12152 return 0;
12153
12154 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12155 passed_in_gprs = false;
12156 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12157 {
12158 /* Compute number of bytes / words passed in FPRs. If there
12159 is still space available in the register parameter area
12160 *after* that amount, a part of the argument will be passed
12161 in GPRs. In that case, the total amount passed in any
12162 registers is equal to the amount that would have been passed
12163 in GPRs if everything were passed there, so we fall back to
12164 the GPR code below to compute the appropriate value. */
12165 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12166 * MIN (8, GET_MODE_SIZE (elt_mode)));
12167 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12168
12169 if (align_words + fpr_words < GP_ARG_NUM_REG)
12170 passed_in_gprs = true;
12171 else
12172 ret = fpr;
12173 }
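      /* A worked example (illustrative, 64-bit): if only two FPRs remain
	 free and the homogeneous element mode is DFmode, then
	 fpr == 2 * 8 == 16 bytes and fpr_words == 2; whether those 16
	 bytes are the final answer depends on the GPR test below.  */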
12174 }
12175
12176 if (passed_in_gprs
12177 && align_words < GP_ARG_NUM_REG
12178 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12179 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12180
12181 if (ret != 0 && TARGET_DEBUG_ARG)
12182 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12183
12184 return ret;
12185 }
12186 \f
12187 /* A C expression that indicates when an argument must be passed by
12188 reference. If nonzero for an argument, a copy of that argument is
12189 made in memory and a pointer to the argument is passed instead of
12190 the argument itself. The pointer is passed in whatever way is
12191 appropriate for passing a pointer to that type.
12192
12193    Under V.4, aggregates and IEEE 128-bit long double are passed by reference.
12194
12195 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12196 reference unless the AltiVec vector extension ABI is in force.
12197
12198    As an extension to all ABIs, variable-sized types are passed by
12199 reference. */
12200
12201 static bool
12202 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12203 machine_mode mode, const_tree type,
12204 bool named ATTRIBUTE_UNUSED)
12205 {
12206 if (!type)
12207 return 0;
12208
12209 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12210 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12211 {
12212 if (TARGET_DEBUG_ARG)
12213 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12214 return 1;
12215 }
12216
12217 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12218 {
12219 if (TARGET_DEBUG_ARG)
12220 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12221 return 1;
12222 }
12223
12224 if (int_size_in_bytes (type) < 0)
12225 {
12226 if (TARGET_DEBUG_ARG)
12227 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12228 return 1;
12229 }
12230
12231 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12232 modes only exist for GCC vector types if -maltivec. */
12233 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12234 {
12235 if (TARGET_DEBUG_ARG)
12236 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12237 return 1;
12238 }
12239
12240 /* Pass synthetic vectors in memory. */
12241 if (TREE_CODE (type) == VECTOR_TYPE
12242 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12243 {
12244 static bool warned_for_pass_big_vectors = false;
12245 if (TARGET_DEBUG_ARG)
12246 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12247 if (!warned_for_pass_big_vectors)
12248 {
12249 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12250 "non-standard ABI extension with no compatibility "
12251 "guarantee");
12252 warned_for_pass_big_vectors = true;
12253 }
12254 return 1;
12255 }
12256
12257 return 0;
12258 }
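/* Some illustrative cases for the predicate above (not exhaustive):
   under the V.4 ABI an aggregate such as "struct { int a, b; }" is
   passed by reference; a variable-sized type is passed by reference
   under every ABI; and a 32-byte GCC vector such as
   "int __attribute__ ((vector_size (32)))" takes the synthetic-vector
   path above and is passed by reference with a -Wpsabi warning.  */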
12259
12260 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12261    already processed.  Return true if the parameter must be passed
12262 (fully or partially) on the stack. */
12263
12264 static bool
12265 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12266 {
12267 machine_mode mode;
12268 int unsignedp;
12269 rtx entry_parm;
12270
12271 /* Catch errors. */
12272 if (type == NULL || type == error_mark_node)
12273 return true;
12274
12275 /* Handle types with no storage requirement. */
12276 if (TYPE_MODE (type) == VOIDmode)
12277 return false;
12278
12279   /* Handle complex types: each recursive call checks one component and advances ARGS_SO_FAR past it.  */
12280 if (TREE_CODE (type) == COMPLEX_TYPE)
12281 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12282 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12283
12284 /* Handle transparent aggregates. */
12285 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12286 && TYPE_TRANSPARENT_AGGR (type))
12287 type = TREE_TYPE (first_field (type));
12288
12289 /* See if this arg was passed by invisible reference. */
12290 if (pass_by_reference (get_cumulative_args (args_so_far),
12291 TYPE_MODE (type), type, true))
12292 type = build_pointer_type (type);
12293
12294 /* Find mode as it is passed by the ABI. */
12295 unsignedp = TYPE_UNSIGNED (type);
12296 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12297
12298 /* If we must pass in stack, we need a stack. */
12299 if (rs6000_must_pass_in_stack (mode, type))
12300 return true;
12301
12302 /* If there is no incoming register, we need a stack. */
12303 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12304 if (entry_parm == NULL)
12305 return true;
12306
12307 /* Likewise if we need to pass both in registers and on the stack. */
12308 if (GET_CODE (entry_parm) == PARALLEL
12309 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12310 return true;
12311
12312 /* Also true if we're partially in registers and partially not. */
12313 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12314 return true;
12315
12316 /* Update info on where next arg arrives in registers. */
12317 rs6000_function_arg_advance (args_so_far, mode, type, true);
12318 return false;
12319 }
12320
12321 /* Return true if FUN has no prototype, has a variable argument
12322 list, or passes any parameter in memory. */
12323
12324 static bool
12325 rs6000_function_parms_need_stack (tree fun, bool incoming)
12326 {
12327 tree fntype, result;
12328 CUMULATIVE_ARGS args_so_far_v;
12329 cumulative_args_t args_so_far;
12330
12331 if (!fun)
12332 /* Must be a libcall, all of which only use reg parms. */
12333 return false;
12334
12335 fntype = fun;
12336 if (!TYPE_P (fun))
12337 fntype = TREE_TYPE (fun);
12338
12339 /* Varargs functions need the parameter save area. */
12340 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12341 return true;
12342
12343 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12344 args_so_far = pack_cumulative_args (&args_so_far_v);
12345
12346 /* When incoming, we will have been passed the function decl.
12347 It is necessary to use the decl to handle K&R style functions,
12348 where TYPE_ARG_TYPES may not be available. */
12349 if (incoming)
12350 {
12351 gcc_assert (DECL_P (fun));
12352 result = DECL_RESULT (fun);
12353 }
12354 else
12355 result = TREE_TYPE (fntype);
12356
12357 if (result && aggregate_value_p (result, fntype))
12358 {
12359 if (!TYPE_P (result))
12360 result = TREE_TYPE (result);
12361 result = build_pointer_type (result);
12362 rs6000_parm_needs_stack (args_so_far, result);
12363 }
12364
12365 if (incoming)
12366 {
12367 tree parm;
12368
12369 for (parm = DECL_ARGUMENTS (fun);
12370 parm && parm != void_list_node;
12371 parm = TREE_CHAIN (parm))
12372 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12373 return true;
12374 }
12375 else
12376 {
12377 function_args_iterator args_iter;
12378 tree arg_type;
12379
12380 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12381 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12382 return true;
12383 }
12384
12385 return false;
12386 }
12387
12388 /* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
12389    usually a constant depending on the ABI.  However, in the ELFv2 ABI
12390    the register parameter area is optional when calling a function that
12391    has a prototype in scope, has no variable argument list, and passes
12392    all parameters in registers.  */
12393
12394 int
12395 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12396 {
12397 int reg_parm_stack_space;
12398
12399 switch (DEFAULT_ABI)
12400 {
12401 default:
12402 reg_parm_stack_space = 0;
12403 break;
12404
12405 case ABI_AIX:
12406 case ABI_DARWIN:
12407 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12408 break;
12409
12410 case ABI_ELFv2:
12411 /* ??? Recomputing this every time is a bit expensive. Is there
12412 a place to cache this information? */
12413 if (rs6000_function_parms_need_stack (fun, incoming))
12414 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12415 else
12416 reg_parm_stack_space = 0;
12417 break;
12418 }
12419
12420 return reg_parm_stack_space;
12421 }
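/* Illustrative results of the function above (64-bit):

     AIX/Darwin:  always 64 bytes.
     ELFv2:       void f (int, double);       -> 0 (prototype in scope,
						    everything in registers)
		  void g (const char *, ...); -> 64 (varargs needs the area)
     default (e.g. V.4): 0.  */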
12422
12423 static void
12424 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12425 {
12426 int i;
12427 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12428
12429 if (nregs == 0)
12430 return;
12431
12432 for (i = 0; i < nregs; i++)
12433 {
12434 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12435 if (reload_completed)
12436 {
12437 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12438 tem = NULL_RTX;
12439 else
12440 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12441 i * GET_MODE_SIZE (reg_mode));
12442 }
12443 else
12444 tem = replace_equiv_address (tem, XEXP (tem, 0));
12445
12446 gcc_assert (tem);
12447
12448 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12449 }
12450 }
12451 \f
12452 /* Perform any actions needed for a function that is receiving a
12453 variable number of arguments.
12454
12455 CUM is as above.
12456
12457 MODE and TYPE are the mode and type of the current parameter.
12458
12459 PRETEND_SIZE is a variable that should be set to the amount of stack
12460 that must be pushed by the prolog to pretend that our caller pushed
12461 it.
12462
12463 Normally, this macro will push all remaining incoming registers on the
12464 stack and set PRETEND_SIZE to the length of the registers pushed. */
12465
12466 static void
12467 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12468 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12469 int no_rtl)
12470 {
12471 CUMULATIVE_ARGS next_cum;
12472 int reg_size = TARGET_32BIT ? 4 : 8;
12473 rtx save_area = NULL_RTX, mem;
12474 int first_reg_offset;
12475 alias_set_type set;
12476
12477 /* Skip the last named argument. */
12478 next_cum = *get_cumulative_args (cum);
12479 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12480
12481 if (DEFAULT_ABI == ABI_V4)
12482 {
12483 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12484
12485 if (! no_rtl)
12486 {
12487 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12488 HOST_WIDE_INT offset = 0;
12489
12490 	    /* Try to optimize the size of the varargs save area.
12491 	       The ABI requires that ap.reg_save_area is doubleword
12492 	       aligned, but we don't need to allocate space for all
12493 	       the bytes, only for those to which we will actually
12494 	       save anything.  */
12495 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12496 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12497 if (TARGET_HARD_FLOAT
12498 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12499 && cfun->va_list_fpr_size)
12500 {
12501 if (gpr_reg_num)
12502 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12503 * UNITS_PER_FP_WORD;
12504 if (cfun->va_list_fpr_size
12505 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12506 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12507 else
12508 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12509 * UNITS_PER_FP_WORD;
12510 }
12511 if (gpr_reg_num)
12512 {
12513 offset = -((first_reg_offset * reg_size) & ~7);
12514 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12515 {
12516 gpr_reg_num = cfun->va_list_gpr_size;
12517 if (reg_size == 4 && (first_reg_offset & 1))
12518 gpr_reg_num++;
12519 }
12520 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12521 }
12522 else if (fpr_size)
12523 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12524 * UNITS_PER_FP_WORD
12525 - (int) (GP_ARG_NUM_REG * reg_size);
12526
12527 if (gpr_size + fpr_size)
12528 {
12529 rtx reg_save_area
12530 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12531 gcc_assert (GET_CODE (reg_save_area) == MEM);
12532 reg_save_area = XEXP (reg_save_area, 0);
12533 if (GET_CODE (reg_save_area) == PLUS)
12534 {
12535 gcc_assert (XEXP (reg_save_area, 0)
12536 == virtual_stack_vars_rtx);
12537 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12538 offset += INTVAL (XEXP (reg_save_area, 1));
12539 }
12540 else
12541 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12542 }
12543
12544 cfun->machine->varargs_save_offset = offset;
12545 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12546 }
12547 }
12548 else
12549 {
12550 first_reg_offset = next_cum.words;
12551 save_area = crtl->args.internal_arg_pointer;
12552
12553 if (targetm.calls.must_pass_in_stack (mode, type))
12554 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12555 }
12556
12557 set = get_varargs_alias_set ();
12558 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12559 && cfun->va_list_gpr_size)
12560 {
12561 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12562
12563 if (va_list_gpr_counter_field)
12564 /* V4 va_list_gpr_size counts number of registers needed. */
12565 n_gpr = cfun->va_list_gpr_size;
12566 else
12567 /* char * va_list instead counts number of bytes needed. */
12568 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12569
12570 if (nregs > n_gpr)
12571 nregs = n_gpr;
12572
12573 mem = gen_rtx_MEM (BLKmode,
12574 plus_constant (Pmode, save_area,
12575 first_reg_offset * reg_size));
12576 MEM_NOTRAP_P (mem) = 1;
12577 set_mem_alias_set (mem, set);
12578 set_mem_align (mem, BITS_PER_WORD);
12579
12580 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12581 nregs);
12582 }
12583
12584 /* Save FP registers if needed. */
12585 if (DEFAULT_ABI == ABI_V4
12586 && TARGET_HARD_FLOAT
12587 && ! no_rtl
12588 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12589 && cfun->va_list_fpr_size)
12590 {
12591 int fregno = next_cum.fregno, nregs;
12592 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12593 rtx lab = gen_label_rtx ();
12594 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12595 * UNITS_PER_FP_WORD);
12596
12597 emit_jump_insn
12598 (gen_rtx_SET (pc_rtx,
12599 gen_rtx_IF_THEN_ELSE (VOIDmode,
12600 gen_rtx_NE (VOIDmode, cr1,
12601 const0_rtx),
12602 gen_rtx_LABEL_REF (VOIDmode, lab),
12603 pc_rtx)));
12604
12605 for (nregs = 0;
12606 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12607 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12608 {
12609 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12610 plus_constant (Pmode, save_area, off));
12611 MEM_NOTRAP_P (mem) = 1;
12612 set_mem_alias_set (mem, set);
12613 set_mem_align (mem, GET_MODE_ALIGNMENT (
12614 TARGET_HARD_FLOAT ? DFmode : SFmode));
12615 emit_move_insn (mem, gen_rtx_REG (
12616 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12617 }
12618
12619 emit_label (lab);
12620 }
12621 }
12622
12623 /* Create the va_list data type. */
12624
12625 static tree
12626 rs6000_build_builtin_va_list (void)
12627 {
12628 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12629
12630 /* For AIX, prefer 'char *' because that's what the system
12631 header files like. */
12632 if (DEFAULT_ABI != ABI_V4)
12633 return build_pointer_type (char_type_node);
12634
12635 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12636 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12637 get_identifier ("__va_list_tag"), record);
12638
12639 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12640 unsigned_char_type_node);
12641 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12642 unsigned_char_type_node);
12643 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12644 every user file. */
12645 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12646 get_identifier ("reserved"), short_unsigned_type_node);
12647 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12648 get_identifier ("overflow_arg_area"),
12649 ptr_type_node);
12650 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12651 get_identifier ("reg_save_area"),
12652 ptr_type_node);
12653
12654 va_list_gpr_counter_field = f_gpr;
12655 va_list_fpr_counter_field = f_fpr;
12656
12657 DECL_FIELD_CONTEXT (f_gpr) = record;
12658 DECL_FIELD_CONTEXT (f_fpr) = record;
12659 DECL_FIELD_CONTEXT (f_res) = record;
12660 DECL_FIELD_CONTEXT (f_ovf) = record;
12661 DECL_FIELD_CONTEXT (f_sav) = record;
12662
12663 TYPE_STUB_DECL (record) = type_decl;
12664 TYPE_NAME (record) = type_decl;
12665 TYPE_FIELDS (record) = f_gpr;
12666 DECL_CHAIN (f_gpr) = f_fpr;
12667 DECL_CHAIN (f_fpr) = f_res;
12668 DECL_CHAIN (f_res) = f_ovf;
12669 DECL_CHAIN (f_ovf) = f_sav;
12670
12671 layout_type (record);
12672
12673 /* The correct type is an array type of one element. */
12674 return build_array_type (record, build_index_type (size_zero_node));
12675 }
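/* A user-level rendering of the V.4 record built above, illustrative
   only (the authoritative layout is whatever layout_type produced):

     typedef struct __va_list_tag {
       unsigned char gpr;	     (index of next GP arg register)
       unsigned char fpr;	     (index of next FP arg register)
       unsigned short reserved;	     (the named padding)
       void *overflow_arg_area;	     (arguments passed on the stack)
       void *reg_save_area;	     (register block saved by the prologue)
     } va_list[1];		     (array type of one element)
   */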
12676
12677 /* Implement va_start. */
12678
12679 static void
12680 rs6000_va_start (tree valist, rtx nextarg)
12681 {
12682 HOST_WIDE_INT words, n_gpr, n_fpr;
12683 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12684 tree gpr, fpr, ovf, sav, t;
12685
12686 /* Only SVR4 needs something special. */
12687 if (DEFAULT_ABI != ABI_V4)
12688 {
12689 std_expand_builtin_va_start (valist, nextarg);
12690 return;
12691 }
12692
12693 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12694 f_fpr = DECL_CHAIN (f_gpr);
12695 f_res = DECL_CHAIN (f_fpr);
12696 f_ovf = DECL_CHAIN (f_res);
12697 f_sav = DECL_CHAIN (f_ovf);
12698
12699 valist = build_simple_mem_ref (valist);
12700 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12701 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12702 f_fpr, NULL_TREE);
12703 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12704 f_ovf, NULL_TREE);
12705 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12706 f_sav, NULL_TREE);
12707
12708 /* Count number of gp and fp argument registers used. */
12709 words = crtl->args.info.words;
12710 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12711 GP_ARG_NUM_REG);
12712 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12713 FP_ARG_NUM_REG);
12714
12715 if (TARGET_DEBUG_ARG)
12716 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12717 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12718 words, n_gpr, n_fpr);
12719
12720 if (cfun->va_list_gpr_size)
12721 {
12722 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12723 build_int_cst (NULL_TREE, n_gpr));
12724 TREE_SIDE_EFFECTS (t) = 1;
12725 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12726 }
12727
12728 if (cfun->va_list_fpr_size)
12729 {
12730 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12731 build_int_cst (NULL_TREE, n_fpr));
12732 TREE_SIDE_EFFECTS (t) = 1;
12733 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12734
12735 #ifdef HAVE_AS_GNU_ATTRIBUTE
12736 if (call_ABI_of_interest (cfun->decl))
12737 rs6000_passes_float = true;
12738 #endif
12739 }
12740
12741 /* Find the overflow area. */
12742 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12743 if (words != 0)
12744 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12745 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12746 TREE_SIDE_EFFECTS (t) = 1;
12747 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12748
12749 /* If there were no va_arg invocations, don't set up the register
12750 save area. */
12751 if (!cfun->va_list_gpr_size
12752 && !cfun->va_list_fpr_size
12753 && n_gpr < GP_ARG_NUM_REG
12754 && n_fpr < FP_ARG_V4_MAX_REG)
12755 return;
12756
12757 /* Find the register save area. */
12758 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12759 if (cfun->machine->varargs_save_offset)
12760 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12761 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12762 TREE_SIDE_EFFECTS (t) = 1;
12763 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12764 }
12765
12766 /* Implement va_arg. */
12767
12768 static tree
12769 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12770 gimple_seq *post_p)
12771 {
12772 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12773 tree gpr, fpr, ovf, sav, reg, t, u;
12774 int size, rsize, n_reg, sav_ofs, sav_scale;
12775 tree lab_false, lab_over, addr;
12776 int align;
12777 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12778 int regalign = 0;
12779 gimple *stmt;
12780
12781 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12782 {
12783 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12784 return build_va_arg_indirect_ref (t);
12785 }
12786
12787   /* We need to deal with the fact that the Darwin ppc64 ABI is defined by an
12788      earlier version of GCC, with the property that it always applied alignment
12789      adjustments to the va-args (even for zero-sized types).  The cheapest way
12790      to deal with this is to replicate the effect of the part of
12791      std_gimplify_va_arg_expr that carries out the align adjust, for the
12792      relevant case.
12793      We don't need to check for pass-by-reference because of the test above.
12794      We can return a simplified answer, since we know there's no offset to add.  */
12795
12796 if (((TARGET_MACHO
12797 && rs6000_darwin64_abi)
12798 || DEFAULT_ABI == ABI_ELFv2
12799 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12800 && integer_zerop (TYPE_SIZE (type)))
12801 {
12802 unsigned HOST_WIDE_INT align, boundary;
12803 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12804 align = PARM_BOUNDARY / BITS_PER_UNIT;
12805 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12806 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12807 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12808 boundary /= BITS_PER_UNIT;
12809 if (boundary > align)
12810 {
12811 	      tree t;
12812 /* This updates arg ptr by the amount that would be necessary
12813 to align the zero-sized (but not zero-alignment) item. */
12814 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12815 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12816 gimplify_and_add (t, pre_p);
12817
12818 t = fold_convert (sizetype, valist_tmp);
12819 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12820 fold_convert (TREE_TYPE (valist),
12821 fold_build2 (BIT_AND_EXPR, sizetype, t,
12822 size_int (-boundary))));
12823 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12824 gimplify_and_add (t, pre_p);
12825 }
12826 /* Since it is zero-sized there's no increment for the item itself. */
12827 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12828 return build_va_arg_indirect_ref (valist_tmp);
12829 }
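  /* Illustrative: a zero-sized type whose alignment is 16 bytes bumps
     the argument pointer up to the next 16-byte boundary via the
     (p + 15) & -16 computation above, but consumes no bytes itself.  */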
12830
12831 if (DEFAULT_ABI != ABI_V4)
12832 {
12833 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12834 {
12835 tree elem_type = TREE_TYPE (type);
12836 machine_mode elem_mode = TYPE_MODE (elem_type);
12837 int elem_size = GET_MODE_SIZE (elem_mode);
12838
12839 if (elem_size < UNITS_PER_WORD)
12840 {
12841 tree real_part, imag_part;
12842 gimple_seq post = NULL;
12843
12844 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12845 &post);
12846 /* Copy the value into a temporary, lest the formal temporary
12847 be reused out from under us. */
12848 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12849 gimple_seq_add_seq (pre_p, post);
12850
12851 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12852 post_p);
12853
12854 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12855 }
12856 }
12857
12858 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12859 }
12860
12861 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12862 f_fpr = DECL_CHAIN (f_gpr);
12863 f_res = DECL_CHAIN (f_fpr);
12864 f_ovf = DECL_CHAIN (f_res);
12865 f_sav = DECL_CHAIN (f_ovf);
12866
12867 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12868 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12869 f_fpr, NULL_TREE);
12870 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12871 f_ovf, NULL_TREE);
12872 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12873 f_sav, NULL_TREE);
12874
12875 size = int_size_in_bytes (type);
12876 rsize = (size + 3) / 4;
12877 int pad = 4 * rsize - size;
12878 align = 1;
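  /* E.g. (illustrative): a 6-byte argument gives size == 6, rsize == 2
     four-byte words and pad == 2, so on big-endian targets the value is
     placed right-aligned, two bytes into its slot.  */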
12879
12880 machine_mode mode = TYPE_MODE (type);
12881 if (abi_v4_pass_in_fpr (mode, false))
12882 {
12883 /* FP args go in FP registers, if present. */
12884 reg = fpr;
12885 n_reg = (size + 7) / 8;
12886 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12887 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12888 if (mode != SFmode && mode != SDmode)
12889 align = 8;
12890 }
12891 else
12892 {
12893 /* Otherwise into GP registers. */
12894 reg = gpr;
12895 n_reg = rsize;
12896 sav_ofs = 0;
12897 sav_scale = 4;
12898 if (n_reg == 2)
12899 align = 8;
12900 }
12901
12902 /* Pull the value out of the saved registers.... */
12903
12904 lab_over = NULL;
12905 addr = create_tmp_var (ptr_type_node, "addr");
12906
12907 /* AltiVec vectors never go in registers when -mabi=altivec. */
12908 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12909 align = 16;
12910 else
12911 {
12912 lab_false = create_artificial_label (input_location);
12913 lab_over = create_artificial_label (input_location);
12914
12915 	  /* Long long is aligned in the registers, as is any other two-GPR
12916 	     item such as complex int, due to a historical mistake.  */
12917 u = reg;
12918 if (n_reg == 2 && reg == gpr)
12919 {
12920 regalign = 1;
12921 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12922 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12923 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12924 unshare_expr (reg), u);
12925 }
12926 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12927 reg number is 0 for f1, so we want to make it odd. */
12928 else if (reg == fpr && mode == TDmode)
12929 {
12930 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12931 build_int_cst (TREE_TYPE (reg), 1));
12932 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12933 }
12934
12935 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12936 t = build2 (GE_EXPR, boolean_type_node, u, t);
12937 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12938 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12939 gimplify_and_add (t, pre_p);
12940
12941 t = sav;
12942 if (sav_ofs)
12943 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12944
12945 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12946 build_int_cst (TREE_TYPE (reg), n_reg));
12947 u = fold_convert (sizetype, u);
12948 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12949 t = fold_build_pointer_plus (t, u);
12950
12951 /* _Decimal32 varargs are located in the second word of the 64-bit
12952 FP register for 32-bit binaries. */
12953 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12954 t = fold_build_pointer_plus_hwi (t, size);
12955
12956 /* Args are passed right-aligned. */
12957 if (BYTES_BIG_ENDIAN)
12958 t = fold_build_pointer_plus_hwi (t, pad);
12959
12960 gimplify_assign (addr, t, pre_p);
12961
12962 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12963
12964 stmt = gimple_build_label (lab_false);
12965 gimple_seq_add_stmt (pre_p, stmt);
12966
12967 if ((n_reg == 2 && !regalign) || n_reg > 2)
12968 {
12969 	      /* Ensure that we don't find any more args in regs.
12970 		 Alignment has been taken care of for the special cases.  */
12971 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12972 }
12973 }
12974
12975 /* ... otherwise out of the overflow area. */
12976
12977 /* Care for on-stack alignment if needed. */
12978 t = ovf;
12979 if (align != 1)
12980 {
12981 t = fold_build_pointer_plus_hwi (t, align - 1);
12982 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12983 build_int_cst (TREE_TYPE (t), -align));
12984 }
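  /* E.g. (illustrative): with align == 8, the two statements above
     compute (ovf + 7) & -8, rounding the overflow pointer up to the
     next 8-byte boundary.  */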
12985
12986 /* Args are passed right-aligned. */
12987 if (BYTES_BIG_ENDIAN)
12988 t = fold_build_pointer_plus_hwi (t, pad);
12989
12990 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12991
12992 gimplify_assign (unshare_expr (addr), t, pre_p);
12993
12994 t = fold_build_pointer_plus_hwi (t, size);
12995 gimplify_assign (unshare_expr (ovf), t, pre_p);
12996
12997 if (lab_over)
12998 {
12999 stmt = gimple_build_label (lab_over);
13000 gimple_seq_add_stmt (pre_p, stmt);
13001 }
13002
13003 if (STRICT_ALIGNMENT
13004 && (TYPE_ALIGN (type)
13005 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13006 {
13007 /* The value (of type complex double, for example) may not be
13008 aligned in memory in the saved registers, so copy via a
13009 temporary. (This is the same code as used for SPARC.) */
13010 tree tmp = create_tmp_var (type, "va_arg_tmp");
13011 tree dest_addr = build_fold_addr_expr (tmp);
13012
13013 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13014 3, dest_addr, addr, size_int (rsize * 4));
13015 TREE_ADDRESSABLE (tmp) = 1;
13016
13017 gimplify_and_add (copy, pre_p);
13018 addr = dest_addr;
13019 }
13020
13021 addr = fold_convert (ptrtype, addr);
13022 return build_va_arg_indirect_ref (addr);
13023 }
13024
13025 /* Builtins. */
13026
13027 static void
13028 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13029 {
13030 tree t;
13031 unsigned classify = rs6000_builtin_info[(int)code].attr;
13032 const char *attr_string = "";
13033
13034 gcc_assert (name != NULL);
13035   gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
13036
13037 if (rs6000_builtin_decls[(int)code])
13038 fatal_error (input_location,
13039 "internal error: builtin function %qs already processed",
13040 name);
13041
13042 rs6000_builtin_decls[(int)code] = t =
13043 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13044
13045 /* Set any special attributes. */
13046 if ((classify & RS6000_BTC_CONST) != 0)
13047 {
13048       /* A const function: the result depends only on the inputs.  */
13049 TREE_READONLY (t) = 1;
13050 TREE_NOTHROW (t) = 1;
13051 attr_string = ", const";
13052 }
13053 else if ((classify & RS6000_BTC_PURE) != 0)
13054 {
13055       /* A pure function: it may read global memory, but does not set
13056 	 any external state.  */
13057 DECL_PURE_P (t) = 1;
13058 TREE_NOTHROW (t) = 1;
13059 attr_string = ", pure";
13060 }
13061 else if ((classify & RS6000_BTC_FP) != 0)
13062 {
13063       /* The function is a math function.  If -frounding-math is in effect,
13064 	 treat the function as not reading global memory, but allow it to
13065 	 have arbitrary side effects.  Otherwise, assume the function is a
13066 	 const function.  This mimics the ATTR_MATHFN_FPROUNDING attribute
13067 	 in builtin-attribute.def that is used for the math functions.  */
13068 TREE_NOTHROW (t) = 1;
13069 if (flag_rounding_math)
13070 {
13071 DECL_PURE_P (t) = 1;
13072 DECL_IS_NOVOPS (t) = 1;
13073 attr_string = ", fp, pure";
13074 }
13075 else
13076 {
13077 TREE_READONLY (t) = 1;
13078 attr_string = ", fp, const";
13079 }
13080 }
13081 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13082 gcc_unreachable ();
13083
13084 if (TARGET_DEBUG_BUILTIN)
13085 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13086 (int)code, name, attr_string);
13087 }
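/* Typical use of def_builtin (hypothetical argument names, for
   illustration only):

     def_builtin ("__builtin_altivec_vaddfp", v4sf_ftype_v4sf_v4sf,
		  ALTIVEC_BUILTIN_VADDFP);

   This records the decl in rs6000_builtin_decls and, if the entry's
   attr word in rs6000_builtin_info has RS6000_BTC_CONST set, marks the
   decl TREE_READONLY and TREE_NOTHROW.  */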
13088
13089 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13090
13091 #undef RS6000_BUILTIN_0
13092 #undef RS6000_BUILTIN_1
13093 #undef RS6000_BUILTIN_2
13094 #undef RS6000_BUILTIN_3
13095 #undef RS6000_BUILTIN_A
13096 #undef RS6000_BUILTIN_D
13097 #undef RS6000_BUILTIN_H
13098 #undef RS6000_BUILTIN_P
13099 #undef RS6000_BUILTIN_X
13100
13101 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13102 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13103 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13104 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13105 { MASK, ICODE, NAME, ENUM },
13106
13107 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13108 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13109 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13110 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13111 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13112
13113 static const struct builtin_description bdesc_3arg[] =
13114 {
13115 #include "rs6000-builtin.def"
13116 };
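/* How the #undef/#define dance above works (hypothetical .def entry,
   for illustration): a line such as

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
		       RS6000_BTM_ALTIVEC, RS6000_BTC_FP, CODE_FOR_fmav4sf4)

   contributes the initializer { RS6000_BTM_ALTIVEC, CODE_FOR_fmav4sf4,
   "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP } to bdesc_3arg,
   while every other RS6000_BUILTIN_* form expands to nothing.  The same
   pattern below rebuilds each remaining table from the single
   rs6000-builtin.def source, selecting a different macro each time.  */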
13117
13118 /* DST operations: void foo (void *, const int, const char). */
13119
13120 #undef RS6000_BUILTIN_0
13121 #undef RS6000_BUILTIN_1
13122 #undef RS6000_BUILTIN_2
13123 #undef RS6000_BUILTIN_3
13124 #undef RS6000_BUILTIN_A
13125 #undef RS6000_BUILTIN_D
13126 #undef RS6000_BUILTIN_H
13127 #undef RS6000_BUILTIN_P
13128 #undef RS6000_BUILTIN_X
13129
13130 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13131 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13132 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13133 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13134 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13135 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13136 { MASK, ICODE, NAME, ENUM },
13137
13138 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13139 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13140 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13141
13142 static const struct builtin_description bdesc_dst[] =
13143 {
13144 #include "rs6000-builtin.def"
13145 };
13146
13147 /* Simple binary operations: VECc = foo (VECa, VECb). */
13148
13149 #undef RS6000_BUILTIN_0
13150 #undef RS6000_BUILTIN_1
13151 #undef RS6000_BUILTIN_2
13152 #undef RS6000_BUILTIN_3
13153 #undef RS6000_BUILTIN_A
13154 #undef RS6000_BUILTIN_D
13155 #undef RS6000_BUILTIN_H
13156 #undef RS6000_BUILTIN_P
13157 #undef RS6000_BUILTIN_X
13158
13159 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13160 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13161 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13162 { MASK, ICODE, NAME, ENUM },
13163
13164 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13165 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13166 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13167 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13168 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13169 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13170
13171 static const struct builtin_description bdesc_2arg[] =
13172 {
13173 #include "rs6000-builtin.def"
13174 };
13175
13176 #undef RS6000_BUILTIN_0
13177 #undef RS6000_BUILTIN_1
13178 #undef RS6000_BUILTIN_2
13179 #undef RS6000_BUILTIN_3
13180 #undef RS6000_BUILTIN_A
13181 #undef RS6000_BUILTIN_D
13182 #undef RS6000_BUILTIN_H
13183 #undef RS6000_BUILTIN_P
13184 #undef RS6000_BUILTIN_X
13185
13186 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13187 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13188 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13189 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13190 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13191 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13192 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13193 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13194 { MASK, ICODE, NAME, ENUM },
13195
13196 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13197
13198 /* AltiVec predicates. */
13199
13200 static const struct builtin_description bdesc_altivec_preds[] =
13201 {
13202 #include "rs6000-builtin.def"
13203 };
13204
13205 /* ABS* operations. */
13206
13207 #undef RS6000_BUILTIN_0
13208 #undef RS6000_BUILTIN_1
13209 #undef RS6000_BUILTIN_2
13210 #undef RS6000_BUILTIN_3
13211 #undef RS6000_BUILTIN_A
13212 #undef RS6000_BUILTIN_D
13213 #undef RS6000_BUILTIN_H
13214 #undef RS6000_BUILTIN_P
13215 #undef RS6000_BUILTIN_X
13216
13217 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13218 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13219 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13220 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13221 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13222 { MASK, ICODE, NAME, ENUM },
13223
13224 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13225 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13226 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13227 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13228
13229 static const struct builtin_description bdesc_abs[] =
13230 {
13231 #include "rs6000-builtin.def"
13232 };
13233
13234 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13235 foo (VECa). */
13236
13237 #undef RS6000_BUILTIN_0
13238 #undef RS6000_BUILTIN_1
13239 #undef RS6000_BUILTIN_2
13240 #undef RS6000_BUILTIN_3
13241 #undef RS6000_BUILTIN_A
13242 #undef RS6000_BUILTIN_D
13243 #undef RS6000_BUILTIN_H
13244 #undef RS6000_BUILTIN_P
13245 #undef RS6000_BUILTIN_X
13246
13247 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13248 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13249 { MASK, ICODE, NAME, ENUM },
13250
13251 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13252 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13253 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13254 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13255 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13256 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13257 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13258
13259 static const struct builtin_description bdesc_1arg[] =
13260 {
13261 #include "rs6000-builtin.def"
13262 };
13263
13264 /* Simple no-argument operations: result = __builtin_darn_32 ().  */
13265
13266 #undef RS6000_BUILTIN_0
13267 #undef RS6000_BUILTIN_1
13268 #undef RS6000_BUILTIN_2
13269 #undef RS6000_BUILTIN_3
13270 #undef RS6000_BUILTIN_A
13271 #undef RS6000_BUILTIN_D
13272 #undef RS6000_BUILTIN_H
13273 #undef RS6000_BUILTIN_P
13274 #undef RS6000_BUILTIN_X
13275
13276 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13277 { MASK, ICODE, NAME, ENUM },
13278
13279 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13280 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13281 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13282 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13283 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13284 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13285 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13286 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13287
13288 static const struct builtin_description bdesc_0arg[] =
13289 {
13290 #include "rs6000-builtin.def"
13291 };
13292
13293 /* HTM builtins. */
13294 #undef RS6000_BUILTIN_0
13295 #undef RS6000_BUILTIN_1
13296 #undef RS6000_BUILTIN_2
13297 #undef RS6000_BUILTIN_3
13298 #undef RS6000_BUILTIN_A
13299 #undef RS6000_BUILTIN_D
13300 #undef RS6000_BUILTIN_H
13301 #undef RS6000_BUILTIN_P
13302 #undef RS6000_BUILTIN_X
13303
13304 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13305 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13306 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13307 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13308 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13309 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13310 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13311 { MASK, ICODE, NAME, ENUM },
13312
13313 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13314 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13315
13316 static const struct builtin_description bdesc_htm[] =
13317 {
13318 #include "rs6000-builtin.def"
13319 };
13320
13321 #undef RS6000_BUILTIN_0
13322 #undef RS6000_BUILTIN_1
13323 #undef RS6000_BUILTIN_2
13324 #undef RS6000_BUILTIN_3
13325 #undef RS6000_BUILTIN_A
13326 #undef RS6000_BUILTIN_D
13327 #undef RS6000_BUILTIN_H
13328 #undef RS6000_BUILTIN_P
13329
13330 /* Return true if a builtin function is overloaded. */
13331 bool
13332 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13333 {
13334 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13335 }
13336
13337 const char *
13338 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13339 {
13340 return rs6000_builtin_info[(int)fncode].name;
13341 }
13342
13343 /* Expand a call to a builtin that takes no arguments.  */
13344 static rtx
13345 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13346 {
13347 rtx pat;
13348 machine_mode tmode = insn_data[icode].operand[0].mode;
13349
13350 if (icode == CODE_FOR_nothing)
13351 /* Builtin not supported on this processor. */
13352 return 0;
13353
13354 if (icode == CODE_FOR_rs6000_mffsl
13355 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13356 {
13357 error ("__builtin_mffsl() not supported with -msoft-float");
13358 return const0_rtx;
13359 }
13360
13361 if (target == 0
13362 || GET_MODE (target) != tmode
13363 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13364 target = gen_reg_rtx (tmode);
13365
13366 pat = GEN_FCN (icode) (target);
13367 if (! pat)
13368 return 0;
13369 emit_insn (pat);
13370
13371 return target;
13372 }
13373
13374
13375 static rtx
13376 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13377 {
13378 rtx pat;
13379 tree arg0 = CALL_EXPR_ARG (exp, 0);
13380 tree arg1 = CALL_EXPR_ARG (exp, 1);
13381 rtx op0 = expand_normal (arg0);
13382 rtx op1 = expand_normal (arg1);
13383 machine_mode mode0 = insn_data[icode].operand[0].mode;
13384 machine_mode mode1 = insn_data[icode].operand[1].mode;
13385
13386 if (icode == CODE_FOR_nothing)
13387 /* Builtin not supported on this processor. */
13388 return 0;
13389
13390   /* If we got invalid arguments, bail out before generating bad rtl.  */
13391 if (arg0 == error_mark_node || arg1 == error_mark_node)
13392 return const0_rtx;
13393
13394 if (GET_CODE (op0) != CONST_INT
13395 || INTVAL (op0) > 255
13396 || INTVAL (op0) < 0)
13397 {
13398 error ("argument 1 must be an 8-bit field value");
13399 return const0_rtx;
13400 }
13401
13402 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13403 op0 = copy_to_mode_reg (mode0, op0);
13404
13405 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13406 op1 = copy_to_mode_reg (mode1, op1);
13407
13408 pat = GEN_FCN (icode) (op0, op1);
13409 if (!pat)
13410 return const0_rtx;
13411 emit_insn (pat);
13412
13413 return NULL_RTX;
13414 }
13415
13416 static rtx
13417 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13418 {
13419 rtx pat;
13420 tree arg0 = CALL_EXPR_ARG (exp, 0);
13421 rtx op0 = expand_normal (arg0);
13422
13423 if (icode == CODE_FOR_nothing)
13424 /* Builtin not supported on this processor. */
13425 return 0;
13426
13427 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13428 {
13429 error ("__builtin_mtfsb0 and __builtin_mtfsb1 not supported with -msoft-float");
13430 return const0_rtx;
13431 }
13432
13433   /* If we got invalid arguments, bail out before generating bad rtl.  */
13434 if (arg0 == error_mark_node)
13435 return const0_rtx;
13436
13437 /* Only allow bit numbers 0 to 31. */
13438 if (!u5bit_cint_operand (op0, VOIDmode))
13439 {
13440       error ("argument must be a constant between 0 and 31");
13441 return const0_rtx;
13442 }
13443
13444 pat = GEN_FCN (icode) (op0);
13445 if (!pat)
13446 return const0_rtx;
13447 emit_insn (pat);
13448
13449 return NULL_RTX;
13450 }
13451
13452 static rtx
13453 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13454 {
13455 rtx pat;
13456 tree arg0 = CALL_EXPR_ARG (exp, 0);
13457 rtx op0 = expand_normal (arg0);
13458 machine_mode mode0 = insn_data[icode].operand[0].mode;
13459
13460 if (icode == CODE_FOR_nothing)
13461 /* Builtin not supported on this processor. */
13462 return 0;
13463
13464 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13465 {
13466 error ("__builtin_set_fpscr_rn not supported with -msoft-float");
13467 return const0_rtx;
13468 }
13469
13470   /* If we got invalid arguments, bail out before generating bad rtl.  */
13471 if (arg0 == error_mark_node)
13472 return const0_rtx;
13473
13474   /* If the argument is a constant, check the range.  The argument can only
13475      be a 2-bit value.  Unfortunately, we can't check the range of the value
13476      at compile time if the argument is a variable.  The least significant
13477      two bits of the argument, regardless of type, are used to set the
13478      rounding mode.  All other bits are ignored.  */
13479 if (GET_CODE (op0) == CONST_INT && !const_0_to_3_operand(op0, VOIDmode))
13480 {
13481       error ("argument must be a value between 0 and 3");
13482 return const0_rtx;
13483 }
13484
13485 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13486 op0 = copy_to_mode_reg (mode0, op0);
13487
13488 pat = GEN_FCN (icode) (op0);
13489 if (!pat)
13490 return const0_rtx;
13491 emit_insn (pat);
13492
13493 return NULL_RTX;
13494 }

13495 static rtx
13496 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13497 {
13498 rtx pat;
13499 tree arg0 = CALL_EXPR_ARG (exp, 0);
13500 rtx op0 = expand_normal (arg0);
13501 machine_mode mode0 = insn_data[icode].operand[0].mode;
13502
13503 if (TARGET_32BIT)
13504 /* Builtin not supported in 32-bit mode. */
13505 fatal_error (input_location,
13506 		 "__builtin_set_fpscr_drn is not supported in 32-bit mode");
13507
13508 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13509 {
13510 error ("__builtin_set_fpscr_drn not supported with -msoft-float");
13511 return const0_rtx;
13512 }
13513
13514 if (icode == CODE_FOR_nothing)
13515 /* Builtin not supported on this processor. */
13516 return 0;
13517
13518   /* If we got invalid arguments, bail out before generating bad rtl.  */
13519 if (arg0 == error_mark_node)
13520 return const0_rtx;
13521
13522   /* If the argument is a constant, check the range.  The argument can only
13523      be a 3-bit value.  Unfortunately, we can't check the range of the value
13524      at compile time if the argument is a variable.  The least significant
13525      three bits of the argument, regardless of type, are used to set the
13526      decimal rounding mode.  All other bits are ignored.  */
13527 if (GET_CODE (op0) == CONST_INT && !const_0_to_7_operand(op0, VOIDmode))
13528 {
13529       error ("argument must be a value between 0 and 7");
13530 return const0_rtx;
13531 }
13532
13533 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13534 op0 = copy_to_mode_reg (mode0, op0);
13535
13536 pat = GEN_FCN (icode) (op0);
13537 if (! pat)
13538 return const0_rtx;
13539 emit_insn (pat);
13540
13541 return NULL_RTX;
13542 }
13543
13544 static rtx
13545 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13546 {
13547 rtx pat;
13548 tree arg0 = CALL_EXPR_ARG (exp, 0);
13549 rtx op0 = expand_normal (arg0);
13550 machine_mode tmode = insn_data[icode].operand[0].mode;
13551 machine_mode mode0 = insn_data[icode].operand[1].mode;
13552
13553 if (icode == CODE_FOR_nothing)
13554 /* Builtin not supported on this processor. */
13555 return 0;
13556
13557   /* If we got invalid arguments, bail out before generating bad rtl.  */
13558 if (arg0 == error_mark_node)
13559 return const0_rtx;
13560
13561 if (icode == CODE_FOR_altivec_vspltisb
13562 || icode == CODE_FOR_altivec_vspltish
13563 || icode == CODE_FOR_altivec_vspltisw)
13564 {
13565 /* Only allow 5-bit *signed* literals. */
13566 if (GET_CODE (op0) != CONST_INT
13567 || INTVAL (op0) > 15
13568 || INTVAL (op0) < -16)
13569 {
13570 error ("argument 1 must be a 5-bit signed literal");
13571 return CONST0_RTX (tmode);
13572 }
13573 }
13574
13575 if (target == 0
13576 || GET_MODE (target) != tmode
13577 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13578 target = gen_reg_rtx (tmode);
13579
13580 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13581 op0 = copy_to_mode_reg (mode0, op0);
13582
13583 pat = GEN_FCN (icode) (target, op0);
13584 if (! pat)
13585 return 0;
13586 emit_insn (pat);
13587
13588 return target;
13589 }
13590
13591 static rtx
13592 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13593 {
13594 rtx pat, scratch1, scratch2;
13595 tree arg0 = CALL_EXPR_ARG (exp, 0);
13596 rtx op0 = expand_normal (arg0);
13597 machine_mode tmode = insn_data[icode].operand[0].mode;
13598 machine_mode mode0 = insn_data[icode].operand[1].mode;
13599
13600 /* If we have invalid arguments, bail out before generating bad rtl. */
13601 if (arg0 == error_mark_node)
13602 return const0_rtx;
13603
13604 if (target == 0
13605 || GET_MODE (target) != tmode
13606 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13607 target = gen_reg_rtx (tmode);
13608
13609 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13610 op0 = copy_to_mode_reg (mode0, op0);
13611
13612 scratch1 = gen_reg_rtx (mode0);
13613 scratch2 = gen_reg_rtx (mode0);
13614
13615 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13616 if (! pat)
13617 return 0;
13618 emit_insn (pat);
13619
13620 return target;
13621 }
13622
13623 static rtx
13624 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13625 {
13626 rtx pat;
13627 tree arg0 = CALL_EXPR_ARG (exp, 0);
13628 tree arg1 = CALL_EXPR_ARG (exp, 1);
13629 rtx op0 = expand_normal (arg0);
13630 rtx op1 = expand_normal (arg1);
13631 machine_mode tmode = insn_data[icode].operand[0].mode;
13632 machine_mode mode0 = insn_data[icode].operand[1].mode;
13633 machine_mode mode1 = insn_data[icode].operand[2].mode;
13634
13635 if (icode == CODE_FOR_nothing)
13636 /* Builtin not supported on this processor. */
13637 return 0;
13638
13639   /* If we got invalid arguments, bail out before generating bad rtl.  */
13640 if (arg0 == error_mark_node || arg1 == error_mark_node)
13641 return const0_rtx;
13642
13643 if (icode == CODE_FOR_unpackv1ti
13644 || icode == CODE_FOR_unpackkf
13645 || icode == CODE_FOR_unpacktf
13646 || icode == CODE_FOR_unpackif
13647 || icode == CODE_FOR_unpacktd)
13648 {
13649 /* Only allow 1-bit unsigned literals. */
13650 STRIP_NOPS (arg1);
13651 if (TREE_CODE (arg1) != INTEGER_CST
13652 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13653 {
13654 error ("argument 2 must be a 1-bit unsigned literal");
13655 return CONST0_RTX (tmode);
13656 }
13657 }
13658 else if (icode == CODE_FOR_altivec_vspltw)
13659 {
13660 /* Only allow 2-bit unsigned literals. */
13661 STRIP_NOPS (arg1);
13662 if (TREE_CODE (arg1) != INTEGER_CST
13663 || TREE_INT_CST_LOW (arg1) & ~3)
13664 {
13665 error ("argument 2 must be a 2-bit unsigned literal");
13666 return CONST0_RTX (tmode);
13667 }
13668 }
13669 else if (icode == CODE_FOR_altivec_vsplth)
13670 {
13671 /* Only allow 3-bit unsigned literals. */
13672 STRIP_NOPS (arg1);
13673 if (TREE_CODE (arg1) != INTEGER_CST
13674 || TREE_INT_CST_LOW (arg1) & ~7)
13675 {
13676 error ("argument 2 must be a 3-bit unsigned literal");
13677 return CONST0_RTX (tmode);
13678 }
13679 }
13680 else if (icode == CODE_FOR_altivec_vspltb)
13681 {
13682 /* Only allow 4-bit unsigned literals. */
13683 STRIP_NOPS (arg1);
13684 if (TREE_CODE (arg1) != INTEGER_CST
13685 || TREE_INT_CST_LOW (arg1) & ~15)
13686 {
13687 error ("argument 2 must be a 4-bit unsigned literal");
13688 return CONST0_RTX (tmode);
13689 }
13690 }
13691 else if (icode == CODE_FOR_altivec_vcfux
13692 || icode == CODE_FOR_altivec_vcfsx
13693 || icode == CODE_FOR_altivec_vctsxs
13694 || icode == CODE_FOR_altivec_vctuxs)
13695 {
13696 /* Only allow 5-bit unsigned literals. */
13697 STRIP_NOPS (arg1);
13698 if (TREE_CODE (arg1) != INTEGER_CST
13699 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13700 {
13701 error ("argument 2 must be a 5-bit unsigned literal");
13702 return CONST0_RTX (tmode);
13703 }
13704 }
13705 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13706 || icode == CODE_FOR_dfptstsfi_lt_dd
13707 || icode == CODE_FOR_dfptstsfi_gt_dd
13708 || icode == CODE_FOR_dfptstsfi_unordered_dd
13709 || icode == CODE_FOR_dfptstsfi_eq_td
13710 || icode == CODE_FOR_dfptstsfi_lt_td
13711 || icode == CODE_FOR_dfptstsfi_gt_td
13712 || icode == CODE_FOR_dfptstsfi_unordered_td)
13713 {
13714 /* Only allow 6-bit unsigned literals. */
13715 STRIP_NOPS (arg0);
13716 if (TREE_CODE (arg0) != INTEGER_CST
13717 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13718 {
13719 error ("argument 1 must be a 6-bit unsigned literal");
13720 return CONST0_RTX (tmode);
13721 }
13722 }
13723 else if (icode == CODE_FOR_xststdcqp_kf
13724 || icode == CODE_FOR_xststdcqp_tf
13725 || icode == CODE_FOR_xststdcdp
13726 || icode == CODE_FOR_xststdcsp
13727 || icode == CODE_FOR_xvtstdcdp
13728 || icode == CODE_FOR_xvtstdcsp)
13729 {
13730 /* Only allow 7-bit unsigned literals. */
13731 STRIP_NOPS (arg1);
13732 if (TREE_CODE (arg1) != INTEGER_CST
13733 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13734 {
13735 error ("argument 2 must be a 7-bit unsigned literal");
13736 return CONST0_RTX (tmode);
13737 }
13738 }
13739
13740 if (target == 0
13741 || GET_MODE (target) != tmode
13742 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13743 target = gen_reg_rtx (tmode);
13744
13745 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13746 op0 = copy_to_mode_reg (mode0, op0);
13747 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13748 op1 = copy_to_mode_reg (mode1, op1);
13749
13750 pat = GEN_FCN (icode) (target, op0, op1);
13751 if (! pat)
13752 return 0;
13753 emit_insn (pat);
13754
13755 return target;
13756 }
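
/* For reference, a user-level sketch of the 5-bit literal requirement
   enforced above for vcfsx/vcfux/vctsxs/vctuxs.  Illustrative only
   (assumes <altivec.h> and -maltivec; the function name is made up):

	vector float
	fixed_to_float (vector signed int v)
	{
	  return vec_ctf (v, 3);	-- OK: 3 is a 5-bit literal
	}

   Passing a non-constant, or a constant outside 0..31, as the second
   argument reaches the check above and is rejected with "argument 2
   must be a 5-bit unsigned literal".  */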
13757
13758 static rtx
13759 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13760 {
13761 rtx pat, scratch;
13762 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13763 tree arg0 = CALL_EXPR_ARG (exp, 1);
13764 tree arg1 = CALL_EXPR_ARG (exp, 2);
13765 rtx op0 = expand_normal (arg0);
13766 rtx op1 = expand_normal (arg1);
13767 machine_mode tmode = SImode;
13768 machine_mode mode0 = insn_data[icode].operand[1].mode;
13769 machine_mode mode1 = insn_data[icode].operand[2].mode;
13770 int cr6_form_int;
13771
13772 if (TREE_CODE (cr6_form) != INTEGER_CST)
13773 {
13774 error ("argument 1 of %qs must be a constant",
13775 "__builtin_altivec_predicate");
13776 return const0_rtx;
13777 }
13778 else
13779 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13780
13781 gcc_assert (mode0 == mode1);
13782
13783 /* If we have invalid arguments, bail out before generating bad rtl. */
13784 if (arg0 == error_mark_node || arg1 == error_mark_node)
13785 return const0_rtx;
13786
13787 if (target == 0
13788 || GET_MODE (target) != tmode
13789 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13790 target = gen_reg_rtx (tmode);
13791
13792 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13793 op0 = copy_to_mode_reg (mode0, op0);
13794 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13795 op1 = copy_to_mode_reg (mode1, op1);
13796
13797 /* Note that for many of the relevant operations (e.g. cmpne or
13798 cmpeq) with float or double operands, it makes more sense for the
13799 allocated scratch register to have a vector integer mode. But the
13800 choice to copy the mode of operand 0 was made long ago, and there
13801 are no plans to change it. */
13802 scratch = gen_reg_rtx (mode0);
13803
13804 pat = GEN_FCN (icode) (scratch, op0, op1);
13805 if (! pat)
13806 return 0;
13807 emit_insn (pat);
13808
13809 /* The vec_any* and vec_all* predicates use the same opcodes for two
13810 different operations, but the bits in CR6 will be different
13811 depending on what information we want. So we have to play tricks
13812 with CR6 to get the right bits out.
13813
13814 If you think this is disgusting, look at the specs for the
13815 AltiVec predicates. */
13816
13817 switch (cr6_form_int)
13818 {
13819 case 0:
13820 emit_insn (gen_cr6_test_for_zero (target));
13821 break;
13822 case 1:
13823 emit_insn (gen_cr6_test_for_zero_reverse (target));
13824 break;
13825 case 2:
13826 emit_insn (gen_cr6_test_for_lt (target));
13827 break;
13828 case 3:
13829 emit_insn (gen_cr6_test_for_lt_reverse (target));
13830 break;
13831 default:
13832 error ("argument 1 of %qs is out of range",
13833 "__builtin_altivec_predicate");
13834 break;
13835 }
13836
13837 return target;
13838 }
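
/* For reference, a user-level sketch of how one vector compare feeds
   two different predicates through the CR6 tests above.  Illustrative
   only (assumes <altivec.h> and -maltivec):

	int all_eq_p (vector signed int a, vector signed int b)
	{
	  return vec_all_eq (a, b);	-- cr6_form 2, tests the LT bit
	}

	int any_eq_p (vector signed int a, vector signed int b)
	{
	  return vec_any_eq (a, b);	-- cr6_form 1, reversed EQ test
	}

   Both expand to the same vcmpequw. instruction; only the CR6 bit
   inspected afterwards differs.  */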
13839
13840 rtx
13841 swap_endian_selector_for_mode (machine_mode mode)
13842 {
13843 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13844 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13845 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13846 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13847
13848 unsigned int *swaparray, i;
13849 rtx perm[16];
13850
13851 switch (mode)
13852 {
13853 case E_V1TImode:
13854 swaparray = swap1;
13855 break;
13856 case E_V2DFmode:
13857 case E_V2DImode:
13858 swaparray = swap2;
13859 break;
13860 case E_V4SFmode:
13861 case E_V4SImode:
13862 swaparray = swap4;
13863 break;
13864 case E_V8HImode:
13865 swaparray = swap8;
13866 break;
13867 default:
13868 gcc_unreachable ();
13869 }
13870
13871 for (i = 0; i < 16; ++i)
13872 perm[i] = GEN_INT (swaparray[i]);
13873
13874 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13875 gen_rtvec_v (16, perm)));
13876 }
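
/* For reference, a plain-C sketch of what the swap4 selector above
   encodes: treating the 16 bytes as four 32-bit words, permuting by
   those indices reverses the byte order within each word.  This is
   illustrative only:

	unsigned char swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
	unsigned char in[16], out[16];
	for (int i = 0; i < 16; i++)
	  out[i] = in[swap4[i]];

   Feeding the returned selector to a vperm-style permute performs the
   same byte rearrangement in one instruction.  */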
13877
13878 static rtx
13879 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13880 {
13881 rtx pat, addr;
13882 tree arg0 = CALL_EXPR_ARG (exp, 0);
13883 tree arg1 = CALL_EXPR_ARG (exp, 1);
13884 machine_mode tmode = insn_data[icode].operand[0].mode;
13885 machine_mode mode0 = Pmode;
13886 machine_mode mode1 = Pmode;
13887 rtx op0 = expand_normal (arg0);
13888 rtx op1 = expand_normal (arg1);
13889
13890 if (icode == CODE_FOR_nothing)
13891 /* Builtin not supported on this processor. */
13892 return 0;
13893
13894 /* If we got invalid arguments bail out before generating bad rtl. */
13895 if (arg0 == error_mark_node || arg1 == error_mark_node)
13896 return const0_rtx;
13897
13898 if (target == 0
13899 || GET_MODE (target) != tmode
13900 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13901 target = gen_reg_rtx (tmode);
13902
13903 op1 = copy_to_mode_reg (mode1, op1);
13904
13905 /* For LVX, express the RTL accurately by ANDing the address with -16.
13906 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13907 so the raw address is fine. */
13908 if (icode == CODE_FOR_altivec_lvx_v1ti
13909 || icode == CODE_FOR_altivec_lvx_v2df
13910 || icode == CODE_FOR_altivec_lvx_v2di
13911 || icode == CODE_FOR_altivec_lvx_v4sf
13912 || icode == CODE_FOR_altivec_lvx_v4si
13913 || icode == CODE_FOR_altivec_lvx_v8hi
13914 || icode == CODE_FOR_altivec_lvx_v16qi)
13915 {
13916 rtx rawaddr;
13917 if (op0 == const0_rtx)
13918 rawaddr = op1;
13919 else
13920 {
13921 op0 = copy_to_mode_reg (mode0, op0);
13922 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13923 }
13924 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13925 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13926
13927 emit_insn (gen_rtx_SET (target, addr));
13928 }
13929 else
13930 {
13931 if (op0 == const0_rtx)
13932 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13933 else
13934 {
13935 op0 = copy_to_mode_reg (mode0, op0);
13936 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13937 gen_rtx_PLUS (Pmode, op1, op0));
13938 }
13939
13940 pat = GEN_FCN (icode) (target, addr);
13941 if (! pat)
13942 return 0;
13943 emit_insn (pat);
13944 }
13945
13946 return target;
13947 }
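
/* For reference: the AND with -16 above models the hardware behavior
   of lvx, which ignores the low four bits of the effective address.
   A scalar sketch of the address computation (illustrative only):

	uintptr_t ea = (uintptr_t) base + offset;
	ea &= ~(uintptr_t) 15;		-- lvx loads 16 bytes from here

   This is why vec_ld on an unaligned pointer silently loads from the
   enclosing 16-byte-aligned block instead of faulting.  */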
13948
13949 static rtx
13950 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13951 {
13952 rtx pat;
13953 tree arg0 = CALL_EXPR_ARG (exp, 0);
13954 tree arg1 = CALL_EXPR_ARG (exp, 1);
13955 tree arg2 = CALL_EXPR_ARG (exp, 2);
13956 rtx op0 = expand_normal (arg0);
13957 rtx op1 = expand_normal (arg1);
13958 rtx op2 = expand_normal (arg2);
13959 machine_mode mode0 = insn_data[icode].operand[0].mode;
13960 machine_mode mode1 = insn_data[icode].operand[1].mode;
13961 machine_mode mode2 = insn_data[icode].operand[2].mode;
13962
13963 if (icode == CODE_FOR_nothing)
13964 /* Builtin not supported on this processor. */
13965 return NULL_RTX;
13966
13967 /* If we got invalid arguments bail out before generating bad rtl. */
13968 if (arg0 == error_mark_node
13969 || arg1 == error_mark_node
13970 || arg2 == error_mark_node)
13971 return NULL_RTX;
13972
13973 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13974 op0 = copy_to_mode_reg (mode0, op0);
13975 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13976 op1 = copy_to_mode_reg (mode1, op1);
13977 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13978 op2 = copy_to_mode_reg (mode2, op2);
13979
13980 pat = GEN_FCN (icode) (op0, op1, op2);
13981 if (pat)
13982 emit_insn (pat);
13983
13984 return NULL_RTX;
13985 }
13986
13987 static rtx
13988 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13989 {
13990 tree arg0 = CALL_EXPR_ARG (exp, 0);
13991 tree arg1 = CALL_EXPR_ARG (exp, 1);
13992 tree arg2 = CALL_EXPR_ARG (exp, 2);
13993 rtx op0 = expand_normal (arg0);
13994 rtx op1 = expand_normal (arg1);
13995 rtx op2 = expand_normal (arg2);
13996 rtx pat, addr, rawaddr;
13997 machine_mode tmode = insn_data[icode].operand[0].mode;
13998 machine_mode smode = insn_data[icode].operand[1].mode;
13999 machine_mode mode1 = Pmode;
14000 machine_mode mode2 = Pmode;
14001
14002 /* Invalid arguments. Bail out before generating bad rtl. */
14003 if (arg0 == error_mark_node
14004 || arg1 == error_mark_node
14005 || arg2 == error_mark_node)
14006 return const0_rtx;
14007
14008 op2 = copy_to_mode_reg (mode2, op2);
14009
14010 /* For STVX, express the RTL accurately by ANDing the address with -16.
14011 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14012 so the raw address is fine. */
14013 if (icode == CODE_FOR_altivec_stvx_v2df
14014 || icode == CODE_FOR_altivec_stvx_v2di
14015 || icode == CODE_FOR_altivec_stvx_v4sf
14016 || icode == CODE_FOR_altivec_stvx_v4si
14017 || icode == CODE_FOR_altivec_stvx_v8hi
14018 || icode == CODE_FOR_altivec_stvx_v16qi)
14019 {
14020 if (op1 == const0_rtx)
14021 rawaddr = op2;
14022 else
14023 {
14024 op1 = copy_to_mode_reg (mode1, op1);
14025 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14026 }
14027
14028 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14029 addr = gen_rtx_MEM (tmode, addr);
14030
14031 op0 = copy_to_mode_reg (tmode, op0);
14032
14033 emit_insn (gen_rtx_SET (addr, op0));
14034 }
14035 else
14036 {
14037 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14038 op0 = copy_to_mode_reg (smode, op0);
14039
14040 if (op1 == const0_rtx)
14041 addr = gen_rtx_MEM (tmode, op2);
14042 else
14043 {
14044 op1 = copy_to_mode_reg (mode1, op1);
14045 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14046 }
14047
14048 pat = GEN_FCN (icode) (addr, op0);
14049 if (pat)
14050 emit_insn (pat);
14051 }
14052
14053 return NULL_RTX;
14054 }
14055
14056 /* Return the appropriate SPR number associated with the given builtin. */
14057 static inline HOST_WIDE_INT
14058 htm_spr_num (enum rs6000_builtins code)
14059 {
14060 if (code == HTM_BUILTIN_GET_TFHAR
14061 || code == HTM_BUILTIN_SET_TFHAR)
14062 return TFHAR_SPR;
14063 else if (code == HTM_BUILTIN_GET_TFIAR
14064 || code == HTM_BUILTIN_SET_TFIAR)
14065 return TFIAR_SPR;
14066 else if (code == HTM_BUILTIN_GET_TEXASR
14067 || code == HTM_BUILTIN_SET_TEXASR)
14068 return TEXASR_SPR;
14069 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14070 || code == HTM_BUILTIN_SET_TEXASRU);
14071 return TEXASRU_SPR;
14072 }
14073
14074 /* Return the appropriate SPR regno associated with the given builtin. */
14075 static inline HOST_WIDE_INT
14076 htm_spr_regno (enum rs6000_builtins code)
14077 {
14078 if (code == HTM_BUILTIN_GET_TFHAR
14079 || code == HTM_BUILTIN_SET_TFHAR)
14080 return TFHAR_REGNO;
14081 else if (code == HTM_BUILTIN_GET_TFIAR
14082 || code == HTM_BUILTIN_SET_TFIAR)
14083 return TFIAR_REGNO;
14084 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14085 || code == HTM_BUILTIN_SET_TEXASR
14086 || code == HTM_BUILTIN_GET_TEXASRU
14087 || code == HTM_BUILTIN_SET_TEXASRU);
14088 return TEXASR_REGNO;
14089 }
14090
14091 /* Return the correct ICODE value depending on whether we are
14092 setting or reading the HTM SPRs. */
14093 static inline enum insn_code
14094 rs6000_htm_spr_icode (bool nonvoid)
14095 {
14096 if (nonvoid)
14097 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14098 else
14099 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14100 }
14101
14102 /* Expand the HTM builtin in EXP and store the result in TARGET.
14103 Store true in *EXPANDEDP if we found a builtin to expand. */
14104 static rtx
14105 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14106 {
14107 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14108 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14109 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14110 const struct builtin_description *d;
14111 size_t i;
14112
14113 *expandedp = true;
14114
14115 if (!TARGET_POWERPC64
14116 && (fcode == HTM_BUILTIN_TABORTDC
14117 || fcode == HTM_BUILTIN_TABORTDCI))
14118 {
14119 size_t uns_fcode = (size_t) fcode;
14120 const char *name = rs6000_builtin_info[uns_fcode].name;
14121 error ("builtin %qs is only valid in 64-bit mode", name);
14122 return const0_rtx;
14123 }
14124
14125 /* Expand the HTM builtins. */
14126 d = bdesc_htm;
14127 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14128 if (d->code == fcode)
14129 {
14130 rtx op[MAX_HTM_OPERANDS], pat;
14131 int nopnds = 0;
14132 tree arg;
14133 call_expr_arg_iterator iter;
14134 unsigned attr = rs6000_builtin_info[fcode].attr;
14135 enum insn_code icode = d->icode;
14136 const struct insn_operand_data *insn_op;
14137 bool uses_spr = (attr & RS6000_BTC_SPR);
14138 rtx cr = NULL_RTX;
14139
14140 if (uses_spr)
14141 icode = rs6000_htm_spr_icode (nonvoid);
14142 insn_op = &insn_data[icode].operand[0];
14143
14144 if (nonvoid)
14145 {
14146 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14147 if (!target
14148 || GET_MODE (target) != tmode
14149 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14150 target = gen_reg_rtx (tmode);
14151 if (uses_spr)
14152 op[nopnds++] = target;
14153 }
14154
14155 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14156 {
14157 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14158 return const0_rtx;
14159
14160 insn_op = &insn_data[icode].operand[nopnds];
14161
14162 op[nopnds] = expand_normal (arg);
14163
14164 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14165 {
14166 if (!strcmp (insn_op->constraint, "n"))
14167 {
14168 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14169 if (!CONST_INT_P (op[nopnds]))
14170 error ("argument %d must be an unsigned literal", arg_num);
14171 else
14172 error ("argument %d is an unsigned literal that is "
14173 "out of range", arg_num);
14174 return const0_rtx;
14175 }
14176 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14177 }
14178
14179 nopnds++;
14180 }
14181
14182 /* Handle the builtins for extended mnemonics. These accept
14183 no arguments, but map to builtins that take arguments. */
14184 switch (fcode)
14185 {
14186 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14187 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14188 op[nopnds++] = GEN_INT (1);
14189 if (flag_checking)
14190 attr |= RS6000_BTC_UNARY;
14191 break;
14192 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14193 op[nopnds++] = GEN_INT (0);
14194 if (flag_checking)
14195 attr |= RS6000_BTC_UNARY;
14196 break;
14197 default:
14198 break;
14199 }
14200
14201 /* If this builtin accesses SPRs, then pass in the appropriate
14202 SPR number and SPR regno as the last two operands. */
14203 if (uses_spr)
14204 {
14205 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14206 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14207 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14208 }
14209 /* If this builtin accesses a CR, then pass in a scratch
14210 CR as the last operand. */
14211 else if (attr & RS6000_BTC_CR)
14212 {
      cr = gen_reg_rtx (CCmode);
14213 op[nopnds++] = cr;
14214 }
14215
14216 if (flag_checking)
14217 {
14218 int expected_nopnds = 0;
14219 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14220 expected_nopnds = 1;
14221 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14222 expected_nopnds = 2;
14223 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14224 expected_nopnds = 3;
14225 if (!(attr & RS6000_BTC_VOID))
14226 expected_nopnds += 1;
14227 if (uses_spr)
14228 expected_nopnds += 2;
14229
14230 gcc_assert (nopnds == expected_nopnds
14231 && nopnds <= MAX_HTM_OPERANDS);
14232 }
14233
14234 switch (nopnds)
14235 {
14236 case 1:
14237 pat = GEN_FCN (icode) (op[0]);
14238 break;
14239 case 2:
14240 pat = GEN_FCN (icode) (op[0], op[1]);
14241 break;
14242 case 3:
14243 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14244 break;
14245 case 4:
14246 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14247 break;
14248 default:
14249 gcc_unreachable ();
14250 }
14251 if (!pat)
14252 return NULL_RTX;
14253 emit_insn (pat);
14254
14255 if (attr & RS6000_BTC_CR)
14256 {
14257 if (fcode == HTM_BUILTIN_TBEGIN)
14258 {
14259 /* Emit code to set TARGET to true or false depending on
14260 whether the tbegin. instruction succeeded or failed
14261 to start a transaction. We do this by placing the 1's
14262 complement of CR's EQ bit into TARGET. */
14263 rtx scratch = gen_reg_rtx (SImode);
14264 emit_insn (gen_rtx_SET (scratch,
14265 gen_rtx_EQ (SImode, cr,
14266 const0_rtx)));
14267 emit_insn (gen_rtx_SET (target,
14268 gen_rtx_XOR (SImode, scratch,
14269 GEN_INT (1))));
14270 }
14271 else
14272 {
14273 /* Emit code to copy the 4-bit condition register field
14274 CR into the least significant end of register TARGET. */
14275 rtx scratch1 = gen_reg_rtx (SImode);
14276 rtx scratch2 = gen_reg_rtx (SImode);
14277 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14278 emit_insn (gen_movcc (subreg, cr));
14279 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14280 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14281 }
14282 }
14283
14284 if (nonvoid)
14285 return target;
14286 return const0_rtx;
14287 }
14288
14289 *expandedp = false;
14290 return NULL_RTX;
14291 }
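
/* For reference, a minimal user-level sketch of the tbegin. handling
   above (requires -mhtm; abort/retry policy omitted, and the function
   name is made up):

	int try_txn_increment (long *p)
	{
	  if (__builtin_tbegin (0))	-- nonzero: transaction started
	    {
	      ++*p;
	      __builtin_tend (0);
	      return 1;
	    }
	  return 0;			-- failed to start (or aborted)
	}

   The XOR of the CR EQ bit emitted above is what makes
   __builtin_tbegin return nonzero on success.  */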
14292
14293 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14294
14295 static rtx
14296 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14297 rtx target)
14298 {
14299 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14300 if (fcode == RS6000_BUILTIN_CPU_INIT)
14301 return const0_rtx;
14302
14303 if (target == 0 || GET_MODE (target) != SImode)
14304 target = gen_reg_rtx (SImode);
14305
14306 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14307 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14308 /* The target_clones attribute creates an ARRAY_REF instead of a
14309 STRING_CST; convert it back to a STRING_CST. */
14310 if (TREE_CODE (arg) == ARRAY_REF
14311 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14312 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14313 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14314 arg = TREE_OPERAND (arg, 0);
14315
14316 if (TREE_CODE (arg) != STRING_CST)
14317 {
14318 error ("builtin %qs only accepts a string argument",
14319 rs6000_builtin_info[(size_t) fcode].name);
14320 return const0_rtx;
14321 }
14322
14323 if (fcode == RS6000_BUILTIN_CPU_IS)
14324 {
14325 const char *cpu = TREE_STRING_POINTER (arg);
14326 rtx cpuid = NULL_RTX;
14327 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14328 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14329 {
14330 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14331 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14332 break;
14333 }
14334 if (cpuid == NULL_RTX)
14335 {
14336 /* Invalid CPU argument. */
14337 error ("cpu %qs is an invalid argument to builtin %qs",
14338 cpu, rs6000_builtin_info[(size_t) fcode].name);
14339 return const0_rtx;
14340 }
14341
14342 rtx platform = gen_reg_rtx (SImode);
14343 rtx tcbmem = gen_const_mem (SImode,
14344 gen_rtx_PLUS (Pmode,
14345 gen_rtx_REG (Pmode, TLS_REGNUM),
14346 GEN_INT (TCB_PLATFORM_OFFSET)));
14347 emit_move_insn (platform, tcbmem);
14348 emit_insn (gen_eqsi3 (target, platform, cpuid));
14349 }
14350 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14351 {
14352 const char *hwcap = TREE_STRING_POINTER (arg);
14353 rtx mask = NULL_RTX;
14354 int hwcap_offset;
14355 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14356 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14357 {
14358 mask = GEN_INT (cpu_supports_info[i].mask);
14359 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14360 break;
14361 }
14362 if (mask == NULL_RTX)
14363 {
14364 /* Invalid HWCAP argument. */
14365 error ("%s %qs is an invalid argument to builtin %qs",
14366 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14367 return const0_rtx;
14368 }
14369
14370 rtx tcb_hwcap = gen_reg_rtx (SImode);
14371 rtx tcbmem = gen_const_mem (SImode,
14372 gen_rtx_PLUS (Pmode,
14373 gen_rtx_REG (Pmode, TLS_REGNUM),
14374 GEN_INT (hwcap_offset)));
14375 emit_move_insn (tcb_hwcap, tcbmem);
14376 rtx scratch1 = gen_reg_rtx (SImode);
14377 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14378 rtx scratch2 = gen_reg_rtx (SImode);
14379 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14380 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14381 }
14382 else
14383 gcc_unreachable ();
14384
14385 /* Record that we have expanded a CPU builtin, so that we can later
14386 emit a reference to the special symbol exported by LIBC to ensure we
14387 do not link against an old LIBC that doesn't support this feature. */
14388 cpu_builtin_p = true;
14389
14390 #else
14391 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14392 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14393
14394 /* For old LIBCs, always return FALSE. */
14395 emit_move_insn (target, GEN_INT (0));
14396 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14397
14398 return target;
14399 }
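
/* For reference, a user-level sketch of the CPU builtins expanded
   above (requires a glibc that exports the hwcap bits in the TCB, per
   the #ifdef; use_p9_path and use_vsx_path are hypothetical):

	void pick_impl (void)
	{
	  __builtin_cpu_init ();	-- a nop on this target
	  if (__builtin_cpu_is ("power9"))
	    use_p9_path ();
	  else if (__builtin_cpu_supports ("vsx"))
	    use_vsx_path ();
	}

   Both tests compile to a load from the TCB followed by a compare or
   mask, exactly as emitted above.  */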
14400
14401 static rtx
14402 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14403 {
14404 rtx pat;
14405 tree arg0 = CALL_EXPR_ARG (exp, 0);
14406 tree arg1 = CALL_EXPR_ARG (exp, 1);
14407 tree arg2 = CALL_EXPR_ARG (exp, 2);
14408 rtx op0 = expand_normal (arg0);
14409 rtx op1 = expand_normal (arg1);
14410 rtx op2 = expand_normal (arg2);
14411 machine_mode tmode = insn_data[icode].operand[0].mode;
14412 machine_mode mode0 = insn_data[icode].operand[1].mode;
14413 machine_mode mode1 = insn_data[icode].operand[2].mode;
14414 machine_mode mode2 = insn_data[icode].operand[3].mode;
14415
14416 if (icode == CODE_FOR_nothing)
14417 /* Builtin not supported on this processor. */
14418 return 0;
14419
14420 /* If we got invalid arguments bail out before generating bad rtl. */
14421 if (arg0 == error_mark_node
14422 || arg1 == error_mark_node
14423 || arg2 == error_mark_node)
14424 return const0_rtx;
14425
14426 /* Check and prepare argument depending on the instruction code.
14427
14428 Note that a switch statement instead of the sequence of tests
14429 would be incorrect as many of the CODE_FOR values could be
14430 CODE_FOR_nothing and that would yield multiple alternatives
14431 with identical values. We'd never reach here at runtime in
14432 this case. */
14433 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14434 || icode == CODE_FOR_altivec_vsldoi_v2df
14435 || icode == CODE_FOR_altivec_vsldoi_v4si
14436 || icode == CODE_FOR_altivec_vsldoi_v8hi
14437 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14438 {
14439 /* Only allow 4-bit unsigned literals. */
14440 STRIP_NOPS (arg2);
14441 if (TREE_CODE (arg2) != INTEGER_CST
14442 || TREE_INT_CST_LOW (arg2) & ~0xf)
14443 {
14444 error ("argument 3 must be a 4-bit unsigned literal");
14445 return CONST0_RTX (tmode);
14446 }
14447 }
14448 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14449 || icode == CODE_FOR_vsx_xxpermdi_v2di
14450 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14451 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14452 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14453 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14454 || icode == CODE_FOR_vsx_xxpermdi_v4si
14455 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14456 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14457 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14458 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14459 || icode == CODE_FOR_vsx_xxsldwi_v4si
14460 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14461 || icode == CODE_FOR_vsx_xxsldwi_v2di
14462 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14463 {
14464 /* Only allow 2-bit unsigned literals. */
14465 STRIP_NOPS (arg2);
14466 if (TREE_CODE (arg2) != INTEGER_CST
14467 || TREE_INT_CST_LOW (arg2) & ~0x3)
14468 {
14469 error ("argument 3 must be a 2-bit unsigned literal");
14470 return CONST0_RTX (tmode);
14471 }
14472 }
14473 else if (icode == CODE_FOR_vsx_set_v2df
14474 || icode == CODE_FOR_vsx_set_v2di
14475 || icode == CODE_FOR_bcdadd
14476 || icode == CODE_FOR_bcdadd_lt
14477 || icode == CODE_FOR_bcdadd_eq
14478 || icode == CODE_FOR_bcdadd_gt
14479 || icode == CODE_FOR_bcdsub
14480 || icode == CODE_FOR_bcdsub_lt
14481 || icode == CODE_FOR_bcdsub_eq
14482 || icode == CODE_FOR_bcdsub_gt)
14483 {
14484 /* Only allow 1-bit unsigned literals. */
14485 STRIP_NOPS (arg2);
14486 if (TREE_CODE (arg2) != INTEGER_CST
14487 || TREE_INT_CST_LOW (arg2) & ~0x1)
14488 {
14489 error ("argument 3 must be a 1-bit unsigned literal");
14490 return CONST0_RTX (tmode);
14491 }
14492 }
14493 else if (icode == CODE_FOR_dfp_ddedpd_dd
14494 || icode == CODE_FOR_dfp_ddedpd_td)
14495 {
14496 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14497 STRIP_NOPS (arg0);
14498 if (TREE_CODE (arg0) != INTEGER_CST
14499 || TREE_INT_CST_LOW (arg0) & ~0x3)
14500 {
14501 error ("argument 1 must be 0 or 2");
14502 return CONST0_RTX (tmode);
14503 }
14504 }
14505 else if (icode == CODE_FOR_dfp_denbcd_dd
14506 || icode == CODE_FOR_dfp_denbcd_td)
14507 {
14508 /* Only allow 1-bit unsigned literals. */
14509 STRIP_NOPS (arg0);
14510 if (TREE_CODE (arg0) != INTEGER_CST
14511 || TREE_INT_CST_LOW (arg0) & ~0x1)
14512 {
14513 error ("argument 1 must be a 1-bit unsigned literal");
14514 return CONST0_RTX (tmode);
14515 }
14516 }
14517 else if (icode == CODE_FOR_dfp_dscli_dd
14518 || icode == CODE_FOR_dfp_dscli_td
14519 || icode == CODE_FOR_dfp_dscri_dd
14520 || icode == CODE_FOR_dfp_dscri_td)
14521 {
14522 /* Only allow 6-bit unsigned literals. */
14523 STRIP_NOPS (arg1);
14524 if (TREE_CODE (arg1) != INTEGER_CST
14525 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14526 {
14527 error ("argument 2 must be a 6-bit unsigned literal");
14528 return CONST0_RTX (tmode);
14529 }
14530 }
14531 else if (icode == CODE_FOR_crypto_vshasigmaw
14532 || icode == CODE_FOR_crypto_vshasigmad)
14533 {
14534 /* Check whether the 2nd and 3rd arguments are integer constants and in
14535 range and prepare arguments. */
14536 STRIP_NOPS (arg1);
14537 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14538 {
14539 error ("argument 2 must be 0 or 1");
14540 return CONST0_RTX (tmode);
14541 }
14542
14543 STRIP_NOPS (arg2);
14544 if (TREE_CODE (arg2) != INTEGER_CST
14545 || wi::geu_p (wi::to_wide (arg2), 16))
14546 {
14547 error ("argument 3 must be in the range 0..15");
14548 return CONST0_RTX (tmode);
14549 }
14550 }
14551
14552 if (target == 0
14553 || GET_MODE (target) != tmode
14554 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14555 target = gen_reg_rtx (tmode);
14556
14557 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14558 op0 = copy_to_mode_reg (mode0, op0);
14559 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14560 op1 = copy_to_mode_reg (mode1, op1);
14561 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14562 op2 = copy_to_mode_reg (mode2, op2);
14563
14564 pat = GEN_FCN (icode) (target, op0, op1, op2);
14565 if (! pat)
14566 return 0;
14567 emit_insn (pat);
14568
14569 return target;
14570 }
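
/* For reference, a user-level sketch of the 4-bit literal requirement
   enforced above for vsldoi.  Illustrative only (assumes <altivec.h>
   and -maltivec):

	vector signed int
	shift_by_four_bytes (vector signed int a, vector signed int b)
	{
	  return vec_sld (a, b, 4);	-- OK: 4 is a 4-bit literal
	}

   A non-constant or out-of-range third argument is rejected with
   "argument 3 must be a 4-bit unsigned literal".  */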
14571
14572
14573 /* Expand the dst builtins. */
14574 static rtx
14575 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14576 bool *expandedp)
14577 {
14578 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14579 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14580 tree arg0, arg1, arg2;
14581 machine_mode mode0, mode1;
14582 rtx pat, op0, op1, op2;
14583 const struct builtin_description *d;
14584 size_t i;
14585
14586 *expandedp = false;
14587
14588 /* Handle DST variants. */
14589 d = bdesc_dst;
14590 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14591 if (d->code == fcode)
14592 {
14593 arg0 = CALL_EXPR_ARG (exp, 0);
14594 arg1 = CALL_EXPR_ARG (exp, 1);
14595 arg2 = CALL_EXPR_ARG (exp, 2);
14596 op0 = expand_normal (arg0);
14597 op1 = expand_normal (arg1);
14598 op2 = expand_normal (arg2);
14599 mode0 = insn_data[d->icode].operand[0].mode;
14600 mode1 = insn_data[d->icode].operand[1].mode;
14601
14602 /* Invalid arguments, bail out before generating bad rtl. */
14603 if (arg0 == error_mark_node
14604 || arg1 == error_mark_node
14605 || arg2 == error_mark_node)
14606 return const0_rtx;
14607
14608 *expandedp = true;
14609 STRIP_NOPS (arg2);
14610 if (TREE_CODE (arg2) != INTEGER_CST
14611 || TREE_INT_CST_LOW (arg2) & ~0x3)
14612 {
14613 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14614 return const0_rtx;
14615 }
14616
14617 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14618 op0 = copy_to_mode_reg (Pmode, op0);
14619 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14620 op1 = copy_to_mode_reg (mode1, op1);
14621
14622 pat = GEN_FCN (d->icode) (op0, op1, op2);
14623 if (pat != 0)
14624 emit_insn (pat);
14625
14626 return NULL_RTX;
14627 }
14628
14629 return NULL_RTX;
14630 }
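
/* For reference, a user-level sketch of a dst builtin and its 2-bit
   literal tag.  Illustrative only (assumes <altivec.h> and -maltivec;
   ctl is whatever block size/count/stride control word the caller
   chooses):

	void start_stream (const int *p, int ctl)
	{
	  vec_dst (p, ctl, 0);		-- tag 0 must be a 2-bit literal
	}

   The third argument selects one of the four data stream channels and
   must be a literal, as checked above.  */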
14631
14632 /* Expand vec_init builtin. */
14633 static rtx
14634 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14635 {
14636 machine_mode tmode = TYPE_MODE (type);
14637 machine_mode inner_mode = GET_MODE_INNER (tmode);
14638 int i, n_elt = GET_MODE_NUNITS (tmode);
14639
14640 gcc_assert (VECTOR_MODE_P (tmode));
14641 gcc_assert (n_elt == call_expr_nargs (exp));
14642
14643 if (!target || !register_operand (target, tmode))
14644 target = gen_reg_rtx (tmode);
14645
14646 /* If we have a vector comprised of a single element, such as V1TImode, do
14647 the initialization directly. */
14648 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14649 {
14650 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14651 emit_move_insn (target, gen_lowpart (tmode, x));
14652 }
14653 else
14654 {
14655 rtvec v = rtvec_alloc (n_elt);
14656
14657 for (i = 0; i < n_elt; ++i)
14658 {
14659 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14660 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14661 }
14662
14663 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14664 }
14665
14666 return target;
14667 }
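
/* For reference, a loosely equivalent user-level construct: a vector
   built from non-constant scalars ends up going through an expansion
   like the one above (illustrative only):

	vector signed int
	make_vec (int a, int b, int c, int d)
	{
	  return (vector signed int) {a, b, c, d};
	}

   With all-constant elements the middle end produces a VECTOR_CST
   instead, so this path mostly sees variable inputs.  */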
14668
14669 /* Return the integer constant in ARG. Constrain it to be in the range
14670 of the subparts of VEC_TYPE; issue an error if not. */
14671
14672 static int
14673 get_element_number (tree vec_type, tree arg)
14674 {
14675 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14676
14677 if (!tree_fits_uhwi_p (arg)
14678 || (elt = tree_to_uhwi (arg), elt > max))
14679 {
14680 error ("selector must be an integer constant in the range 0..%wi", max);
14681 return 0;
14682 }
14683
14684 return elt;
14685 }
14686
14687 /* Expand vec_set builtin. */
14688 static rtx
14689 altivec_expand_vec_set_builtin (tree exp)
14690 {
14691 machine_mode tmode, mode1;
14692 tree arg0, arg1, arg2;
14693 int elt;
14694 rtx op0, op1;
14695
14696 arg0 = CALL_EXPR_ARG (exp, 0);
14697 arg1 = CALL_EXPR_ARG (exp, 1);
14698 arg2 = CALL_EXPR_ARG (exp, 2);
14699
14700 tmode = TYPE_MODE (TREE_TYPE (arg0));
14701 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14702 gcc_assert (VECTOR_MODE_P (tmode));
14703
14704 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14705 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14706 elt = get_element_number (TREE_TYPE (arg0), arg2);
14707
14708 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14709 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14710
14711 op0 = force_reg (tmode, op0);
14712 op1 = force_reg (mode1, op1);
14713
14714 rs6000_expand_vector_set (op0, op1, elt);
14715
14716 return op0;
14717 }
14718
14719 /* Expand vec_ext builtin. */
14720 static rtx
14721 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14722 {
14723 machine_mode tmode, mode0;
14724 tree arg0, arg1;
14725 rtx op0;
14726 rtx op1;
14727
14728 arg0 = CALL_EXPR_ARG (exp, 0);
14729 arg1 = CALL_EXPR_ARG (exp, 1);
14730
14731 op0 = expand_normal (arg0);
14732 op1 = expand_normal (arg1);
14733
14734 /* Call get_element_number to validate arg1 if it is a constant. */
14735 if (TREE_CODE (arg1) == INTEGER_CST)
14736 (void) get_element_number (TREE_TYPE (arg0), arg1);
14737
14738 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14739 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14740 gcc_assert (VECTOR_MODE_P (mode0));
14741
14742 op0 = force_reg (mode0, op0);
14743
14744 if (optimize || !target || !register_operand (target, tmode))
14745 target = gen_reg_rtx (tmode);
14746
14747 rs6000_expand_vector_extract (target, op0, op1);
14748
14749 return target;
14750 }
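
/* For reference, user-level sketches of the vec_set and vec_ext paths
   above.  Illustrative only (assumes <altivec.h> and -maltivec):

	vector signed int
	set_lane2 (vector signed int v, int x)
	{
	  return vec_insert (x, v, 2);	-- vec_set path, element 2
	}

	int
	get_lane2 (vector signed int v)
	{
	  return vec_extract (v, 2);	-- vec_ext path, element 2
	}

   The selector is validated by get_element_number above, which
   diagnoses anything outside 0..n_subparts-1.  */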
14751
14752 /* Expand the builtin in EXP and store the result in TARGET. Store
14753 true in *EXPANDEDP if we found a builtin to expand. */
14754 static rtx
14755 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14756 {
14757 const struct builtin_description *d;
14758 size_t i;
14759 enum insn_code icode;
14760 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14761 tree arg0, arg1, arg2;
14762 rtx op0, pat;
14763 machine_mode tmode, mode0;
14764 enum rs6000_builtins fcode
14765 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14766
14767 if (rs6000_overloaded_builtin_p (fcode))
14768 {
14769 *expandedp = true;
14770 error ("unresolved overload for Altivec builtin %qF", fndecl);
14771
14772 /* Since the call is invalid, just expand it as a normal call. */
14773 return expand_call (exp, target, false);
14774 }
14775
14776 target = altivec_expand_dst_builtin (exp, target, expandedp);
14777 if (*expandedp)
14778 return target;
14779
14780 *expandedp = true;
14781
14782 switch (fcode)
14783 {
14784 case ALTIVEC_BUILTIN_STVX_V2DF:
14785 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14786 case ALTIVEC_BUILTIN_STVX_V2DI:
14787 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14788 case ALTIVEC_BUILTIN_STVX_V4SF:
14789 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14790 case ALTIVEC_BUILTIN_STVX:
14791 case ALTIVEC_BUILTIN_STVX_V4SI:
14792 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14793 case ALTIVEC_BUILTIN_STVX_V8HI:
14794 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14795 case ALTIVEC_BUILTIN_STVX_V16QI:
14796 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14797 case ALTIVEC_BUILTIN_STVEBX:
14798 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14799 case ALTIVEC_BUILTIN_STVEHX:
14800 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14801 case ALTIVEC_BUILTIN_STVEWX:
14802 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14803 case ALTIVEC_BUILTIN_STVXL_V2DF:
14804 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14805 case ALTIVEC_BUILTIN_STVXL_V2DI:
14806 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14807 case ALTIVEC_BUILTIN_STVXL_V4SF:
14808 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14809 case ALTIVEC_BUILTIN_STVXL:
14810 case ALTIVEC_BUILTIN_STVXL_V4SI:
14811 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14812 case ALTIVEC_BUILTIN_STVXL_V8HI:
14813 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14814 case ALTIVEC_BUILTIN_STVXL_V16QI:
14815 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14816
14817 case ALTIVEC_BUILTIN_STVLX:
14818 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14819 case ALTIVEC_BUILTIN_STVLXL:
14820 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14821 case ALTIVEC_BUILTIN_STVRX:
14822 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14823 case ALTIVEC_BUILTIN_STVRXL:
14824 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14825
14826 case P9V_BUILTIN_STXVL:
14827 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14828
14829 case P9V_BUILTIN_XST_LEN_R:
14830 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14831
14832 case VSX_BUILTIN_STXVD2X_V1TI:
14833 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14834 case VSX_BUILTIN_STXVD2X_V2DF:
14835 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14836 case VSX_BUILTIN_STXVD2X_V2DI:
14837 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14838 case VSX_BUILTIN_STXVW4X_V4SF:
14839 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14840 case VSX_BUILTIN_STXVW4X_V4SI:
14841 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14842 case VSX_BUILTIN_STXVW4X_V8HI:
14843 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14844 case VSX_BUILTIN_STXVW4X_V16QI:
14845 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14846
14847 /* For the following on big endian, it's ok to use any appropriate
14848 unaligned-supporting store, so use a generic expander. For
14849 little-endian, the exact element-reversing instruction must
14850 be used. */
14851 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14852 {
14853 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14854 : CODE_FOR_vsx_st_elemrev_v1ti);
14855 return altivec_expand_stv_builtin (code, exp);
14856 }
14857 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14858 {
14859 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14860 : CODE_FOR_vsx_st_elemrev_v2df);
14861 return altivec_expand_stv_builtin (code, exp);
14862 }
14863 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14864 {
14865 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14866 : CODE_FOR_vsx_st_elemrev_v2di);
14867 return altivec_expand_stv_builtin (code, exp);
14868 }
14869 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14870 {
14871 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14872 : CODE_FOR_vsx_st_elemrev_v4sf);
14873 return altivec_expand_stv_builtin (code, exp);
14874 }
14875 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14876 {
14877 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14878 : CODE_FOR_vsx_st_elemrev_v4si);
14879 return altivec_expand_stv_builtin (code, exp);
14880 }
14881 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14882 {
14883 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14884 : CODE_FOR_vsx_st_elemrev_v8hi);
14885 return altivec_expand_stv_builtin (code, exp);
14886 }
14887 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14888 {
14889 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14890 : CODE_FOR_vsx_st_elemrev_v16qi);
14891 return altivec_expand_stv_builtin (code, exp);
14892 }
14893
14894 case ALTIVEC_BUILTIN_MFVSCR:
14895 icode = CODE_FOR_altivec_mfvscr;
14896 tmode = insn_data[icode].operand[0].mode;
14897
14898 if (target == 0
14899 || GET_MODE (target) != tmode
14900 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14901 target = gen_reg_rtx (tmode);
14902
14903 pat = GEN_FCN (icode) (target);
14904 if (! pat)
14905 return 0;
14906 emit_insn (pat);
14907 return target;
14908
14909 case ALTIVEC_BUILTIN_MTVSCR:
14910 icode = CODE_FOR_altivec_mtvscr;
14911 arg0 = CALL_EXPR_ARG (exp, 0);
14912 op0 = expand_normal (arg0);
14913 mode0 = insn_data[icode].operand[0].mode;
14914
14915 /* If we got invalid arguments bail out before generating bad rtl. */
14916 if (arg0 == error_mark_node)
14917 return const0_rtx;
14918
14919 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14920 op0 = copy_to_mode_reg (mode0, op0);
14921
14922 pat = GEN_FCN (icode) (op0);
14923 if (pat)
14924 emit_insn (pat);
14925 return NULL_RTX;
14926
14927 case ALTIVEC_BUILTIN_DSSALL:
14928 emit_insn (gen_altivec_dssall ());
14929 return NULL_RTX;
14930
14931 case ALTIVEC_BUILTIN_DSS:
14932 icode = CODE_FOR_altivec_dss;
14933 arg0 = CALL_EXPR_ARG (exp, 0);
14934 STRIP_NOPS (arg0);
14935 op0 = expand_normal (arg0);
14936 mode0 = insn_data[icode].operand[0].mode;
14937
14938 /* If we got invalid arguments bail out before generating bad rtl. */
14939 if (arg0 == error_mark_node)
14940 return const0_rtx;
14941
14942 if (TREE_CODE (arg0) != INTEGER_CST
14943 || TREE_INT_CST_LOW (arg0) & ~0x3)
14944 {
14945 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14946 return const0_rtx;
14947 }
14948
14949 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14950 op0 = copy_to_mode_reg (mode0, op0);
14951
14952 emit_insn (gen_altivec_dss (op0));
14953 return NULL_RTX;
14954
14955 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14956 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14957 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14958 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14959 case VSX_BUILTIN_VEC_INIT_V2DF:
14960 case VSX_BUILTIN_VEC_INIT_V2DI:
14961 case VSX_BUILTIN_VEC_INIT_V1TI:
14962 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14963
14964 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14965 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14966 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14967 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14968 case VSX_BUILTIN_VEC_SET_V2DF:
14969 case VSX_BUILTIN_VEC_SET_V2DI:
14970 case VSX_BUILTIN_VEC_SET_V1TI:
14971 return altivec_expand_vec_set_builtin (exp);
14972
14973 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14974 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14975 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14976 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14977 case VSX_BUILTIN_VEC_EXT_V2DF:
14978 case VSX_BUILTIN_VEC_EXT_V2DI:
14979 case VSX_BUILTIN_VEC_EXT_V1TI:
14980 return altivec_expand_vec_ext_builtin (exp, target);
14981
14982 case P9V_BUILTIN_VEC_EXTRACT4B:
14983 arg1 = CALL_EXPR_ARG (exp, 1);
14984 STRIP_NOPS (arg1);
14985
14986 /* Generate a normal call if it is invalid. */
14987 if (arg1 == error_mark_node)
14988 return expand_call (exp, target, false);
14989
14990 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14991 {
14992 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14993 return expand_call (exp, target, false);
14994 }
14995 break;
14996
14997 case P9V_BUILTIN_VEC_INSERT4B:
14998 arg2 = CALL_EXPR_ARG (exp, 2);
14999 STRIP_NOPS (arg2);
15000
15001 /* Generate a normal call if it is invalid. */
15002 if (arg2 == error_mark_node)
15003 return expand_call (exp, target, false);
15004
15005 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15006 {
15007 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15008 return expand_call (exp, target, false);
15009 }
15010 break;
15011
15012 default:
15013 break;
15015 }
15016
15017 /* Expand abs* operations. */
15018 d = bdesc_abs;
15019 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15020 if (d->code == fcode)
15021 return altivec_expand_abs_builtin (d->icode, exp, target);
15022
15023 /* Expand the AltiVec predicates. */
15024 d = bdesc_altivec_preds;
15025 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15026 if (d->code == fcode)
15027 return altivec_expand_predicate_builtin (d->icode, exp, target);
15028
15029 /* The LV* builtins are initialized differently, so we expand them explicitly here. */
15030 switch (fcode)
15031 {
15032 case ALTIVEC_BUILTIN_LVSL:
15033 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15034 exp, target, false);
15035 case ALTIVEC_BUILTIN_LVSR:
15036 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15037 exp, target, false);
15038 case ALTIVEC_BUILTIN_LVEBX:
15039 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15040 exp, target, false);
15041 case ALTIVEC_BUILTIN_LVEHX:
15042 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15043 exp, target, false);
15044 case ALTIVEC_BUILTIN_LVEWX:
15045 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15046 exp, target, false);
15047 case ALTIVEC_BUILTIN_LVXL_V2DF:
15048 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15049 exp, target, false);
15050 case ALTIVEC_BUILTIN_LVXL_V2DI:
15051 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15052 exp, target, false);
15053 case ALTIVEC_BUILTIN_LVXL_V4SF:
15054 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15055 exp, target, false);
15056 case ALTIVEC_BUILTIN_LVXL:
15057 case ALTIVEC_BUILTIN_LVXL_V4SI:
15058 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15059 exp, target, false);
15060 case ALTIVEC_BUILTIN_LVXL_V8HI:
15061 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15062 exp, target, false);
15063 case ALTIVEC_BUILTIN_LVXL_V16QI:
15064 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15065 exp, target, false);
15066 case ALTIVEC_BUILTIN_LVX_V1TI:
15067 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15068 exp, target, false);
15069 case ALTIVEC_BUILTIN_LVX_V2DF:
15070 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15071 exp, target, false);
15072 case ALTIVEC_BUILTIN_LVX_V2DI:
15073 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15074 exp, target, false);
15075 case ALTIVEC_BUILTIN_LVX_V4SF:
15076 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15077 exp, target, false);
15078 case ALTIVEC_BUILTIN_LVX:
15079 case ALTIVEC_BUILTIN_LVX_V4SI:
15080 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15081 exp, target, false);
15082 case ALTIVEC_BUILTIN_LVX_V8HI:
15083 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15084 exp, target, false);
15085 case ALTIVEC_BUILTIN_LVX_V16QI:
15086 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15087 exp, target, false);
15088 case ALTIVEC_BUILTIN_LVLX:
15089 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15090 exp, target, true);
15091 case ALTIVEC_BUILTIN_LVLXL:
15092 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15093 exp, target, true);
15094 case ALTIVEC_BUILTIN_LVRX:
15095 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15096 exp, target, true);
15097 case ALTIVEC_BUILTIN_LVRXL:
15098 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15099 exp, target, true);
15100 case VSX_BUILTIN_LXVD2X_V1TI:
15101 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15102 exp, target, false);
15103 case VSX_BUILTIN_LXVD2X_V2DF:
15104 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15105 exp, target, false);
15106 case VSX_BUILTIN_LXVD2X_V2DI:
15107 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15108 exp, target, false);
15109 case VSX_BUILTIN_LXVW4X_V4SF:
15110 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15111 exp, target, false);
15112 case VSX_BUILTIN_LXVW4X_V4SI:
15113 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15114 exp, target, false);
15115 case VSX_BUILTIN_LXVW4X_V8HI:
15116 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15117 exp, target, false);
15118 case VSX_BUILTIN_LXVW4X_V16QI:
15119 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15120 exp, target, false);
15121 /* For the following on big endian, it's ok to use any appropriate
15122 unaligned-supporting load, so use a generic expander. For
15123 little-endian, the exact element-reversing instruction must
15124 be used. */
15125 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15126 {
15127 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15128 : CODE_FOR_vsx_ld_elemrev_v2df);
15129 return altivec_expand_lv_builtin (code, exp, target, false);
15130 }
15131 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15132 {
15133 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15134 : CODE_FOR_vsx_ld_elemrev_v1ti);
15135 return altivec_expand_lv_builtin (code, exp, target, false);
15136 }
15137 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15138 {
15139 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15140 : CODE_FOR_vsx_ld_elemrev_v2di);
15141 return altivec_expand_lv_builtin (code, exp, target, false);
15142 }
15143 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15144 {
15145 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15146 : CODE_FOR_vsx_ld_elemrev_v4sf);
15147 return altivec_expand_lv_builtin (code, exp, target, false);
15148 }
15149 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15150 {
15151 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15152 : CODE_FOR_vsx_ld_elemrev_v4si);
15153 return altivec_expand_lv_builtin (code, exp, target, false);
15154 }
15155 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15156 {
15157 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15158 : CODE_FOR_vsx_ld_elemrev_v8hi);
15159 return altivec_expand_lv_builtin (code, exp, target, false);
15160 }
15161 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15162 {
15163 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15164 : CODE_FOR_vsx_ld_elemrev_v16qi);
15165 return altivec_expand_lv_builtin (code, exp, target, false);
15166 }
15167 break;
15168 default:
15169 break;
15171 }
15172
15173 *expandedp = false;
15174 return NULL_RTX;
15175 }
15176
15177 /* Check whether a builtin function is supported in this target
15178 configuration. */
15179 bool
15180 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15181 {
15182 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
15183 return (fnmask & rs6000_builtin_mask) == fnmask;
15187 }
15188
15189 /* Raise an error message for a builtin function that is called without the
15190 appropriate target options being set. */
15191
15192 static void
15193 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15194 {
15195 size_t uns_fncode = (size_t) fncode;
15196 const char *name = rs6000_builtin_info[uns_fncode].name;
15197 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15198
15199 gcc_assert (name != NULL);
15200 if ((fnmask & RS6000_BTM_CELL) != 0)
15201 error ("builtin function %qs is only valid for the cell processor", name);
15202 else if ((fnmask & RS6000_BTM_VSX) != 0)
15203 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15204 else if ((fnmask & RS6000_BTM_HTM) != 0)
15205 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15206 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15207 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15208 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15209 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15210 error ("builtin function %qs requires the %qs and %qs options",
15211 name, "-mhard-dfp", "-mpower8-vector");
15212 else if ((fnmask & RS6000_BTM_DFP) != 0)
15213 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15214 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15215 error ("builtin function %qs requires the %qs option", name,
15216 "-mpower8-vector");
15217 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15218 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15219 error ("builtin function %qs requires the %qs and %qs options",
15220 name, "-mcpu=power9", "-m64");
15221 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15222 error ("builtin function %qs requires the %qs option", name,
15223 "-mcpu=power9");
15224 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15225 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15226 error ("builtin function %qs requires the %qs and %qs options",
15227 name, "-mcpu=power9", "-m64");
15228 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15229 error ("builtin function %qs requires the %qs option", name,
15230 "-mcpu=power9");
15231 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15232 {
15233 if (!TARGET_HARD_FLOAT)
15234 error ("builtin function %qs requires the %qs option", name,
15235 "-mhard-float");
15236 else
15237 error ("builtin function %qs requires the %qs option", name,
15238 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15239 }
15240 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15241 error ("builtin function %qs requires the %qs option", name,
15242 "-mhard-float");
15243 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15244 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15245 name);
15246 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15247 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
15248 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15249 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15250 error ("builtin function %qs requires the %qs (or newer), and "
15251 "%qs or %qs options",
15252 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15253 else
15254 error ("builtin function %qs is not supported with the current options",
15255 name);
15256 }
15257
15258 /* Target hook for early folding of built-ins, shamelessly stolen
15259 from ia64.c. */
15260
15261 static tree
15262 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15263 int n_args ATTRIBUTE_UNUSED,
15264 tree *args ATTRIBUTE_UNUSED,
15265 bool ignore ATTRIBUTE_UNUSED)
15266 {
15267 #ifdef SUBTARGET_FOLD_BUILTIN
15268 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15269 #else
15270 return NULL_TREE;
15271 #endif
15272 }
15273
15274 /* Helper function to sort out which built-ins may be valid without having
15275 a LHS. */
15276 static bool
15277 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15278 {
15279 switch (fn_code)
15280 {
15281 case ALTIVEC_BUILTIN_STVX_V16QI:
15282 case ALTIVEC_BUILTIN_STVX_V8HI:
15283 case ALTIVEC_BUILTIN_STVX_V4SI:
15284 case ALTIVEC_BUILTIN_STVX_V4SF:
15285 case ALTIVEC_BUILTIN_STVX_V2DI:
15286 case ALTIVEC_BUILTIN_STVX_V2DF:
15287 case VSX_BUILTIN_STXVW4X_V16QI:
15288 case VSX_BUILTIN_STXVW4X_V8HI:
15289 case VSX_BUILTIN_STXVW4X_V4SF:
15290 case VSX_BUILTIN_STXVW4X_V4SI:
15291 case VSX_BUILTIN_STXVD2X_V2DF:
15292 case VSX_BUILTIN_STXVD2X_V2DI:
15293 return true;
15294 default:
15295 return false;
15296 }
15297 }
15298
15299 /* Helper function to handle the gimple folding of a vector compare
15300 operation. This sets up true/false vectors, and uses the
15301 VEC_COND_EXPR operation.
15302 CODE indicates which comparison is to be made (EQ, GT, ...).
15303 TYPE indicates the type of the result. */
15304 static tree
15305 fold_build_vec_cmp (tree_code code, tree type,
15306 tree arg0, tree arg1)
15307 {
15308 tree cmp_type = build_same_sized_truth_vector_type (type);
15309 tree zero_vec = build_zero_cst (type);
15310 tree minus_one_vec = build_minus_one_cst (type);
15311 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15312 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15313 }
15314
15315 /* Helper function to handle the in-between steps for the
15316 vector compare built-ins. */
15317 static void
15318 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15319 {
15320 tree arg0 = gimple_call_arg (stmt, 0);
15321 tree arg1 = gimple_call_arg (stmt, 1);
15322 tree lhs = gimple_call_lhs (stmt);
15323 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15324 gimple *g = gimple_build_assign (lhs, cmp);
15325 gimple_set_location (g, gimple_location (stmt));
15326 gsi_replace (gsi, g, true);
15327 }
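
/* For reference: the folding above turns a vector compare builtin into
   the equivalent of this scalar loop, with all-ones as "true"
   (illustrative only):

	for (int i = 0; i < n_elts; i++)
	  res[i] = (a[i] == b[i]) ? -1 : 0;

   i.e. a VEC_COND_EXPR selecting between the minus-one and zero
   vectors based on the element-wise comparison.  */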
15328
15329 /* Helper function to map V2DF and V4SF types to their
15330 integral equivalents (V2DI and V4SI). */
15331 tree
map_to_integral_tree_type (tree input_tree_type)
15332 {
15333 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15334 return input_tree_type;
15335 else
15336 {
15337 if (types_compatible_p (TREE_TYPE (input_tree_type),
15338 TREE_TYPE (V2DF_type_node)))
15339 return V2DI_type_node;
15340 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15341 TREE_TYPE (V4SF_type_node)))
15342 return V4SI_type_node;
15343 else
15344 gcc_unreachable ();
15345 }
15346 }
15347
15348 /* Helper function to handle the vector merge[hl] built-ins. The
15349 implementation difference between the h and l versions lies in the
15350 values used when building the permute vector for the high-word versus
15351 low-word merge, keyed off the use_high parameter. */
15352 static void
15353 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15354 {
15355 tree arg0 = gimple_call_arg (stmt, 0);
15356 tree arg1 = gimple_call_arg (stmt, 1);
15357 tree lhs = gimple_call_lhs (stmt);
15358 tree lhs_type = TREE_TYPE (lhs);
15359 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15360 int midpoint = n_elts / 2;
15361 int offset = 0;
15362
15363 if (use_high == 1)
15364 offset = midpoint;
15365
15366 /* The permute_type will match the lhs for integral types. For double and
15367 float types, the permute type needs to map to the V2 or V4 type that
15368 matches size. */
15369 tree permute_type;
15370 permute_type = map_to_integral_tree_type (lhs_type);
15371 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15372
15373 for (int i = 0; i < midpoint; i++)
15374 {
15375 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15376 offset + i));
15377 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15378 offset + n_elts + i));
15379 }
15380
15381 tree permute = elts.build ();
15382
15383 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15384 gimple_set_location (g, gimple_location (stmt));
15385 gsi_replace (gsi, g, true);
15386 }
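/* Worked example (a sketch, not from the sources): for a V4SI merge the
loop above builds the permute selector { 0, 4, 1, 5 } when use_high is 0
(vec_mergeh) and { 2, 6, 3, 7 } when use_high is 1 (vec_mergel),
interleaving one half of each input vector. */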
15387
15388 /* Helper function to handle the vector merge[eo] built-ins. */
15389 static void
15390 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15391 {
15392 tree arg0 = gimple_call_arg (stmt, 0);
15393 tree arg1 = gimple_call_arg (stmt, 1);
15394 tree lhs = gimple_call_lhs (stmt);
15395 tree lhs_type = TREE_TYPE (lhs);
15396 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15397
15398 /* The permute_type will match the lhs for integral types. For double and
15399 float types, the permute type needs to map to the V2 or V4 type that
15400 matches size. */
15401 tree permute_type;
15402 permute_type = map_to_integral_tree_type (lhs_type);
15403
15404 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15405
15406 /* Build the permute vector. */
15407 for (int i = 0; i < n_elts / 2; i++)
15408 {
15409 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15410 2*i + use_odd));
15411 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15412 2*i + use_odd + n_elts));
15413 }
15414
15415 tree permute = elts.build ();
15416
15417 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15418 gimple_set_location (g, gimple_location (stmt));
15419 gsi_replace (gsi, g, true);
15420 }
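/* Worked example (a sketch): for a V4SI merge the loop above yields the
selector { 0, 4, 2, 6 } when use_odd is 0 (vec_mergee) and
{ 1, 5, 3, 7 } when use_odd is 1 (vec_mergeo), selecting the even or odd
lanes of the two inputs respectively. */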
15421
15422 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15423 a constant, use rs6000_fold_builtin.) */
15424
15425 bool
15426 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15427 {
15428 gimple *stmt = gsi_stmt (*gsi);
15429 tree fndecl = gimple_call_fndecl (stmt);
15430 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15431 enum rs6000_builtins fn_code
15432 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15433 tree arg0, arg1, lhs, temp;
15434 enum tree_code bcode;
15435 gimple *g;
15436
15437 size_t uns_fncode = (size_t) fn_code;
15438 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15439 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15440 const char *fn_name2 = (icode != CODE_FOR_nothing)
15441 ? get_insn_name ((int) icode)
15442 : "nothing";
15443
15444 if (TARGET_DEBUG_BUILTIN)
15445 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15446 fn_code, fn_name1, fn_name2);
15447
15448 if (!rs6000_fold_gimple)
15449 return false;
15450
15451 /* Prevent gimple folding for code that does not have a LHS, unless it is
15452 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15453 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15454 return false;
15455
15456 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
15457 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15458 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15459 if (!func_valid_p)
15460 return false;
15461
15462 switch (fn_code)
15463 {
15464 /* Flavors of vec_add. We deliberately don't expand
15465 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15466 TImode, resulting in much poorer code generation. */
15467 case ALTIVEC_BUILTIN_VADDUBM:
15468 case ALTIVEC_BUILTIN_VADDUHM:
15469 case ALTIVEC_BUILTIN_VADDUWM:
15470 case P8V_BUILTIN_VADDUDM:
15471 case ALTIVEC_BUILTIN_VADDFP:
15472 case VSX_BUILTIN_XVADDDP:
15473 bcode = PLUS_EXPR;
15474 do_binary:
15475 arg0 = gimple_call_arg (stmt, 0);
15476 arg1 = gimple_call_arg (stmt, 1);
15477 lhs = gimple_call_lhs (stmt);
15478 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15479 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15480 {
15481 /* Ensure the binary operation is performed in a type
15482 that wraps if it is an integral type. */
15483 gimple_seq stmts = NULL;
15484 tree type = unsigned_type_for (TREE_TYPE (lhs));
15485 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15486 type, arg0);
15487 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15488 type, arg1);
15489 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15490 type, uarg0, uarg1);
15491 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15492 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15493 build1 (VIEW_CONVERT_EXPR,
15494 TREE_TYPE (lhs), res));
15495 gsi_replace (gsi, g, true);
15496 return true;
15497 }
15498 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15499 gimple_set_location (g, gimple_location (stmt));
15500 gsi_replace (gsi, g, true);
15501 return true;
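/* To illustrate the wrapping-type conversion above (a sketch, with
hypothetical SSA names), a V4SI vec_add folds to roughly

u0_1 = VIEW_CONVERT_EXPR<vector unsigned int>(arg0);
u1_2 = VIEW_CONVERT_EXPR<vector unsigned int>(arg1);
t_3 = u0_1 + u1_2;
lhs = VIEW_CONVERT_EXPR<vector signed int>(t_3);

so the addition wraps rather than invoking signed-overflow undefined
behavior. */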
15502 /* Flavors of vec_sub. We deliberately don't expand
15503 P8V_BUILTIN_VSUBUQM. */
15504 case ALTIVEC_BUILTIN_VSUBUBM:
15505 case ALTIVEC_BUILTIN_VSUBUHM:
15506 case ALTIVEC_BUILTIN_VSUBUWM:
15507 case P8V_BUILTIN_VSUBUDM:
15508 case ALTIVEC_BUILTIN_VSUBFP:
15509 case VSX_BUILTIN_XVSUBDP:
15510 bcode = MINUS_EXPR;
15511 goto do_binary;
15512 case VSX_BUILTIN_XVMULSP:
15513 case VSX_BUILTIN_XVMULDP:
15514 arg0 = gimple_call_arg (stmt, 0);
15515 arg1 = gimple_call_arg (stmt, 1);
15516 lhs = gimple_call_lhs (stmt);
15517 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15518 gimple_set_location (g, gimple_location (stmt));
15519 gsi_replace (gsi, g, true);
15520 return true;
15521 /* Even element flavors of vec_mul (signed). */
15522 case ALTIVEC_BUILTIN_VMULESB:
15523 case ALTIVEC_BUILTIN_VMULESH:
15524 case P8V_BUILTIN_VMULESW:
15525 /* Even element flavors of vec_mul (unsigned). */
15526 case ALTIVEC_BUILTIN_VMULEUB:
15527 case ALTIVEC_BUILTIN_VMULEUH:
15528 case P8V_BUILTIN_VMULEUW:
15529 arg0 = gimple_call_arg (stmt, 0);
15530 arg1 = gimple_call_arg (stmt, 1);
15531 lhs = gimple_call_lhs (stmt);
15532 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15533 gimple_set_location (g, gimple_location (stmt));
15534 gsi_replace (gsi, g, true);
15535 return true;
15536 /* Odd element flavors of vec_mul (signed). */
15537 case ALTIVEC_BUILTIN_VMULOSB:
15538 case ALTIVEC_BUILTIN_VMULOSH:
15539 case P8V_BUILTIN_VMULOSW:
15540 /* Odd element flavors of vec_mul (unsigned). */
15541 case ALTIVEC_BUILTIN_VMULOUB:
15542 case ALTIVEC_BUILTIN_VMULOUH:
15543 case P8V_BUILTIN_VMULOUW:
15544 arg0 = gimple_call_arg (stmt, 0);
15545 arg1 = gimple_call_arg (stmt, 1);
15546 lhs = gimple_call_lhs (stmt);
15547 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15548 gimple_set_location (g, gimple_location (stmt));
15549 gsi_replace (gsi, g, true);
15550 return true;
15551 /* Flavors of vec_div (integer). */
15552 case VSX_BUILTIN_DIV_V2DI:
15553 case VSX_BUILTIN_UDIV_V2DI:
15554 arg0 = gimple_call_arg (stmt, 0);
15555 arg1 = gimple_call_arg (stmt, 1);
15556 lhs = gimple_call_lhs (stmt);
15557 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15558 gimple_set_location (g, gimple_location (stmt));
15559 gsi_replace (gsi, g, true);
15560 return true;
15561 /* Flavors of vec_div (float). */
15562 case VSX_BUILTIN_XVDIVSP:
15563 case VSX_BUILTIN_XVDIVDP:
15564 arg0 = gimple_call_arg (stmt, 0);
15565 arg1 = gimple_call_arg (stmt, 1);
15566 lhs = gimple_call_lhs (stmt);
15567 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15568 gimple_set_location (g, gimple_location (stmt));
15569 gsi_replace (gsi, g, true);
15570 return true;
15571 /* Flavors of vec_and. */
15572 case ALTIVEC_BUILTIN_VAND:
15573 arg0 = gimple_call_arg (stmt, 0);
15574 arg1 = gimple_call_arg (stmt, 1);
15575 lhs = gimple_call_lhs (stmt);
15576 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15577 gimple_set_location (g, gimple_location (stmt));
15578 gsi_replace (gsi, g, true);
15579 return true;
15580 /* Flavors of vec_andc. */
15581 case ALTIVEC_BUILTIN_VANDC:
15582 arg0 = gimple_call_arg (stmt, 0);
15583 arg1 = gimple_call_arg (stmt, 1);
15584 lhs = gimple_call_lhs (stmt);
15585 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15586 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15587 gimple_set_location (g, gimple_location (stmt));
15588 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15589 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15590 gimple_set_location (g, gimple_location (stmt));
15591 gsi_replace (gsi, g, true);
15592 return true;
15593 /* Flavors of vec_nand. */
15594 case P8V_BUILTIN_VEC_NAND:
15595 case P8V_BUILTIN_NAND_V16QI:
15596 case P8V_BUILTIN_NAND_V8HI:
15597 case P8V_BUILTIN_NAND_V4SI:
15598 case P8V_BUILTIN_NAND_V4SF:
15599 case P8V_BUILTIN_NAND_V2DF:
15600 case P8V_BUILTIN_NAND_V2DI:
15601 arg0 = gimple_call_arg (stmt, 0);
15602 arg1 = gimple_call_arg (stmt, 1);
15603 lhs = gimple_call_lhs (stmt);
15604 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15605 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15606 gimple_set_location (g, gimple_location (stmt));
15607 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15608 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15609 gimple_set_location (g, gimple_location (stmt));
15610 gsi_replace (gsi, g, true);
15611 return true;
15612 /* Flavors of vec_or. */
15613 case ALTIVEC_BUILTIN_VOR:
15614 arg0 = gimple_call_arg (stmt, 0);
15615 arg1 = gimple_call_arg (stmt, 1);
15616 lhs = gimple_call_lhs (stmt);
15617 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15618 gimple_set_location (g, gimple_location (stmt));
15619 gsi_replace (gsi, g, true);
15620 return true;
15621 /* Flavors of vec_orc. */
15622 case P8V_BUILTIN_ORC_V16QI:
15623 case P8V_BUILTIN_ORC_V8HI:
15624 case P8V_BUILTIN_ORC_V4SI:
15625 case P8V_BUILTIN_ORC_V4SF:
15626 case P8V_BUILTIN_ORC_V2DF:
15627 case P8V_BUILTIN_ORC_V2DI:
15628 arg0 = gimple_call_arg (stmt, 0);
15629 arg1 = gimple_call_arg (stmt, 1);
15630 lhs = gimple_call_lhs (stmt);
15631 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15632 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15633 gimple_set_location (g, gimple_location (stmt));
15634 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15635 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15636 gimple_set_location (g, gimple_location (stmt));
15637 gsi_replace (gsi, g, true);
15638 return true;
15639 /* Flavors of vec_xor. */
15640 case ALTIVEC_BUILTIN_VXOR:
15641 arg0 = gimple_call_arg (stmt, 0);
15642 arg1 = gimple_call_arg (stmt, 1);
15643 lhs = gimple_call_lhs (stmt);
15644 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15645 gimple_set_location (g, gimple_location (stmt));
15646 gsi_replace (gsi, g, true);
15647 return true;
15648 /* Flavors of vec_nor. */
15649 case ALTIVEC_BUILTIN_VNOR:
15650 arg0 = gimple_call_arg (stmt, 0);
15651 arg1 = gimple_call_arg (stmt, 1);
15652 lhs = gimple_call_lhs (stmt);
15653 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15654 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15655 gimple_set_location (g, gimple_location (stmt));
15656 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15657 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15658 gimple_set_location (g, gimple_location (stmt));
15659 gsi_replace (gsi, g, true);
15660 return true;
15661 /* Flavors of vec_abs. */
15662 case ALTIVEC_BUILTIN_ABS_V16QI:
15663 case ALTIVEC_BUILTIN_ABS_V8HI:
15664 case ALTIVEC_BUILTIN_ABS_V4SI:
15665 case ALTIVEC_BUILTIN_ABS_V4SF:
15666 case P8V_BUILTIN_ABS_V2DI:
15667 case VSX_BUILTIN_XVABSDP:
15668 arg0 = gimple_call_arg (stmt, 0);
15669 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15670 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15671 return false;
15672 lhs = gimple_call_lhs (stmt);
15673 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15674 gimple_set_location (g, gimple_location (stmt));
15675 gsi_replace (gsi, g, true);
15676 return true;
15677 /* Flavors of vec_min. */
15678 case VSX_BUILTIN_XVMINDP:
15679 case P8V_BUILTIN_VMINSD:
15680 case P8V_BUILTIN_VMINUD:
15681 case ALTIVEC_BUILTIN_VMINSB:
15682 case ALTIVEC_BUILTIN_VMINSH:
15683 case ALTIVEC_BUILTIN_VMINSW:
15684 case ALTIVEC_BUILTIN_VMINUB:
15685 case ALTIVEC_BUILTIN_VMINUH:
15686 case ALTIVEC_BUILTIN_VMINUW:
15687 case ALTIVEC_BUILTIN_VMINFP:
15688 arg0 = gimple_call_arg (stmt, 0);
15689 arg1 = gimple_call_arg (stmt, 1);
15690 lhs = gimple_call_lhs (stmt);
15691 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15692 gimple_set_location (g, gimple_location (stmt));
15693 gsi_replace (gsi, g, true);
15694 return true;
15695 /* Flavors of vec_max. */
15696 case VSX_BUILTIN_XVMAXDP:
15697 case P8V_BUILTIN_VMAXSD:
15698 case P8V_BUILTIN_VMAXUD:
15699 case ALTIVEC_BUILTIN_VMAXSB:
15700 case ALTIVEC_BUILTIN_VMAXSH:
15701 case ALTIVEC_BUILTIN_VMAXSW:
15702 case ALTIVEC_BUILTIN_VMAXUB:
15703 case ALTIVEC_BUILTIN_VMAXUH:
15704 case ALTIVEC_BUILTIN_VMAXUW:
15705 case ALTIVEC_BUILTIN_VMAXFP:
15706 arg0 = gimple_call_arg (stmt, 0);
15707 arg1 = gimple_call_arg (stmt, 1);
15708 lhs = gimple_call_lhs (stmt);
15709 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15710 gimple_set_location (g, gimple_location (stmt));
15711 gsi_replace (gsi, g, true);
15712 return true;
15713 /* Flavors of vec_eqv. */
15714 case P8V_BUILTIN_EQV_V16QI:
15715 case P8V_BUILTIN_EQV_V8HI:
15716 case P8V_BUILTIN_EQV_V4SI:
15717 case P8V_BUILTIN_EQV_V4SF:
15718 case P8V_BUILTIN_EQV_V2DF:
15719 case P8V_BUILTIN_EQV_V2DI:
15720 arg0 = gimple_call_arg (stmt, 0);
15721 arg1 = gimple_call_arg (stmt, 1);
15722 lhs = gimple_call_lhs (stmt);
15723 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15724 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15725 gimple_set_location (g, gimple_location (stmt));
15726 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15727 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15728 gimple_set_location (g, gimple_location (stmt));
15729 gsi_replace (gsi, g, true);
15730 return true;
15731 /* Flavors of vec_rotate_left. */
15732 case ALTIVEC_BUILTIN_VRLB:
15733 case ALTIVEC_BUILTIN_VRLH:
15734 case ALTIVEC_BUILTIN_VRLW:
15735 case P8V_BUILTIN_VRLD:
15736 arg0 = gimple_call_arg (stmt, 0);
15737 arg1 = gimple_call_arg (stmt, 1);
15738 lhs = gimple_call_lhs (stmt);
15739 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15740 gimple_set_location (g, gimple_location (stmt));
15741 gsi_replace (gsi, g, true);
15742 return true;
15743 /* Flavors of vector shift right algebraic.
15744 vec_sra{b,h,w} -> vsra{b,h,w}. */
15745 case ALTIVEC_BUILTIN_VSRAB:
15746 case ALTIVEC_BUILTIN_VSRAH:
15747 case ALTIVEC_BUILTIN_VSRAW:
15748 case P8V_BUILTIN_VSRAD:
15749 arg0 = gimple_call_arg (stmt, 0);
15750 arg1 = gimple_call_arg (stmt, 1);
15751 lhs = gimple_call_lhs (stmt);
15752 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
15753 gimple_set_location (g, gimple_location (stmt));
15754 gsi_replace (gsi, g, true);
15755 return true;
15756 /* Flavors of vector shift left.
15757 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15758 case ALTIVEC_BUILTIN_VSLB:
15759 case ALTIVEC_BUILTIN_VSLH:
15760 case ALTIVEC_BUILTIN_VSLW:
15761 case P8V_BUILTIN_VSLD:
15762 {
15763 location_t loc;
15764 gimple_seq stmts = NULL;
15765 arg0 = gimple_call_arg (stmt, 0);
15766 tree arg0_type = TREE_TYPE (arg0);
15767 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15768 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15769 return false;
15770 arg1 = gimple_call_arg (stmt, 1);
15771 tree arg1_type = TREE_TYPE (arg1);
15772 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15773 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15774 loc = gimple_location (stmt);
15775 lhs = gimple_call_lhs (stmt);
15776 /* Force arg1 into the valid range matching the arg0 type. */
15777 /* Build a vector consisting of the max valid bit-size values. */
15778 int n_elts = VECTOR_CST_NELTS (arg1);
15779 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15780 * BITS_PER_UNIT;
15781 tree element_size = build_int_cst (unsigned_element_type,
15782 tree_size_in_bits / n_elts);
15783 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15784 for (int i = 0; i < n_elts; i++)
15785 elts.safe_push (element_size);
15786 tree modulo_tree = elts.build ();
15787 /* Modulo the provided shift value against that vector. */
15788 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15789 unsigned_arg1_type, arg1);
15790 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15791 unsigned_arg1_type, unsigned_arg1,
15792 modulo_tree);
15793 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15794 /* And finally, do the shift. */
15795 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15796 gimple_set_location (g, gimple_location (stmt));
15797 gsi_replace (gsi, g, true);
15798 return true;
15799 }
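/* For example (a sketch): for a V4SI vec_sl the modulo vector built
above is { 32, 32, 32, 32 }, so a per-lane shift count of 33 is reduced
to 1 before the LSHIFT_EXPR, matching the instruction's modulo
semantics. */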
15800 /* Flavors of vector shift right. */
15801 case ALTIVEC_BUILTIN_VSRB:
15802 case ALTIVEC_BUILTIN_VSRH:
15803 case ALTIVEC_BUILTIN_VSRW:
15804 case P8V_BUILTIN_VSRD:
15805 {
15806 arg0 = gimple_call_arg (stmt, 0);
15807 arg1 = gimple_call_arg (stmt, 1);
15808 lhs = gimple_call_lhs (stmt);
15809 gimple_seq stmts = NULL;
15810 /* Convert arg0 to unsigned. */
15811 tree arg0_unsigned
15812 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15813 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15814 tree res
15815 = gimple_build (&stmts, RSHIFT_EXPR,
15816 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
15817 /* Convert result back to the lhs type. */
15818 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15819 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15820 update_call_from_tree (gsi, res);
15821 return true;
15822 }
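/* Note on the folding above: the VIEW_CONVERT_EXPRs make the shift
operate on an unsigned vector, so the RSHIFT_EXPR is a logical rather
than an arithmetic shift, matching vsr{b,h,w,d} semantics. */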
15823 /* Vector loads. */
15824 case ALTIVEC_BUILTIN_LVX_V16QI:
15825 case ALTIVEC_BUILTIN_LVX_V8HI:
15826 case ALTIVEC_BUILTIN_LVX_V4SI:
15827 case ALTIVEC_BUILTIN_LVX_V4SF:
15828 case ALTIVEC_BUILTIN_LVX_V2DI:
15829 case ALTIVEC_BUILTIN_LVX_V2DF:
15830 case ALTIVEC_BUILTIN_LVX_V1TI:
15831 {
15832 arg0 = gimple_call_arg (stmt, 0); // offset
15833 arg1 = gimple_call_arg (stmt, 1); // address
15834 lhs = gimple_call_lhs (stmt);
15835 location_t loc = gimple_location (stmt);
15836 /* Since arg1 may be cast to a different type, just use ptr_type_node
15837 here instead of trying to enforce TBAA on pointer types. */
15838 tree arg1_type = ptr_type_node;
15839 tree lhs_type = TREE_TYPE (lhs);
15840 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15841 the tree using the value from arg0. The resulting type will match
15842 the type of arg1. */
15843 gimple_seq stmts = NULL;
15844 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15845 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15846 arg1_type, arg1, temp_offset);
15847 /* Mask off any lower bits from the address. */
15848 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15849 arg1_type, temp_addr,
15850 build_int_cst (arg1_type, -16));
15851 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15852 /* Use the build2 helper to set up the MEM_REF. The MEM_REF could also
15853 take an offset, but since we've already incorporated the offset
15854 above, here we just pass in a zero. */
15855 gimple *g
15856 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15857 build_int_cst (arg1_type, 0)));
15858 gimple_set_location (g, loc);
15859 gsi_replace (gsi, g, true);
15860 return true;
15861 }
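/* Putting the pieces together (a sketch, with hypothetical names), a
call such as vec_ld (off, ptr) folds to roughly

addr_1 = ptr p+ (sizetype) off;
aligned_2 = addr_1 & -16;
lhs = MEM[aligned_2];

mirroring lvx's implicit truncation of the address to a 16-byte
boundary. */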
15862 /* Vector stores. */
15863 case ALTIVEC_BUILTIN_STVX_V16QI:
15864 case ALTIVEC_BUILTIN_STVX_V8HI:
15865 case ALTIVEC_BUILTIN_STVX_V4SI:
15866 case ALTIVEC_BUILTIN_STVX_V4SF:
15867 case ALTIVEC_BUILTIN_STVX_V2DI:
15868 case ALTIVEC_BUILTIN_STVX_V2DF:
15869 {
15870 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15871 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15872 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15873 location_t loc = gimple_location (stmt);
15874 tree arg0_type = TREE_TYPE (arg0);
15875 /* Use ptr_type_node (no TBAA) for the arg2_type.
15876 FIXME: (Richard) "A proper fix would be to transition this type as
15877 seen from the frontend to GIMPLE, for example in a similar way we
15878 do for MEM_REFs by piggy-backing that on an extra argument, a
15879 constant zero pointer of the alias pointer type to use (which would
15880 also serve as a type indicator of the store itself). I'd use a
15881 target specific internal function for this (not sure if we can have
15882 those target specific, but I guess if it's folded away then that's
15883 fine) and get away with the overload set." */
15884 tree arg2_type = ptr_type_node;
15885 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15886 the tree using the value from arg1. The resulting type will match
15887 the type of arg2. */
15888 gimple_seq stmts = NULL;
15889 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15890 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15891 arg2_type, arg2, temp_offset);
15892 /* Mask off any lower bits from the address. */
15893 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15894 arg2_type, temp_addr,
15895 build_int_cst (arg2_type, -16));
15896 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15897 /* The desired gimple result should be similar to:
15898 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15899 gimple *g
15900 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15901 build_int_cst (arg2_type, 0)), arg0);
15902 gimple_set_location (g, loc);
15903 gsi_replace (gsi, g, true);
15904 return true;
15905 }
15906
15907 /* Unaligned vector loads. */
15908 case VSX_BUILTIN_LXVW4X_V16QI:
15909 case VSX_BUILTIN_LXVW4X_V8HI:
15910 case VSX_BUILTIN_LXVW4X_V4SF:
15911 case VSX_BUILTIN_LXVW4X_V4SI:
15912 case VSX_BUILTIN_LXVD2X_V2DF:
15913 case VSX_BUILTIN_LXVD2X_V2DI:
15914 {
15915 arg0 = gimple_call_arg (stmt, 0); // offset
15916 arg1 = gimple_call_arg (stmt, 1); // address
15917 lhs = gimple_call_lhs (stmt);
15918 location_t loc = gimple_location (stmt);
15919 /* Since arg1 may be cast to a different type, just use ptr_type_node
15920 here instead of trying to enforce TBAA on pointer types. */
15921 tree arg1_type = ptr_type_node;
15922 tree lhs_type = TREE_TYPE (lhs);
15923 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15924 required alignment (on POWER) is 4 bytes regardless of the data type. */
15925 tree align_ltype = build_aligned_type (lhs_type, 4);
15926 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15927 the tree using the value from arg0. The resulting type will match
15928 the type of arg1. */
15929 gimple_seq stmts = NULL;
15930 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15931 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15932 arg1_type, arg1, temp_offset);
15933 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15934 /* Use the build2 helper to set up the MEM_REF. The MEM_REF could also
15935 take an offset, but since we've already incorporated the offset
15936 above, here we just pass in a zero. */
15937 gimple *g;
15938 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15939 build_int_cst (arg1_type, 0)));
15940 gimple_set_location (g, loc);
15941 gsi_replace (gsi, g, true);
15942 return true;
15943 }
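/* Note the contrast with the lvx folding above (informally): no -16
mask is applied here, and the 4-byte align_ltype on the MEM_REF tells
later passes that the access may be only word-aligned rather than
16-byte aligned. */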
15944
15945 /* Unaligned vector stores. */
15946 case VSX_BUILTIN_STXVW4X_V16QI:
15947 case VSX_BUILTIN_STXVW4X_V8HI:
15948 case VSX_BUILTIN_STXVW4X_V4SF:
15949 case VSX_BUILTIN_STXVW4X_V4SI:
15950 case VSX_BUILTIN_STXVD2X_V2DF:
15951 case VSX_BUILTIN_STXVD2X_V2DI:
15952 {
15953 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15954 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15955 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15956 location_t loc = gimple_location (stmt);
15957 tree arg0_type = TREE_TYPE (arg0);
15958 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15959 tree arg2_type = ptr_type_node;
15960 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15961 required alignment (on POWER) is 4 bytes regardless of the data type. */
15962 tree align_stype = build_aligned_type (arg0_type, 4);
15963 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15964 the tree using the value from arg1. */
15965 gimple_seq stmts = NULL;
15966 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15967 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15968 arg2_type, arg2, temp_offset);
15969 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15970 gimple *g;
15971 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15972 build_int_cst (arg2_type, 0)), arg0);
15973 gimple_set_location (g, loc);
15974 gsi_replace (gsi, g, true);
15975 return true;
15976 }
15977
15978 /* Vector Fused multiply-add (fma). */
15979 case ALTIVEC_BUILTIN_VMADDFP:
15980 case VSX_BUILTIN_XVMADDDP:
15981 case ALTIVEC_BUILTIN_VMLADDUHM:
15982 {
15983 arg0 = gimple_call_arg (stmt, 0);
15984 arg1 = gimple_call_arg (stmt, 1);
15985 tree arg2 = gimple_call_arg (stmt, 2);
15986 lhs = gimple_call_lhs (stmt);
15987 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15988 gimple_call_set_lhs (g, lhs);
15989 gimple_call_set_nothrow (g, true);
15990 gimple_set_location (g, gimple_location (stmt));
15991 gsi_replace (gsi, g, true);
15992 return true;
15993 }
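/* E.g. (a sketch), vec_madd (a, b, c) becomes a call to the internal
function IFN_FMA, printed in dumps as lhs = .FMA (a, b, c), which later
expands to the target's fused multiply-add pattern. */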
15994
15995 /* Vector compares; EQ, NE, GE, GT, LE. */
15996 case ALTIVEC_BUILTIN_VCMPEQUB:
15997 case ALTIVEC_BUILTIN_VCMPEQUH:
15998 case ALTIVEC_BUILTIN_VCMPEQUW:
15999 case P8V_BUILTIN_VCMPEQUD:
16000 fold_compare_helper (gsi, EQ_EXPR, stmt);
16001 return true;
16002
16003 case P9V_BUILTIN_CMPNEB:
16004 case P9V_BUILTIN_CMPNEH:
16005 case P9V_BUILTIN_CMPNEW:
16006 fold_compare_helper (gsi, NE_EXPR, stmt);
16007 return true;
16008
16009 case VSX_BUILTIN_CMPGE_16QI:
16010 case VSX_BUILTIN_CMPGE_U16QI:
16011 case VSX_BUILTIN_CMPGE_8HI:
16012 case VSX_BUILTIN_CMPGE_U8HI:
16013 case VSX_BUILTIN_CMPGE_4SI:
16014 case VSX_BUILTIN_CMPGE_U4SI:
16015 case VSX_BUILTIN_CMPGE_2DI:
16016 case VSX_BUILTIN_CMPGE_U2DI:
16017 fold_compare_helper (gsi, GE_EXPR, stmt);
16018 return true;
16019
16020 case ALTIVEC_BUILTIN_VCMPGTSB:
16021 case ALTIVEC_BUILTIN_VCMPGTUB:
16022 case ALTIVEC_BUILTIN_VCMPGTSH:
16023 case ALTIVEC_BUILTIN_VCMPGTUH:
16024 case ALTIVEC_BUILTIN_VCMPGTSW:
16025 case ALTIVEC_BUILTIN_VCMPGTUW:
16026 case P8V_BUILTIN_VCMPGTUD:
16027 case P8V_BUILTIN_VCMPGTSD:
16028 fold_compare_helper (gsi, GT_EXPR, stmt);
16029 return true;
16030
16031 case VSX_BUILTIN_CMPLE_16QI:
16032 case VSX_BUILTIN_CMPLE_U16QI:
16033 case VSX_BUILTIN_CMPLE_8HI:
16034 case VSX_BUILTIN_CMPLE_U8HI:
16035 case VSX_BUILTIN_CMPLE_4SI:
16036 case VSX_BUILTIN_CMPLE_U4SI:
16037 case VSX_BUILTIN_CMPLE_2DI:
16038 case VSX_BUILTIN_CMPLE_U2DI:
16039 fold_compare_helper (gsi, LE_EXPR, stmt);
16040 return true;
16041
16042 /* Flavors of vec_splat_[us]{8,16,32}. */
16043 case ALTIVEC_BUILTIN_VSPLTISB:
16044 case ALTIVEC_BUILTIN_VSPLTISH:
16045 case ALTIVEC_BUILTIN_VSPLTISW:
16046 {
16047 int size;
16048 if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
16049 size = 8;
16050 else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
16051 size = 16;
16052 else
16053 size = 32;
16054
16055 arg0 = gimple_call_arg (stmt, 0);
16056 lhs = gimple_call_lhs (stmt);
16057
16058 /* Only fold the vec_splat_*() if the lower bits of arg0 form a
16059 5-bit signed constant in the range -16 to +15. */
16060 if (TREE_CODE (arg0) != INTEGER_CST
16061 || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
16062 -16, 15))
16063 return false;
16064 gimple_seq stmts = NULL;
16065 location_t loc = gimple_location (stmt);
16066 tree splat_value = gimple_convert (&stmts, loc,
16067 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16068 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16069 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16070 g = gimple_build_assign (lhs, splat_tree);
16071 gimple_set_location (g, gimple_location (stmt));
16072 gsi_replace (gsi, g, true);
16073 return true;
16074 }
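/* For instance (a sketch), vec_splat_s32 (-5) folds to the vector
constant { -5, -5, -5, -5 }, while a literal outside the 5-bit signed
range is left to the normal expansion path. */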
16075
16076 /* Flavors of vec_splat. */
16077 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
16078 case ALTIVEC_BUILTIN_VSPLTB:
16079 case ALTIVEC_BUILTIN_VSPLTH:
16080 case ALTIVEC_BUILTIN_VSPLTW:
16081 case VSX_BUILTIN_XXSPLTD_V2DI:
16082 case VSX_BUILTIN_XXSPLTD_V2DF:
16083 {
16084 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
16085 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
16086 /* Only fold the vec_splat_*() if arg1 is both a constant value and
16087 a valid index into the arg0 vector. */
16088 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
16089 if (TREE_CODE (arg1) != INTEGER_CST
16090 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
16091 return false;
16092 lhs = gimple_call_lhs (stmt);
16093 tree lhs_type = TREE_TYPE (lhs);
16094 tree arg0_type = TREE_TYPE (arg0);
16095 tree splat;
16096 if (TREE_CODE (arg0) == VECTOR_CST)
16097 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
16098 else
16099 {
16100 /* Determine (in bits) the length and start location of the
16101 splat value for a call to the tree_vec_extract helper. */
16102 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
16103 * BITS_PER_UNIT / n_elts;
16104 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
16105 tree len = build_int_cst (bitsizetype, splat_elem_size);
16106 tree start = build_int_cst (bitsizetype, splat_start_bit);
16107 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
16108 len, start);
16109 }
16110 /* And finally, build the new vector. */
16111 tree splat_tree = build_vector_from_val (lhs_type, splat);
16112 g = gimple_build_assign (lhs, splat_tree);
16113 gimple_set_location (g, gimple_location (stmt));
16114 gsi_replace (gsi, g, true);
16115 return true;
16116 }
16117
16118 /* vec_mergel (integrals). */
16119 case ALTIVEC_BUILTIN_VMRGLH:
16120 case ALTIVEC_BUILTIN_VMRGLW:
16121 case VSX_BUILTIN_XXMRGLW_4SI:
16122 case ALTIVEC_BUILTIN_VMRGLB:
16123 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16124 case VSX_BUILTIN_XXMRGLW_4SF:
16125 case VSX_BUILTIN_VEC_MERGEL_V2DF:
16126 fold_mergehl_helper (gsi, stmt, 1);
16127 return true;
16128 /* vec_mergeh (integrals). */
16129 case ALTIVEC_BUILTIN_VMRGHH:
16130 case ALTIVEC_BUILTIN_VMRGHW:
16131 case VSX_BUILTIN_XXMRGHW_4SI:
16132 case ALTIVEC_BUILTIN_VMRGHB:
16133 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16134 case VSX_BUILTIN_XXMRGHW_4SF:
16135 case VSX_BUILTIN_VEC_MERGEH_V2DF:
16136 fold_mergehl_helper (gsi, stmt, 0);
16137 return true;
16138
16139 /* Flavors of vec_mergee. */
16140 case P8V_BUILTIN_VMRGEW_V4SI:
16141 case P8V_BUILTIN_VMRGEW_V2DI:
16142 case P8V_BUILTIN_VMRGEW_V4SF:
16143 case P8V_BUILTIN_VMRGEW_V2DF:
16144 fold_mergeeo_helper (gsi, stmt, 0);
16145 return true;
16146 /* Flavors of vec_mergeo. */
16147 case P8V_BUILTIN_VMRGOW_V4SI:
16148 case P8V_BUILTIN_VMRGOW_V2DI:
16149 case P8V_BUILTIN_VMRGOW_V4SF:
16150 case P8V_BUILTIN_VMRGOW_V2DF:
16151 fold_mergeeo_helper (gsi, stmt, 1);
16152 return true;
16153
16154 /* d = vec_pack (a, b) */
16155 case P8V_BUILTIN_VPKUDUM:
16156 case ALTIVEC_BUILTIN_VPKUHUM:
16157 case ALTIVEC_BUILTIN_VPKUWUM:
16158 {
16159 arg0 = gimple_call_arg (stmt, 0);
16160 arg1 = gimple_call_arg (stmt, 1);
16161 lhs = gimple_call_lhs (stmt);
16162 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
16163 gimple_set_location (g, gimple_location (stmt));
16164 gsi_replace (gsi, g, true);
16165 return true;
16166 }
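/* For example (a sketch), vec_pack of two V2DI operands becomes
lhs = VEC_PACK_TRUNC_EXPR <a, b>, truncating each doubleword element to
a word and concatenating the results into a V4SI. */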
16167
16168 /* d = vec_unpackh (a) */
16169 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
16170 in this code is sensitive to endianness and needs to be inverted to
16171 handle both LE and BE targets. */
16172 case ALTIVEC_BUILTIN_VUPKHSB:
16173 case ALTIVEC_BUILTIN_VUPKHSH:
16174 case P8V_BUILTIN_VUPKHSW:
16175 {
16176 arg0 = gimple_call_arg (stmt, 0);
16177 lhs = gimple_call_lhs (stmt);
16178 if (BYTES_BIG_ENDIAN)
16179 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16180 else
16181 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16182 gimple_set_location (g, gimple_location (stmt));
16183 gsi_replace (gsi, g, true);
16184 return true;
16185 }
16186 /* d = vec_unpackl (a) */
16187 case ALTIVEC_BUILTIN_VUPKLSB:
16188 case ALTIVEC_BUILTIN_VUPKLSH:
16189 case P8V_BUILTIN_VUPKLSW:
16190 {
16191 arg0 = gimple_call_arg (stmt, 0);
16192 lhs = gimple_call_lhs (stmt);
16193 if (BYTES_BIG_ENDIAN)
16194 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16195 else
16196 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16197 gimple_set_location (g, gimple_location (stmt));
16198 gsi_replace (gsi, g, true);
16199 return true;
16200 }
16201 /* There is no gimple type corresponding to pixel, so just return. */
16202 case ALTIVEC_BUILTIN_VUPKHPX:
16203 case ALTIVEC_BUILTIN_VUPKLPX:
16204 return false;
16205
16206 /* vec_perm. */
16207 case ALTIVEC_BUILTIN_VPERM_16QI:
16208 case ALTIVEC_BUILTIN_VPERM_8HI:
16209 case ALTIVEC_BUILTIN_VPERM_4SI:
16210 case ALTIVEC_BUILTIN_VPERM_2DI:
16211 case ALTIVEC_BUILTIN_VPERM_4SF:
16212 case ALTIVEC_BUILTIN_VPERM_2DF:
16213 {
16214 arg0 = gimple_call_arg (stmt, 0);
16215 arg1 = gimple_call_arg (stmt, 1);
16216 tree permute = gimple_call_arg (stmt, 2);
16217 lhs = gimple_call_lhs (stmt);
16218 location_t loc = gimple_location (stmt);
16219 gimple_seq stmts = NULL;
16220 // Convert arg0 and arg1 to match the type of the permute
16221 // for the VEC_PERM_EXPR operation.
16222 tree permute_type = (TREE_TYPE (permute));
16223 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
16224 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
16225 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
16226 permute_type, arg0_ptype, arg1_ptype,
16227 permute);
16228 // Convert the result back to the desired lhs type upon completion.
16229 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
16230 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16231 g = gimple_build_assign (lhs, temp);
16232 gimple_set_location (g, loc);
16233 gsi_replace (gsi, g, true);
16234 return true;
16235 }
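/* In other words (informally), a vec_perm of two V4SF inputs is carried
out on copies converted to the V16QI type of the permute control
vector, since the control vector selects individual bytes; the
byte-level result is then converted back to the V4SF lhs type. */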
16236
16237 default:
16238 if (TARGET_DEBUG_BUILTIN)
16239 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16240 fn_code, fn_name1, fn_name2);
16241 break;
16242 }
16243
16244 return false;
16245 }
16246
16247 /* Expand an expression EXP that calls a built-in function,
16248 with result going to TARGET if that's convenient
16249 (and in mode MODE if that's convenient).
16250 SUBTARGET may be used as the target for computing one of EXP's operands.
16251 IGNORE is nonzero if the value is to be ignored. */
16252
16253 static rtx
16254 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16255 machine_mode mode ATTRIBUTE_UNUSED,
16256 int ignore ATTRIBUTE_UNUSED)
16257 {
16258 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16259 enum rs6000_builtins fcode
16260 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16261 size_t uns_fcode = (size_t)fcode;
16262 const struct builtin_description *d;
16263 size_t i;
16264 rtx ret;
16265 bool success;
16266 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16267 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16268 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16269
16270 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16271 floating point type, depending on whether long double is the IBM extended
16272 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16273 we only define one variant of the built-in function, and switch the code
16274 when defining it, rather than defining two built-ins and using the
16275 overload table in rs6000-c.c to switch between the two. If we don't have
16276 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16277 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16278 if (FLOAT128_IEEE_P (TFmode))
16279 switch (icode)
16280 {
16281 default:
16282 break;
16283
16284 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16285 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16286 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16287 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16288 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16289 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16290 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16291 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16292 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16293 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16294 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16295 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16296 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16297 }
16298
16299 if (TARGET_DEBUG_BUILTIN)
16300 {
16301 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16302 const char *name2 = (icode != CODE_FOR_nothing)
16303 ? get_insn_name ((int) icode)
16304 : "nothing";
16305 const char *name3;
16306
16307 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16308 {
16309 default: name3 = "unknown"; break;
16310 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16311 case RS6000_BTC_UNARY: name3 = "unary"; break;
16312 case RS6000_BTC_BINARY: name3 = "binary"; break;
16313 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16314 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16315 case RS6000_BTC_ABS: name3 = "abs"; break;
16316 case RS6000_BTC_DST: name3 = "dst"; break;
16317 }
16318
16319
16320 fprintf (stderr,
16321 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16322 (name1) ? name1 : "---", fcode,
16323 (name2) ? name2 : "---", (int) icode,
16324 name3,
16325 func_valid_p ? "" : ", not valid");
16326 }
16327
16328 if (!func_valid_p)
16329 {
16330 rs6000_invalid_builtin (fcode);
16331
16332 /* Given it is invalid, just generate a normal call. */
16333 return expand_call (exp, target, ignore);
16334 }
16335
16336 switch (fcode)
16337 {
16338 case RS6000_BUILTIN_RECIP:
16339 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16340
16341 case RS6000_BUILTIN_RECIPF:
16342 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16343
16344 case RS6000_BUILTIN_RSQRTF:
16345 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16346
16347 case RS6000_BUILTIN_RSQRT:
16348 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16349
16350 case POWER7_BUILTIN_BPERMD:
16351 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16352 ? CODE_FOR_bpermd_di
16353 : CODE_FOR_bpermd_si), exp, target);
16354
16355 case RS6000_BUILTIN_GET_TB:
16356 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16357 target);
16358
16359 case RS6000_BUILTIN_MFTB:
16360 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16361 ? CODE_FOR_rs6000_mftb_di
16362 : CODE_FOR_rs6000_mftb_si),
16363 target);
16364
16365 case RS6000_BUILTIN_MFFS:
16366 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16367
16368 case RS6000_BUILTIN_MTFSB0:
16369 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16370
16371 case RS6000_BUILTIN_MTFSB1:
16372 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16373
16374 case RS6000_BUILTIN_SET_FPSCR_RN:
16375 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16376 exp);
16377
16378 case RS6000_BUILTIN_SET_FPSCR_DRN:
16379 return
16380 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16381 exp);
16382
16383 case RS6000_BUILTIN_MFFSL:
16384 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16385
16386 case RS6000_BUILTIN_MTFSF:
16387 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16388
16389 case RS6000_BUILTIN_CPU_INIT:
16390 case RS6000_BUILTIN_CPU_IS:
16391 case RS6000_BUILTIN_CPU_SUPPORTS:
16392 return cpu_expand_builtin (fcode, exp, target);
16393
16394 case MISC_BUILTIN_SPEC_BARRIER:
16395 {
16396 emit_insn (gen_speculation_barrier ());
16397 return NULL_RTX;
16398 }
16399
16400 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16401 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16402 {
16403 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16404 : (int) CODE_FOR_altivec_lvsl_direct);
16405 machine_mode tmode = insn_data[icode2].operand[0].mode;
16406 machine_mode mode = insn_data[icode2].operand[1].mode;
16407 tree arg;
16408 rtx op, addr, pat;
16409
16410 gcc_assert (TARGET_ALTIVEC);
16411
16412 arg = CALL_EXPR_ARG (exp, 0);
16413 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16414 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16415 addr = memory_address (mode, op);
16416 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16417 op = addr;
16418 else
16419 {
16420 /* For the load case we need to negate the address. */
16421 op = gen_reg_rtx (GET_MODE (addr));
16422 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16423 }
16424 op = gen_rtx_MEM (mode, op);
16425
16426 if (target == 0
16427 || GET_MODE (target) != tmode
16428 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16429 target = gen_reg_rtx (tmode);
16430
16431 pat = GEN_FCN (icode2) (target, op);
16432 if (!pat)
16433 return 0;
16434 emit_insn (pat);
16435
16436 return target;
16437 }
16438
16439 case ALTIVEC_BUILTIN_VCFUX:
16440 case ALTIVEC_BUILTIN_VCFSX:
16441 case ALTIVEC_BUILTIN_VCTUXS:
16442 case ALTIVEC_BUILTIN_VCTSXS:
16443 /* FIXME: There's got to be a nicer way to handle this case than
16444 constructing a new CALL_EXPR. */
16445 if (call_expr_nargs (exp) == 1)
16446 {
16447 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16448 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16449 }
16450 break;
16451
16452 /* For the pack and unpack int128 routines, fix up the builtin so it
16453 uses the correct IBM128 type. */
16454 case MISC_BUILTIN_PACK_IF:
16455 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16456 {
16457 icode = CODE_FOR_packtf;
16458 fcode = MISC_BUILTIN_PACK_TF;
16459 uns_fcode = (size_t)fcode;
16460 }
16461 break;
16462
16463 case MISC_BUILTIN_UNPACK_IF:
16464 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16465 {
16466 icode = CODE_FOR_unpacktf;
16467 fcode = MISC_BUILTIN_UNPACK_TF;
16468 uns_fcode = (size_t)fcode;
16469 }
16470 break;
16471
16472 default:
16473 break;
16474 }
16475
16476 if (TARGET_ALTIVEC)
16477 {
16478 ret = altivec_expand_builtin (exp, target, &success);
16479
16480 if (success)
16481 return ret;
16482 }
16483 if (TARGET_HTM)
16484 {
16485 ret = htm_expand_builtin (exp, target, &success);
16486
16487 if (success)
16488 return ret;
16489 }
16490
16491 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16492 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16493 gcc_assert (attr == RS6000_BTC_UNARY
16494 || attr == RS6000_BTC_BINARY
16495 || attr == RS6000_BTC_TERNARY
16496 || attr == RS6000_BTC_SPECIAL);
16497
16498 /* Handle simple unary operations. */
16499 d = bdesc_1arg;
16500 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16501 if (d->code == fcode)
16502 return rs6000_expand_unop_builtin (icode, exp, target);
16503
16504 /* Handle simple binary operations. */
16505 d = bdesc_2arg;
16506 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16507 if (d->code == fcode)
16508 return rs6000_expand_binop_builtin (icode, exp, target);
16509
16510 /* Handle simple ternary operations. */
16511 d = bdesc_3arg;
16512 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16513 if (d->code == fcode)
16514 return rs6000_expand_ternop_builtin (icode, exp, target);
16515
16516 /* Handle simple no-argument operations. */
16517 d = bdesc_0arg;
16518 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16519 if (d->code == fcode)
16520 return rs6000_expand_zeroop_builtin (icode, target);
16521
16522 gcc_unreachable ();
16523 }
16524
16525 /* Create a builtin vector type with a name, taking care not to give
16526 the canonical type a name. */
16527
16528 static tree
16529 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16530 {
16531 tree result = build_vector_type (elt_type, num_elts);
16532
16533 /* Copy so we don't give the canonical type a name. */
16534 result = build_variant_type_copy (result);
16535
16536 add_builtin_type (name, result);
16537
16538 return result;
16539 }
16540
16541 static void
16542 rs6000_init_builtins (void)
16543 {
16544 tree tdecl;
16545 tree ftype;
16546 machine_mode mode;
16547
16548 if (TARGET_DEBUG_BUILTIN)
16549 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16550 (TARGET_ALTIVEC) ? ", altivec" : "",
16551 (TARGET_VSX) ? ", vsx" : "");
16552
16553 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16554 : "__vector long long",
16555 intDI_type_node, 2);
16556 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16557 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16558 intSI_type_node, 4);
16559 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16560 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16561 intHI_type_node, 8);
16562 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16563 intQI_type_node, 16);
16564
16565 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16566 unsigned_intQI_type_node, 16);
16567 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16568 unsigned_intHI_type_node, 8);
16569 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16570 unsigned_intSI_type_node, 4);
16571 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16572 ? "__vector unsigned long"
16573 : "__vector unsigned long long",
16574 unsigned_intDI_type_node, 2);
16575
16576 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16577
16578 const_str_type_node
16579 = build_pointer_type (build_qualified_type (char_type_node,
16580 TYPE_QUAL_CONST));
16581
16582 /* We use V1TI mode as a special container to hold __int128_t items that
16583 must live in VSX registers. */
16584 if (intTI_type_node)
16585 {
16586 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16587 intTI_type_node, 1);
16588 unsigned_V1TI_type_node
16589 = rs6000_vector_type ("__vector unsigned __int128",
16590 unsigned_intTI_type_node, 1);
16591 }
16592
16593 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16594 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16595 'vector unsigned short'. */
16596
16597 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16598 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16599 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16600 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16601 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16602
16603 long_integer_type_internal_node = long_integer_type_node;
16604 long_unsigned_type_internal_node = long_unsigned_type_node;
16605 long_long_integer_type_internal_node = long_long_integer_type_node;
16606 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16607 intQI_type_internal_node = intQI_type_node;
16608 uintQI_type_internal_node = unsigned_intQI_type_node;
16609 intHI_type_internal_node = intHI_type_node;
16610 uintHI_type_internal_node = unsigned_intHI_type_node;
16611 intSI_type_internal_node = intSI_type_node;
16612 uintSI_type_internal_node = unsigned_intSI_type_node;
16613 intDI_type_internal_node = intDI_type_node;
16614 uintDI_type_internal_node = unsigned_intDI_type_node;
16615 intTI_type_internal_node = intTI_type_node;
16616 uintTI_type_internal_node = unsigned_intTI_type_node;
16617 float_type_internal_node = float_type_node;
16618 double_type_internal_node = double_type_node;
16619 long_double_type_internal_node = long_double_type_node;
16620 dfloat64_type_internal_node = dfloat64_type_node;
16621 dfloat128_type_internal_node = dfloat128_type_node;
16622 void_type_internal_node = void_type_node;
16623
16624 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16625 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16626 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16627 format that uses a pair of doubles, depending on the switches and
16628 defaults.
16629
16630 If we don't support either 128-bit IBM double-double or IEEE 128-bit
16631 floating point, we need to make sure the type is non-zero or else the
16632 self-tests fail during bootstrap.
16633
16634 Always create __ibm128 as a separate type, even if the current long double
16635 format is IBM extended double.
16636
16637 For IEEE 128-bit floating point, always create the type __ieee128. If the
16638 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16639 __ieee128. */
16640 if (TARGET_FLOAT128_TYPE)
16641 {
16642 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16643 ibm128_float_type_node = long_double_type_node;
16644 else
16645 {
16646 ibm128_float_type_node = make_node (REAL_TYPE);
16647 TYPE_PRECISION (ibm128_float_type_node) = 128;
16648 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16649 layout_type (ibm128_float_type_node);
16650 }
16651
16652 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16653 "__ibm128");
16654
16655 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16656 ieee128_float_type_node = long_double_type_node;
16657 else
16658 ieee128_float_type_node = float128_type_node;
16659
16660 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16661 "__ieee128");
16662 }
16663
16664 else
16665 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16666
16667 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16668 its tree type node. */
16669 builtin_mode_to_type[QImode][0] = integer_type_node;
16670 builtin_mode_to_type[HImode][0] = integer_type_node;
16671 builtin_mode_to_type[SImode][0] = intSI_type_node;
16672 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16673 builtin_mode_to_type[DImode][0] = intDI_type_node;
16674 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16675 builtin_mode_to_type[TImode][0] = intTI_type_node;
16676 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16677 builtin_mode_to_type[SFmode][0] = float_type_node;
16678 builtin_mode_to_type[DFmode][0] = double_type_node;
16679 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16680 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16681 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16682 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16683 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16684 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16685 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16686 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16687 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16688 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16689 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16690 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16691 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16692 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16693 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16694 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16695 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16696
16697 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16698 TYPE_NAME (bool_char_type_node) = tdecl;
16699
16700 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16701 TYPE_NAME (bool_short_type_node) = tdecl;
16702
16703 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16704 TYPE_NAME (bool_int_type_node) = tdecl;
16705
16706 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16707 TYPE_NAME (pixel_type_node) = tdecl;
16708
16709 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16710 bool_char_type_node, 16);
16711 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16712 bool_short_type_node, 8);
16713 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16714 bool_int_type_node, 4);
16715 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16716 ? "__vector __bool long"
16717 : "__vector __bool long long",
16718 bool_long_long_type_node, 2);
16719 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16720 pixel_type_node, 8);
16721
16722 /* Create Altivec and VSX builtins on machines with at least the
16723 general purpose extensions (970 and newer) to allow the use of
16724 the target attribute. */
16725 if (TARGET_EXTRA_BUILTINS)
16726 altivec_init_builtins ();
16727 if (TARGET_HTM)
16728 htm_init_builtins ();
16729
16730 if (TARGET_EXTRA_BUILTINS)
16731 rs6000_common_init_builtins ();
16732
16733 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16734 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16735 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16736
16737 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16738 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16739 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16740
16741 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16742 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16743 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16744
16745 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16746 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16747 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16748
16749 mode = (TARGET_64BIT) ? DImode : SImode;
16750 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16751 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16752 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16753
16754 ftype = build_function_type_list (unsigned_intDI_type_node,
16755 NULL_TREE);
16756 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16757
16758 if (TARGET_64BIT)
16759 ftype = build_function_type_list (unsigned_intDI_type_node,
16760 NULL_TREE);
16761 else
16762 ftype = build_function_type_list (unsigned_intSI_type_node,
16763 NULL_TREE);
16764 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16765
16766 ftype = build_function_type_list (double_type_node, NULL_TREE);
16767 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16768
16769 ftype = build_function_type_list (double_type_node, NULL_TREE);
16770 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16771
16772 ftype = build_function_type_list (void_type_node,
16773 intSI_type_node,
16774 NULL_TREE);
16775 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16776
16777 ftype = build_function_type_list (void_type_node,
16778 intSI_type_node,
16779 NULL_TREE);
16780 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16781
16782 ftype = build_function_type_list (void_type_node,
16783 intDI_type_node,
16784 NULL_TREE);
16785 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16786
16787 ftype = build_function_type_list (void_type_node,
16788 intDI_type_node,
16789 NULL_TREE);
16790 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16791
16792 ftype = build_function_type_list (void_type_node,
16793 intSI_type_node, double_type_node,
16794 NULL_TREE);
16795 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16796
16797 ftype = build_function_type_list (void_type_node, NULL_TREE);
16798 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16799 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16800 MISC_BUILTIN_SPEC_BARRIER);
16801
16802 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16803 NULL_TREE);
16804 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16805 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16806
16807 /* AIX libm provides clog as __clog. */
16808 if (TARGET_XCOFF
16809 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16810 set_user_assembler_name (tdecl, "__clog");
16811
16812 #ifdef SUBTARGET_INIT_BUILTINS
16813 SUBTARGET_INIT_BUILTINS;
16814 #endif
16815 }
16816
16817 /* Returns the rs6000 builtin decl for CODE. */
16818
16819 static tree
16820 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16821 {
16822 HOST_WIDE_INT fnmask;
16823
16824 if (code >= RS6000_BUILTIN_COUNT)
16825 return error_mark_node;
16826
16827 fnmask = rs6000_builtin_info[code].mask;
16828 if ((fnmask & rs6000_builtin_mask) != fnmask)
16829 {
16830 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16831 return error_mark_node;
16832 }
16833
16834 return rs6000_builtin_decls[code];
16835 }
16836
16837 static void
16838 altivec_init_builtins (void)
16839 {
16840 const struct builtin_description *d;
16841 size_t i;
16842 tree ftype;
16843 tree decl;
16844 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16845
16846 tree pvoid_type_node = build_pointer_type (void_type_node);
16847
16848 tree pcvoid_type_node
16849 = build_pointer_type (build_qualified_type (void_type_node,
16850 TYPE_QUAL_CONST));
16851
16852 tree int_ftype_opaque
16853 = build_function_type_list (integer_type_node,
16854 opaque_V4SI_type_node, NULL_TREE);
16855 tree opaque_ftype_opaque
16856 = build_function_type_list (integer_type_node, NULL_TREE);
16857 tree opaque_ftype_opaque_int
16858 = build_function_type_list (opaque_V4SI_type_node,
16859 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16860 tree opaque_ftype_opaque_opaque_int
16861 = build_function_type_list (opaque_V4SI_type_node,
16862 opaque_V4SI_type_node, opaque_V4SI_type_node,
16863 integer_type_node, NULL_TREE);
16864 tree opaque_ftype_opaque_opaque_opaque
16865 = build_function_type_list (opaque_V4SI_type_node,
16866 opaque_V4SI_type_node, opaque_V4SI_type_node,
16867 opaque_V4SI_type_node, NULL_TREE);
16868 tree opaque_ftype_opaque_opaque
16869 = build_function_type_list (opaque_V4SI_type_node,
16870 opaque_V4SI_type_node, opaque_V4SI_type_node,
16871 NULL_TREE);
16872 tree int_ftype_int_opaque_opaque
16873 = build_function_type_list (integer_type_node,
16874 integer_type_node, opaque_V4SI_type_node,
16875 opaque_V4SI_type_node, NULL_TREE);
16876 tree int_ftype_int_v4si_v4si
16877 = build_function_type_list (integer_type_node,
16878 integer_type_node, V4SI_type_node,
16879 V4SI_type_node, NULL_TREE);
16880 tree int_ftype_int_v2di_v2di
16881 = build_function_type_list (integer_type_node,
16882 integer_type_node, V2DI_type_node,
16883 V2DI_type_node, NULL_TREE);
16884 tree void_ftype_v4si
16885 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16886 tree v8hi_ftype_void
16887 = build_function_type_list (V8HI_type_node, NULL_TREE);
16888 tree void_ftype_void
16889 = build_function_type_list (void_type_node, NULL_TREE);
16890 tree void_ftype_int
16891 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16892
16893 tree opaque_ftype_long_pcvoid
16894 = build_function_type_list (opaque_V4SI_type_node,
16895 long_integer_type_node, pcvoid_type_node,
16896 NULL_TREE);
16897 tree v16qi_ftype_long_pcvoid
16898 = build_function_type_list (V16QI_type_node,
16899 long_integer_type_node, pcvoid_type_node,
16900 NULL_TREE);
16901 tree v8hi_ftype_long_pcvoid
16902 = build_function_type_list (V8HI_type_node,
16903 long_integer_type_node, pcvoid_type_node,
16904 NULL_TREE);
16905 tree v4si_ftype_long_pcvoid
16906 = build_function_type_list (V4SI_type_node,
16907 long_integer_type_node, pcvoid_type_node,
16908 NULL_TREE);
16909 tree v4sf_ftype_long_pcvoid
16910 = build_function_type_list (V4SF_type_node,
16911 long_integer_type_node, pcvoid_type_node,
16912 NULL_TREE);
16913 tree v2df_ftype_long_pcvoid
16914 = build_function_type_list (V2DF_type_node,
16915 long_integer_type_node, pcvoid_type_node,
16916 NULL_TREE);
16917 tree v2di_ftype_long_pcvoid
16918 = build_function_type_list (V2DI_type_node,
16919 long_integer_type_node, pcvoid_type_node,
16920 NULL_TREE);
16921 tree v1ti_ftype_long_pcvoid
16922 = build_function_type_list (V1TI_type_node,
16923 long_integer_type_node, pcvoid_type_node,
16924 NULL_TREE);
16925
16926 tree void_ftype_opaque_long_pvoid
16927 = build_function_type_list (void_type_node,
16928 opaque_V4SI_type_node, long_integer_type_node,
16929 pvoid_type_node, NULL_TREE);
16930 tree void_ftype_v4si_long_pvoid
16931 = build_function_type_list (void_type_node,
16932 V4SI_type_node, long_integer_type_node,
16933 pvoid_type_node, NULL_TREE);
16934 tree void_ftype_v16qi_long_pvoid
16935 = build_function_type_list (void_type_node,
16936 V16QI_type_node, long_integer_type_node,
16937 pvoid_type_node, NULL_TREE);
16938
16939 tree void_ftype_v16qi_pvoid_long
16940 = build_function_type_list (void_type_node,
16941 V16QI_type_node, pvoid_type_node,
16942 long_integer_type_node, NULL_TREE);
16943
16944 tree void_ftype_v8hi_long_pvoid
16945 = build_function_type_list (void_type_node,
16946 V8HI_type_node, long_integer_type_node,
16947 pvoid_type_node, NULL_TREE);
16948 tree void_ftype_v4sf_long_pvoid
16949 = build_function_type_list (void_type_node,
16950 V4SF_type_node, long_integer_type_node,
16951 pvoid_type_node, NULL_TREE);
16952 tree void_ftype_v2df_long_pvoid
16953 = build_function_type_list (void_type_node,
16954 V2DF_type_node, long_integer_type_node,
16955 pvoid_type_node, NULL_TREE);
16956 tree void_ftype_v1ti_long_pvoid
16957 = build_function_type_list (void_type_node,
16958 V1TI_type_node, long_integer_type_node,
16959 pvoid_type_node, NULL_TREE);
16960 tree void_ftype_v2di_long_pvoid
16961 = build_function_type_list (void_type_node,
16962 V2DI_type_node, long_integer_type_node,
16963 pvoid_type_node, NULL_TREE);
16964 tree int_ftype_int_v8hi_v8hi
16965 = build_function_type_list (integer_type_node,
16966 integer_type_node, V8HI_type_node,
16967 V8HI_type_node, NULL_TREE);
16968 tree int_ftype_int_v16qi_v16qi
16969 = build_function_type_list (integer_type_node,
16970 integer_type_node, V16QI_type_node,
16971 V16QI_type_node, NULL_TREE);
16972 tree int_ftype_int_v4sf_v4sf
16973 = build_function_type_list (integer_type_node,
16974 integer_type_node, V4SF_type_node,
16975 V4SF_type_node, NULL_TREE);
16976 tree int_ftype_int_v2df_v2df
16977 = build_function_type_list (integer_type_node,
16978 integer_type_node, V2DF_type_node,
16979 V2DF_type_node, NULL_TREE);
16980 tree v2di_ftype_v2di
16981 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16982 tree v4si_ftype_v4si
16983 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16984 tree v8hi_ftype_v8hi
16985 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16986 tree v16qi_ftype_v16qi
16987 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16988 tree v4sf_ftype_v4sf
16989 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16990 tree v2df_ftype_v2df
16991 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16992 tree void_ftype_pcvoid_int_int
16993 = build_function_type_list (void_type_node,
16994 pcvoid_type_node, integer_type_node,
16995 integer_type_node, NULL_TREE);
16996
16997 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16998 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16999 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17000 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17001 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17002 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17003 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17004 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17005 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17006 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17007 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17008 ALTIVEC_BUILTIN_LVXL_V2DF);
17009 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17010 ALTIVEC_BUILTIN_LVXL_V2DI);
17011 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17012 ALTIVEC_BUILTIN_LVXL_V4SF);
17013 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17014 ALTIVEC_BUILTIN_LVXL_V4SI);
17015 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17016 ALTIVEC_BUILTIN_LVXL_V8HI);
17017 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17018 ALTIVEC_BUILTIN_LVXL_V16QI);
17019 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17020 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
17021 ALTIVEC_BUILTIN_LVX_V1TI);
17022 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17023 ALTIVEC_BUILTIN_LVX_V2DF);
17024 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17025 ALTIVEC_BUILTIN_LVX_V2DI);
17026 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17027 ALTIVEC_BUILTIN_LVX_V4SF);
17028 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17029 ALTIVEC_BUILTIN_LVX_V4SI);
17030 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17031 ALTIVEC_BUILTIN_LVX_V8HI);
17032 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17033 ALTIVEC_BUILTIN_LVX_V16QI);
17034 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17035 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17036 ALTIVEC_BUILTIN_STVX_V2DF);
17037 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17038 ALTIVEC_BUILTIN_STVX_V2DI);
17039 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17040 ALTIVEC_BUILTIN_STVX_V4SF);
17041 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17042 ALTIVEC_BUILTIN_STVX_V4SI);
17043 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17044 ALTIVEC_BUILTIN_STVX_V8HI);
17045 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17046 ALTIVEC_BUILTIN_STVX_V16QI);
17047 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17048 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17049 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17050 ALTIVEC_BUILTIN_STVXL_V2DF);
17051 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17052 ALTIVEC_BUILTIN_STVXL_V2DI);
17053 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17054 ALTIVEC_BUILTIN_STVXL_V4SF);
17055 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17056 ALTIVEC_BUILTIN_STVXL_V4SI);
17057 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17058 ALTIVEC_BUILTIN_STVXL_V8HI);
17059 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17060 ALTIVEC_BUILTIN_STVXL_V16QI);
17061 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17062 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17063 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17064 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17065 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17066 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17067 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17068 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17069 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17070 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17071 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17072 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17073 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17074 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17075 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17076 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17077
17078 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17079 VSX_BUILTIN_LXVD2X_V2DF);
17080 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17081 VSX_BUILTIN_LXVD2X_V2DI);
17082 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17083 VSX_BUILTIN_LXVW4X_V4SF);
17084 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17085 VSX_BUILTIN_LXVW4X_V4SI);
17086 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17087 VSX_BUILTIN_LXVW4X_V8HI);
17088 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17089 VSX_BUILTIN_LXVW4X_V16QI);
17090 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17091 VSX_BUILTIN_STXVD2X_V2DF);
17092 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17093 VSX_BUILTIN_STXVD2X_V2DI);
17094 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17095 VSX_BUILTIN_STXVW4X_V4SF);
17096 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17097 VSX_BUILTIN_STXVW4X_V4SI);
17098 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17099 VSX_BUILTIN_STXVW4X_V8HI);
17100 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17101 VSX_BUILTIN_STXVW4X_V16QI);
17102
17103 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17104 VSX_BUILTIN_LD_ELEMREV_V2DF);
17105 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17106 VSX_BUILTIN_LD_ELEMREV_V2DI);
17107 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17108 VSX_BUILTIN_LD_ELEMREV_V4SF);
17109 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17110 VSX_BUILTIN_LD_ELEMREV_V4SI);
17111 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17112 VSX_BUILTIN_LD_ELEMREV_V8HI);
17113 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17114 VSX_BUILTIN_LD_ELEMREV_V16QI);
17115 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17116 VSX_BUILTIN_ST_ELEMREV_V2DF);
17117 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17118 VSX_BUILTIN_ST_ELEMREV_V1TI);
17119 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17120 VSX_BUILTIN_ST_ELEMREV_V2DI);
17121 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17122 VSX_BUILTIN_ST_ELEMREV_V4SF);
17123 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17124 VSX_BUILTIN_ST_ELEMREV_V4SI);
17125 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17126 VSX_BUILTIN_ST_ELEMREV_V8HI);
17127 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17128 VSX_BUILTIN_ST_ELEMREV_V16QI);
17129
17130 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17131 VSX_BUILTIN_VEC_LD);
17132 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17133 VSX_BUILTIN_VEC_ST);
17134 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17135 VSX_BUILTIN_VEC_XL);
17136 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17137 VSX_BUILTIN_VEC_XL_BE);
17138 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17139 VSX_BUILTIN_VEC_XST);
17140 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17141 VSX_BUILTIN_VEC_XST_BE);
17142
17143 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17144 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17145 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17146
17147 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17148 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17149 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17150 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17151 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17152 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17153 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17154 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17155 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17156 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17157 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17158 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17159
17160 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17161 ALTIVEC_BUILTIN_VEC_ADDE);
17162 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17163 ALTIVEC_BUILTIN_VEC_ADDEC);
17164 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17165 ALTIVEC_BUILTIN_VEC_CMPNE);
17166 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17167 ALTIVEC_BUILTIN_VEC_MUL);
17168 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17169 ALTIVEC_BUILTIN_VEC_SUBE);
17170 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17171 ALTIVEC_BUILTIN_VEC_SUBEC);
17172
17173 /* Cell builtins. */
17174 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17175 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17176 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17177 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17178
17179 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17180 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17181 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17182 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17183
17184 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17185 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17186 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17187 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17188
17189 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17190 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17191 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17192 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17193
17194 if (TARGET_P9_VECTOR)
17195 {
17196 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17197 P9V_BUILTIN_STXVL);
17198 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17199 P9V_BUILTIN_XST_LEN_R);
17200 }
17201
17202 /* Add the DST variants. */
17203 d = bdesc_dst;
17204 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17205 {
17206 HOST_WIDE_INT mask = d->mask;
17207
17208 /* It is expected that these dst built-in functions may have
17209 d->icode equal to CODE_FOR_nothing. */
17210 if ((mask & builtin_mask) != mask)
17211 {
17212 if (TARGET_DEBUG_BUILTIN)
17213 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17214 d->name);
17215 continue;
17216 }
17217 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17218 }
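
/* All of the dst builtins registered by the loop above share the
   void (const void *, int, const int) signature.  A usage sketch (CTL
   packs the block size, count and stride; the last argument selects
   one of the four data streams):

     __builtin_altivec_dst (ptr, ctl, 0);  */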
17219
17220 /* Initialize the predicates. */
17221 d = bdesc_altivec_preds;
17222 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17223 {
17224 machine_mode mode1;
17225 tree type;
17226 HOST_WIDE_INT mask = d->mask;
17227
17228 if ((mask & builtin_mask) != mask)
17229 {
17230 if (TARGET_DEBUG_BUILTIN)
17231 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17232 d->name);
17233 continue;
17234 }
17235
17236 if (rs6000_overloaded_builtin_p (d->code))
17237 mode1 = VOIDmode;
17238 else
17239 {
17240 /* Cannot define builtin if the instruction is disabled. */
17241 gcc_assert (d->icode != CODE_FOR_nothing);
17242 mode1 = insn_data[d->icode].operand[1].mode;
17243 }
17244
17245 switch (mode1)
17246 {
17247 case E_VOIDmode:
17248 type = int_ftype_int_opaque_opaque;
17249 break;
17250 case E_V2DImode:
17251 type = int_ftype_int_v2di_v2di;
17252 break;
17253 case E_V4SImode:
17254 type = int_ftype_int_v4si_v4si;
17255 break;
17256 case E_V8HImode:
17257 type = int_ftype_int_v8hi_v8hi;
17258 break;
17259 case E_V16QImode:
17260 type = int_ftype_int_v16qi_v16qi;
17261 break;
17262 case E_V4SFmode:
17263 type = int_ftype_int_v4sf_v4sf;
17264 break;
17265 case E_V2DFmode:
17266 type = int_ftype_int_v2df_v2df;
17267 break;
17268 default:
17269 gcc_unreachable ();
17270 }
17271
17272 def_builtin (d->name, type, d->code);
17273 }
17274
17275 /* Initialize the abs* operators. */
17276 d = bdesc_abs;
17277 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17278 {
17279 machine_mode mode0;
17280 tree type;
17281 HOST_WIDE_INT mask = d->mask;
17282
17283 if ((mask & builtin_mask) != mask)
17284 {
17285 if (TARGET_DEBUG_BUILTIN)
17286 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17287 d->name);
17288 continue;
17289 }
17290
17291 /* Cannot define builtin if the instruction is disabled. */
17292 gcc_assert (d->icode != CODE_FOR_nothing);
17293 mode0 = insn_data[d->icode].operand[0].mode;
17294
17295 switch (mode0)
17296 {
17297 case E_V2DImode:
17298 type = v2di_ftype_v2di;
17299 break;
17300 case E_V4SImode:
17301 type = v4si_ftype_v4si;
17302 break;
17303 case E_V8HImode:
17304 type = v8hi_ftype_v8hi;
17305 break;
17306 case E_V16QImode:
17307 type = v16qi_ftype_v16qi;
17308 break;
17309 case E_V4SFmode:
17310 type = v4sf_ftype_v4sf;
17311 break;
17312 case E_V2DFmode:
17313 type = v2df_ftype_v2df;
17314 break;
17315 default:
17316 gcc_unreachable ();
17317 }
17318
17319 def_builtin (d->name, type, d->code);
17320 }
17321
17322 /* Initialize the target builtin that implements
17323 targetm.vectorize.builtin_mask_for_load. */
17324
17325 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17326 v16qi_ftype_long_pcvoid,
17327 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17328 BUILT_IN_MD, NULL, NULL_TREE);
17329 TREE_READONLY (decl) = 1;
17330 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17331 altivec_builtin_mask_for_load = decl;
17332
17333 /* Access to the vec_init patterns. */
17334 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17335 integer_type_node, integer_type_node,
17336 integer_type_node, NULL_TREE);
17337 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17338
17339 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17340 short_integer_type_node,
17341 short_integer_type_node,
17342 short_integer_type_node,
17343 short_integer_type_node,
17344 short_integer_type_node,
17345 short_integer_type_node,
17346 short_integer_type_node, NULL_TREE);
17347 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17348
17349 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17350 char_type_node, char_type_node,
17351 char_type_node, char_type_node,
17352 char_type_node, char_type_node,
17353 char_type_node, char_type_node,
17354 char_type_node, char_type_node,
17355 char_type_node, char_type_node,
17356 char_type_node, char_type_node,
17357 char_type_node, NULL_TREE);
17358 def_builtin ("__builtin_vec_init_v16qi", ftype,
17359 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17360
17361 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17362 float_type_node, float_type_node,
17363 float_type_node, NULL_TREE);
17364 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17365
17366 /* VSX builtins. */
17367 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17368 double_type_node, NULL_TREE);
17369 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17370
17371 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17372 intDI_type_node, NULL_TREE);
17373 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17374
17375 /* Access to the vec_set patterns. */
17376 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17377 intSI_type_node,
17378 integer_type_node, NULL_TREE);
17379 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17380
17381 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17382 intHI_type_node,
17383 integer_type_node, NULL_TREE);
17384 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17385
17386 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17387 intQI_type_node,
17388 integer_type_node, NULL_TREE);
17389 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17390
17391 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17392 float_type_node,
17393 integer_type_node, NULL_TREE);
17394 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17395
17396 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17397 double_type_node,
17398 integer_type_node, NULL_TREE);
17399 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17400
17401 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17402 intDI_type_node,
17403 integer_type_node, NULL_TREE);
17404 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17405
17406 /* Access to the vec_extract patterns. */
17407 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17408 integer_type_node, NULL_TREE);
17409 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17410
17411 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17412 integer_type_node, NULL_TREE);
17413 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17414
17415 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17416 integer_type_node, NULL_TREE);
17417 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17418
17419 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17420 integer_type_node, NULL_TREE);
17421 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17422
17423 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17424 integer_type_node, NULL_TREE);
17425 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17426
17427 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17428 integer_type_node, NULL_TREE);
17429 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
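
/* A short sketch of how the vec_init/vec_set/vec_ext builtins defined
   above are used; the trailing argument of the set/ext forms is the
   element index:

     vector int v = __builtin_vec_init_v4si (1, 2, 3, 4);
     v = __builtin_vec_set_v4si (v, 42, 0);     element 0 = 42
     int x = __builtin_vec_ext_v4si (v, 3);     read element 3
   */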
17430
17431
17432 if (V1TI_type_node)
17433 {
17434 tree v1ti_ftype_long_pcvoid
17435 = build_function_type_list (V1TI_type_node,
17436 long_integer_type_node, pcvoid_type_node,
17437 NULL_TREE);
17438 tree void_ftype_v1ti_long_pvoid
17439 = build_function_type_list (void_type_node,
17440 V1TI_type_node, long_integer_type_node,
17441 pvoid_type_node, NULL_TREE);
17442 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17443 VSX_BUILTIN_LD_ELEMREV_V1TI);
17444 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17445 VSX_BUILTIN_LXVD2X_V1TI);
17446 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17447 VSX_BUILTIN_STXVD2X_V1TI);
17448 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17449 NULL_TREE);
17450 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17451 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17452 intTI_type_node,
17453 integer_type_node, NULL_TREE);
17454 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17455 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17456 integer_type_node, NULL_TREE);
17457 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17458 }
17459
17460 }
17461
17462 static void
17463 htm_init_builtins (void)
17464 {
17465 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17466 const struct builtin_description *d;
17467 size_t i;
17468
17469 d = bdesc_htm;
17470 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17471 {
17472 tree op[MAX_HTM_OPERANDS], type;
17473 HOST_WIDE_INT mask = d->mask;
17474 unsigned attr = rs6000_builtin_info[d->code].attr;
17475 bool void_func = (attr & RS6000_BTC_VOID);
17476 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17477 int nopnds = 0;
17478 tree gpr_type_node;
17479 tree rettype;
17480 tree argtype;
17481
17482 /* It is expected that these htm built-in functions may have
17483 d->icode equal to CODE_FOR_nothing. */
17484
17485 if (TARGET_32BIT && TARGET_POWERPC64)
17486 gpr_type_node = long_long_unsigned_type_node;
17487 else
17488 gpr_type_node = long_unsigned_type_node;
17489
17490 if (attr & RS6000_BTC_SPR)
17491 {
17492 rettype = gpr_type_node;
17493 argtype = gpr_type_node;
17494 }
17495 else if (d->code == HTM_BUILTIN_TABORTDC
17496 || d->code == HTM_BUILTIN_TABORTDCI)
17497 {
17498 rettype = unsigned_type_node;
17499 argtype = gpr_type_node;
17500 }
17501 else
17502 {
17503 rettype = unsigned_type_node;
17504 argtype = unsigned_type_node;
17505 }
17506
17507 if ((mask & builtin_mask) != mask)
17508 {
17509 if (TARGET_DEBUG_BUILTIN)
17510 fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
17511 continue;
17512 }
17513
17514 if (d->name == 0)
17515 {
17516 if (TARGET_DEBUG_BUILTIN)
17517 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17518 (long unsigned) i);
17519 continue;
17520 }
17521
17522 op[nopnds++] = (void_func) ? void_type_node : rettype;
17523
17524 if (attr_args == RS6000_BTC_UNARY)
17525 op[nopnds++] = argtype;
17526 else if (attr_args == RS6000_BTC_BINARY)
17527 {
17528 op[nopnds++] = argtype;
17529 op[nopnds++] = argtype;
17530 }
17531 else if (attr_args == RS6000_BTC_TERNARY)
17532 {
17533 op[nopnds++] = argtype;
17534 op[nopnds++] = argtype;
17535 op[nopnds++] = argtype;
17536 }
17537
17538 switch (nopnds)
17539 {
17540 case 1:
17541 type = build_function_type_list (op[0], NULL_TREE);
17542 break;
17543 case 2:
17544 type = build_function_type_list (op[0], op[1], NULL_TREE);
17545 break;
17546 case 3:
17547 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17548 break;
17549 case 4:
17550 type = build_function_type_list (op[0], op[1], op[2], op[3],
17551 NULL_TREE);
17552 break;
17553 default:
17554 gcc_unreachable ();
17555 }
17556
17557 def_builtin (d->name, type, d->code);
17558 }
17559 }
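
/* Worked example of the signature derivation in the loop above: an HTM
   builtin whose attributes contain RS6000_BTC_SPR | RS6000_BTC_UNARY on
   a 64-bit target collects op[] = { gpr, gpr } and therefore gets the
   type  unsigned long f (unsigned long),  while a void binary builtin
   without RS6000_BTC_SPR gets  void f (unsigned int, unsigned int).  */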
17560
17561 /* Hash function for builtin functions with up to 3 arguments and a return
17562 type. */
17563 hashval_t
17564 builtin_hasher::hash (builtin_hash_struct *bh)
17565 {
17566 unsigned ret = 0;
17567 int i;
17568
17569 for (i = 0; i < 4; i++)
17570 {
17571 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17572 ret = (ret * 2) + bh->uns_p[i];
17573 }
17574
17575 return ret;
17576 }
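
/* In effect the loop above treats the four (mode, uns_p) pairs as the
   digits of a mixed-radix number: each iteration multiplies the running
   value by MAX_MACHINE_MODE and adds the mode, then multiplies by 2 and
   adds the signedness flag, so two distinct entries can only collide
   when the accumulated value wraps around the unsigned range.  */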
17577
17578 /* Compare builtin hash entries H1 and H2 for equivalence. */
17579 bool
17580 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17581 {
17582 return ((p1->mode[0] == p2->mode[0])
17583 && (p1->mode[1] == p2->mode[1])
17584 && (p1->mode[2] == p2->mode[2])
17585 && (p1->mode[3] == p2->mode[3])
17586 && (p1->uns_p[0] == p2->uns_p[0])
17587 && (p1->uns_p[1] == p2->uns_p[1])
17588 && (p1->uns_p[2] == p2->uns_p[2])
17589 && (p1->uns_p[3] == p2->uns_p[3]));
17590 }
17591
17592 /* Map types for builtin functions with an explicit return type and up to 3
17593 arguments. Functions with fewer than 3 arguments pass VOIDmode as the
17594 mode of each unused argument. */
17595 static tree
17596 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17597 machine_mode mode_arg1, machine_mode mode_arg2,
17598 enum rs6000_builtins builtin, const char *name)
17599 {
17600 struct builtin_hash_struct h;
17601 struct builtin_hash_struct *h2;
17602 int num_args = 3;
17603 int i;
17604 tree ret_type = NULL_TREE;
17605 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17606
17607 /* Create builtin_hash_table. */
17608 if (builtin_hash_table == NULL)
17609 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17610
17611 h.type = NULL_TREE;
17612 h.mode[0] = mode_ret;
17613 h.mode[1] = mode_arg0;
17614 h.mode[2] = mode_arg1;
17615 h.mode[3] = mode_arg2;
17616 h.uns_p[0] = 0;
17617 h.uns_p[1] = 0;
17618 h.uns_p[2] = 0;
17619 h.uns_p[3] = 0;
17620
17621 /* If the builtin produces unsigned results or takes unsigned arguments, and
17622 its decl is handed back to the vectorizer (as for the widening multiplies
17623 and permutes), make sure the argument and return types are correct. */
17625 switch (builtin)
17626 {
17627 /* unsigned 1 argument functions. */
17628 case CRYPTO_BUILTIN_VSBOX:
17629 case P8V_BUILTIN_VGBBD:
17630 case MISC_BUILTIN_CDTBCD:
17631 case MISC_BUILTIN_CBCDTD:
17632 h.uns_p[0] = 1;
17633 h.uns_p[1] = 1;
17634 break;
17635
17636 /* unsigned 2 argument functions. */
17637 case ALTIVEC_BUILTIN_VMULEUB:
17638 case ALTIVEC_BUILTIN_VMULEUH:
17639 case P8V_BUILTIN_VMULEUW:
17640 case ALTIVEC_BUILTIN_VMULOUB:
17641 case ALTIVEC_BUILTIN_VMULOUH:
17642 case P8V_BUILTIN_VMULOUW:
17643 case CRYPTO_BUILTIN_VCIPHER:
17644 case CRYPTO_BUILTIN_VCIPHERLAST:
17645 case CRYPTO_BUILTIN_VNCIPHER:
17646 case CRYPTO_BUILTIN_VNCIPHERLAST:
17647 case CRYPTO_BUILTIN_VPMSUMB:
17648 case CRYPTO_BUILTIN_VPMSUMH:
17649 case CRYPTO_BUILTIN_VPMSUMW:
17650 case CRYPTO_BUILTIN_VPMSUMD:
17651 case CRYPTO_BUILTIN_VPMSUM:
17652 case MISC_BUILTIN_ADDG6S:
17653 case MISC_BUILTIN_DIVWEU:
17654 case MISC_BUILTIN_DIVDEU:
17655 case VSX_BUILTIN_UDIV_V2DI:
17656 case ALTIVEC_BUILTIN_VMAXUB:
17657 case ALTIVEC_BUILTIN_VMINUB:
17658 case ALTIVEC_BUILTIN_VMAXUH:
17659 case ALTIVEC_BUILTIN_VMINUH:
17660 case ALTIVEC_BUILTIN_VMAXUW:
17661 case ALTIVEC_BUILTIN_VMINUW:
17662 case P8V_BUILTIN_VMAXUD:
17663 case P8V_BUILTIN_VMINUD:
17664 h.uns_p[0] = 1;
17665 h.uns_p[1] = 1;
17666 h.uns_p[2] = 1;
17667 break;
17668
17669 /* unsigned 3 argument functions. */
17670 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17671 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17672 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17673 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17674 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17675 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17676 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17677 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17678 case VSX_BUILTIN_VPERM_16QI_UNS:
17679 case VSX_BUILTIN_VPERM_8HI_UNS:
17680 case VSX_BUILTIN_VPERM_4SI_UNS:
17681 case VSX_BUILTIN_VPERM_2DI_UNS:
17682 case VSX_BUILTIN_XXSEL_16QI_UNS:
17683 case VSX_BUILTIN_XXSEL_8HI_UNS:
17684 case VSX_BUILTIN_XXSEL_4SI_UNS:
17685 case VSX_BUILTIN_XXSEL_2DI_UNS:
17686 case CRYPTO_BUILTIN_VPERMXOR:
17687 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17688 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17689 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17690 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17691 case CRYPTO_BUILTIN_VSHASIGMAW:
17692 case CRYPTO_BUILTIN_VSHASIGMAD:
17693 case CRYPTO_BUILTIN_VSHASIGMA:
17694 h.uns_p[0] = 1;
17695 h.uns_p[1] = 1;
17696 h.uns_p[2] = 1;
17697 h.uns_p[3] = 1;
17698 break;
17699
17700 /* signed permute functions with unsigned char mask. */
17701 case ALTIVEC_BUILTIN_VPERM_16QI:
17702 case ALTIVEC_BUILTIN_VPERM_8HI:
17703 case ALTIVEC_BUILTIN_VPERM_4SI:
17704 case ALTIVEC_BUILTIN_VPERM_4SF:
17705 case ALTIVEC_BUILTIN_VPERM_2DI:
17706 case ALTIVEC_BUILTIN_VPERM_2DF:
17707 case VSX_BUILTIN_VPERM_16QI:
17708 case VSX_BUILTIN_VPERM_8HI:
17709 case VSX_BUILTIN_VPERM_4SI:
17710 case VSX_BUILTIN_VPERM_4SF:
17711 case VSX_BUILTIN_VPERM_2DI:
17712 case VSX_BUILTIN_VPERM_2DF:
17713 h.uns_p[3] = 1;
17714 break;
17715
17716 /* unsigned args, signed return. */
17717 case VSX_BUILTIN_XVCVUXDSP:
17718 case VSX_BUILTIN_XVCVUXDDP_UNS:
17719 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17720 h.uns_p[1] = 1;
17721 break;
17722
17723 /* signed args, unsigned return. */
17724 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17725 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17726 case MISC_BUILTIN_UNPACK_TD:
17727 case MISC_BUILTIN_UNPACK_V1TI:
17728 h.uns_p[0] = 1;
17729 break;
17730
17731 /* unsigned arguments, bool return (compares). */
17732 case ALTIVEC_BUILTIN_VCMPEQUB:
17733 case ALTIVEC_BUILTIN_VCMPEQUH:
17734 case ALTIVEC_BUILTIN_VCMPEQUW:
17735 case P8V_BUILTIN_VCMPEQUD:
17736 case VSX_BUILTIN_CMPGE_U16QI:
17737 case VSX_BUILTIN_CMPGE_U8HI:
17738 case VSX_BUILTIN_CMPGE_U4SI:
17739 case VSX_BUILTIN_CMPGE_U2DI:
17740 case ALTIVEC_BUILTIN_VCMPGTUB:
17741 case ALTIVEC_BUILTIN_VCMPGTUH:
17742 case ALTIVEC_BUILTIN_VCMPGTUW:
17743 case P8V_BUILTIN_VCMPGTUD:
17744 h.uns_p[1] = 1;
17745 h.uns_p[2] = 1;
17746 break;
17747
17748 /* unsigned arguments for 128-bit pack instructions. */
17749 case MISC_BUILTIN_PACK_TD:
17750 case MISC_BUILTIN_PACK_V1TI:
17751 h.uns_p[1] = 1;
17752 h.uns_p[2] = 1;
17753 break;
17754
17755 /* unsigned second arguments (vector shift right). */
17756 case ALTIVEC_BUILTIN_VSRB:
17757 case ALTIVEC_BUILTIN_VSRH:
17758 case ALTIVEC_BUILTIN_VSRW:
17759 case P8V_BUILTIN_VSRD:
17760 h.uns_p[2] = 1;
17761 break;
17762
17763 default:
17764 break;
17765 }
17766
17767 /* Figure out how many args are present. */
17768 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17769 num_args--;
17770
17771 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17772 if (!ret_type && h.uns_p[0])
17773 ret_type = builtin_mode_to_type[h.mode[0]][0];
17774
17775 if (!ret_type)
17776 fatal_error (input_location,
17777 "internal error: builtin function %qs had an unexpected "
17778 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17779
17780 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17781 arg_type[i] = NULL_TREE;
17782
17783 for (i = 0; i < num_args; i++)
17784 {
17785 int m = (int) h.mode[i+1];
17786 int uns_p = h.uns_p[i+1];
17787
17788 arg_type[i] = builtin_mode_to_type[m][uns_p];
17789 if (!arg_type[i] && uns_p)
17790 arg_type[i] = builtin_mode_to_type[m][0];
17791
17792 if (!arg_type[i])
17793 fatal_error (input_location,
17794 "internal error: builtin function %qs, argument %d "
17795 "had unexpected argument type %qs", name, i,
17796 GET_MODE_NAME (m));
17797 }
17798
17799 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17800 if (*found == NULL)
17801 {
17802 h2 = ggc_alloc<builtin_hash_struct> ();
17803 *h2 = h;
17804 *found = h2;
17805
17806 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17807 arg_type[2], NULL_TREE);
17808 }
17809
17810 return (*found)->type;
17811 }
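
/* Example of the mapping performed above (a sketch): a call such as

     builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
                            ALTIVEC_BUILTIN_VMAXUW, "__builtin_altivec_vmaxuw")

   first marks all three slots unsigned (VMAXUW is in the unsigned
   two-argument list above), resolves each mode through
   builtin_mode_to_type, and caches the resulting
   unsigned V4SI f (unsigned V4SI, unsigned V4SI) type in
   builtin_hash_table so that later builtins of the same shape reuse it.  */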
17812
17813 static void
17814 rs6000_common_init_builtins (void)
17815 {
17816 const struct builtin_description *d;
17817 size_t i;
17818
17819 tree opaque_ftype_opaque = NULL_TREE;
17820 tree opaque_ftype_opaque_opaque = NULL_TREE;
17821 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17822 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17823
17824 /* Create Altivec and VSX builtins on machines with at least the
17825 general purpose extensions (970 and newer) to allow the use of
17826 the target attribute. */
17827
17828 if (TARGET_EXTRA_BUILTINS)
17829 builtin_mask |= RS6000_BTM_COMMON;
17830
17831 /* Add the ternary operators. */
17832 d = bdesc_3arg;
17833 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17834 {
17835 tree type;
17836 HOST_WIDE_INT mask = d->mask;
17837
17838 if ((mask & builtin_mask) != mask)
17839 {
17840 if (TARGET_DEBUG_BUILTIN)
17841 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17842 continue;
17843 }
17844
17845 if (rs6000_overloaded_builtin_p (d->code))
17846 {
17847 if (! (type = opaque_ftype_opaque_opaque_opaque))
17848 type = opaque_ftype_opaque_opaque_opaque
17849 = build_function_type_list (opaque_V4SI_type_node,
17850 opaque_V4SI_type_node,
17851 opaque_V4SI_type_node,
17852 opaque_V4SI_type_node,
17853 NULL_TREE);
17854 }
17855 else
17856 {
17857 enum insn_code icode = d->icode;
17858 if (d->name == 0)
17859 {
17860 if (TARGET_DEBUG_BUILTIN)
17861 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17862 (long unsigned)i);
17863
17864 continue;
17865 }
17866
17867 if (icode == CODE_FOR_nothing)
17868 {
17869 if (TARGET_DEBUG_BUILTIN)
17870 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17871 d->name);
17872
17873 continue;
17874 }
17875
17876 type = builtin_function_type (insn_data[icode].operand[0].mode,
17877 insn_data[icode].operand[1].mode,
17878 insn_data[icode].operand[2].mode,
17879 insn_data[icode].operand[3].mode,
17880 d->code, d->name);
17881 }
17882
17883 def_builtin (d->name, type, d->code);
17884 }
17885
17886 /* Add the binary operators. */
17887 d = bdesc_2arg;
17888 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17889 {
17890 machine_mode mode0, mode1, mode2;
17891 tree type;
17892 HOST_WIDE_INT mask = d->mask;
17893
17894 if ((mask & builtin_mask) != mask)
17895 {
17896 if (TARGET_DEBUG_BUILTIN)
17897 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17898 continue;
17899 }
17900
17901 if (rs6000_overloaded_builtin_p (d->code))
17902 {
17903 if (! (type = opaque_ftype_opaque_opaque))
17904 type = opaque_ftype_opaque_opaque
17905 = build_function_type_list (opaque_V4SI_type_node,
17906 opaque_V4SI_type_node,
17907 opaque_V4SI_type_node,
17908 NULL_TREE);
17909 }
17910 else
17911 {
17912 enum insn_code icode = d->icode;
17913 if (d->name == 0)
17914 {
17915 if (TARGET_DEBUG_BUILTIN)
17916 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
17917 (long unsigned)i);
17918
17919 continue;
17920 }
17921
17922 if (icode == CODE_FOR_nothing)
17923 {
17924 if (TARGET_DEBUG_BUILTIN)
17925 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17926 d->name);
17927
17928 continue;
17929 }
17930
17931 mode0 = insn_data[icode].operand[0].mode;
17932 mode1 = insn_data[icode].operand[1].mode;
17933 mode2 = insn_data[icode].operand[2].mode;
17934
17935 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17936 d->code, d->name);
17937 }
17938
17939 def_builtin (d->name, type, d->code);
17940 }
17941
17942 /* Add the simple unary operators. */
17943 d = bdesc_1arg;
17944 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17945 {
17946 machine_mode mode0, mode1;
17947 tree type;
17948 HOST_WIDE_INT mask = d->mask;
17949
17950 if ((mask & builtin_mask) != mask)
17951 {
17952 if (TARGET_DEBUG_BUILTIN)
17953 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17954 continue;
17955 }
17956
17957 if (rs6000_overloaded_builtin_p (d->code))
17958 {
17959 if (! (type = opaque_ftype_opaque))
17960 type = opaque_ftype_opaque
17961 = build_function_type_list (opaque_V4SI_type_node,
17962 opaque_V4SI_type_node,
17963 NULL_TREE);
17964 }
17965 else
17966 {
17967 enum insn_code icode = d->icode;
17968 if (d->name == 0)
17969 {
17970 if (TARGET_DEBUG_BUILTIN)
17971 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
17972 (long unsigned)i);
17973
17974 continue;
17975 }
17976
17977 if (icode == CODE_FOR_nothing)
17978 {
17979 if (TARGET_DEBUG_BUILTIN)
17980 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17981 d->name);
17982
17983 continue;
17984 }
17985
17986 mode0 = insn_data[icode].operand[0].mode;
17987 mode1 = insn_data[icode].operand[1].mode;
17988
17989 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17990 d->code, d->name);
17991 }
17992
17993 def_builtin (d->name, type, d->code);
17994 }
17995
17996 /* Add the simple no-argument operators. */
17997 d = bdesc_0arg;
17998 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17999 {
18000 machine_mode mode0;
18001 tree type;
18002 HOST_WIDE_INT mask = d->mask;
18003
18004 if ((mask & builtin_mask) != mask)
18005 {
18006 if (TARGET_DEBUG_BUILTIN)
18007 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18008 continue;
18009 }
18010 if (rs6000_overloaded_builtin_p (d->code))
18011 {
18012 if (!opaque_ftype_opaque)
18013 opaque_ftype_opaque
18014 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18015 type = opaque_ftype_opaque;
18016 }
18017 else
18018 {
18019 enum insn_code icode = d->icode;
18020 if (d->name == 0)
18021 {
18022 if (TARGET_DEBUG_BUILTIN)
18023 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18024 (long unsigned) i);
18025 continue;
18026 }
18027 if (icode == CODE_FOR_nothing)
18028 {
18029 if (TARGET_DEBUG_BUILTIN)
18030 fprintf (stderr,
18031 "rs6000_builtin, skip no-argument %s (no code)\n",
18032 d->name);
18033 continue;
18034 }
18035 mode0 = insn_data[icode].operand[0].mode;
18036 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18037 d->code, d->name);
18038 }
18039 def_builtin (d->name, type, d->code);
18040 }
18041 }
18042
18043 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18044 static void
18045 init_float128_ibm (machine_mode mode)
18046 {
18047 if (!TARGET_XL_COMPAT)
18048 {
18049 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18050 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18051 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18052 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18053
18054 if (!TARGET_HARD_FLOAT)
18055 {
18056 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18057 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18058 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18059 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18060 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18061 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18062 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18063 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18064
18065 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18066 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18067 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18068 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18069 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18070 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18071 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18072 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18073 }
18074 }
18075 else
18076 {
18077 set_optab_libfunc (add_optab, mode, "_xlqadd");
18078 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18079 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18080 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18081 }
18082
18083 /* Add various conversions for IFmode to use the traditional TFmode
18084 names. */
18085 if (mode == IFmode)
18086 {
18087 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
18088 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
18089 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
18090 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
18091 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
18092 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
18093
18094 if (TARGET_POWERPC64)
18095 {
18096 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18097 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18098 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18099 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18100 }
18101 }
18102 }
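
/* Illustration of the IBM double-double naming above (a sketch,
   assuming no direct insn is available so the optab falls back to the
   libfunc): an IFmode addition  a + b  becomes a call to __gcc_qadd,
   or to _xlqadd under -mxl-compat.  */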
18103
18104 /* Create a decl for either complex long double multiply or complex long
18105 double divide when long double is IEEE 128-bit floating point. We can't use
18106 __multc3 and __divtc3 because those names are taken by the original long
18107 double format using IBM extended double. The complex multiply/divide
18108 functions are encoded as builtins with a complex result and 4 scalar inputs. */
18109
18110 static void
18111 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
18112 {
18113 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
18114 name, NULL_TREE);
18115
18116 set_builtin_decl (fncode, fndecl, true);
18117
18118 if (TARGET_DEBUG_BUILTIN)
18119 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
18120
18121 return;
18122 }
18123
18124 /* Set up IEEE 128-bit floating point routines. Use different names if the
18125 arguments can be passed in a vector register. The historical PowerPC
18126 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18127 continue to use that if we aren't using vector registers to pass IEEE
18128 128-bit floating point. */
18129
18130 static void
18131 init_float128_ieee (machine_mode mode)
18132 {
18133 if (FLOAT128_VECTOR_P (mode))
18134 {
18135 static bool complex_muldiv_init_p = false;
18136
18137 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
18138 we have clone or target attributes, this will be called a second
18139 time. We want to create the built-in function only once. */
18140 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
18141 {
18142 complex_muldiv_init_p = true;
18143 built_in_function fncode_mul =
18144 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
18145 - MIN_MODE_COMPLEX_FLOAT);
18146 built_in_function fncode_div =
18147 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
18148 - MIN_MODE_COMPLEX_FLOAT);
18149
18150 tree fntype = build_function_type_list (complex_long_double_type_node,
18151 long_double_type_node,
18152 long_double_type_node,
18153 long_double_type_node,
18154 long_double_type_node,
18155 NULL_TREE);
18156
18157 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
18158 create_complex_muldiv ("__divkc3", fncode_div, fntype);
18159 }
18160
18161 set_optab_libfunc (add_optab, mode, "__addkf3");
18162 set_optab_libfunc (sub_optab, mode, "__subkf3");
18163 set_optab_libfunc (neg_optab, mode, "__negkf2");
18164 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18165 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18166 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18167 set_optab_libfunc (abs_optab, mode, "__abskf2");
18168 set_optab_libfunc (powi_optab, mode, "__powikf2");
18169
18170 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18171 set_optab_libfunc (ne_optab, mode, "__nekf2");
18172 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18173 set_optab_libfunc (ge_optab, mode, "__gekf2");
18174 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18175 set_optab_libfunc (le_optab, mode, "__lekf2");
18176 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18177
18178 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18179 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18180 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18181 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18182
18183 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
18184 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18185 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
18186
18187 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
18188 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18189 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
18190
18191 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
18192 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
18193 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
18194 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
18195 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
18196 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
18197
18198 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18199 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18200 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18201 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18202
18203 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18204 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18205 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18206 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18207
18208 if (TARGET_POWERPC64)
18209 {
18210 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18211 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18212 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18213 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18214 }
18215 }
18216
18217 else
18218 {
18219 set_optab_libfunc (add_optab, mode, "_q_add");
18220 set_optab_libfunc (sub_optab, mode, "_q_sub");
18221 set_optab_libfunc (neg_optab, mode, "_q_neg");
18222 set_optab_libfunc (smul_optab, mode, "_q_mul");
18223 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18224 if (TARGET_PPC_GPOPT)
18225 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18226
18227 set_optab_libfunc (eq_optab, mode, "_q_feq");
18228 set_optab_libfunc (ne_optab, mode, "_q_fne");
18229 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18230 set_optab_libfunc (ge_optab, mode, "_q_fge");
18231 set_optab_libfunc (lt_optab, mode, "_q_flt");
18232 set_optab_libfunc (le_optab, mode, "_q_fle");
18233
18234 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18235 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18236 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18237 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18238 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18239 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18240 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18241 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18242 }
18243 }
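
/* Illustration of the KFmode names above (a sketch, assuming the target
   lacks direct IEEE 128-bit arithmetic insns so the optab falls back to
   the libfunc):

     __float128 f (__float128 a, __float128 b) { return a + b; }

   compiles to a call to __addkf3; under -mabi=ieeelongdouble the same
   entry points serve TFmode long double as well.  */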
18244
18245 static void
18246 rs6000_init_libfuncs (void)
18247 {
18248 /* __float128 support. */
18249 if (TARGET_FLOAT128_TYPE)
18250 {
18251 init_float128_ibm (IFmode);
18252 init_float128_ieee (KFmode);
18253 }
18254
18255 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18256 if (TARGET_LONG_DOUBLE_128)
18257 {
18258 if (!TARGET_IEEEQUAD)
18259 init_float128_ibm (TFmode);
18260
18261 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18262 else
18263 init_float128_ieee (TFmode);
18264 }
18265 }
18266
18267 /* Emit a potentially record-form instruction, setting DST from SRC.
18268 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18269 signed comparison of DST with zero. If DOT is 1, the generated RTL
18270 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18271 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18272 a separate COMPARE. */
18273
18274 void
18275 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18276 {
18277 if (dot == 0)
18278 {
18279 emit_move_insn (dst, src);
18280 return;
18281 }
18282
18283 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18284 {
18285 emit_move_insn (dst, src);
18286 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18287 return;
18288 }
18289
18290 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18291 if (dot == 1)
18292 {
18293 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18294 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18295 }
18296 else
18297 {
18298 rtx set = gen_rtx_SET (dst, src);
18299 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18300 }
18301 }
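
/* For reference, the shapes emitted above, in RTL sketch form:

     dot == 1:  (parallel [(set CCREG (compare:CC SRC 0))
                           (clobber DST)])
     dot == 2:  (parallel [(set CCREG (compare:CC SRC 0))
                           (set DST SRC)])

   versus the plain move plus separate compare that is used whenever
   CCREG is not CR0.  */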
18302
18303 \f
18304 /* A validation routine: say whether CODE, a condition code, and MODE
18305 match. The other alternatives either don't make sense or should
18306 never be generated. */
18307
18308 void
18309 validate_condition_mode (enum rtx_code code, machine_mode mode)
18310 {
18311 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18312 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18313 && GET_MODE_CLASS (mode) == MODE_CC);
18314
18315 /* These don't make sense. */
18316 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18317 || mode != CCUNSmode);
18318
18319 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18320 || mode == CCUNSmode);
18321
18322 gcc_assert (mode == CCFPmode
18323 || (code != ORDERED && code != UNORDERED
18324 && code != UNEQ && code != LTGT
18325 && code != UNGT && code != UNLT
18326 && code != UNGE && code != UNLE));
18327
18328 /* These should never be generated except for
18329 flag_finite_math_only. */
18330 gcc_assert (mode != CCFPmode
18331 || flag_finite_math_only
18332 || (code != LE && code != GE
18333 && code != UNEQ && code != LTGT
18334 && code != UNGT && code != UNLT));
18335
18336 /* These are invalid; the information is not there. */
18337 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18338 }
18339
18340 \f
18341 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18342 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18343 not zero, store there the bit offset (counted from the right) where
18344 the single stretch of 1 bits begins; and similarly for B, the bit
18345 offset where it ends. */
18346
18347 bool
18348 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18349 {
18350 unsigned HOST_WIDE_INT val = INTVAL (mask);
18351 unsigned HOST_WIDE_INT bit;
18352 int nb, ne;
18353 int n = GET_MODE_PRECISION (mode);
18354
18355 if (mode != DImode && mode != SImode)
18356 return false;
18357
18358 if (INTVAL (mask) >= 0)
18359 {
18360 bit = val & -val;
18361 ne = exact_log2 (bit);
18362 nb = exact_log2 (val + bit);
18363 }
18364 else if (val + 1 == 0)
18365 {
18366 nb = n;
18367 ne = 0;
18368 }
18369 else if (val & 1)
18370 {
18371 val = ~val;
18372 bit = val & -val;
18373 nb = exact_log2 (bit);
18374 ne = exact_log2 (val + bit);
18375 }
18376 else
18377 {
18378 bit = val & -val;
18379 ne = exact_log2 (bit);
18380 if (val + bit == 0)
18381 nb = n;
18382 else
18383 nb = 0;
18384 }
18385
18386 nb--;
18387
18388 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18389 return false;
18390
18391 if (b)
18392 *b = nb;
18393 if (e)
18394 *e = ne;
18395
18396 return true;
18397 }
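
/* For example, in SImode the mask 0x00ff0000 is a single stretch of ones
   running from bit 16 through bit 23 (counted from the right), so the
   function above returns true with *E = 16 and *B = 23.  A wrap-around
   SImode mask such as 0xff0000ff is also accepted: the stretch begins at
   bit 24 and wraps around to end at bit 7, giving *E = 24 and *B = 7.
   Something like 0x00ff00ff has two separate stretches and is rejected.  */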
18398
18399 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18400 or rldicr instruction, to implement an AND with it in mode MODE. */
18401
18402 bool
18403 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18404 {
18405 int nb, ne;
18406
18407 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18408 return false;
18409
18410 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18411 does not wrap. */
18412 if (mode == DImode)
18413 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18414
18415 /* For SImode, rlwinm can do everything. */
18416 if (mode == SImode)
18417 return (nb < 32 && ne < 32);
18418
18419 return false;
18420 }
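
/* For example, in DImode the mask 0xff (E = 0) can be done with rldicl and
   0xffffffff00000000 (B = 63) with rldicr, while 0x0000ffff0000 sits in the
   low 32 bits without wrapping and so can use rlwinm.  A wrapping DImode
   mask such as 0xff000000000000ff satisfies none of the conditions above
   and needs more than one insn.  */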
18421
18422 /* Return the instruction template for an AND with mask in mode MODE, with
18423 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18424
18425 const char *
18426 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18427 {
18428 int nb, ne;
18429
18430 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18431 gcc_unreachable ();
18432
18433 if (mode == DImode && ne == 0)
18434 {
18435 operands[3] = GEN_INT (63 - nb);
18436 if (dot)
18437 return "rldicl. %0,%1,0,%3";
18438 return "rldicl %0,%1,0,%3";
18439 }
18440
18441 if (mode == DImode && nb == 63)
18442 {
18443 operands[3] = GEN_INT (63 - ne);
18444 if (dot)
18445 return "rldicr. %0,%1,0,%3";
18446 return "rldicr %0,%1,0,%3";
18447 }
18448
18449 if (nb < 32 && ne < 32)
18450 {
18451 operands[3] = GEN_INT (31 - nb);
18452 operands[4] = GEN_INT (31 - ne);
18453 if (dot)
18454 return "rlwinm. %0,%1,0,%3,%4";
18455 return "rlwinm %0,%1,0,%3,%4";
18456 }
18457
18458 gcc_unreachable ();
18459 }
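
/* For example, an AND with 0xff in DImode has E = 0 and B = 7, so
   operands[3] becomes 63 - 7 = 56 and the template used is
   "rldicl %0,%1,0,56", i.e. a clrldi that keeps only the low 8 bits.  */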
18460
18461 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18462 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18463 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18464
18465 bool
18466 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18467 {
18468 int nb, ne;
18469
18470 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18471 return false;
18472
18473 int n = GET_MODE_PRECISION (mode);
18474 int sh = -1;
18475
18476 if (CONST_INT_P (XEXP (shift, 1)))
18477 {
18478 sh = INTVAL (XEXP (shift, 1));
18479 if (sh < 0 || sh >= n)
18480 return false;
18481 }
18482
18483 rtx_code code = GET_CODE (shift);
18484
18485 /* Convert any shift by 0 to a rotate, to simplify below code. */
18486 if (sh == 0)
18487 code = ROTATE;
18488
18489 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18490 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18491 code = ASHIFT;
18492 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18493 {
18494 code = LSHIFTRT;
18495 sh = n - sh;
18496 }
18497
18498 /* DImode rotates need rld*. */
18499 if (mode == DImode && code == ROTATE)
18500 return (nb == 63 || ne == 0 || ne == sh);
18501
18502 /* SImode rotates need rlw*. */
18503 if (mode == SImode && code == ROTATE)
18504 return (nb < 32 && ne < 32 && sh < 32);
18505
18506 /* Wrap-around masks are only okay for rotates. */
18507 if (ne > nb)
18508 return false;
18509
18510 /* Variable shifts are only okay for rotates. */
18511 if (sh < 0)
18512 return false;
18513
18514 /* Don't allow ASHIFT if the mask is wrong for that. */
18515 if (code == ASHIFT && ne < sh)
18516 return false;
18517
18518 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18519 if the mask is wrong for that. */
18520 if (nb < 32 && ne < 32 && sh < 32
18521 && !(code == LSHIFTRT && nb >= 32 - sh))
18522 return true;
18523
18524 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18525 if the mask is wrong for that. */
18526 if (code == LSHIFTRT)
18527 sh = 64 - sh;
18528 if (nb == 63 || ne == 0 || ne == sh)
18529 return !(code == LSHIFTRT && nb >= sh);
18530
18531 return false;
18532 }
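
/* For example, in SImode an ASHIFT by 8 under the mask 0xffffff00 (B = 31,
   E = 8) is accepted: it is exactly "slwi %0,%1,8", i.e. rlwinm with shift
   count 8 and mask bits 8..31.  An ASHIFT by 16 under the same mask is
   rejected (E < SH), since a rotate by 16 would leave bits of the source
   inside the masked range that a true shift must clear.  */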
18533
18534 /* Return the instruction template for a shift with mask in mode MODE, with
18535 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18536
18537 const char *
18538 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18539 {
18540 int nb, ne;
18541
18542 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18543 gcc_unreachable ();
18544
18545 if (mode == DImode && ne == 0)
18546 {
18547 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18548 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18549 operands[3] = GEN_INT (63 - nb);
18550 if (dot)
18551 return "rld%I2cl. %0,%1,%2,%3";
18552 return "rld%I2cl %0,%1,%2,%3";
18553 }
18554
18555 if (mode == DImode && nb == 63)
18556 {
18557 operands[3] = GEN_INT (63 - ne);
18558 if (dot)
18559 return "rld%I2cr. %0,%1,%2,%3";
18560 return "rld%I2cr %0,%1,%2,%3";
18561 }
18562
18563 if (mode == DImode
18564 && GET_CODE (operands[4]) != LSHIFTRT
18565 && CONST_INT_P (operands[2])
18566 && ne == INTVAL (operands[2]))
18567 {
18568 operands[3] = GEN_INT (63 - nb);
18569 if (dot)
18570 return "rld%I2c. %0,%1,%2,%3";
18571 return "rld%I2c %0,%1,%2,%3";
18572 }
18573
18574 if (nb < 32 && ne < 32)
18575 {
18576 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18577 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18578 operands[3] = GEN_INT (31 - nb);
18579 operands[4] = GEN_INT (31 - ne);
18580 /* This insn can also be a 64-bit rotate with mask that really makes
18581 it just a shift right (with mask); the %h below are to adjust for
18582 that situation (shift count is >= 32 in that case). */
18583 if (dot)
18584 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18585 return "rlw%I2nm %0,%1,%h2,%3,%4";
18586 }
18587
18588 gcc_unreachable ();
18589 }
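
/* For example, a DImode LSHIFTRT by 8 under the mask 0x00ffffffffffffff
   (B = 55, E = 0) takes the first branch above: the shift count is
   rewritten to 64 - 8 = 56 and operands[3] to 63 - 55 = 8, giving
   "rldicl %0,%1,56,8", the standard encoding of "srdi %0,%1,8".  */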
18590
18591 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18592 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18593 ASHIFT, or LSHIFTRT) in mode MODE. */
18594
18595 bool
18596 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18597 {
18598 int nb, ne;
18599
18600 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18601 return false;
18602
18603 int n = GET_MODE_PRECISION (mode);
18604
18605 int sh = INTVAL (XEXP (shift, 1));
18606 if (sh < 0 || sh >= n)
18607 return false;
18608
18609 rtx_code code = GET_CODE (shift);
18610
18611 /* Convert any shift by 0 to a rotate, to simplify below code. */
18612 if (sh == 0)
18613 code = ROTATE;
18614
18615 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18616 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18617 code = ASHIFT;
18618 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18619 {
18620 code = LSHIFTRT;
18621 sh = n - sh;
18622 }
18623
18624 /* DImode rotates need rldimi. */
18625 if (mode == DImode && code == ROTATE)
18626 return (ne == sh);
18627
18628 /* SImode rotates need rlwimi. */
18629 if (mode == SImode && code == ROTATE)
18630 return (nb < 32 && ne < 32 && sh < 32);
18631
18632 /* Wrap-around masks are only okay for rotates. */
18633 if (ne > nb)
18634 return false;
18635
18636 /* Don't allow ASHIFT if the mask is wrong for that. */
18637 if (code == ASHIFT && ne < sh)
18638 return false;
18639
18640 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18641 if the mask is wrong for that. */
18642 if (nb < 32 && ne < 32 && sh < 32
18643 && !(code == LSHIFTRT && nb >= 32 - sh))
18644 return true;
18645
18646 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18647 if the mask is wrong for that. */
18648 if (code == LSHIFTRT)
18649 sh = 64 - sh;
18650 if (ne == sh)
18651 return !(code == LSHIFTRT && nb >= sh);
18652
18653 return false;
18654 }
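
/* For example, a DImode ROTATE by 32 under the mask 0xffffffff00000000
   (B = 63, E = 32) is accepted because E equals the shift count, so the
   rotated-in low half of the source lands exactly under the mask and
   rldimi can perform the insert.  */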
18655
18656 /* Return the instruction template for an insert with mask in mode MODE, with
18657 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18658
18659 const char *
18660 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18661 {
18662 int nb, ne;
18663
18664 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18665 gcc_unreachable ();
18666
18667 /* Prefer rldimi because rlwimi is cracked. */
18668 if (TARGET_POWERPC64
18669 && (!dot || mode == DImode)
18670 && GET_CODE (operands[4]) != LSHIFTRT
18671 && ne == INTVAL (operands[2]))
18672 {
18673 operands[3] = GEN_INT (63 - nb);
18674 if (dot)
18675 return "rldimi. %0,%1,%2,%3";
18676 return "rldimi %0,%1,%2,%3";
18677 }
18678
18679 if (nb < 32 && ne < 32)
18680 {
18681 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18682 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18683 operands[3] = GEN_INT (31 - nb);
18684 operands[4] = GEN_INT (31 - ne);
18685 if (dot)
18686 return "rlwimi. %0,%1,%2,%3,%4";
18687 return "rlwimi %0,%1,%2,%3,%4";
18688 }
18689
18690 gcc_unreachable ();
18691 }
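
/* Continuing the rotate-by-32 example above: the template comes out as
   "rldimi %0,%1,32,0", which deposits the low 32 bits of %1 into the high
   half of %0 while leaving the low half of %0 intact.  */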
18692
18693 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18694 using two machine instructions. */
18695
18696 bool
18697 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18698 {
18699 /* There are two kinds of AND we can handle with two insns:
18700 1) those we can do with two rl* insn;
18701 2) ori[s];xori[s].
18702
18703 We do not handle that last case yet. */
18704
18705 /* If there is just one stretch of ones, we can do it. */
18706 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18707 return true;
18708
18709 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18710 one insn, we can do the whole thing with two. */
18711 unsigned HOST_WIDE_INT val = INTVAL (c);
18712 unsigned HOST_WIDE_INT bit1 = val & -val;
18713 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18714 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18715 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18716 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18717 }
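
/* For example, DImode C = 0x00ff00ff is not a single stretch of ones, so
   the hole-filling arithmetic above applies: bit1 = 0x1 (lowest set bit),
   bit2 = 0x100 (bottom of the lowest hole), bit3 = 0x10000 (bottom of the
   stretch above that hole), and val + bit3 - bit2 = 0x00ffffff, which is
   the value with the hole filled in.  That is a valid single AND mask, so
   the whole AND can be done with two rl* insns.  */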
18718
18719 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18720 If EXPAND is true, split rotate-and-mask instructions we generate to
18721 their constituent parts as well (this is used during expand); if DOT
18722 is 1, make the last insn a record-form instruction clobbering the
18723 destination GPR and setting the CC reg (from operands[3]); if 2, set
18724 that GPR as well as the CC reg. */
18725
18726 void
18727 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18728 {
18729 gcc_assert (!(expand && dot));
18730
18731 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18732
18733 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18734 shift right. This generates better code than doing the masks without
18735 shifts, or shifting first right and then left. */
18736 int nb, ne;
18737 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18738 {
18739 gcc_assert (mode == DImode);
18740
18741 int shift = 63 - nb;
18742 if (expand)
18743 {
18744 rtx tmp1 = gen_reg_rtx (DImode);
18745 rtx tmp2 = gen_reg_rtx (DImode);
18746 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18747 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18748 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18749 }
18750 else
18751 {
18752 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18753 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18754 emit_move_insn (operands[0], tmp);
18755 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18756 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18757 }
18758 return;
18759 }
18760
18761 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18762 that does the rest. */
18763 unsigned HOST_WIDE_INT bit1 = val & -val;
18764 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18765 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18766 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18767
18768 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18769 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18770
18771 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18772
18773 /* Two "no-rotate"-and-mask instructions, for SImode. */
18774 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18775 {
18776 gcc_assert (mode == SImode);
18777
18778 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18779 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18780 emit_move_insn (reg, tmp);
18781 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18782 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18783 return;
18784 }
18785
18786 gcc_assert (mode == DImode);
18787
18788 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18789 insns; we have to do the first in SImode, because it wraps. */
18790 if (mask2 <= 0xffffffff
18791 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18792 {
18793 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18794 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18795 GEN_INT (mask1));
18796 rtx reg_low = gen_lowpart (SImode, reg);
18797 emit_move_insn (reg_low, tmp);
18798 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18799 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18800 return;
18801 }
18802
18803 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18804 at the top end), rotate back and clear the other hole. */
18805 int right = exact_log2 (bit3);
18806 int left = 64 - right;
18807
18808 /* Rotate the mask too. */
18809 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18810
18811 if (expand)
18812 {
18813 rtx tmp1 = gen_reg_rtx (DImode);
18814 rtx tmp2 = gen_reg_rtx (DImode);
18815 rtx tmp3 = gen_reg_rtx (DImode);
18816 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18817 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18818 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18819 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18820 }
18821 else
18822 {
18823 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18824 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18825 emit_move_insn (operands[0], tmp);
18826 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18827 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18828 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18829 }
18830 }
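
/* Schematically, the single-stretch case above for DImode val =
   0x0000ffff00000000 (B = 47, E = 32), which no single rl* insn can
   implement, uses shift = 63 - 47 = 16 and boils down to two machine
   insns,

     rldicr tmp,src,16,15   # (src << 16) & (val << 16)
     srdi   dst,tmp,16      # shift the stretch back down

   with the register names here purely illustrative.  */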
18831 \f
18832 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18833 for lfq and stfq insns, iff the registers are hard registers. */
18834
18835 int
18836 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18837 {
18838 /* We might have been passed a SUBREG. */
18839 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
18840 return 0;
18841
18842 /* We might have been passed non-floating-point registers. */
18843 if (!FP_REGNO_P (REGNO (reg1))
18844 || !FP_REGNO_P (REGNO (reg2)))
18845 return 0;
18846
18847 return (REGNO (reg1) == REGNO (reg2) - 1);
18848 }
18849
18850 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18851 addr1 and addr2 must be in consecutive memory locations
18852 (addr2 == addr1 + 8). */
18853
18854 int
18855 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18856 {
18857 rtx addr1, addr2;
18858 unsigned int reg1, reg2;
18859 int offset1, offset2;
18860
18861 /* The mems cannot be volatile. */
18862 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18863 return 0;
18864
18865 addr1 = XEXP (mem1, 0);
18866 addr2 = XEXP (mem2, 0);
18867
18868 /* Extract an offset (if used) from the first addr. */
18869 if (GET_CODE (addr1) == PLUS)
18870 {
18871 /* If not a REG, return zero. */
18872 if (GET_CODE (XEXP (addr1, 0)) != REG)
18873 return 0;
18874 else
18875 {
18876 reg1 = REGNO (XEXP (addr1, 0));
18877 /* The offset must be constant! */
18878 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
18879 return 0;
18880 offset1 = INTVAL (XEXP (addr1, 1));
18881 }
18882 }
18883 else if (GET_CODE (addr1) != REG)
18884 return 0;
18885 else
18886 {
18887 reg1 = REGNO (addr1);
18888 /* This was a simple (mem (reg)) expression. Offset is 0. */
18889 offset1 = 0;
18890 }
18891
18892 /* And now for the second addr. */
18893 if (GET_CODE (addr2) == PLUS)
18894 {
18895 /* If not a REG, return zero. */
18896 if (GET_CODE (XEXP (addr2, 0)) != REG)
18897 return 0;
18898 else
18899 {
18900 reg2 = REGNO (XEXP (addr2, 0));
18901 /* The offset must be constant. */
18902 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
18903 return 0;
18904 offset2 = INTVAL (XEXP (addr2, 1));
18905 }
18906 }
18907 else if (GET_CODE (addr2) != REG)
18908 return 0;
18909 else
18910 {
18911 reg2 = REGNO (addr2);
18912 /* This was a simple (mem (reg)) expression. Offset is 0. */
18913 offset2 = 0;
18914 }
18915
18916 /* Both of these must have the same base register. */
18917 if (reg1 != reg2)
18918 return 0;
18919
18920 /* The offset for the second addr must be 8 more than the first addr. */
18921 if (offset2 != offset1 + 8)
18922 return 0;
18923
18924 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18925 instructions. */
18926 return 1;
18927 }
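
/* For example, a pair of loads from 16(r3) and 24(r3) into f10 and f11
   passes the two checks above (consecutive FP registers, same base
   register, offsets differing by 8), so the quad peepholes can replace the
   pair with a single load along the lines of "lfq f10,16(r3)".  */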
18928 \f
18929 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18930 need to use DDmode, in all other cases we can use the same mode. */
18931 static machine_mode
18932 rs6000_secondary_memory_needed_mode (machine_mode mode)
18933 {
18934 if (lra_in_progress && mode == SDmode)
18935 return DDmode;
18936 return mode;
18937 }
18938
18939 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18940 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18941 only work on the traditional altivec registers, note if an altivec register
18942 was chosen. */
18943
18944 static enum rs6000_reg_type
18945 register_to_reg_type (rtx reg, bool *is_altivec)
18946 {
18947 HOST_WIDE_INT regno;
18948 enum reg_class rclass;
18949
18950 if (GET_CODE (reg) == SUBREG)
18951 reg = SUBREG_REG (reg);
18952
18953 if (!REG_P (reg))
18954 return NO_REG_TYPE;
18955
18956 regno = REGNO (reg);
18957 if (regno >= FIRST_PSEUDO_REGISTER)
18958 {
18959 if (!lra_in_progress && !reload_completed)
18960 return PSEUDO_REG_TYPE;
18961
18962 regno = true_regnum (reg);
18963 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
18964 return PSEUDO_REG_TYPE;
18965 }
18966
18967 gcc_assert (regno >= 0);
18968
18969 if (is_altivec && ALTIVEC_REGNO_P (regno))
18970 *is_altivec = true;
18971
18972 rclass = rs6000_regno_regclass[regno];
18973 return reg_class_to_reg_type[(int)rclass];
18974 }
18975
18976 /* Helper function to return the cost of adding a TOC entry address. */
18977
18978 static inline int
18979 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18980 {
18981 int ret;
18982
18983 if (TARGET_CMODEL != CMODEL_SMALL)
18984 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18985
18986 else
18987 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18988
18989 return ret;
18990 }
18991
18992 /* Helper function for rs6000_secondary_reload to determine whether the memory
18993 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18994 needs reloading. Return negative if the memory is not handled by the memory
18995 helper functions and to try a different reload method, 0 if no additional
18996 instructions are need, and positive to give the extra cost for the
18997 memory. */
18998
18999 static int
19000 rs6000_secondary_reload_memory (rtx addr,
19001 enum reg_class rclass,
19002 machine_mode mode)
19003 {
19004 int extra_cost = 0;
19005 rtx reg, and_arg, plus_arg0, plus_arg1;
19006 addr_mask_type addr_mask;
19007 const char *type = NULL;
19008 const char *fail_msg = NULL;
19009
19010 if (GPR_REG_CLASS_P (rclass))
19011 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19012
19013 else if (rclass == FLOAT_REGS)
19014 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19015
19016 else if (rclass == ALTIVEC_REGS)
19017 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19018
19019 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19020 else if (rclass == VSX_REGS)
19021 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19022 & ~RELOAD_REG_AND_M16);
19023
19024 /* If the register allocator hasn't made up its mind yet on the register
19025 class to use, settle on some defaults. */
19026 else if (rclass == NO_REGS)
19027 {
19028 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19029 & ~RELOAD_REG_AND_M16);
19030
19031 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19032 addr_mask &= ~(RELOAD_REG_INDEXED
19033 | RELOAD_REG_PRE_INCDEC
19034 | RELOAD_REG_PRE_MODIFY);
19035 }
19036
19037 else
19038 addr_mask = 0;
19039
19040 /* If the register isn't valid in this register class, just return now. */
19041 if ((addr_mask & RELOAD_REG_VALID) == 0)
19042 {
19043 if (TARGET_DEBUG_ADDR)
19044 {
19045 fprintf (stderr,
19046 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19047 "not valid in class\n",
19048 GET_MODE_NAME (mode), reg_class_names[rclass]);
19049 debug_rtx (addr);
19050 }
19051
19052 return -1;
19053 }
19054
19055 switch (GET_CODE (addr))
19056 {
19057 /* Does the register class support auto update forms for this mode? We
19058 don't need a scratch register, since the powerpc only supports
19059 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19060 case PRE_INC:
19061 case PRE_DEC:
19062 reg = XEXP (addr, 0);
19063 if (!base_reg_operand (reg, GET_MODE (reg)))
19064 {
19065 fail_msg = "no base register #1";
19066 extra_cost = -1;
19067 }
19068
19069 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19070 {
19071 extra_cost = 1;
19072 type = "update";
19073 }
19074 break;
19075
19076 case PRE_MODIFY:
19077 reg = XEXP (addr, 0);
19078 plus_arg1 = XEXP (addr, 1);
19079 if (!base_reg_operand (reg, GET_MODE (reg))
19080 || GET_CODE (plus_arg1) != PLUS
19081 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19082 {
19083 fail_msg = "bad PRE_MODIFY";
19084 extra_cost = -1;
19085 }
19086
19087 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19088 {
19089 extra_cost = 1;
19090 type = "update";
19091 }
19092 break;
19093
19094 /* Do we need to simulate AND -16 to clear the bottom address bits used
19095 in VMX load/stores? Only allow the AND for vector sizes. */
19096 case AND:
19097 and_arg = XEXP (addr, 0);
19098 if (GET_MODE_SIZE (mode) != 16
19099 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19100 || INTVAL (XEXP (addr, 1)) != -16)
19101 {
19102 fail_msg = "bad Altivec AND #1";
19103 extra_cost = -1;
19104 }
19105
19106 if (rclass != ALTIVEC_REGS)
19107 {
19108 if (legitimate_indirect_address_p (and_arg, false))
19109 extra_cost = 1;
19110
19111 else if (legitimate_indexed_address_p (and_arg, false))
19112 extra_cost = 2;
19113
19114 else
19115 {
19116 fail_msg = "bad Altivec AND #2";
19117 extra_cost = -1;
19118 }
19119
19120 type = "and";
19121 }
19122 break;
19123
19124 /* If this is an indirect address, make sure it is a base register. */
19125 case REG:
19126 case SUBREG:
19127 if (!legitimate_indirect_address_p (addr, false))
19128 {
19129 extra_cost = 1;
19130 type = "move";
19131 }
19132 break;
19133
19134 /* If this is an indexed address, make sure the register class can handle
19135 indexed addresses for this mode. */
19136 case PLUS:
19137 plus_arg0 = XEXP (addr, 0);
19138 plus_arg1 = XEXP (addr, 1);
19139
19140 /* (plus (plus (reg) (constant)) (constant)) is generated during
19141 push_reload processing, so handle it now. */
19142 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19143 {
19144 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19145 {
19146 extra_cost = 1;
19147 type = "offset";
19148 }
19149 }
19150
19151 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19152 push_reload processing, so handle it now. */
19153 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19154 {
19155 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19156 {
19157 extra_cost = 1;
19158 type = "indexed #2";
19159 }
19160 }
19161
19162 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19163 {
19164 fail_msg = "no base register #2";
19165 extra_cost = -1;
19166 }
19167
19168 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19169 {
19170 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19171 || !legitimate_indexed_address_p (addr, false))
19172 {
19173 extra_cost = 1;
19174 type = "indexed";
19175 }
19176 }
19177
19178 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19179 && CONST_INT_P (plus_arg1))
19180 {
19181 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19182 {
19183 extra_cost = 1;
19184 type = "vector d-form offset";
19185 }
19186 }
19187
19188 /* Make sure the register class can handle offset addresses. */
19189 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19190 {
19191 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19192 {
19193 extra_cost = 1;
19194 type = "offset #2";
19195 }
19196 }
19197
19198 else
19199 {
19200 fail_msg = "bad PLUS";
19201 extra_cost = -1;
19202 }
19203
19204 break;
19205
19206 case LO_SUM:
19207 /* Quad offsets are restricted and can't handle normal addresses. */
19208 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19209 {
19210 extra_cost = -1;
19211 type = "vector d-form lo_sum";
19212 }
19213
19214 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19215 {
19216 fail_msg = "bad LO_SUM";
19217 extra_cost = -1;
19218 }
19219
19220 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19221 {
19222 extra_cost = 1;
19223 type = "lo_sum";
19224 }
19225 break;
19226
19227 /* Static addresses need to create a TOC entry. */
19228 case CONST:
19229 case SYMBOL_REF:
19230 case LABEL_REF:
19231 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19232 {
19233 extra_cost = -1;
19234 type = "vector d-form lo_sum #2";
19235 }
19236
19237 else
19238 {
19239 type = "address";
19240 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19241 }
19242 break;
19243
19244 /* TOC references look like offsettable memory. */
19245 case UNSPEC:
19246 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19247 {
19248 fail_msg = "bad UNSPEC";
19249 extra_cost = -1;
19250 }
19251
19252 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19253 {
19254 extra_cost = -1;
19255 type = "vector d-form lo_sum #3";
19256 }
19257
19258 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19259 {
19260 extra_cost = 1;
19261 type = "toc reference";
19262 }
19263 break;
19264
19265 default:
19266 {
19267 fail_msg = "bad address";
19268 extra_cost = -1;
19269 }
19270 }
19271
19272 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19273 {
19274 if (extra_cost < 0)
19275 fprintf (stderr,
19276 "rs6000_secondary_reload_memory error: mode = %s, "
19277 "class = %s, addr_mask = '%s', %s\n",
19278 GET_MODE_NAME (mode),
19279 reg_class_names[rclass],
19280 rs6000_debug_addr_mask (addr_mask, false),
19281 (fail_msg != NULL) ? fail_msg : "<bad address>");
19282
19283 else
19284 fprintf (stderr,
19285 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19286 "addr_mask = '%s', extra cost = %d, %s\n",
19287 GET_MODE_NAME (mode),
19288 reg_class_names[rclass],
19289 rs6000_debug_addr_mask (addr_mask, false),
19290 extra_cost,
19291 (type) ? type : "<none>");
19292
19293 debug_rtx (addr);
19294 }
19295
19296 return extra_cost;
19297 }
19298
19299 /* Helper function for rs6000_secondary_reload to return true if a move to a
19300 different register class is really a simple move. */
19301
19302 static bool
19303 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19304 enum rs6000_reg_type from_type,
19305 machine_mode mode)
19306 {
19307 int size = GET_MODE_SIZE (mode);
19308
19309 /* Add support for various direct moves available. In this function, we only
19310 look at cases where we don't need any extra registers, and one or more
19311 simple move insns are issued. Originally small integers are not allowed
19312 in FPR/VSX registers. A single-precision binary float is not a simple
19313 move because we need to convert to the single precision memory layout.
19314 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19315 need special direct move handling, which we do not support yet. */
19316 if (TARGET_DIRECT_MOVE
19317 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19318 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19319 {
19320 if (TARGET_POWERPC64)
19321 {
19322 /* ISA 2.07: MTVSRD or MFVSRD. */
19323 if (size == 8)
19324 return true;
19325
19326 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19327 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19328 return true;
19329 }
19330
19331 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19332 if (TARGET_P8_VECTOR)
19333 {
19334 if (mode == SImode)
19335 return true;
19336
19337 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19338 return true;
19339 }
19340
19341 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19342 if (mode == SDmode)
19343 return true;
19344 }
19345
19346 /* Power6+: MFTGPR or MFFGPR. */
19347 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19348 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19349 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19350 return true;
19351
19352 /* Move to/from SPR. */
19353 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19354 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19355 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19356 return true;
19357
19358 return false;
19359 }
19360
19361 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19362 special direct moves that involve allocating an extra register. Return
19363 true if there is a helper pattern, recording its insn code and extra cost
19364 in SRI; return false if not. */
19365
19366 static bool
19367 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19368 enum rs6000_reg_type from_type,
19369 machine_mode mode,
19370 secondary_reload_info *sri,
19371 bool altivec_p)
19372 {
19373 bool ret = false;
19374 enum insn_code icode = CODE_FOR_nothing;
19375 int cost = 0;
19376 int size = GET_MODE_SIZE (mode);
19377
19378 if (TARGET_POWERPC64 && size == 16)
19379 {
19380 /* Handle moving 128-bit values from GPRs to VSX registers on
19381 ISA 2.07 (power8, power9) when running in 64-bit mode using
19382 XXPERMDI to glue the two 64-bit values back together. */
19383 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19384 {
19385 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19386 icode = reg_addr[mode].reload_vsx_gpr;
19387 }
19388
19389 /* Handle moving 128-bit values from VSX registers to GPRs on
19390 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19391 bottom 64-bit value. */
19392 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19393 {
19394 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19395 icode = reg_addr[mode].reload_gpr_vsx;
19396 }
19397 }
19398
19399 else if (TARGET_POWERPC64 && mode == SFmode)
19400 {
19401 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19402 {
19403 cost = 3; /* xscvdpspn, mfvsrd, and. */
19404 icode = reg_addr[mode].reload_gpr_vsx;
19405 }
19406
19407 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19408 {
19409 cost = 2; /* mtvsrwz, xscvspdpn. */
19410 icode = reg_addr[mode].reload_vsx_gpr;
19411 }
19412 }
19413
19414 else if (!TARGET_POWERPC64 && size == 8)
19415 {
19416 /* Handle moving 64-bit values from GPRs to floating point registers on
19417 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19418 32-bit values back together. Altivec register classes must be handled
19419 specially since a different instruction is used, and the secondary
19420 reload support requires a single instruction class in the scratch
19421 register constraint. However, right now TFmode is not allowed in
19422 Altivec registers, so the pattern will never match. */
19423 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19424 {
19425 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19426 icode = reg_addr[mode].reload_fpr_gpr;
19427 }
19428 }
19429
19430 if (icode != CODE_FOR_nothing)
19431 {
19432 ret = true;
19433 if (sri)
19434 {
19435 sri->icode = icode;
19436 sri->extra_cost = cost;
19437 }
19438 }
19439
19440 return ret;
19441 }
19442
19443 /* Return whether a move between two register classes can be done either
19444 directly (simple move) or via a pattern that uses a single extra temporary
19445 (using ISA 2.07's direct move in this case). */
19446
19447 static bool
19448 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19449 enum rs6000_reg_type from_type,
19450 machine_mode mode,
19451 secondary_reload_info *sri,
19452 bool altivec_p)
19453 {
19454 /* Fall back to load/store reloads if either type is not a register. */
19455 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19456 return false;
19457
19458 /* If we haven't allocated registers yet, assume the move can be done for the
19459 standard register types. */
19460 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19461 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19462 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19463 return true;
19464
19465 /* A move within the same set of registers is a simple move for
19466 non-specialized registers. */
19467 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19468 return true;
19469
19470 /* Check whether a simple move can be done directly. */
19471 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19472 {
19473 if (sri)
19474 {
19475 sri->icode = CODE_FOR_nothing;
19476 sri->extra_cost = 0;
19477 }
19478 return true;
19479 }
19480
19481 /* Now check if we can do it in a few steps. */
19482 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19483 altivec_p);
19484 }
19485
19486 /* Inform reload about cases where moving X with a mode MODE to a register in
19487 RCLASS requires an extra scratch or immediate register. Return the class
19488 needed for the immediate register.
19489
19490 For VSX and Altivec, we may need a register to convert sp+offset into
19491 reg+sp.
19492
19493 For misaligned 64-bit gpr loads and stores we need a register to
19494 convert an offset address to indirect. */
19495
19496 static reg_class_t
19497 rs6000_secondary_reload (bool in_p,
19498 rtx x,
19499 reg_class_t rclass_i,
19500 machine_mode mode,
19501 secondary_reload_info *sri)
19502 {
19503 enum reg_class rclass = (enum reg_class) rclass_i;
19504 reg_class_t ret = ALL_REGS;
19505 enum insn_code icode;
19506 bool default_p = false;
19507 bool done_p = false;
19508
19509 /* Allow subreg of memory before/during reload. */
19510 bool memory_p = (MEM_P (x)
19511 || (!reload_completed && GET_CODE (x) == SUBREG
19512 && MEM_P (SUBREG_REG (x))));
19513
19514 sri->icode = CODE_FOR_nothing;
19515 sri->t_icode = CODE_FOR_nothing;
19516 sri->extra_cost = 0;
19517 icode = ((in_p)
19518 ? reg_addr[mode].reload_load
19519 : reg_addr[mode].reload_store);
19520
19521 if (REG_P (x) || register_operand (x, mode))
19522 {
19523 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19524 bool altivec_p = (rclass == ALTIVEC_REGS);
19525 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19526
19527 if (!in_p)
19528 std::swap (to_type, from_type);
19529
19530 /* Can we do a direct move of some sort? */
19531 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19532 altivec_p))
19533 {
19534 icode = (enum insn_code)sri->icode;
19535 default_p = false;
19536 done_p = true;
19537 ret = NO_REGS;
19538 }
19539 }
19540
19541 /* Make sure 0.0 is not reloaded or forced into memory. */
19542 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19543 {
19544 ret = NO_REGS;
19545 default_p = false;
19546 done_p = true;
19547 }
19548
19549 /* If this is a scalar floating point value and we want to load it into the
19550 traditional Altivec registers, move it via a traditional floating point
19551 register, unless we have D-form addressing. Also make sure that non-zero
19552 constants use an FPR. */
19553 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19554 && !mode_supports_vmx_dform (mode)
19555 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19556 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19557 {
19558 ret = FLOAT_REGS;
19559 default_p = false;
19560 done_p = true;
19561 }
19562
19563 /* Handle reload of load/stores if we have reload helper functions. */
19564 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19565 {
19566 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19567 mode);
19568
19569 if (extra_cost >= 0)
19570 {
19571 done_p = true;
19572 ret = NO_REGS;
19573 if (extra_cost > 0)
19574 {
19575 sri->extra_cost = extra_cost;
19576 sri->icode = icode;
19577 }
19578 }
19579 }
19580
19581 /* Handle unaligned loads and stores of integer registers. */
19582 if (!done_p && TARGET_POWERPC64
19583 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19584 && memory_p
19585 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19586 {
19587 rtx addr = XEXP (x, 0);
19588 rtx off = address_offset (addr);
19589
19590 if (off != NULL_RTX)
19591 {
19592 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19593 unsigned HOST_WIDE_INT offset = INTVAL (off);
19594
19595 /* We need a secondary reload when our legitimate_address_p
19596 says the address is good (as otherwise the entire address
19597 will be reloaded), and the offset is not a multiple of
19598 four or we have an address wrap. Address wrap will only
19599 occur for LO_SUMs since legitimate_offset_address_p
19600 rejects addresses for 16-byte mems that will wrap. */
19601 if (GET_CODE (addr) == LO_SUM
19602 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19603 && ((offset & 3) != 0
19604 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19605 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19606 && (offset & 3) != 0))
19607 {
19608 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19609 if (in_p)
19610 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19611 : CODE_FOR_reload_di_load);
19612 else
19613 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19614 : CODE_FOR_reload_di_store);
19615 sri->extra_cost = 2;
19616 ret = NO_REGS;
19617 done_p = true;
19618 }
19619 else
19620 default_p = true;
19621 }
19622 else
19623 default_p = true;
19624 }
19625
19626 if (!done_p && !TARGET_POWERPC64
19627 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19628 && memory_p
19629 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19630 {
19631 rtx addr = XEXP (x, 0);
19632 rtx off = address_offset (addr);
19633
19634 if (off != NULL_RTX)
19635 {
19636 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19637 unsigned HOST_WIDE_INT offset = INTVAL (off);
19638
19639 /* We need a secondary reload when our legitimate_address_p
19640 says the address is good (as otherwise the entire address
19641 will be reloaded), and we have a wrap.
19642
19643 legitimate_lo_sum_address_p allows LO_SUM addresses to
19644 have any offset so test for wrap in the low 16 bits.
19645
19646 legitimate_offset_address_p checks for the range
19647 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19648 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19649 [0x7ff4,0x7fff] respectively, so test for the
19650 intersection of these ranges, [0x7ffc,0x7fff] and
19651 [0x7ff4,0x7ff7] respectively.
19652
19653 Note that the address we see here may have been
19654 manipulated by legitimize_reload_address. */
19655 if (GET_CODE (addr) == LO_SUM
19656 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19657 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19658 {
19659 if (in_p)
19660 sri->icode = CODE_FOR_reload_si_load;
19661 else
19662 sri->icode = CODE_FOR_reload_si_store;
19663 sri->extra_cost = 2;
19664 ret = NO_REGS;
19665 done_p = true;
19666 }
19667 else
19668 default_p = true;
19669 }
19670 else
19671 default_p = true;
19672 }
19673
19674 if (!done_p)
19675 default_p = true;
19676
19677 if (default_p)
19678 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19679
19680 gcc_assert (ret != ALL_REGS);
19681
19682 if (TARGET_DEBUG_ADDR)
19683 {
19684 fprintf (stderr,
19685 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19686 "mode = %s",
19687 reg_class_names[ret],
19688 in_p ? "true" : "false",
19689 reg_class_names[rclass],
19690 GET_MODE_NAME (mode));
19691
19692 if (reload_completed)
19693 fputs (", after reload", stderr);
19694
19695 if (!done_p)
19696 fputs (", done_p not set", stderr);
19697
19698 if (default_p)
19699 fputs (", default secondary reload", stderr);
19700
19701 if (sri->icode != CODE_FOR_nothing)
19702 fprintf (stderr, ", reload func = %s, extra cost = %d",
19703 insn_data[sri->icode].name, sri->extra_cost);
19704
19705 else if (sri->extra_cost > 0)
19706 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19707
19708 fputs ("\n", stderr);
19709 debug_rtx (x);
19710 }
19711
19712 return ret;
19713 }
19714
19715 /* Better tracing for rs6000_secondary_reload_inner. */
19716
19717 static void
19718 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19719 bool store_p)
19720 {
19721 rtx set, clobber;
19722
19723 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19724
19725 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19726 store_p ? "store" : "load");
19727
19728 if (store_p)
19729 set = gen_rtx_SET (mem, reg);
19730 else
19731 set = gen_rtx_SET (reg, mem);
19732
19733 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19734 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19735 }
19736
19737 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19738 ATTRIBUTE_NORETURN;
19739
19740 static void
19741 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19742 bool store_p)
19743 {
19744 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19745 gcc_unreachable ();
19746 }
19747
19748 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19749 reload helper functions. These were identified in
19750 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19751 reload, it calls the insns:
19752 reload_<RELOAD:mode>_<P:mptrsize>_store
19753 reload_<RELOAD:mode>_<P:mptrsize>_load
19754
19755 which in turn calls this function, to do whatever is necessary to create
19756 valid addresses. */
19757
19758 void
19759 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19760 {
19761 int regno = true_regnum (reg);
19762 machine_mode mode = GET_MODE (reg);
19763 addr_mask_type addr_mask;
19764 rtx addr;
19765 rtx new_addr;
19766 rtx op_reg, op0, op1;
19767 rtx and_op;
19768 rtx cc_clobber;
19769 rtvec rv;
19770
19771 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19772 || !base_reg_operand (scratch, GET_MODE (scratch)))
19773 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19774
19775 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19776 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19777
19778 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19779 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19780
19781 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19782 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19783
19784 else
19785 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19786
19787 /* Make sure the mode is valid in this register class. */
19788 if ((addr_mask & RELOAD_REG_VALID) == 0)
19789 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19790
19791 if (TARGET_DEBUG_ADDR)
19792 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19793
19794 new_addr = addr = XEXP (mem, 0);
19795 switch (GET_CODE (addr))
19796 {
19797 /* Does the register class support auto update forms for this mode? If
19798 not, do the update now. We don't need a scratch register, since the
19799 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19800 case PRE_INC:
19801 case PRE_DEC:
19802 op_reg = XEXP (addr, 0);
19803 if (!base_reg_operand (op_reg, Pmode))
19804 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19805
19806 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19807 {
19808 int delta = GET_MODE_SIZE (mode);
19809 if (GET_CODE (addr) == PRE_DEC)
19810 delta = -delta;
19811 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19812 new_addr = op_reg;
19813 }
19814 break;
19815
19816 case PRE_MODIFY:
19817 op0 = XEXP (addr, 0);
19818 op1 = XEXP (addr, 1);
19819 if (!base_reg_operand (op0, Pmode)
19820 || GET_CODE (op1) != PLUS
19821 || !rtx_equal_p (op0, XEXP (op1, 0)))
19822 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19823
19824 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19825 {
19826 emit_insn (gen_rtx_SET (op0, op1));
19827 new_addr = op0;
19828 }
19829 break;
19830
19831 /* Do we need to simulate AND -16 to clear the bottom address bits used
19832 in VMX load/stores? */
19833 case AND:
19834 op0 = XEXP (addr, 0);
19835 op1 = XEXP (addr, 1);
19836 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19837 {
19838 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
19839 op_reg = op0;
19840
19841 else if (GET_CODE (op1) == PLUS)
19842 {
19843 emit_insn (gen_rtx_SET (scratch, op1));
19844 op_reg = scratch;
19845 }
19846
19847 else
19848 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19849
19850 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19851 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19852 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19853 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19854 new_addr = scratch;
19855 }
19856 break;
19857
19858 /* If this is an indirect address, make sure it is a base register. */
19859 case REG:
19860 case SUBREG:
19861 if (!base_reg_operand (addr, GET_MODE (addr)))
19862 {
19863 emit_insn (gen_rtx_SET (scratch, addr));
19864 new_addr = scratch;
19865 }
19866 break;
19867
19868 /* If this is an indexed address, make sure the register class can handle
19869 indexed addresses for this mode. */
19870 case PLUS:
19871 op0 = XEXP (addr, 0);
19872 op1 = XEXP (addr, 1);
19873 if (!base_reg_operand (op0, Pmode))
19874 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19875
19876 else if (int_reg_operand (op1, Pmode))
19877 {
19878 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19879 {
19880 emit_insn (gen_rtx_SET (scratch, addr));
19881 new_addr = scratch;
19882 }
19883 }
19884
19885 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19886 {
19887 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19888 || !quad_address_p (addr, mode, false))
19889 {
19890 emit_insn (gen_rtx_SET (scratch, addr));
19891 new_addr = scratch;
19892 }
19893 }
19894
19895 /* Make sure the register class can handle offset addresses. */
19896 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19897 {
19898 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19899 {
19900 emit_insn (gen_rtx_SET (scratch, addr));
19901 new_addr = scratch;
19902 }
19903 }
19904
19905 else
19906 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19907
19908 break;
19909
19910 case LO_SUM:
19911 op0 = XEXP (addr, 0);
19912 op1 = XEXP (addr, 1);
19913 if (!base_reg_operand (op0, Pmode))
19914 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19915
19916 else if (int_reg_operand (op1, Pmode))
19917 {
19918 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19919 {
19920 emit_insn (gen_rtx_SET (scratch, addr));
19921 new_addr = scratch;
19922 }
19923 }
19924
19925 /* Quad offsets are restricted and can't handle normal addresses. */
19926 else if (mode_supports_dq_form (mode))
19927 {
19928 emit_insn (gen_rtx_SET (scratch, addr));
19929 new_addr = scratch;
19930 }
19931
19932 /* Make sure the register class can handle offset addresses. */
19933 else if (legitimate_lo_sum_address_p (mode, addr, false))
19934 {
19935 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19936 {
19937 emit_insn (gen_rtx_SET (scratch, addr));
19938 new_addr = scratch;
19939 }
19940 }
19941
19942 else
19943 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19944
19945 break;
19946
19947 case SYMBOL_REF:
19948 case CONST:
19949 case LABEL_REF:
19950 rs6000_emit_move (scratch, addr, Pmode);
19951 new_addr = scratch;
19952 break;
19953
19954 default:
19955 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19956 }
19957
19958 /* Adjust the address if it changed. */
19959 if (addr != new_addr)
19960 {
19961 mem = replace_equiv_address_nv (mem, new_addr);
19962 if (TARGET_DEBUG_ADDR)
19963 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19964 }
19965
19966 /* Now create the move. */
19967 if (store_p)
19968 emit_insn (gen_rtx_SET (mem, reg));
19969 else
19970 emit_insn (gen_rtx_SET (reg, mem));
19971
19972 return;
19973 }
19974
19975 /* Convert reloads involving 64-bit gprs and misaligned offset
19976 addressing, or multiple 32-bit gprs and offsets that are too large,
19977 to use indirect addressing. */
19978
19979 void
19980 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19981 {
19982 int regno = true_regnum (reg);
19983 enum reg_class rclass;
19984 rtx addr;
19985 rtx scratch_or_premodify = scratch;
19986
19987 if (TARGET_DEBUG_ADDR)
19988 {
19989 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19990 store_p ? "store" : "load");
19991 fprintf (stderr, "reg:\n");
19992 debug_rtx (reg);
19993 fprintf (stderr, "mem:\n");
19994 debug_rtx (mem);
19995 fprintf (stderr, "scratch:\n");
19996 debug_rtx (scratch);
19997 }
19998
19999 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20000 gcc_assert (GET_CODE (mem) == MEM);
20001 rclass = REGNO_REG_CLASS (regno);
20002 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20003 addr = XEXP (mem, 0);
20004
20005 if (GET_CODE (addr) == PRE_MODIFY)
20006 {
20007 gcc_assert (REG_P (XEXP (addr, 0))
20008 && GET_CODE (XEXP (addr, 1)) == PLUS
20009 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20010 scratch_or_premodify = XEXP (addr, 0);
20011 addr = XEXP (addr, 1);
20012 }
20013 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20014
20015 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20016
20017 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20018
20019 /* Now create the move. */
20020 if (store_p)
20021 emit_insn (gen_rtx_SET (mem, reg));
20022 else
20023 emit_insn (gen_rtx_SET (reg, mem));
20024
20025 return;
20026 }
20027
20028 /* Given an rtx X being reloaded into a reg required to be
20029 in class CLASS, return the class of reg to actually use.
20030 In general this is just CLASS; but on some machines
20031 in some cases it is preferable to use a more restrictive class.
20032
20033 On the RS/6000, we have to return NO_REGS when we want to reload a
20034 floating-point CONST_DOUBLE to force it to be copied to memory.
20035
20036 We also don't want to reload integer values into floating-point
20037 registers if we can at all help it. In fact, this can
20038 cause reload to die, if it tries to generate a reload of CTR
20039 into a FP register and discovers it doesn't have the memory location
20040 required.
20041
20042 ??? Would it be a good idea to have reload do the converse, that is
20043 try to reload floating modes into FP registers if possible?
20044 */
20045
20046 static enum reg_class
20047 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20048 {
20049 machine_mode mode = GET_MODE (x);
20050 bool is_constant = CONSTANT_P (x);
20051
20052 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20053 reload class for it. */
20054 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20055 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20056 return NO_REGS;
20057
20058 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20059 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20060 return NO_REGS;
20061
20062 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20063 the reloading of address expressions using PLUS into floating point
20064 registers. */
20065 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20066 {
20067 if (is_constant)
20068 {
20069 /* Zero is always allowed in all VSX registers. */
20070 if (x == CONST0_RTX (mode))
20071 return rclass;
20072
20073 /* If this is a vector constant that can be formed with a few Altivec
20074 instructions, we want altivec registers. */
20075 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20076 return ALTIVEC_REGS;
20077
20078 /* If this is an integer constant that can easily be loaded into
20079 vector registers, allow it. */
20080 if (CONST_INT_P (x))
20081 {
20082 HOST_WIDE_INT value = INTVAL (x);
20083
20084 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20085 2.06 can generate it in the Altivec registers with
20086 VSPLTI<x>. */
20087 if (value == -1)
20088 {
20089 if (TARGET_P8_VECTOR)
20090 return rclass;
20091 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20092 return ALTIVEC_REGS;
20093 else
20094 return NO_REGS;
20095 }
20096
20097 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20098 a sign extend in the Altivec registers. */
20099 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20100 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20101 return ALTIVEC_REGS;
20102 }
20103
20104 /* Force constant to memory. */
20105 return NO_REGS;
20106 }
20107
20108 /* D-form addressing can easily reload the value. */
20109 if (mode_supports_vmx_dform (mode)
20110 || mode_supports_dq_form (mode))
20111 return rclass;
20112
20113 /* If this is a scalar floating point value and we don't have D-form
20114 addressing, prefer the traditional floating point registers so that we
20115 can use D-form (register+offset) addressing. */
20116 if (rclass == VSX_REGS
20117 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20118 return FLOAT_REGS;
20119
20120 /* Prefer the Altivec registers if Altivec is handling the vector
20121 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20122 loads. */
20123 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20124 || mode == V1TImode)
20125 return ALTIVEC_REGS;
20126
20127 return rclass;
20128 }
20129
20130 if (is_constant || GET_CODE (x) == PLUS)
20131 {
20132 if (reg_class_subset_p (GENERAL_REGS, rclass))
20133 return GENERAL_REGS;
20134 if (reg_class_subset_p (BASE_REGS, rclass))
20135 return BASE_REGS;
20136 return NO_REGS;
20137 }
20138
20139 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20140 return GENERAL_REGS;
20141
20142 return rclass;
20143 }
20144
20145 /* Debug version of rs6000_preferred_reload_class. */
20146 static enum reg_class
20147 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20148 {
20149 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20150
20151 fprintf (stderr,
20152 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20153 "mode = %s, x:\n",
20154 reg_class_names[ret], reg_class_names[rclass],
20155 GET_MODE_NAME (GET_MODE (x)));
20156 debug_rtx (x);
20157
20158 return ret;
20159 }
20160
20161 /* If we are copying between FP or AltiVec registers and anything else, we need
20162 a memory location. The exception is when we are targeting ppc64 and the
20163 direct move instructions between the GPRs and FPRs are available. Also, under
20164 VSX, you can copy vector registers from the FP register set to the Altivec
20165 register set and vice versa. */
20166
20167 static bool
20168 rs6000_secondary_memory_needed (machine_mode mode,
20169 reg_class_t from_class,
20170 reg_class_t to_class)
20171 {
20172 enum rs6000_reg_type from_type, to_type;
20173 bool altivec_p = ((from_class == ALTIVEC_REGS)
20174 || (to_class == ALTIVEC_REGS));
20175
20176 /* If a simple/direct move is available, we don't need secondary memory. */
20177 from_type = reg_class_to_reg_type[(int)from_class];
20178 to_type = reg_class_to_reg_type[(int)to_class];
20179
20180 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20181 (secondary_reload_info *)0, altivec_p))
20182 return false;
20183
20184 /* If we have a floating point or vector register class, we need to use
20185 memory to transfer the data. */
20186 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20187 return true;
20188
20189 return false;
20190 }
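
/* An illustrative sketch: without the power8 direct move instructions
(mfvsrd/mtvsrd), copying a DFmode value between FLOAT_REGS and
GENERAL_REGS has no register-to-register path, so this hook returns
true and reload bounces the value through a stack slot. On power8,
rs6000_secondary_reload_move succeeds and no memory is needed. */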
20191
20192 /* Debug version of rs6000_secondary_memory_needed. */
20193 static bool
20194 rs6000_debug_secondary_memory_needed (machine_mode mode,
20195 reg_class_t from_class,
20196 reg_class_t to_class)
20197 {
20198 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20199
20200 fprintf (stderr,
20201 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20202 "to_class = %s, mode = %s\n",
20203 ret ? "true" : "false",
20204 reg_class_names[from_class],
20205 reg_class_names[to_class],
20206 GET_MODE_NAME (mode));
20207
20208 return ret;
20209 }
20210
20211 /* Return the register class of a scratch register needed to copy IN into
20212 or out of a register in RCLASS in MODE. If it can be done directly,
20213 NO_REGS is returned. */
20214
20215 static enum reg_class
20216 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20217 rtx in)
20218 {
20219 int regno;
20220
20221 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20222 #if TARGET_MACHO
20223 && MACHOPIC_INDIRECT
20224 #endif
20225 ))
20226 {
20227 /* We cannot copy a symbolic operand directly into anything
20228 other than BASE_REGS for TARGET_ELF. So indicate that a
20229 register from BASE_REGS is needed as an intermediate
20230 register.
20231
20232 On Darwin, pic addresses require a load from memory, which
20233 needs a base register. */
20234 if (rclass != BASE_REGS
20235 && (GET_CODE (in) == SYMBOL_REF
20236 || GET_CODE (in) == HIGH
20237 || GET_CODE (in) == LABEL_REF
20238 || GET_CODE (in) == CONST))
20239 return BASE_REGS;
20240 }
20241
20242 if (GET_CODE (in) == REG)
20243 {
20244 regno = REGNO (in);
20245 if (regno >= FIRST_PSEUDO_REGISTER)
20246 {
20247 regno = true_regnum (in);
20248 if (regno >= FIRST_PSEUDO_REGISTER)
20249 regno = -1;
20250 }
20251 }
20252 else if (GET_CODE (in) == SUBREG)
20253 {
20254 regno = true_regnum (in);
20255 if (regno >= FIRST_PSEUDO_REGISTER)
20256 regno = -1;
20257 }
20258 else
20259 regno = -1;
20260
20261 /* If we have VSX register moves, prefer moving scalar values between
20262 Altivec registers and GPR by going via an FPR (and then via memory)
20263 instead of reloading the secondary memory address for Altivec moves. */
20264 if (TARGET_VSX
20265 && GET_MODE_SIZE (mode) < 16
20266 && !mode_supports_vmx_dform (mode)
20267 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20268 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20269 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20270 && (regno >= 0 && INT_REGNO_P (regno)))))
20271 return FLOAT_REGS;
20272
20273 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20274 into anything. */
20275 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20276 || (regno >= 0 && INT_REGNO_P (regno)))
20277 return NO_REGS;
20278
20279 /* Constants, memory, and VSX registers can go into VSX registers (both the
20280 traditional floating point and the altivec registers). */
20281 if (rclass == VSX_REGS
20282 && (regno == -1 || VSX_REGNO_P (regno)))
20283 return NO_REGS;
20284
20285 /* Constants, memory, and FP registers can go into FP registers. */
20286 if ((regno == -1 || FP_REGNO_P (regno))
20287 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20288 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20289
20290 /* Memory and AltiVec registers can go into AltiVec registers. */
20291 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20292 && rclass == ALTIVEC_REGS)
20293 return NO_REGS;
20294
20295 /* We can copy among the CR registers. */
20296 if ((rclass == CR_REGS || rclass == CR0_REGS)
20297 && regno >= 0 && CR_REGNO_P (regno))
20298 return NO_REGS;
20299
20300 /* Otherwise, we need GENERAL_REGS. */
20301 return GENERAL_REGS;
20302 }
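
/* An illustrative sketch: on ELF, copying the address of a global (a
SYMBOL_REF) into FLOAT_REGS returns BASE_REGS above, so reload first
materializes the address in a base register. Copying between two GPRs,
or loading a constant into a GPR, returns NO_REGS: no scratch register
is needed. */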
20303
20304 /* Debug version of rs6000_secondary_reload_class. */
20305 static enum reg_class
20306 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20307 machine_mode mode, rtx in)
20308 {
20309 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20310 fprintf (stderr,
20311 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20312 "mode = %s, input rtx:\n",
20313 reg_class_names[ret], reg_class_names[rclass],
20314 GET_MODE_NAME (mode));
20315 debug_rtx (in);
20316
20317 return ret;
20318 }
20319
20320 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20321
20322 static bool
20323 rs6000_can_change_mode_class (machine_mode from,
20324 machine_mode to,
20325 reg_class_t rclass)
20326 {
20327 unsigned from_size = GET_MODE_SIZE (from);
20328 unsigned to_size = GET_MODE_SIZE (to);
20329
20330 if (from_size != to_size)
20331 {
20332 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20333
20334 if (reg_classes_intersect_p (xclass, rclass))
20335 {
20336 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20337 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20338 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20339 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20340
20341 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20342 single register under VSX because the scalar part of the register
20343 is in the upper 64 bits, and not the lower 64 bits. Types like
20344 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20345 IEEE floating point can't overlap, and neither can small
20346 values. */
20347
20348 if (to_float128_vector_p && from_float128_vector_p)
20349 return true;
20350
20351 else if (to_float128_vector_p || from_float128_vector_p)
20352 return false;
20353
20354 /* TDmode in floating-point registers must always go into a register
20355 pair with the most significant word in the even-numbered register
20356 to match ISA requirements. In little-endian mode, this does not
20357 match subreg numbering, so we cannot allow subregs. */
20358 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20359 return false;
20360
20361 if (from_size < 8 || to_size < 8)
20362 return false;
20363
20364 if (from_size == 8 && (8 * to_nregs) != to_size)
20365 return false;
20366
20367 if (to_size == 8 && (8 * from_nregs) != from_size)
20368 return false;
20369
20370 return true;
20371 }
20372 else
20373 return true;
20374 }
20375
20376 /* Since the VSX register set includes traditional floating point registers
20377 and altivec registers, just check for the size being different instead of
20378 trying to check whether the modes are vector modes. Otherwise it won't
20379 allow, say, DF and DI to change classes. For types like TFmode and TDmode
20380 that take 2 64-bit registers, rather than a single 128-bit register, don't
20381 allow subregs of those types to other 128-bit types. */
20382 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20383 {
20384 unsigned num_regs = (from_size + 15) / 16;
20385 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20386 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20387 return false;
20388
20389 return (from_size == 8 || from_size == 16);
20390 }
20391
20392 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20393 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20394 return false;
20395
20396 return true;
20397 }
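
/* An illustrative sketch: with VSX, a subreg reinterpreting a V2DImode
register as DImode is rejected (16 bytes vs. 8), because the scalar
lives in the upper 64 bits of the register rather than where subreg
numbering would place it, whereas DFmode <-> DImode (both 8 bytes) is
allowed. */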
20398
20399 /* Debug version of rs6000_can_change_mode_class. */
20400 static bool
20401 rs6000_debug_can_change_mode_class (machine_mode from,
20402 machine_mode to,
20403 reg_class_t rclass)
20404 {
20405 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20406
20407 fprintf (stderr,
20408 "rs6000_can_change_mode_class, return %s, from = %s, "
20409 "to = %s, rclass = %s\n",
20410 ret ? "true" : "false",
20411 GET_MODE_NAME (from), GET_MODE_NAME (to),
20412 reg_class_names[rclass]);
20413
20414 return ret;
20415 }
20416 \f
20417 /* Return a string to do a move operation of 128 bits of data. */
20418
20419 const char *
20420 rs6000_output_move_128bit (rtx operands[])
20421 {
20422 rtx dest = operands[0];
20423 rtx src = operands[1];
20424 machine_mode mode = GET_MODE (dest);
20425 int dest_regno;
20426 int src_regno;
20427 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20428 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20429
20430 if (REG_P (dest))
20431 {
20432 dest_regno = REGNO (dest);
20433 dest_gpr_p = INT_REGNO_P (dest_regno);
20434 dest_fp_p = FP_REGNO_P (dest_regno);
20435 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20436 dest_vsx_p = dest_fp_p | dest_vmx_p;
20437 }
20438 else
20439 {
20440 dest_regno = -1;
20441 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20442 }
20443
20444 if (REG_P (src))
20445 {
20446 src_regno = REGNO (src);
20447 src_gpr_p = INT_REGNO_P (src_regno);
20448 src_fp_p = FP_REGNO_P (src_regno);
20449 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20450 src_vsx_p = src_fp_p | src_vmx_p;
20451 }
20452 else
20453 {
20454 src_regno = -1;
20455 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20456 }
20457
20458 /* Register moves. */
20459 if (dest_regno >= 0 && src_regno >= 0)
20460 {
20461 if (dest_gpr_p)
20462 {
20463 if (src_gpr_p)
20464 return "#";
20465
20466 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20467 return (WORDS_BIG_ENDIAN
20468 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20469 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20470
20471 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20472 return "#";
20473 }
20474
20475 else if (TARGET_VSX && dest_vsx_p)
20476 {
20477 if (src_vsx_p)
20478 return "xxlor %x0,%x1,%x1";
20479
20480 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20481 return (WORDS_BIG_ENDIAN
20482 ? "mtvsrdd %x0,%1,%L1"
20483 : "mtvsrdd %x0,%L1,%1");
20484
20485 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20486 return "#";
20487 }
20488
20489 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20490 return "vor %0,%1,%1";
20491
20492 else if (dest_fp_p && src_fp_p)
20493 return "#";
20494 }
20495
20496 /* Loads. */
20497 else if (dest_regno >= 0 && MEM_P (src))
20498 {
20499 if (dest_gpr_p)
20500 {
20501 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20502 return "lq %0,%1";
20503 else
20504 return "#";
20505 }
20506
20507 else if (TARGET_ALTIVEC && dest_vmx_p
20508 && altivec_indexed_or_indirect_operand (src, mode))
20509 return "lvx %0,%y1";
20510
20511 else if (TARGET_VSX && dest_vsx_p)
20512 {
20513 if (mode_supports_dq_form (mode)
20514 && quad_address_p (XEXP (src, 0), mode, true))
20515 return "lxv %x0,%1";
20516
20517 else if (TARGET_P9_VECTOR)
20518 return "lxvx %x0,%y1";
20519
20520 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20521 return "lxvw4x %x0,%y1";
20522
20523 else
20524 return "lxvd2x %x0,%y1";
20525 }
20526
20527 else if (TARGET_ALTIVEC && dest_vmx_p)
20528 return "lvx %0,%y1";
20529
20530 else if (dest_fp_p)
20531 return "#";
20532 }
20533
20534 /* Stores. */
20535 else if (src_regno >= 0 && MEM_P (dest))
20536 {
20537 if (src_gpr_p)
20538 {
20539 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20540 return "stq %1,%0";
20541 else
20542 return "#";
20543 }
20544
20545 else if (TARGET_ALTIVEC && src_vmx_p
20546 && altivec_indexed_or_indirect_operand (dest, mode))
20547 return "stvx %1,%y0";
20548
20549 else if (TARGET_VSX && src_vsx_p)
20550 {
20551 if (mode_supports_dq_form (mode)
20552 && quad_address_p (XEXP (dest, 0), mode, true))
20553 return "stxv %x1,%0";
20554
20555 else if (TARGET_P9_VECTOR)
20556 return "stxvx %x1,%y0";
20557
20558 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20559 return "stxvw4x %x1,%y0";
20560
20561 else
20562 return "stxvd2x %x1,%y0";
20563 }
20564
20565 else if (TARGET_ALTIVEC && src_vmx_p)
20566 return "stvx %1,%y0";
20567
20568 else if (src_fp_p)
20569 return "#";
20570 }
20571
20572 /* Constants. */
20573 else if (dest_regno >= 0
20574 && (GET_CODE (src) == CONST_INT
20575 || GET_CODE (src) == CONST_WIDE_INT
20576 || GET_CODE (src) == CONST_DOUBLE
20577 || GET_CODE (src) == CONST_VECTOR))
20578 {
20579 if (dest_gpr_p)
20580 return "#";
20581
20582 else if ((dest_vmx_p && TARGET_ALTIVEC)
20583 || (dest_vsx_p && TARGET_VSX))
20584 return output_vec_const_move (operands);
20585 }
20586
20587 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20588 }
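
/* Illustrative outputs from the routine above (a sketch, not a full
enumeration): a VSX-to-VSX register copy returns "xxlor %x0,%x1,%x1";
an ISA 3.0 load whose address satisfies the quad-offset D-form check
returns "lxv %x0,%1"; and a GPR-to-GPR copy returns "#" so that the
move is split into word-sized pieces after reload. */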
20589
20590 /* Validate a 128-bit move. */
20591 bool
20592 rs6000_move_128bit_ok_p (rtx operands[])
20593 {
20594 machine_mode mode = GET_MODE (operands[0]);
20595 return (gpc_reg_operand (operands[0], mode)
20596 || gpc_reg_operand (operands[1], mode));
20597 }
20598
20599 /* Return true if a 128-bit move needs to be split. */
20600 bool
20601 rs6000_split_128bit_ok_p (rtx operands[])
20602 {
20603 if (!reload_completed)
20604 return false;
20605
20606 if (!gpr_or_gpr_p (operands[0], operands[1]))
20607 return false;
20608
20609 if (quad_load_store_p (operands[0], operands[1]))
20610 return false;
20611
20612 return true;
20613 }
20614
20615 \f
20616 /* Given a comparison operation, return the bit number in CCR to test. We
20617 know this is a valid comparison.
20618
20619 SCC_P is 1 if this is for an scc. That means that %D will have been
20620 used instead of %C, so the bits will be in different places.
20621
20622 Return -1 if OP isn't a valid comparison for some reason. */
20623
20624 int
20625 ccr_bit (rtx op, int scc_p)
20626 {
20627 enum rtx_code code = GET_CODE (op);
20628 machine_mode cc_mode;
20629 int cc_regnum;
20630 int base_bit;
20631 rtx reg;
20632
20633 if (!COMPARISON_P (op))
20634 return -1;
20635
20636 reg = XEXP (op, 0);
20637
20638 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20639 return -1;
20640
20641 cc_mode = GET_MODE (reg);
20642 cc_regnum = REGNO (reg);
20643 base_bit = 4 * (cc_regnum - CR0_REGNO);
20644
20645 validate_condition_mode (code, cc_mode);
20646
20647 /* When generating a sCOND operation, only positive conditions are
20648 allowed. */
20649 if (scc_p)
20650 switch (code)
20651 {
20652 case EQ:
20653 case GT:
20654 case LT:
20655 case UNORDERED:
20656 case GTU:
20657 case LTU:
20658 break;
20659 default:
20660 return -1;
20661 }
20662
20663 switch (code)
20664 {
20665 case NE:
20666 return scc_p ? base_bit + 3 : base_bit + 2;
20667 case EQ:
20668 return base_bit + 2;
20669 case GT: case GTU: case UNLE:
20670 return base_bit + 1;
20671 case LT: case LTU: case UNGE:
20672 return base_bit;
20673 case ORDERED: case UNORDERED:
20674 return base_bit + 3;
20675
20676 case GE: case GEU:
20677 /* If scc, we will have done a cror to put the bit in the
20678 unordered position. So test that bit. For integer, this is ! LT
20679 unless this is an scc insn. */
20680 return scc_p ? base_bit + 3 : base_bit;
20681
20682 case LE: case LEU:
20683 return scc_p ? base_bit + 3 : base_bit + 1;
20684
20685 default:
20686 return -1;
20687 }
20688 }
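
/* An illustrative sketch: a comparison whose CC register is CR2 has
base_bit = 8. A GT test returns bit 9 (base_bit + 1) and an EQ test
bit 10 (base_bit + 2); for an scc GE, where a cror has already placed
the result in the unordered slot, bit 11 (base_bit + 3) is returned. */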
20689 \f
20690 /* Return the GOT register. */
20691
20692 rtx
20693 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20694 {
20695 /* The second flow pass currently (June 1999) can't update
20696 regs_ever_live without disturbing other parts of the compiler, so
20697 update it here to make the prolog/epilogue code happy. */
20698 if (!can_create_pseudo_p ()
20699 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20700 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20701
20702 crtl->uses_pic_offset_table = 1;
20703
20704 return pic_offset_table_rtx;
20705 }
20706 \f
20707 static rs6000_stack_t stack_info;
20708
20709 /* Function to init struct machine_function.
20710 This will be called, via a pointer variable,
20711 from push_function_context. */
20712
20713 static struct machine_function *
20714 rs6000_init_machine_status (void)
20715 {
20716 stack_info.reload_completed = 0;
20717 return ggc_cleared_alloc<machine_function> ();
20718 }
20719 \f
20720 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20721
20722 /* Write out a function code label. */
20723
20724 void
20725 rs6000_output_function_entry (FILE *file, const char *fname)
20726 {
20727 if (fname[0] != '.')
20728 {
20729 switch (DEFAULT_ABI)
20730 {
20731 default:
20732 gcc_unreachable ();
20733
20734 case ABI_AIX:
20735 if (DOT_SYMBOLS)
20736 putc ('.', file);
20737 else
20738 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20739 break;
20740
20741 case ABI_ELFv2:
20742 case ABI_V4:
20743 case ABI_DARWIN:
20744 break;
20745 }
20746 }
20747
20748 RS6000_OUTPUT_BASENAME (file, fname);
20749 }
20750
20751 /* Print an operand. Recognize special options, documented below. */
20752
20753 #if TARGET_ELF
20754 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20755 only introduced by the linker, when applying the sda21
20756 relocation. */
20757 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20758 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20759 #else
20760 #define SMALL_DATA_RELOC "sda21"
20761 #define SMALL_DATA_REG 0
20762 #endif
20763
20764 void
20765 print_operand (FILE *file, rtx x, int code)
20766 {
20767 int i;
20768 unsigned HOST_WIDE_INT uval;
20769
20770 switch (code)
20771 {
20772 /* %a is output_address. */
20773
20774 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20775 output_operand. */
20776
20777 case 'D':
20778 /* Like 'J' but get to the GT bit only. */
20779 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20780 {
20781 output_operand_lossage ("invalid %%D value");
20782 return;
20783 }
20784
20785 /* Bit 1 is GT bit. */
20786 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20787
20788 /* Add one for shift count in rlinm for scc. */
20789 fprintf (file, "%d", i + 1);
20790 return;
20791
20792 case 'e':
20793 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20794 if (! INT_P (x))
20795 {
20796 output_operand_lossage ("invalid %%e value");
20797 return;
20798 }
20799
20800 uval = INTVAL (x);
20801 if ((uval & 0xffff) == 0 && uval != 0)
20802 putc ('s', file);
20803 return;
20804
20805 case 'E':
20806 /* X is a CR register. Print the number of the EQ bit of the CR. */
20807 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20808 output_operand_lossage ("invalid %%E value");
20809 else
20810 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20811 return;
20812
20813 case 'f':
20814 /* X is a CR register. Print the shift count needed to move it
20815 to the high-order four bits. */
20816 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20817 output_operand_lossage ("invalid %%f value");
20818 else
20819 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20820 return;
20821
20822 case 'F':
20823 /* Similar, but print the count for the rotate in the opposite
20824 direction. */
20825 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20826 output_operand_lossage ("invalid %%F value");
20827 else
20828 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20829 return;
20830
20831 case 'G':
20832 /* X is a constant integer. If it is negative, print "m",
20833 otherwise print "z". This is to make an aze or ame insn. */
20834 if (GET_CODE (x) != CONST_INT)
20835 output_operand_lossage ("invalid %%G value");
20836 else if (INTVAL (x) >= 0)
20837 putc ('z', file);
20838 else
20839 putc ('m', file);
20840 return;
20841
20842 case 'h':
20843 /* If constant, output low-order five bits. Otherwise, write
20844 normally. */
20845 if (INT_P (x))
20846 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20847 else
20848 print_operand (file, x, 0);
20849 return;
20850
20851 case 'H':
20852 /* If constant, output low-order six bits. Otherwise, write
20853 normally. */
20854 if (INT_P (x))
20855 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20856 else
20857 print_operand (file, x, 0);
20858 return;
20859
20860 case 'I':
20861 /* Print `i' if this is a constant, else nothing. */
20862 if (INT_P (x))
20863 putc ('i', file);
20864 return;
20865
20866 case 'j':
20867 /* Write the bit number in CCR for jump. */
20868 i = ccr_bit (x, 0);
20869 if (i == -1)
20870 output_operand_lossage ("invalid %%j code");
20871 else
20872 fprintf (file, "%d", i);
20873 return;
20874
20875 case 'J':
20876 /* Similar, but add one for shift count in rlinm for scc and pass
20877 scc flag to `ccr_bit'. */
20878 i = ccr_bit (x, 1);
20879 if (i == -1)
20880 output_operand_lossage ("invalid %%J code");
20881 else
20882 /* If we want bit 31, write a shift count of zero, not 32. */
20883 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20884 return;
20885
20886 case 'k':
20887 /* X must be a constant. Write the 1's complement of the
20888 constant. */
20889 if (! INT_P (x))
20890 output_operand_lossage ("invalid %%k value");
20891 else
20892 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20893 return;
20894
20895 case 'K':
20896 /* X must be a symbolic constant on ELF. Write an
20897 expression suitable for an 'addi' that adds in the low 16
20898 bits of the address. */
20899 if (GET_CODE (x) == CONST)
20900 {
20901 if (GET_CODE (XEXP (x, 0)) != PLUS
20902 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
20903 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20904 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
20905 output_operand_lossage ("invalid %%K value");
20906 }
20907 print_operand_address (file, x);
20908 fputs ("@l", file);
20909 return;
20910
20911 /* %l is output_asm_label. */
20912
20913 case 'L':
20914 /* Write second word of DImode or DFmode reference. Works on register
20915 or non-indexed memory only. */
20916 if (REG_P (x))
20917 fputs (reg_names[REGNO (x) + 1], file);
20918 else if (MEM_P (x))
20919 {
20920 machine_mode mode = GET_MODE (x);
20921 /* Handle possible auto-increment. Since it is pre-increment and
20922 we have already done it, we can just use an offset of one word. */
20923 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20924 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20925 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20926 UNITS_PER_WORD));
20927 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20928 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20929 UNITS_PER_WORD));
20930 else
20931 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20932 UNITS_PER_WORD),
20933 0));
20934
20935 if (small_data_operand (x, GET_MODE (x)))
20936 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20937 reg_names[SMALL_DATA_REG]);
20938 }
20939 return;
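
/* An illustrative sketch for 'L': if operand 1 is a DImode value in the
register pair r9/r10 on a 32-bit target, "%1" prints "9" and "%L1"
prints "10" (assuming the default numeric register names); for a
non-indexed memory operand at 16(r3), "%L1" prints the second-word
address "20(3)". */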
20940
20941 case 'N': /* Unused */
20942 /* Write the number of elements in the vector times 4. */
20943 if (GET_CODE (x) != PARALLEL)
20944 output_operand_lossage ("invalid %%N value");
20945 else
20946 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20947 return;
20948
20949 case 'O': /* Unused */
20950 /* Similar, but subtract 1 first. */
20951 if (GET_CODE (x) != PARALLEL)
20952 output_operand_lossage ("invalid %%O value");
20953 else
20954 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20955 return;
20956
20957 case 'p':
20958 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20959 if (! INT_P (x)
20960 || INTVAL (x) < 0
20961 || (i = exact_log2 (INTVAL (x))) < 0)
20962 output_operand_lossage ("invalid %%p value");
20963 else
20964 fprintf (file, "%d", i);
20965 return;
20966
20967 case 'P':
20968 /* The operand must be an indirect memory reference. The result
20969 is the register name. */
20970 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
20971 || REGNO (XEXP (x, 0)) >= 32)
20972 output_operand_lossage ("invalid %%P value");
20973 else
20974 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20975 return;
20976
20977 case 'q':
20978 /* This outputs the logical code corresponding to a boolean
20979 expression. The expression may have one or both operands
20980 negated (if one, only the first one). For condition register
20981 logical operations, it will also treat the negated
20982 CR codes as NOTs, but not handle NOTs of them. */
20983 {
20984 const char *const *t = 0;
20985 const char *s;
20986 enum rtx_code code = GET_CODE (x);
20987 static const char * const tbl[3][3] = {
20988 { "and", "andc", "nor" },
20989 { "or", "orc", "nand" },
20990 { "xor", "eqv", "xor" } };
20991
20992 if (code == AND)
20993 t = tbl[0];
20994 else if (code == IOR)
20995 t = tbl[1];
20996 else if (code == XOR)
20997 t = tbl[2];
20998 else
20999 output_operand_lossage ("invalid %%q value");
21000
21001 if (GET_CODE (XEXP (x, 0)) != NOT)
21002 s = t[0];
21003 else
21004 {
21005 if (GET_CODE (XEXP (x, 1)) == NOT)
21006 s = t[2];
21007 else
21008 s = t[1];
21009 }
21010
21011 fputs (s, file);
21012 }
21013 return;
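
/* Illustrative 'q' outputs, read off the table above:
(and (reg) (reg)) prints "and", (and (not (reg)) (reg)) prints "andc",
and (ior (not (reg)) (not (reg))) prints "nand". */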
21014
21015 case 'Q':
21016 if (! TARGET_MFCRF)
21017 return;
21018 fputc (',', file);
21019 /* FALLTHRU */
21020
21021 case 'R':
21022 /* X is a CR register. Print the mask for `mtcrf'. */
21023 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21024 output_operand_lossage ("invalid %%R value");
21025 else
21026 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21027 return;
21028
21029 case 's':
21030 /* Low 5 bits of 32 - value. */
21031 if (! INT_P (x))
21032 output_operand_lossage ("invalid %%s value");
21033 else
21034 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21035 return;
21036
21037 case 't':
21038 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21039 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21040 {
21041 output_operand_lossage ("invalid %%t value");
21042 return;
21043 }
21044
21045 /* Bit 3 is OV bit. */
21046 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21047
21048 /* If we want bit 31, write a shift count of zero, not 32. */
21049 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21050 return;
21051
21052 case 'T':
21053 /* Print the symbolic name of a branch target register. */
21054 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21055 x = XVECEXP (x, 0, 0);
21056 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21057 && REGNO (x) != CTR_REGNO))
21058 output_operand_lossage ("invalid %%T value");
21059 else if (REGNO (x) == LR_REGNO)
21060 fputs ("lr", file);
21061 else
21062 fputs ("ctr", file);
21063 return;
21064
21065 case 'u':
21066 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21067 for use in unsigned operand. */
21068 if (! INT_P (x))
21069 {
21070 output_operand_lossage ("invalid %%u value");
21071 return;
21072 }
21073
21074 uval = INTVAL (x);
21075 if ((uval & 0xffff) == 0)
21076 uval >>= 16;
21077
21078 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21079 return;
21080
21081 case 'v':
21082 /* High-order 16 bits of constant for use in signed operand. */
21083 if (! INT_P (x))
21084 output_operand_lossage ("invalid %%v value");
21085 else
21086 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21087 (INTVAL (x) >> 16) & 0xffff);
21088 return;
21089
21090 case 'U':
21091 /* Print `u' if this has an auto-increment or auto-decrement. */
21092 if (MEM_P (x)
21093 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21094 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21095 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21096 putc ('u', file);
21097 return;
21098
21099 case 'V':
21100 /* Print the trap code for this operand. */
21101 switch (GET_CODE (x))
21102 {
21103 case EQ:
21104 fputs ("eq", file); /* 4 */
21105 break;
21106 case NE:
21107 fputs ("ne", file); /* 24 */
21108 break;
21109 case LT:
21110 fputs ("lt", file); /* 16 */
21111 break;
21112 case LE:
21113 fputs ("le", file); /* 20 */
21114 break;
21115 case GT:
21116 fputs ("gt", file); /* 8 */
21117 break;
21118 case GE:
21119 fputs ("ge", file); /* 12 */
21120 break;
21121 case LTU:
21122 fputs ("llt", file); /* 2 */
21123 break;
21124 case LEU:
21125 fputs ("lle", file); /* 6 */
21126 break;
21127 case GTU:
21128 fputs ("lgt", file); /* 1 */
21129 break;
21130 case GEU:
21131 fputs ("lge", file); /* 5 */
21132 break;
21133 default:
21134 output_operand_lossage ("invalid %%V value");
21135 }
21136 break;
21137
21138 case 'w':
21139 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21140 normally. */
21141 if (INT_P (x))
21142 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21143 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21144 else
21145 print_operand (file, x, 0);
21146 return;
21147
21148 case 'x':
21149 /* X is an FPR or Altivec register used in a VSX context. */
21150 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21151 output_operand_lossage ("invalid %%x value");
21152 else
21153 {
21154 int reg = REGNO (x);
21155 int vsx_reg = (FP_REGNO_P (reg)
21156 ? reg - 32
21157 : reg - FIRST_ALTIVEC_REGNO + 32);
21158
21159 #ifdef TARGET_REGNAMES
21160 if (TARGET_REGNAMES)
21161 fprintf (file, "%%vs%d", vsx_reg);
21162 else
21163 #endif
21164 fprintf (file, "%d", vsx_reg);
21165 }
21166 return;
21167
21168 case 'X':
21169 if (MEM_P (x)
21170 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21171 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21172 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21173 putc ('x', file);
21174 return;
21175
21176 case 'Y':
21177 /* Like 'L', for third word of TImode/PTImode. */
21178 if (REG_P (x))
21179 fputs (reg_names[REGNO (x) + 2], file);
21180 else if (MEM_P (x))
21181 {
21182 machine_mode mode = GET_MODE (x);
21183 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21184 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21185 output_address (mode, plus_constant (Pmode,
21186 XEXP (XEXP (x, 0), 0), 8));
21187 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21188 output_address (mode, plus_constant (Pmode,
21189 XEXP (XEXP (x, 0), 0), 8));
21190 else
21191 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21192 if (small_data_operand (x, GET_MODE (x)))
21193 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21194 reg_names[SMALL_DATA_REG]);
21195 }
21196 return;
21197
21198 case 'z':
21199 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21200 x = XVECEXP (x, 0, 1);
21201 /* X is a SYMBOL_REF. Write out the name preceded by a
21202 period and without any trailing data in brackets. Used for function
21203 names. If we are configured for System V (or the embedded ABI) on
21204 the PowerPC, do not emit the period, since those systems do not use
21205 TOCs and the like. */
21206 if (!SYMBOL_REF_P (x))
21207 {
21208 output_operand_lossage ("invalid %%z value");
21209 return;
21210 }
21211
21212 /* For macho, check to see if we need a stub. */
21213 if (TARGET_MACHO)
21214 {
21215 const char *name = XSTR (x, 0);
21216 #if TARGET_MACHO
21217 if (darwin_emit_branch_islands
21218 && MACHOPIC_INDIRECT
21219 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21220 name = machopic_indirection_name (x, /*stub_p=*/true);
21221 #endif
21222 assemble_name (file, name);
21223 }
21224 else if (!DOT_SYMBOLS)
21225 assemble_name (file, XSTR (x, 0));
21226 else
21227 rs6000_output_function_entry (file, XSTR (x, 0));
21228 return;
21229
21230 case 'Z':
21231 /* Like 'L', for last word of TImode/PTImode. */
21232 if (REG_P (x))
21233 fputs (reg_names[REGNO (x) + 3], file);
21234 else if (MEM_P (x))
21235 {
21236 machine_mode mode = GET_MODE (x);
21237 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21238 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21239 output_address (mode, plus_constant (Pmode,
21240 XEXP (XEXP (x, 0), 0), 12));
21241 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21242 output_address (mode, plus_constant (Pmode,
21243 XEXP (XEXP (x, 0), 0), 12));
21244 else
21245 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21246 if (small_data_operand (x, GET_MODE (x)))
21247 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21248 reg_names[SMALL_DATA_REG]);
21249 }
21250 return;
21251
21252 /* Print AltiVec memory operand. */
21253 case 'y':
21254 {
21255 rtx tmp;
21256
21257 gcc_assert (MEM_P (x));
21258
21259 tmp = XEXP (x, 0);
21260
21261 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21262 && GET_CODE (tmp) == AND
21263 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21264 && INTVAL (XEXP (tmp, 1)) == -16)
21265 tmp = XEXP (tmp, 0);
21266 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21267 && GET_CODE (tmp) == PRE_MODIFY)
21268 tmp = XEXP (tmp, 1);
21269 if (REG_P (tmp))
21270 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21271 else
21272 {
21273 if (GET_CODE (tmp) != PLUS
21274 || !REG_P (XEXP (tmp, 0))
21275 || !REG_P (XEXP (tmp, 1)))
21276 {
21277 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21278 break;
21279 }
21280
21281 if (REGNO (XEXP (tmp, 0)) == 0)
21282 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21283 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21284 else
21285 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21286 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21287 }
21288 break;
21289 }
21290
21291 case 0:
21292 if (REG_P (x))
21293 fprintf (file, "%s", reg_names[REGNO (x)]);
21294 else if (MEM_P (x))
21295 {
21296 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21297 know the width from the mode. */
21298 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21299 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21300 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21301 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21302 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21303 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21304 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21305 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21306 else
21307 output_address (GET_MODE (x), XEXP (x, 0));
21308 }
21309 else if (toc_relative_expr_p (x, false,
21310 &tocrel_base_oac, &tocrel_offset_oac))
21311 /* This hack along with a corresponding hack in
21312 rs6000_output_addr_const_extra arranges to output addends
21313 where the assembler expects to find them. eg.
21314 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21315 without this hack would be output as "x@toc+4". We
21316 want "x+4@toc". */
21317 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21318 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21319 output_addr_const (file, XVECEXP (x, 0, 0));
21320 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21321 output_addr_const (file, XVECEXP (x, 0, 1));
21322 else
21323 output_addr_const (file, x);
21324 return;
21325
21326 case '&':
21327 if (const char *name = get_some_local_dynamic_name ())
21328 assemble_name (file, name);
21329 else
21330 output_operand_lossage ("'%%&' used without any "
21331 "local dynamic TLS references");
21332 return;
21333
21334 default:
21335 output_operand_lossage ("invalid %%xn code");
21336 }
21337 }
21338 \f
21339 /* Print the address of an operand. */
21340
21341 void
21342 print_operand_address (FILE *file, rtx x)
21343 {
21344 if (REG_P (x))
21345 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21346 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21347 || GET_CODE (x) == LABEL_REF)
21348 {
21349 output_addr_const (file, x);
21350 if (small_data_operand (x, GET_MODE (x)))
21351 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21352 reg_names[SMALL_DATA_REG]);
21353 else
21354 gcc_assert (!TARGET_TOC);
21355 }
21356 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21357 && REG_P (XEXP (x, 1)))
21358 {
21359 if (REGNO (XEXP (x, 0)) == 0)
21360 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21361 reg_names[ REGNO (XEXP (x, 0)) ]);
21362 else
21363 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21364 reg_names[ REGNO (XEXP (x, 1)) ]);
21365 }
21366 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21367 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21368 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21369 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21370 #if TARGET_MACHO
21371 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21372 && CONSTANT_P (XEXP (x, 1)))
21373 {
21374 fprintf (file, "lo16(");
21375 output_addr_const (file, XEXP (x, 1));
21376 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21377 }
21378 #endif
21379 #if TARGET_ELF
21380 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21381 && CONSTANT_P (XEXP (x, 1)))
21382 {
21383 output_addr_const (file, XEXP (x, 1));
21384 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21385 }
21386 #endif
21387 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21388 {
21389 /* This hack along with a corresponding hack in
21390 rs6000_output_addr_const_extra arranges to output addends
21391 where the assembler expects to find them. eg.
21392 (lo_sum (reg 9)
21393 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21394 without this hack would be output as "x@toc+8@l(9)". We
21395 want "x+8@toc@l(9)". */
21396 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21397 if (GET_CODE (x) == LO_SUM)
21398 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21399 else
21400 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21401 }
21402 else
21403 output_addr_const (file, x);
21404 }
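
/* An illustrative sketch: (reg 9) prints "0(9)",
(plus (reg 9) (const_int 16)) prints "16(9)", and the indexed address
(plus (reg 9) (reg 10)) prints "9,10", with the operands swapped when
the first register is r0, which an indexed access would read as the
constant zero. */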
21405 \f
21406 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21407
21408 static bool
21409 rs6000_output_addr_const_extra (FILE *file, rtx x)
21410 {
21411 if (GET_CODE (x) == UNSPEC)
21412 switch (XINT (x, 1))
21413 {
21414 case UNSPEC_TOCREL:
21415 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21416 && REG_P (XVECEXP (x, 0, 1))
21417 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21418 output_addr_const (file, XVECEXP (x, 0, 0));
21419 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21420 {
21421 if (INTVAL (tocrel_offset_oac) >= 0)
21422 fprintf (file, "+");
21423 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21424 }
21425 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21426 {
21427 putc ('-', file);
21428 assemble_name (file, toc_label_name);
21429 need_toc_init = 1;
21430 }
21431 else if (TARGET_ELF)
21432 fputs ("@toc", file);
21433 return true;
21434
21435 #if TARGET_MACHO
21436 case UNSPEC_MACHOPIC_OFFSET:
21437 output_addr_const (file, XVECEXP (x, 0, 0));
21438 putc ('-', file);
21439 machopic_output_function_base_name (file);
21440 return true;
21441 #endif
21442 }
21443 return false;
21444 }
21445 \f
21446 /* Target hook for assembling integer objects. The PowerPC version has
21447 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21448 is defined. It also needs to handle DI-mode objects on 64-bit
21449 targets. */
21450
21451 static bool
21452 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21453 {
21454 #ifdef RELOCATABLE_NEEDS_FIXUP
21455 /* Special handling for SI values. */
21456 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21457 {
21458 static int recurse = 0;
21459
21460 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21461 the .fixup section. Since the TOC section is already relocated, we
21462 don't need to mark it here. We used to skip the text section, but it
21463 should never be valid for relocated addresses to be placed in the text
21464 section. */
21465 if (DEFAULT_ABI == ABI_V4
21466 && (TARGET_RELOCATABLE || flag_pic > 1)
21467 && in_section != toc_section
21468 && !recurse
21469 && !CONST_SCALAR_INT_P (x)
21470 && CONSTANT_P (x))
21471 {
21472 char buf[256];
21473
21474 recurse = 1;
21475 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21476 fixuplabelno++;
21477 ASM_OUTPUT_LABEL (asm_out_file, buf);
21478 fprintf (asm_out_file, "\t.long\t(");
21479 output_addr_const (asm_out_file, x);
21480 fprintf (asm_out_file, ")@fixup\n");
21481 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21482 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21483 fprintf (asm_out_file, "\t.long\t");
21484 assemble_name (asm_out_file, buf);
21485 fprintf (asm_out_file, "\n\t.previous\n");
21486 recurse = 0;
21487 return true;
21488 }
21489 /* Remove initial .'s to turn a -mcall-aixdesc function
21490 address into the address of the descriptor, not the function
21491 itself. */
21492 else if (GET_CODE (x) == SYMBOL_REF
21493 && XSTR (x, 0)[0] == '.'
21494 && DEFAULT_ABI == ABI_AIX)
21495 {
21496 const char *name = XSTR (x, 0);
21497 while (*name == '.')
21498 name++;
21499
21500 fprintf (asm_out_file, "\t.long\t%s\n", name);
21501 return true;
21502 }
21503 }
21504 #endif /* RELOCATABLE_NEEDS_FIXUP */
21505 return default_assemble_integer (x, size, aligned_p);
21506 }
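
/* An illustrative sketch of the -mrelocatable fixup path above:
emitting the address of "sym" as data produces roughly

	.LCPn:
		.long (sym)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCPn
		.previous

(with n taken from fixuplabelno), so that startup code can find and
relocate the word at .LCPn. */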
21507
21508 /* Return a template string for assembly to emit when making an
21509 external call. FUNOP is the call mem argument operand number. */
21510
21511 static const char *
21512 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21513 {
21514 /* -Wformat-overflow workaround, without which gcc thinks that %u
21515 might produce 10 digits. */
21516 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21517
21518 char arg[12];
21519 arg[0] = 0;
21520 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21521 {
21522 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21523 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21524 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21525 sprintf (arg, "(%%&@tlsld)");
21526 else
21527 gcc_unreachable ();
21528 }
21529
21530 /* The magic 32768 offset here corresponds to the offset of
21531 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21532 char z[11];
21533 sprintf (z, "%%z%u%s", funop,
21534 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21535 ? "+32768" : ""));
21536
21537 static char str[32]; /* 2 spare */
21538 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21539 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21540 sibcall ? "" : "\n\tnop");
21541 else if (DEFAULT_ABI == ABI_V4)
21542 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21543 flag_pic ? "@plt" : "");
21544 #if TARGET_MACHO
21545 /* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case. */
21546 else if (DEFAULT_ABI == ABI_DARWIN)
21547 {
21548 /* The cookie is in operand func+2. */
21549 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21550 int cookie = INTVAL (operands[funop + 2]);
21551 if (cookie & CALL_LONG)
21552 {
21553 tree funname = get_identifier (XSTR (operands[funop], 0));
21554 tree labelname = get_prev_label (funname);
21555 gcc_checking_assert (labelname && !sibcall);
21556
21557 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21558 instruction will reach 'foo', otherwise link as 'bl L42'".
21559 "L42" should be a 'branch island', that will do a far jump to
21560 'foo'. Branch islands are generated in
21561 macho_branch_islands(). */
21562 sprintf (str, "jbsr %%z%u,%.10s", funop,
21563 IDENTIFIER_POINTER (labelname));
21564 }
21565 else
21566 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21567 after the call. */
21568 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21569 }
21570 #endif
21571 else
21572 gcc_unreachable ();
21573 return str;
21574 }
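
/* Illustrative templates produced above (assuming the call target is
operand 1 and no TLS marker): on AIX/ELFv2 a normal call becomes
"bl %z1\n\tnop", where the nop is the slot the linker may patch to
restore the TOC, and on 32-bit SysV with -fPIC it becomes
"bl %z1@plt". */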
21575
21576 const char *
21577 rs6000_call_template (rtx *operands, unsigned int funop)
21578 {
21579 return rs6000_call_template_1 (operands, funop, false);
21580 }
21581
21582 const char *
21583 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21584 {
21585 return rs6000_call_template_1 (operands, funop, true);
21586 }
21587
21588 /* As above, for indirect calls. */
21589
21590 static const char *
21591 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21592 bool sibcall)
21593 {
21594 /* -Wformat-overflow workaround, without which gcc thinks that %u
21595 might produce 10 digits. */
21596 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21597
21598 static char str[144]; /* 1 spare */
21599 char *s = str;
21600 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21601
21602 if (DEFAULT_ABI == ABI_AIX)
21603 s += sprintf (s,
21604 "l%s 2,%%%u\n\t",
21605 ptrload, funop + 2);
21606
21607 /* We don't need the extra code to stop indirect call speculation if
21608 calling via LR. */
21609 bool speculate = (TARGET_MACHO
21610 || rs6000_speculate_indirect_jumps
21611 || (REG_P (operands[funop])
21612 && REGNO (operands[funop]) == LR_REGNO));
21613
21614 if (!TARGET_MACHO && HAVE_AS_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21615 {
21616 const char *rel64 = TARGET_64BIT ? "64" : "";
21617 char tls[29];
21618 tls[0] = 0;
21619 if (GET_CODE (operands[funop + 1]) == UNSPEC)
21620 {
21621 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21622 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21623 rel64, funop + 1);
21624 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21625 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21626 rel64);
21627 else
21628 gcc_unreachable ();
21629 }
21630
21631 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21632 && flag_pic == 2 ? "+32768" : "");
21633 if (!speculate)
21634 {
21635 s += sprintf (s,
21636 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21637 tls, rel64, funop, addend);
21638 s += sprintf (s, "crset 2\n\t");
21639 }
21640 s += sprintf (s,
21641 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21642 tls, rel64, funop, addend);
21643 }
21644 else if (!speculate)
21645 s += sprintf (s, "crset 2\n\t");
21646
21647 if (DEFAULT_ABI == ABI_AIX)
21648 {
21649 if (speculate)
21650 sprintf (s,
21651 "b%%T%ul\n\t"
21652 "l%s 2,%%%u(1)",
21653 funop, ptrload, funop + 3);
21654 else
21655 sprintf (s,
21656 "beq%%T%ul-\n\t"
21657 "l%s 2,%%%u(1)",
21658 funop, ptrload, funop + 3);
21659 }
21660 else if (DEFAULT_ABI == ABI_ELFv2)
21661 {
21662 if (speculate)
21663 sprintf (s,
21664 "b%%T%ul\n\t"
21665 "l%s 2,%%%u(1)",
21666 funop, ptrload, funop + 2);
21667 else
21668 sprintf (s,
21669 "beq%%T%ul-\n\t"
21670 "l%s 2,%%%u(1)",
21671 funop, ptrload, funop + 2);
21672 }
21673 else
21674 {
21675 if (speculate)
21676 sprintf (s,
21677 "b%%T%u%s",
21678 funop, sibcall ? "" : "l");
21679 else
21680 sprintf (s,
21681 "beq%%T%u%s-%s",
21682 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21683 }
21684 return str;
21685 }
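
/* An illustrative sketch: an indirect call through CTR on ELFv2 with
speculation allowed emits "bctrl" followed by "ld 2,24(1)" to reload
the TOC pointer (assuming 24(1), the ELFv2 TOC save slot, as the
offset operand); with -mno-speculate-indirect-jumps the sequence
becomes "crset 2" then "beqctrl-", which serves as a speculation
barrier. */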
21686
21687 const char *
21688 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21689 {
21690 return rs6000_indirect_call_template_1 (operands, funop, false);
21691 }
21692
21693 const char *
21694 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21695 {
21696 return rs6000_indirect_call_template_1 (operands, funop, true);
21697 }
21698
21699 #if HAVE_AS_PLTSEQ
21700 /* Output indirect call insns.
21701 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
21702 const char *
21703 rs6000_pltseq_template (rtx *operands, int which)
21704 {
21705 const char *rel64 = TARGET_64BIT ? "64" : "";
21706 char tls[28];
21707 tls[0] = 0;
21708 if (GET_CODE (operands[3]) == UNSPEC)
21709 {
21710 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21711 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21712 rel64);
21713 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21714 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21715 rel64);
21716 else
21717 gcc_unreachable ();
21718 }
21719
21720 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21721 static char str[96]; /* 15 spare */
21722 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
21723 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21724 && flag_pic == 2 ? "+32768" : "");
21725 switch (which)
21726 {
21727 case 0:
21728 sprintf (str,
21729 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21730 "st%s",
21731 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21732 break;
21733 case 1:
21734 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21735 sprintf (str,
21736 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21737 "lis %%0,0",
21738 tls, off, rel64);
21739 else
21740 sprintf (str,
21741 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21742 "addis %%0,%%1,0",
21743 tls, off, rel64, addend);
21744 break;
21745 case 2:
21746 sprintf (str,
21747 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21748 "l%s %%0,0(%%1)",
21749 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21750 TARGET_64BIT ? "d" : "wz");
21751 break;
21752 case 3:
21753 sprintf (str,
21754 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21755 "mtctr %%1",
21756 tls, rel64, addend);
21757 break;
21758 default:
21759 gcc_unreachable ();
21760 }
21761 return str;
21762 }
21763 #endif
21764
21765 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21766 /* Emit an assembler directive to set symbol visibility for DECL to
21767 VISIBILITY_TYPE. */
21768
21769 static void
21770 rs6000_assemble_visibility (tree decl, int vis)
21771 {
21772 if (TARGET_XCOFF)
21773 return;
21774
21775 /* Functions need to have their entry point symbol visibility set as
21776 well as their descriptor symbol visibility. */
21777 if (DEFAULT_ABI == ABI_AIX
21778 && DOT_SYMBOLS
21779 && TREE_CODE (decl) == FUNCTION_DECL)
21780 {
21781 static const char * const visibility_types[] = {
21782 NULL, "protected", "hidden", "internal"
21783 };
21784
21785 const char *name, *type;
21786
21787 name = ((* targetm.strip_name_encoding)
21788 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21789 type = visibility_types[vis];
21790
21791 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21792 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21793 }
21794 else
21795 default_assemble_visibility (decl, vis);
21796 }
21797 #endif
21798 \f
21799 enum rtx_code
21800 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21801 {
21802 /* Reversing an FP compare takes care: an ordered compare
21803 becomes an unordered compare and vice versa. */
21804 if (mode == CCFPmode
21805 && (!flag_finite_math_only
21806 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21807 || code == UNEQ || code == LTGT))
21808 return reverse_condition_maybe_unordered (code);
21809 else
21810 return reverse_condition (code);
21811 }
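
/* An illustrative sketch: reversing GE in CCFPmode (without
-ffinite-math-only) yields UNLT, so a NaN lands on the reversed
branch; in integer CCmode, GE simply becomes LT. */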
21812
21813 /* Generate a compare for CODE. Return a brand-new rtx that
21814 represents the result of the compare. */
21815
21816 static rtx
21817 rs6000_generate_compare (rtx cmp, machine_mode mode)
21818 {
21819 machine_mode comp_mode;
21820 rtx compare_result;
21821 enum rtx_code code = GET_CODE (cmp);
21822 rtx op0 = XEXP (cmp, 0);
21823 rtx op1 = XEXP (cmp, 1);
21824
21825 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21826 comp_mode = CCmode;
21827 else if (FLOAT_MODE_P (mode))
21828 comp_mode = CCFPmode;
21829 else if (code == GTU || code == LTU
21830 || code == GEU || code == LEU)
21831 comp_mode = CCUNSmode;
21832 else if ((code == EQ || code == NE)
21833 && unsigned_reg_p (op0)
21834 && (unsigned_reg_p (op1)
21835 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21836 /* These are unsigned values; perhaps there will be a later
21837 ordering compare that can be shared with this one. */
21838 comp_mode = CCUNSmode;
21839 else
21840 comp_mode = CCmode;
21841
21842 /* If we have an unsigned compare, make sure we don't have a signed value as
21843 an immediate. */
21844 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21845 && INTVAL (op1) < 0)
21846 {
21847 op0 = copy_rtx_if_shared (op0);
21848 op1 = force_reg (GET_MODE (op0), op1);
21849 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21850 }
21851
21852 /* First, the compare. */
21853 compare_result = gen_reg_rtx (comp_mode);
21854
21855 /* IEEE 128-bit support in VSX registers when we do not have hardware
21856 support. */
21857 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21858 {
21859 rtx libfunc = NULL_RTX;
21860 bool check_nan = false;
21861 rtx dest;
21862
21863 switch (code)
21864 {
21865 case EQ:
21866 case NE:
21867 libfunc = optab_libfunc (eq_optab, mode);
21868 break;
21869
21870 case GT:
21871 case GE:
21872 libfunc = optab_libfunc (ge_optab, mode);
21873 break;
21874
21875 case LT:
21876 case LE:
21877 libfunc = optab_libfunc (le_optab, mode);
21878 break;
21879
21880 case UNORDERED:
21881 case ORDERED:
21882 libfunc = optab_libfunc (unord_optab, mode);
21883 code = (code == UNORDERED) ? NE : EQ;
21884 break;
21885
21886 case UNGE:
21887 case UNGT:
21888 check_nan = true;
21889 libfunc = optab_libfunc (ge_optab, mode);
21890 code = (code == UNGE) ? GE : GT;
21891 break;
21892
21893 case UNLE:
21894 case UNLT:
21895 check_nan = true;
21896 libfunc = optab_libfunc (le_optab, mode);
21897 code = (code == UNLE) ? LE : LT;
21898 break;
21899
21900 case UNEQ:
21901 case LTGT:
21902 check_nan = true;
21903 libfunc = optab_libfunc (eq_optab, mode);
21904 code = (code == UNEQ) ? EQ : NE;
21905 break;
21906
21907 default:
21908 gcc_unreachable ();
21909 }
21910
21911 gcc_assert (libfunc);
21912
21913 if (!check_nan)
21914 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21915 SImode, op0, mode, op1, mode);
21916
21917 /* The library signals an exception for signalling NaNs, so we need to
21918 handle isgreater, etc. by first checking isordered. */
21919 else
21920 {
21921 rtx ne_rtx, normal_dest, unord_dest;
21922 rtx unord_func = optab_libfunc (unord_optab, mode);
21923 rtx join_label = gen_label_rtx ();
21924 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21925 rtx unord_cmp = gen_reg_rtx (comp_mode);
21926
21927
21929 gcc_assert (unord_func);
21930 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21931 SImode, op0, mode, op1, mode);
21932
21933 /* Set the value to 1 if either value is a NaN, and jump to the join
21934 label. */
21935 dest = gen_reg_rtx (SImode);
21936 emit_move_insn (dest, const1_rtx);
21937 emit_insn (gen_rtx_SET (unord_cmp,
21938 gen_rtx_COMPARE (comp_mode, unord_dest,
21939 const0_rtx)));
21940
21941 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21942 emit_jump_insn (gen_rtx_SET (pc_rtx,
21943 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21944 join_ref,
21945 pc_rtx)));
21946
21947 /* Do the normal comparison, knowing that the values are not
21948 NaNs. */
21949 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21950 SImode, op0, mode, op1, mode);
21951
21952 emit_insn (gen_cstoresi4 (dest,
21953 gen_rtx_fmt_ee (code, SImode, normal_dest,
21954 const0_rtx),
21955 normal_dest, const0_rtx));
21956
21957 /* Join the NaN and non-NaN paths. Compare dest against 0. */
21958 emit_label (join_label);
21959 code = NE;
21960 }
21961
21962 emit_insn (gen_rtx_SET (compare_result,
21963 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21964 }
21965
21966 else
21967 {
21968 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21969 CLOBBERs to match cmptf_internal2 pattern. */
21970 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21971 && FLOAT128_IBM_P (GET_MODE (op0))
21972 && TARGET_HARD_FLOAT)
21973 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21974 gen_rtvec (10,
21975 gen_rtx_SET (compare_result,
21976 gen_rtx_COMPARE (comp_mode, op0, op1)),
21977 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21978 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21979 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21980 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21981 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21982 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21983 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21984 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21985 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21986 else if (GET_CODE (op1) == UNSPEC
21987 && XINT (op1, 1) == UNSPEC_SP_TEST)
21988 {
21989 rtx op1b = XVECEXP (op1, 0, 0);
21990 comp_mode = CCEQmode;
21991 compare_result = gen_reg_rtx (CCEQmode);
21992 if (TARGET_64BIT)
21993 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21994 else
21995 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21996 }
21997 else
21998 emit_insn (gen_rtx_SET (compare_result,
21999 gen_rtx_COMPARE (comp_mode, op0, op1)));
22000 }
22001
22002 /* Some kinds of FP comparisons need an OR operation;
22003 under flag_finite_math_only we don't bother. */
22004 if (FLOAT_MODE_P (mode)
22005 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22006 && !flag_finite_math_only
22007 && (code == LE || code == GE
22008 || code == UNEQ || code == LTGT
22009 || code == UNGT || code == UNLT))
22010 {
22011 enum rtx_code or1, or2;
22012 rtx or1_rtx, or2_rtx, compare2_rtx;
22013 rtx or_result = gen_reg_rtx (CCEQmode);
22014
22015 switch (code)
22016 {
22017 case LE: or1 = LT; or2 = EQ; break;
22018 case GE: or1 = GT; or2 = EQ; break;
22019 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22020 case LTGT: or1 = LT; or2 = GT; break;
22021 case UNGT: or1 = UNORDERED; or2 = GT; break;
22022 case UNLT: or1 = UNORDERED; or2 = LT; break;
22023 default: gcc_unreachable ();
22024 }
22025 validate_condition_mode (or1, comp_mode);
22026 validate_condition_mode (or2, comp_mode);
22027 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22028 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22029 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22030 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22031 const_true_rtx);
22032 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22033
22034 compare_result = or_result;
22035 code = EQ;
22036 }
22037
22038 validate_condition_mode (code, GET_MODE (compare_result));
22039
22040 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
22041 }
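
/* A minimal standalone sketch (separate test file, soft-float __float128)
   of why the check_nan path above exists: __builtin_isgreater must not
   raise an exception on quiet NaNs, so the expander first calls the
   unordered libfunc (e.g. __unordkf2 -- the name is assumed here, taken
   from the KFmode unord_optab) before the ordered comparison. */

int quiet_greater (__float128 a, __float128 b)
{
  return __builtin_isgreater (a, b); /* unordered check first, then GT */
}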
22042
22043 \f
22044 /* Return the diagnostic message string if the binary operation OP is
22045 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22046
22047 static const char*
22048 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22049 const_tree type1,
22050 const_tree type2)
22051 {
22052 machine_mode mode1 = TYPE_MODE (type1);
22053 machine_mode mode2 = TYPE_MODE (type2);
22054
22055 /* For complex modes, use the inner type. */
22056 if (COMPLEX_MODE_P (mode1))
22057 mode1 = GET_MODE_INNER (mode1);
22058
22059 if (COMPLEX_MODE_P (mode2))
22060 mode2 = GET_MODE_INNER (mode2);
22061
22062 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22063 double to intermix unless -mfloat128-convert is used. */
22064 if (mode1 == mode2)
22065 return NULL;
22066
22067 if (!TARGET_FLOAT128_CVT)
22068 {
22069 if ((mode1 == KFmode && mode2 == IFmode)
22070 || (mode1 == IFmode && mode2 == KFmode))
22071 return N_("__float128 and __ibm128 cannot be used in the same "
22072 "expression");
22073
22074 if (TARGET_IEEEQUAD
22075 && ((mode1 == IFmode && mode2 == TFmode)
22076 || (mode1 == TFmode && mode2 == IFmode)))
22077 return N_("__ibm128 and long double cannot be used in the same "
22078 "expression");
22079
22080 if (!TARGET_IEEEQUAD
22081 && ((mode1 == KFmode && mode2 == TFmode)
22082 || (mode1 == TFmode && mode2 == KFmode)))
22083 return N_("__float128 and long double cannot be used in the same "
22084 "expression");
22085 }
22086
22087 return NULL;
22088 }
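
/* A standalone usage sketch (separate test file, compiled with
   -mfloat128) of the diagnostics above: mixing the two 128-bit formats
   in one expression is rejected unless -mfloat128-convert is given. */

__float128 qf;
__ibm128 ibm;

__float128 mix (void)
{
  return qf + ibm; /* error: __float128 and __ibm128 cannot be used in
                      the same expression */
}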
22089
22090 \f
22091 /* Expand floating point conversion to/from __float128 and __ibm128. */
22092
22093 void
22094 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22095 {
22096 machine_mode dest_mode = GET_MODE (dest);
22097 machine_mode src_mode = GET_MODE (src);
22098 convert_optab cvt = unknown_optab;
22099 bool do_move = false;
22100 rtx libfunc = NULL_RTX;
22101 rtx dest2;
22102 typedef rtx (*rtx_2func_t) (rtx, rtx);
22103 rtx_2func_t hw_convert = (rtx_2func_t)0;
22104 size_t kf_or_tf;
22105
22106 struct hw_conv_t {
22107 rtx_2func_t from_df;
22108 rtx_2func_t from_sf;
22109 rtx_2func_t from_si_sign;
22110 rtx_2func_t from_si_uns;
22111 rtx_2func_t from_di_sign;
22112 rtx_2func_t from_di_uns;
22113 rtx_2func_t to_df;
22114 rtx_2func_t to_sf;
22115 rtx_2func_t to_si_sign;
22116 rtx_2func_t to_si_uns;
22117 rtx_2func_t to_di_sign;
22118 rtx_2func_t to_di_uns;
22119 } hw_conversions[2] = {
22120 /* conversions to/from KFmode */
22121 {
22122 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22123 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22124 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22125 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22126 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22127 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22128 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22129 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22130 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22131 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22132 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22133 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22134 },
22135
22136 /* conversions to/from TFmode */
22137 {
22138 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22139 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22140 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22141 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22142 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22143 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22144 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22145 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22146 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22147 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22148 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22149 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22150 },
22151 };
22152
22153 if (dest_mode == src_mode)
22154 gcc_unreachable ();
22155
22156 /* Eliminate memory operations. */
22157 if (MEM_P (src))
22158 src = force_reg (src_mode, src);
22159
22160 if (MEM_P (dest))
22161 {
22162 rtx tmp = gen_reg_rtx (dest_mode);
22163 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22164 rs6000_emit_move (dest, tmp, dest_mode);
22165 return;
22166 }
22167
22168 /* Convert to IEEE 128-bit floating point. */
22169 if (FLOAT128_IEEE_P (dest_mode))
22170 {
22171 if (dest_mode == KFmode)
22172 kf_or_tf = 0;
22173 else if (dest_mode == TFmode)
22174 kf_or_tf = 1;
22175 else
22176 gcc_unreachable ();
22177
22178 switch (src_mode)
22179 {
22180 case E_DFmode:
22181 cvt = sext_optab;
22182 hw_convert = hw_conversions[kf_or_tf].from_df;
22183 break;
22184
22185 case E_SFmode:
22186 cvt = sext_optab;
22187 hw_convert = hw_conversions[kf_or_tf].from_sf;
22188 break;
22189
22190 case E_KFmode:
22191 case E_IFmode:
22192 case E_TFmode:
22193 if (FLOAT128_IBM_P (src_mode))
22194 cvt = sext_optab;
22195 else
22196 do_move = true;
22197 break;
22198
22199 case E_SImode:
22200 if (unsigned_p)
22201 {
22202 cvt = ufloat_optab;
22203 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22204 }
22205 else
22206 {
22207 cvt = sfloat_optab;
22208 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22209 }
22210 break;
22211
22212 case E_DImode:
22213 if (unsigned_p)
22214 {
22215 cvt = ufloat_optab;
22216 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22217 }
22218 else
22219 {
22220 cvt = sfloat_optab;
22221 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22222 }
22223 break;
22224
22225 default:
22226 gcc_unreachable ();
22227 }
22228 }
22229
22230 /* Convert from IEEE 128-bit floating point. */
22231 else if (FLOAT128_IEEE_P (src_mode))
22232 {
22233 if (src_mode == KFmode)
22234 kf_or_tf = 0;
22235 else if (src_mode == TFmode)
22236 kf_or_tf = 1;
22237 else
22238 gcc_unreachable ();
22239
22240 switch (dest_mode)
22241 {
22242 case E_DFmode:
22243 cvt = trunc_optab;
22244 hw_convert = hw_conversions[kf_or_tf].to_df;
22245 break;
22246
22247 case E_SFmode:
22248 cvt = trunc_optab;
22249 hw_convert = hw_conversions[kf_or_tf].to_sf;
22250 break;
22251
22252 case E_KFmode:
22253 case E_IFmode:
22254 case E_TFmode:
22255 if (FLOAT128_IBM_P (dest_mode))
22256 cvt = trunc_optab;
22257 else
22258 do_move = true;
22259 break;
22260
22261 case E_SImode:
22262 if (unsigned_p)
22263 {
22264 cvt = ufix_optab;
22265 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22266 }
22267 else
22268 {
22269 cvt = sfix_optab;
22270 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22271 }
22272 break;
22273
22274 case E_DImode:
22275 if (unsigned_p)
22276 {
22277 cvt = ufix_optab;
22278 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22279 }
22280 else
22281 {
22282 cvt = sfix_optab;
22283 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22284 }
22285 break;
22286
22287 default:
22288 gcc_unreachable ();
22289 }
22290 }
22291
22292 /* Both IBM format. */
22293 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22294 do_move = true;
22295
22296 else
22297 gcc_unreachable ();
22298
22299 /* Handle conversion between TFmode/KFmode/IFmode. */
22300 if (do_move)
22301 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22302
22303 /* Handle conversion if we have hardware support. */
22304 else if (TARGET_FLOAT128_HW && hw_convert)
22305 emit_insn ((hw_convert) (dest, src));
22306
22307 /* Call an external function to do the conversion. */
22308 else if (cvt != unknown_optab)
22309 {
22310 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22311 gcc_assert (libfunc != NULL_RTX);
22312
22313 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22314 src, src_mode);
22315
22316 gcc_assert (dest2 != NULL_RTX);
22317 if (!rtx_equal_p (dest, dest2))
22318 emit_move_insn (dest, dest2);
22319 }
22320
22321 else
22322 gcc_unreachable ();
22323
22324 return;
22325 }
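
/* A minimal usage sketch (separate test file, -mfloat128): each of these
   conversions is routed through rs6000_expand_float128_convert, using a
   single hardware instruction under -mfloat128-hardware (power9), or
   otherwise a libgcc call such as __extenddfkf2 or __fixkfsi (the exact
   names are an assumption here, taken from the optab libfuncs). */

__float128 from_double (double d) { return d; /* DFmode -> KFmode */ }
int to_int (__float128 q) { return (int) q; /* KFmode -> SImode */ }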
22326
22327 \f
22328 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22329 can be used as that dest register. Return the dest register. */
22330
22331 rtx
22332 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22333 {
22334 if (op2 == const0_rtx)
22335 return op1;
22336
22337 if (GET_CODE (scratch) == SCRATCH)
22338 scratch = gen_reg_rtx (mode);
22339
22340 if (logical_operand (op2, mode))
22341 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22342 else
22343 emit_insn (gen_rtx_SET (scratch,
22344 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22345
22346 return scratch;
22347 }
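
/* A sketch of the identity rs6000_emit_eqne relies on: XOR (or subtract,
   when OP2 is not a logical-immediate candidate) maps an equality test
   against OP2 onto an equality test against zero. */

int eq_via_xor (long a, long b) { return (a ^ b) == 0; /* a == b */ }
int eq_via_sub (long a, long b) { return (a - b) == 0; /* a == b */ }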
22348
22349 void
22350 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22351 {
22352 rtx condition_rtx;
22353 machine_mode op_mode;
22354 enum rtx_code cond_code;
22355 rtx result = operands[0];
22356
22357 condition_rtx = rs6000_generate_compare (operands[1], mode);
22358 cond_code = GET_CODE (condition_rtx);
22359
22360 if (cond_code == NE
22361 || cond_code == GE || cond_code == LE
22362 || cond_code == GEU || cond_code == LEU
22363 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22364 {
22365 rtx not_result = gen_reg_rtx (CCEQmode);
22366 rtx not_op, rev_cond_rtx;
22367 machine_mode cc_mode;
22368
22369 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22370
22371 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22372 SImode, XEXP (condition_rtx, 0), const0_rtx);
22373 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22374 emit_insn (gen_rtx_SET (not_result, not_op));
22375 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22376 }
22377
22378 op_mode = GET_MODE (XEXP (operands[1], 0));
22379 if (op_mode == VOIDmode)
22380 op_mode = GET_MODE (XEXP (operands[1], 1));
22381
22382 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22383 {
22384 PUT_MODE (condition_rtx, DImode);
22385 convert_move (result, condition_rtx, 0);
22386 }
22387 else
22388 {
22389 PUT_MODE (condition_rtx, SImode);
22390 emit_insn (gen_rtx_SET (result, condition_rtx));
22391 }
22392 }
22393
22394 /* Emit a conditional branch to operands[3], testing the comparison in operands[0]. */
22395
22396 void
22397 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22398 {
22399 rtx condition_rtx, loc_ref;
22400
22401 condition_rtx = rs6000_generate_compare (operands[0], mode);
22402 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22403 emit_jump_insn (gen_rtx_SET (pc_rtx,
22404 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22405 loc_ref, pc_rtx)));
22406 }
22407
22408 /* Return the string to output a conditional branch to LABEL, which is
22409 the operand template of the label; LABEL is NULL if the branch is
22410 really a conditional return.
22411
22412 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22413 condition code register and its mode specifies what kind of
22414 comparison we made.
22415
22416 REVERSED is nonzero if we should reverse the sense of the comparison.
22417
22418 INSN is the insn. */
22419
22420 char *
22421 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22422 {
22423 static char string[64];
22424 enum rtx_code code = GET_CODE (op);
22425 rtx cc_reg = XEXP (op, 0);
22426 machine_mode mode = GET_MODE (cc_reg);
22427 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22428 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22429 int really_reversed = reversed ^ need_longbranch;
22430 char *s = string;
22431 const char *ccode;
22432 const char *pred;
22433 rtx note;
22434
22435 validate_condition_mode (code, mode);
22436
22437 /* Work out which way this really branches. We could always use
22438 reverse_condition_maybe_unordered here, but handling the FP case
22439 separately makes the resulting assembler clearer. */
22440 if (really_reversed)
22441 {
22442 /* Reversal of FP compares takes care -- an ordered compare
22443 becomes an unordered compare and vice versa. */
22444 if (mode == CCFPmode)
22445 code = reverse_condition_maybe_unordered (code);
22446 else
22447 code = reverse_condition (code);
22448 }
22449
22450 switch (code)
22451 {
22452 /* Not all of these are actually distinct opcodes, but
22453 we distinguish them for clarity of the resulting assembler. */
22454 case NE: case LTGT:
22455 ccode = "ne"; break;
22456 case EQ: case UNEQ:
22457 ccode = "eq"; break;
22458 case GE: case GEU:
22459 ccode = "ge"; break;
22460 case GT: case GTU: case UNGT:
22461 ccode = "gt"; break;
22462 case LE: case LEU:
22463 ccode = "le"; break;
22464 case LT: case LTU: case UNLT:
22465 ccode = "lt"; break;
22466 case UNORDERED: ccode = "un"; break;
22467 case ORDERED: ccode = "nu"; break;
22468 case UNGE: ccode = "nl"; break;
22469 case UNLE: ccode = "ng"; break;
22470 default:
22471 gcc_unreachable ();
22472 }
22473
22474 /* Maybe we have a guess as to how likely the branch is. */
22475 pred = "";
22476 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22477 if (note != NULL_RTX)
22478 {
22479 /* PROB is the difference from 50%. */
22480 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22481 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22482
22483 /* Only hint for highly probable/improbable branches on newer cpus when
22484 we have real profile data, as static prediction overrides processor
22485 dynamic prediction. For older cpus we may as well always hint, but
22486 assume not taken for branches that are very close to 50% as a
22487 mispredicted taken branch is more expensive than a
22488 mispredicted not-taken branch. */
22489 if (rs6000_always_hint
22490 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22491 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22492 && br_prob_note_reliable_p (note)))
22493 {
22494 if (abs (prob) > REG_BR_PROB_BASE / 20
22495 && ((prob > 0) ^ need_longbranch))
22496 pred = "+";
22497 else
22498 pred = "-";
22499 }
22500 }
22501
22502 if (label == NULL)
22503 s += sprintf (s, "b%slr%s ", ccode, pred);
22504 else
22505 s += sprintf (s, "b%s%s ", ccode, pred);
22506
22507 /* We need to escape any '%' characters in the reg_names string.
22508 Assume they'd only be the first character.... */
22509 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22510 *s++ = '%';
22511 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22512
22513 if (label != NULL)
22514 {
22515 /* If the branch distance was too far, we may have to use an
22516 unconditional branch to go the distance. */
22517 if (need_longbranch)
22518 s += sprintf (s, ",$+8\n\tb %s", label);
22519 else
22520 s += sprintf (s, ",%s", label);
22521 }
22522
22523 return string;
22524 }
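
/* Usage sketch: given profile data or __builtin_expect, the templates
   above append a '+' or '-' static branch hint, e.g. "beq- cr0,.L2"
   (the exact output depends on -mcpu and rs6000_always_hint). */

extern void rare_path (void);

void maybe (int x)
{
  if (__builtin_expect (x == 0, 0)) /* unlikely: candidate for a '-' hint */
    rare_path ();
}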
22525
22526 /* Return insn for VSX or Altivec comparisons. */
22527
22528 static rtx
22529 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22530 {
22531 rtx mask;
22532 machine_mode mode = GET_MODE (op0);
22533
22534 switch (code)
22535 {
22536 default:
22537 break;
22538
22539 case GE:
22540 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22541 return NULL_RTX;
22542 /* FALLTHRU */
22543
22544 case EQ:
22545 case GT:
22546 case GTU:
22547 case ORDERED:
22548 case UNORDERED:
22549 case UNEQ:
22550 case LTGT:
22551 mask = gen_reg_rtx (mode);
22552 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22553 return mask;
22554 }
22555
22556 return NULL_RTX;
22557 }
22558
22559 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22560 DMODE is expected destination mode. This is a recursive function. */
22561
22562 static rtx
22563 rs6000_emit_vector_compare (enum rtx_code rcode,
22564 rtx op0, rtx op1,
22565 machine_mode dmode)
22566 {
22567 rtx mask;
22568 bool swap_operands = false;
22569 bool try_again = false;
22570
22571 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22572 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22573
22574 /* See if the comparison works as is. */
22575 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22576 if (mask)
22577 return mask;
22578
22579 switch (rcode)
22580 {
22581 case LT:
22582 rcode = GT;
22583 swap_operands = true;
22584 try_again = true;
22585 break;
22586 case LTU:
22587 rcode = GTU;
22588 swap_operands = true;
22589 try_again = true;
22590 break;
22591 case NE:
22592 case UNLE:
22593 case UNLT:
22594 case UNGE:
22595 case UNGT:
22596 /* Invert condition and try again.
22597 e.g., A != B becomes ~(A==B). */
22598 {
22599 enum rtx_code rev_code;
22600 enum insn_code nor_code;
22601 rtx mask2;
22602
22603 rev_code = reverse_condition_maybe_unordered (rcode);
22604 if (rev_code == UNKNOWN)
22605 return NULL_RTX;
22606
22607 nor_code = optab_handler (one_cmpl_optab, dmode);
22608 if (nor_code == CODE_FOR_nothing)
22609 return NULL_RTX;
22610
22611 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22612 if (!mask2)
22613 return NULL_RTX;
22614
22615 mask = gen_reg_rtx (dmode);
22616 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22617 return mask;
22618 }
22619 break;
22620 case GE:
22621 case GEU:
22622 case LE:
22623 case LEU:
22624 /* Try GT/GTU/LT/LTU OR EQ */
22625 {
22626 rtx c_rtx, eq_rtx;
22627 enum insn_code ior_code;
22628 enum rtx_code new_code;
22629
22630 switch (rcode)
22631 {
22632 case GE:
22633 new_code = GT;
22634 break;
22635
22636 case GEU:
22637 new_code = GTU;
22638 break;
22639
22640 case LE:
22641 new_code = LT;
22642 break;
22643
22644 case LEU:
22645 new_code = LTU;
22646 break;
22647
22648 default:
22649 gcc_unreachable ();
22650 }
22651
22652 ior_code = optab_handler (ior_optab, dmode);
22653 if (ior_code == CODE_FOR_nothing)
22654 return NULL_RTX;
22655
22656 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22657 if (!c_rtx)
22658 return NULL_RTX;
22659
22660 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22661 if (!eq_rtx)
22662 return NULL_RTX;
22663
22664 mask = gen_reg_rtx (dmode);
22665 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22666 return mask;
22667 }
22668 break;
22669 default:
22670 return NULL_RTX;
22671 }
22672
22673 if (try_again)
22674 {
22675 if (swap_operands)
22676 std::swap (op0, op1);
22677
22678 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22679 if (mask)
22680 return mask;
22681 }
22682
22683 /* You only get two chances. */
22684 return NULL_RTX;
22685 }
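
/* A standalone sketch of the fallbacks above, written with AltiVec
   intrinsics (separate test file, -maltivec): GE is composed as GT OR'd
   with EQ, and NE as the complement of EQ, mirroring the ior_optab and
   one_cmpl_optab paths in rs6000_emit_vector_compare. */

#include <altivec.h>

vector bool int ge_mask (vector int a, vector int b)
{
  return vec_or (vec_cmpgt (a, b), vec_cmpeq (a, b)); /* a >= b */
}

vector bool int ne_mask (vector int a, vector int b)
{
  vector bool int eq = vec_cmpeq (a, b);
  return vec_nor (eq, eq); /* ~(a == b) */
}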
22686
22687 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22688 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22689 operands for the relation operation COND. */
22690
22691 int
22692 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22693 rtx cond, rtx cc_op0, rtx cc_op1)
22694 {
22695 machine_mode dest_mode = GET_MODE (dest);
22696 machine_mode mask_mode = GET_MODE (cc_op0);
22697 enum rtx_code rcode = GET_CODE (cond);
22698 machine_mode cc_mode = CCmode;
22699 rtx mask;
22700 rtx cond2;
22701 bool invert_move = false;
22702
22703 if (VECTOR_UNIT_NONE_P (dest_mode))
22704 return 0;
22705
22706 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22707 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22708
22709 switch (rcode)
22710 {
22711 /* Swap operands if we can, falling back to doing the operation as
22712 specified and using a NOR to invert the test. */
22713 case NE:
22714 case UNLE:
22715 case UNLT:
22716 case UNGE:
22717 case UNGT:
22718 /* Invert condition and try again.
22719 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22720 invert_move = true;
22721 rcode = reverse_condition_maybe_unordered (rcode);
22722 if (rcode == UNKNOWN)
22723 return 0;
22724 break;
22725
22726 case GE:
22727 case LE:
22728 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22729 {
22730 /* Invert condition to avoid compound test. */
22731 invert_move = true;
22732 rcode = reverse_condition (rcode);
22733 }
22734 break;
22735
22736 case GTU:
22737 case GEU:
22738 case LTU:
22739 case LEU:
22740 /* Mark unsigned tests with CCUNSmode. */
22741 cc_mode = CCUNSmode;
22742
22743 /* Invert condition to avoid compound test if necessary. */
22744 if (rcode == GEU || rcode == LEU)
22745 {
22746 invert_move = true;
22747 rcode = reverse_condition (rcode);
22748 }
22749 break;
22750
22751 default:
22752 break;
22753 }
22754
22755 /* Get the vector mask for the given relational operations. */
22756 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22757
22758 if (!mask)
22759 return 0;
22760
22761 if (invert_move)
22762 std::swap (op_true, op_false);
22763
22764 /* Optimize the constant -1/0 arms, since the comparison mask itself is already -1/0. */
22765 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22766 && (GET_CODE (op_true) == CONST_VECTOR
22767 || GET_CODE (op_false) == CONST_VECTOR))
22768 {
22769 rtx constant_0 = CONST0_RTX (dest_mode);
22770 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22771
22772 if (op_true == constant_m1 && op_false == constant_0)
22773 {
22774 emit_move_insn (dest, mask);
22775 return 1;
22776 }
22777
22778 else if (op_true == constant_0 && op_false == constant_m1)
22779 {
22780 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22781 return 1;
22782 }
22783
22784 /* If we can't use the vector comparison directly, perhaps we can use
22785 the mask for the true or false fields, instead of loading up a
22786 constant. */
22787 if (op_true == constant_m1)
22788 op_true = mask;
22789
22790 if (op_false == constant_0)
22791 op_false = mask;
22792 }
22793
22794 if (!REG_P (op_true) && !SUBREG_P (op_true))
22795 op_true = force_reg (dest_mode, op_true);
22796
22797 if (!REG_P (op_false) && !SUBREG_P (op_false))
22798 op_false = force_reg (dest_mode, op_false);
22799
22800 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22801 CONST0_RTX (dest_mode));
22802 emit_insn (gen_rtx_SET (dest,
22803 gen_rtx_IF_THEN_ELSE (dest_mode,
22804 cond2,
22805 op_true,
22806 op_false)));
22807 return 1;
22808 }
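
/* A standalone sketch (separate test file, -maltivec): the mask plus
   IF_THEN_ELSE emitted above is what vec_sel expresses at the source
   level. */

#include <altivec.h>

vector int vcond_gt (vector int a, vector int b, vector int t, vector int f)
{
  vector bool int m = vec_cmpgt (a, b);
  return vec_sel (f, t, m); /* per element: m ? t : f */
}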
22809
22810 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22811 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of
22812 the last comparison is nonzero/true, FALSE_COND if it is zero/false. Return
22813 0 if the hardware has no such operation. */
22814
22815 static int
22816 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22817 {
22818 enum rtx_code code = GET_CODE (op);
22819 rtx op0 = XEXP (op, 0);
22820 rtx op1 = XEXP (op, 1);
22821 machine_mode compare_mode = GET_MODE (op0);
22822 machine_mode result_mode = GET_MODE (dest);
22823 bool max_p = false;
22824
22825 if (result_mode != compare_mode)
22826 return 0;
22827
22828 if (code == GE || code == GT)
22829 max_p = true;
22830 else if (code == LE || code == LT)
22831 max_p = false;
22832 else
22833 return 0;
22834
22835 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22836 ;
22837
22838 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22839 max_p = !max_p;
22840
22841 else
22842 return 0;
22843
22844 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22845 return 1;
22846 }
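
/* Usage sketch: with -mcpu=power9, source patterns like these are
   recognized by rs6000_emit_p9_fp_minmax and emitted as a single
   xsmaxcdp/xsmincdp instead of a compare plus select. */

double fast_max (double a, double b) { return (a >= b) ? a : b; }
double fast_min (double a, double b) { return (a >= b) ? b : a; }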
22847
22848 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22849 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
22850 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22851 zero/false. Return 0 if the hardware has no such operation. */
22852
22853 static int
22854 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22855 {
22856 enum rtx_code code = GET_CODE (op);
22857 rtx op0 = XEXP (op, 0);
22858 rtx op1 = XEXP (op, 1);
22859 machine_mode result_mode = GET_MODE (dest);
22860 rtx compare_rtx;
22861 rtx cmove_rtx;
22862 rtx clobber_rtx;
22863
22864 if (!can_create_pseudo_p ())
22865 return 0;
22866
22867 switch (code)
22868 {
22869 case EQ:
22870 case GE:
22871 case GT:
22872 break;
22873
22874 case NE:
22875 case LT:
22876 case LE:
22877 code = swap_condition (code);
22878 std::swap (op0, op1);
22879 break;
22880
22881 default:
22882 return 0;
22883 }
22884
22885 /* Generate: [(parallel [(set (dest)
22886 (if_then_else (op (cmp1) (cmp2))
22887 (true)
22888 (false)))
22889 (clobber (scratch))])]. */
22890
22891 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22892 cmove_rtx = gen_rtx_SET (dest,
22893 gen_rtx_IF_THEN_ELSE (result_mode,
22894 compare_rtx,
22895 true_cond,
22896 false_cond));
22897
22898 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22899 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22900 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22901
22902 return 1;
22903 }
22904
22905 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22906 operands of the last comparison is nonzero/true, FALSE_COND if it
22907 is zero/false. Return 0 if the hardware has no such operation. */
22908
22909 int
22910 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22911 {
22912 enum rtx_code code = GET_CODE (op);
22913 rtx op0 = XEXP (op, 0);
22914 rtx op1 = XEXP (op, 1);
22915 machine_mode compare_mode = GET_MODE (op0);
22916 machine_mode result_mode = GET_MODE (dest);
22917 rtx temp;
22918 bool is_against_zero;
22919
22920 /* These modes should always match. */
22921 if (GET_MODE (op1) != compare_mode
22922 /* In the isel case however, we can use a compare immediate, so
22923 op1 may be a small constant. */
22924 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22925 return 0;
22926 if (GET_MODE (true_cond) != result_mode)
22927 return 0;
22928 if (GET_MODE (false_cond) != result_mode)
22929 return 0;
22930
22931 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22932 if (TARGET_P9_MINMAX
22933 && (compare_mode == SFmode || compare_mode == DFmode)
22934 && (result_mode == SFmode || result_mode == DFmode))
22935 {
22936 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22937 return 1;
22938
22939 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22940 return 1;
22941 }
22942
22943 /* Don't allow using floating point comparisons for integer results for
22944 now. */
22945 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22946 return 0;
22947
22948 /* First, work out if the hardware can do this at all, or
22949 if it's too slow.... */
22950 if (!FLOAT_MODE_P (compare_mode))
22951 {
22952 if (TARGET_ISEL)
22953 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22954 return 0;
22955 }
22956
22957 is_against_zero = op1 == CONST0_RTX (compare_mode);
22958
22959 /* A floating-point subtract might overflow, underflow, or produce
22960 an inexact result, thus changing the floating-point flags, so it
22961 can't be generated if we care about that. It's safe if one side
22962 of the construct is zero, since then no subtract will be
22963 generated. */
22964 if (SCALAR_FLOAT_MODE_P (compare_mode)
22965 && flag_trapping_math && ! is_against_zero)
22966 return 0;
22967
22968 /* Eliminate half of the comparisons by switching operands; this
22969 makes the remaining code simpler. */
22970 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22971 || code == LTGT || code == LT || code == UNLE)
22972 {
22973 code = reverse_condition_maybe_unordered (code);
22974 temp = true_cond;
22975 true_cond = false_cond;
22976 false_cond = temp;
22977 }
22978
22979 /* UNEQ and LTGT take four instructions for a comparison with zero;
22980 it'll probably be faster to use a branch here too. */
22981 if (code == UNEQ && HONOR_NANS (compare_mode))
22982 return 0;
22983
22984 /* We're going to try to implement comparisons by performing
22985 a subtract, then comparing against zero. Unfortunately,
22986 Inf - Inf is NaN which is not zero, and so if we don't
22987 know that the operand is finite and the comparison
22988 would treat EQ different to UNORDERED, we can't do it. */
22989 if (HONOR_INFINITIES (compare_mode)
22990 && code != GT && code != UNGE
22991 && (GET_CODE (op1) != CONST_DOUBLE
22992 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22993 /* Constructs of the form (a OP b ? a : b) are safe. */
22994 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22995 || (! rtx_equal_p (op0, true_cond)
22996 && ! rtx_equal_p (op1, true_cond))))
22997 return 0;
22998
22999 /* At this point we know we can use fsel. */
23000
23001 /* Reduce the comparison to a comparison against zero. */
23002 if (! is_against_zero)
23003 {
23004 temp = gen_reg_rtx (compare_mode);
23005 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23006 op0 = temp;
23007 op1 = CONST0_RTX (compare_mode);
23008 }
23009
23010 /* If we don't care about NaNs we can reduce some of the comparisons
23011 down to faster ones. */
23012 if (! HONOR_NANS (compare_mode))
23013 switch (code)
23014 {
23015 case GT:
23016 code = LE;
23017 temp = true_cond;
23018 true_cond = false_cond;
23019 false_cond = temp;
23020 break;
23021 case UNGE:
23022 code = GE;
23023 break;
23024 case UNEQ:
23025 code = EQ;
23026 break;
23027 default:
23028 break;
23029 }
23030
23031 /* Now, reduce everything down to a GE. */
23032 switch (code)
23033 {
23034 case GE:
23035 break;
23036
23037 case LE:
23038 temp = gen_reg_rtx (compare_mode);
23039 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23040 op0 = temp;
23041 break;
23042
23043 case ORDERED:
23044 temp = gen_reg_rtx (compare_mode);
23045 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23046 op0 = temp;
23047 break;
23048
23049 case EQ:
23050 temp = gen_reg_rtx (compare_mode);
23051 emit_insn (gen_rtx_SET (temp,
23052 gen_rtx_NEG (compare_mode,
23053 gen_rtx_ABS (compare_mode, op0))));
23054 op0 = temp;
23055 break;
23056
23057 case UNGE:
23058 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23059 temp = gen_reg_rtx (result_mode);
23060 emit_insn (gen_rtx_SET (temp,
23061 gen_rtx_IF_THEN_ELSE (result_mode,
23062 gen_rtx_GE (VOIDmode,
23063 op0, op1),
23064 true_cond, false_cond)));
23065 false_cond = true_cond;
23066 true_cond = temp;
23067
23068 temp = gen_reg_rtx (compare_mode);
23069 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23070 op0 = temp;
23071 break;
23072
23073 case GT:
23074 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23075 temp = gen_reg_rtx (result_mode);
23076 emit_insn (gen_rtx_SET (temp,
23077 gen_rtx_IF_THEN_ELSE (result_mode,
23078 gen_rtx_GE (VOIDmode,
23079 op0, op1),
23080 true_cond, false_cond)));
23081 true_cond = false_cond;
23082 false_cond = temp;
23083
23084 temp = gen_reg_rtx (compare_mode);
23085 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23086 op0 = temp;
23087 break;
23088
23089 default:
23090 gcc_unreachable ();
23091 }
23092
23093 emit_insn (gen_rtx_SET (dest,
23094 gen_rtx_IF_THEN_ELSE (result_mode,
23095 gen_rtx_GE (VOIDmode,
23096 op0, op1),
23097 true_cond, false_cond)));
23098 return 1;
23099 }
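
/* A scalar model of the fsel operation everything above is reduced to:
   a single GE-against-zero select. The reductions rewrite, e.g., LE as
   GE of the negated operand and EQ as GE of -fabs(op0). */

double fsel_model (double a, double x, double y)
{
  return (a >= 0.0) ? x : y; /* what a single fsel computes */
}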
23100
23101 /* Same as above, but for ints (isel). */
23102
23103 int
23104 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23105 {
23106 rtx condition_rtx, cr;
23107 machine_mode mode = GET_MODE (dest);
23108 enum rtx_code cond_code;
23109 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23110 bool signedp;
23111
23112 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23113 return 0;
23114
23115 /* We still have to do the compare, because isel doesn't do a
23116 compare; it just looks at the CRx bits set by a previous compare
23117 instruction. */
23118 condition_rtx = rs6000_generate_compare (op, mode);
23119 cond_code = GET_CODE (condition_rtx);
23120 cr = XEXP (condition_rtx, 0);
23121 signedp = GET_MODE (cr) == CCmode;
23122
23123 isel_func = (mode == SImode
23124 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23125 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23126
23127 switch (cond_code)
23128 {
23129 case LT: case GT: case LTU: case GTU: case EQ:
23130 /* isel handles these directly. */
23131 break;
23132
23133 default:
23134 /* We need to swap the sense of the comparison. */
23135 {
23136 std::swap (false_cond, true_cond);
23137 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23138 }
23139 break;
23140 }
23141
23142 false_cond = force_reg (mode, false_cond);
23143 if (true_cond != const0_rtx)
23144 true_cond = force_reg (mode, true_cond);
23145
23146 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23147
23148 return 1;
23149 }
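
/* Usage sketch: with -misel, an integer conditional move like this is
   expanded by rs6000_emit_int_cmove into a compare followed by a single
   branchless isel. */

int pick (int a, int b, int t, int f)
{
  return (a < b) ? t : f; /* cmpw; isel */
}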
23150
23151 void
23152 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23153 {
23154 machine_mode mode = GET_MODE (op0);
23155 enum rtx_code c;
23156 rtx target;
23157
23158 /* VSX/altivec have direct min/max insns. */
23159 if ((code == SMAX || code == SMIN)
23160 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23161 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23162 {
23163 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23164 return;
23165 }
23166
23167 if (code == SMAX || code == SMIN)
23168 c = GE;
23169 else
23170 c = GEU;
23171
23172 if (code == SMAX || code == UMAX)
23173 target = emit_conditional_move (dest, c, op0, op1, mode,
23174 op0, op1, mode, 0);
23175 else
23176 target = emit_conditional_move (dest, c, op0, op1, mode,
23177 op1, op0, mode, 0);
23178 gcc_assert (target);
23179 if (target != dest)
23180 emit_move_insn (dest, target);
23181 }
23182
23183 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23184 COND is true. Mark the jump as unlikely to be taken. */
23185
23186 static void
23187 emit_unlikely_jump (rtx cond, rtx label)
23188 {
23189 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23190 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23191 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23192 }
23193
23194 /* A subroutine of the atomic operation splitters. Emit a load-locked
23195 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23196 the zero_extend operation. */
23197
23198 static void
23199 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23200 {
23201 rtx (*fn) (rtx, rtx) = NULL;
23202
23203 switch (mode)
23204 {
23205 case E_QImode:
23206 fn = gen_load_lockedqi;
23207 break;
23208 case E_HImode:
23209 fn = gen_load_lockedhi;
23210 break;
23211 case E_SImode:
23212 if (GET_MODE (mem) == QImode)
23213 fn = gen_load_lockedqi_si;
23214 else if (GET_MODE (mem) == HImode)
23215 fn = gen_load_lockedhi_si;
23216 else
23217 fn = gen_load_lockedsi;
23218 break;
23219 case E_DImode:
23220 fn = gen_load_lockeddi;
23221 break;
23222 case E_TImode:
23223 fn = gen_load_lockedti;
23224 break;
23225 default:
23226 gcc_unreachable ();
23227 }
23228 emit_insn (fn (reg, mem));
23229 }
23230
23231 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23232 instruction in MODE. */
23233
23234 static void
23235 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23236 {
23237 rtx (*fn) (rtx, rtx, rtx) = NULL;
23238
23239 switch (mode)
23240 {
23241 case E_QImode:
23242 fn = gen_store_conditionalqi;
23243 break;
23244 case E_HImode:
23245 fn = gen_store_conditionalhi;
23246 break;
23247 case E_SImode:
23248 fn = gen_store_conditionalsi;
23249 break;
23250 case E_DImode:
23251 fn = gen_store_conditionaldi;
23252 break;
23253 case E_TImode:
23254 fn = gen_store_conditionalti;
23255 break;
23256 default:
23257 gcc_unreachable ();
23258 }
23259
23260 /* Emit sync before stwcx. to address PPC405 Erratum. */
23261 if (PPC405_ERRATUM77)
23262 emit_insn (gen_hwsync ());
23263
23264 emit_insn (fn (res, mem, val));
23265 }
23266
23267 /* Expand barriers before and after a load_locked/store_cond sequence. */
23268
23269 static rtx
23270 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23271 {
23272 rtx addr = XEXP (mem, 0);
23273
23274 if (!legitimate_indirect_address_p (addr, reload_completed)
23275 && !legitimate_indexed_address_p (addr, reload_completed))
23276 {
23277 addr = force_reg (Pmode, addr);
23278 mem = replace_equiv_address_nv (mem, addr);
23279 }
23280
23281 switch (model)
23282 {
23283 case MEMMODEL_RELAXED:
23284 case MEMMODEL_CONSUME:
23285 case MEMMODEL_ACQUIRE:
23286 break;
23287 case MEMMODEL_RELEASE:
23288 case MEMMODEL_ACQ_REL:
23289 emit_insn (gen_lwsync ());
23290 break;
23291 case MEMMODEL_SEQ_CST:
23292 emit_insn (gen_hwsync ());
23293 break;
23294 default:
23295 gcc_unreachable ();
23296 }
23297 return mem;
23298 }
23299
23300 static void
23301 rs6000_post_atomic_barrier (enum memmodel model)
23302 {
23303 switch (model)
23304 {
23305 case MEMMODEL_RELAXED:
23306 case MEMMODEL_CONSUME:
23307 case MEMMODEL_RELEASE:
23308 break;
23309 case MEMMODEL_ACQUIRE:
23310 case MEMMODEL_ACQ_REL:
23311 case MEMMODEL_SEQ_CST:
23312 emit_insn (gen_isync ());
23313 break;
23314 default:
23315 gcc_unreachable ();
23316 }
23317 }
23318
23319 /* A subroutine of the various atomic expanders. For sub-word operations,
23320 we must adjust things to operate on SImode. Given the original MEM,
23321 return a new aligned memory reference. Also build and return the quantities by
23322 which to shift and mask. */
23323
23324 static rtx
23325 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23326 {
23327 rtx addr, align, shift, mask, mem;
23328 HOST_WIDE_INT shift_mask;
23329 machine_mode mode = GET_MODE (orig_mem);
23330
23331 /* For smaller modes, we have to implement this via SImode. */
23332 shift_mask = (mode == QImode ? 0x18 : 0x10);
23333
23334 addr = XEXP (orig_mem, 0);
23335 addr = force_reg (GET_MODE (addr), addr);
23336
23337 /* Aligned memory containing the subword. Generate a new MEM. We
23338 do not want any of the existing MEM_ATTR data, as we're now
23339 accessing memory outside the original object. */
23340 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23341 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23342 mem = gen_rtx_MEM (SImode, align);
23343 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23344 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23345 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23346
23347 /* Shift amount for subword relative to aligned word. */
23348 shift = gen_reg_rtx (SImode);
23349 addr = gen_lowpart (SImode, addr);
23350 rtx tmp = gen_reg_rtx (SImode);
23351 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23352 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23353 if (BYTES_BIG_ENDIAN)
23354 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23355 shift, 1, OPTAB_LIB_WIDEN);
23356 *pshift = shift;
23357
23358 /* Mask for insertion. */
23359 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23360 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23361 *pmask = mask;
23362
23363 return mem;
23364 }
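
/* A scalar model of the little-endian QImode address arithmetic above:
   the containing word is loaded from ADDR & -4 and the byte's bit offset
   is (ADDR & 3) * 8; big-endian additionally XORs the shift with the
   mask (0x18 for QImode). */

unsigned int subword_shift_le (unsigned long addr)
{
  return ((unsigned int) addr << 3) & 0x18; /* == (addr & 3) * 8 */
}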
23365
23366 /* A subroutine of the various atomic expanders. For sub-word operands,
23367 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23368
23369 static rtx
23370 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23371 {
23372 rtx x;
23373
23374 x = gen_reg_rtx (SImode);
23375 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23376 gen_rtx_NOT (SImode, mask),
23377 oldval)));
23378
23379 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23380
23381 return x;
23382 }
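
/* A scalar model of the merge above: keep the bits outside MASK from
   OLDVAL and insert NEWVAL, which has already been shifted into position
   with its other bits clear. */

unsigned int merge_subword (unsigned int oldval, unsigned int newval,
                            unsigned int mask)
{
  return (oldval & ~mask) | newval;
}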
23383
23384 /* A subroutine of the various atomic expanders. For sub-word operands,
23385 extract WIDE to NARROW via SHIFT. */
23386
23387 static void
23388 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23389 {
23390 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23391 wide, 1, OPTAB_LIB_WIDEN);
23392 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23393 }
23394
23395 /* Expand an atomic compare and swap operation. */
23396
23397 void
23398 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23399 {
23400 rtx boolval, retval, mem, oldval, newval, cond;
23401 rtx label1, label2, x, mask, shift;
23402 machine_mode mode, orig_mode;
23403 enum memmodel mod_s, mod_f;
23404 bool is_weak;
23405
23406 boolval = operands[0];
23407 retval = operands[1];
23408 mem = operands[2];
23409 oldval = operands[3];
23410 newval = operands[4];
23411 is_weak = (INTVAL (operands[5]) != 0);
23412 mod_s = memmodel_base (INTVAL (operands[6]));
23413 mod_f = memmodel_base (INTVAL (operands[7]));
23414 orig_mode = mode = GET_MODE (mem);
23415
23416 mask = shift = NULL_RTX;
23417 if (mode == QImode || mode == HImode)
23418 {
23419 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23420 lwarx plus shift/mask operations. With power8, we need to do the
23421 comparison in SImode, but the store is still done in QI/HImode. */
23422 oldval = convert_modes (SImode, mode, oldval, 1);
23423
23424 if (!TARGET_SYNC_HI_QI)
23425 {
23426 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23427
23428 /* Shift and mask OLDVAL into position within the word. */
23429 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23430 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23431
23432 /* Shift and mask NEWVAL into position within the word. */
23433 newval = convert_modes (SImode, mode, newval, 1);
23434 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23435 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23436 }
23437
23438 /* Prepare to adjust the return value. */
23439 retval = gen_reg_rtx (SImode);
23440 mode = SImode;
23441 }
23442 else if (reg_overlap_mentioned_p (retval, oldval))
23443 oldval = copy_to_reg (oldval);
23444
23445 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23446 oldval = copy_to_mode_reg (mode, oldval);
23447
23448 if (reg_overlap_mentioned_p (retval, newval))
23449 newval = copy_to_reg (newval);
23450
23451 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23452
23453 label1 = NULL_RTX;
23454 if (!is_weak)
23455 {
23456 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23457 emit_label (XEXP (label1, 0));
23458 }
23459 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23460
23461 emit_load_locked (mode, retval, mem);
23462
23463 x = retval;
23464 if (mask)
23465 x = expand_simple_binop (SImode, AND, retval, mask,
23466 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23467
23468 cond = gen_reg_rtx (CCmode);
23469 /* If we have TImode, synthesize a comparison. */
23470 if (mode != TImode)
23471 x = gen_rtx_COMPARE (CCmode, x, oldval);
23472 else
23473 {
23474 rtx xor1_result = gen_reg_rtx (DImode);
23475 rtx xor2_result = gen_reg_rtx (DImode);
23476 rtx or_result = gen_reg_rtx (DImode);
23477 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23478 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23479 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23480 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23481
23482 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23483 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23484 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23485 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23486 }
23487
23488 emit_insn (gen_rtx_SET (cond, x));
23489
23490 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23491 emit_unlikely_jump (x, label2);
23492
23493 x = newval;
23494 if (mask)
23495 x = rs6000_mask_atomic_subword (retval, newval, mask);
23496
23497 emit_store_conditional (orig_mode, cond, mem, x);
23498
23499 if (!is_weak)
23500 {
23501 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23502 emit_unlikely_jump (x, label1);
23503 }
23504
23505 if (!is_mm_relaxed (mod_f))
23506 emit_label (XEXP (label2, 0));
23507
23508 rs6000_post_atomic_barrier (mod_s);
23509
23510 if (is_mm_relaxed (mod_f))
23511 emit_label (XEXP (label2, 0));
23512
23513 if (shift)
23514 rs6000_finish_atomic_subword (operands[1], retval, shift);
23515 else if (mode != GET_MODE (operands[1]))
23516 convert_move (operands[1], retval, 1);
23517
23518 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23519 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23520 emit_insn (gen_rtx_SET (boolval, x));
23521 }
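
/* Usage sketch: this expander implements GCC's compare-and-swap
   builtins; a strong SImode CAS such as the following becomes a
   lwarx/cmpw/stwcx. loop with the barriers chosen by the memory model. */

int cas_int (int *p, int expected, int desired)
{
  return __atomic_compare_exchange_n (p, &expected, desired,
                                      0 /* strong */,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}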
23522
23523 /* Expand an atomic exchange operation. */
23524
23525 void
23526 rs6000_expand_atomic_exchange (rtx operands[])
23527 {
23528 rtx retval, mem, val, cond;
23529 machine_mode mode;
23530 enum memmodel model;
23531 rtx label, x, mask, shift;
23532
23533 retval = operands[0];
23534 mem = operands[1];
23535 val = operands[2];
23536 model = memmodel_base (INTVAL (operands[3]));
23537 mode = GET_MODE (mem);
23538
23539 mask = shift = NULL_RTX;
23540 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23541 {
23542 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23543
23544 /* Shift and mask VAL into position within the word. */
23545 val = convert_modes (SImode, mode, val, 1);
23546 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23547 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23548
23549 /* Prepare to adjust the return value. */
23550 retval = gen_reg_rtx (SImode);
23551 mode = SImode;
23552 }
23553
23554 mem = rs6000_pre_atomic_barrier (mem, model);
23555
23556 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23557 emit_label (XEXP (label, 0));
23558
23559 emit_load_locked (mode, retval, mem);
23560
23561 x = val;
23562 if (mask)
23563 x = rs6000_mask_atomic_subword (retval, val, mask);
23564
23565 cond = gen_reg_rtx (CCmode);
23566 emit_store_conditional (mode, cond, mem, x);
23567
23568 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23569 emit_unlikely_jump (x, label);
23570
23571 rs6000_post_atomic_barrier (model);
23572
23573 if (shift)
23574 rs6000_finish_atomic_subword (operands[0], retval, shift);
23575 }
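
/* Usage sketch: a HImode exchange such as this uses lharx/sthcx. on
   TARGET_SYNC_HI_QI (power8 and later); earlier CPUs get the lwarx
   shift/mask sequence built above. */

short swap_short (short *p, short v)
{
  return __atomic_exchange_n (p, v, __ATOMIC_ACQ_REL);
}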
23576
23577 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23578 to perform. MEM is the memory on which to operate. VAL is the second
23579 operand of the binary operator. BEFORE and AFTER are optional locations to
23580 return the value of MEM either before or after the operation. MODEL_RTX
23581 is a CONST_INT containing the memory model to use. */
23582
23583 void
23584 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23585 rtx orig_before, rtx orig_after, rtx model_rtx)
23586 {
23587 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23588 machine_mode mode = GET_MODE (mem);
23589 machine_mode store_mode = mode;
23590 rtx label, x, cond, mask, shift;
23591 rtx before = orig_before, after = orig_after;
23592
23593 mask = shift = NULL_RTX;
23594 /* On power8, we want to use SImode for the operation. On previous systems,
23595 do the operation on the containing word and shift/mask to get the proper
23596 byte or halfword. */
23597 if (mode == QImode || mode == HImode)
23598 {
23599 if (TARGET_SYNC_HI_QI)
23600 {
23601 val = convert_modes (SImode, mode, val, 1);
23602
23603 /* Prepare to adjust the return value. */
23604 before = gen_reg_rtx (SImode);
23605 if (after)
23606 after = gen_reg_rtx (SImode);
23607 mode = SImode;
23608 }
23609 else
23610 {
23611 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23612
23613 /* Shift and mask VAL into position within the word. */
23614 val = convert_modes (SImode, mode, val, 1);
23615 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23616 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23617
23618 switch (code)
23619 {
23620 case IOR:
23621 case XOR:
23622 /* We've already zero-extended VAL. That is sufficient to
23623 make certain that it does not affect other bits. */
23624 mask = NULL;
23625 break;
23626
23627 case AND:
23628 /* If we make certain that all of the other bits in VAL are
23629 set, that will be sufficient to not affect other bits. */
23630 x = gen_rtx_NOT (SImode, mask);
23631 x = gen_rtx_IOR (SImode, x, val);
23632 emit_insn (gen_rtx_SET (val, x));
23633 mask = NULL;
23634 break;
23635
23636 case NOT:
23637 case PLUS:
23638 case MINUS:
23639 /* These will all affect bits outside the field and need
23640 adjustment via MASK within the loop. */
23641 break;
23642
23643 default:
23644 gcc_unreachable ();
23645 }
23646
23647 /* Prepare to adjust the return value. */
23648 before = gen_reg_rtx (SImode);
23649 if (after)
23650 after = gen_reg_rtx (SImode);
23651 store_mode = mode = SImode;
23652 }
23653 }
23654
23655 mem = rs6000_pre_atomic_barrier (mem, model);
23656
23657 label = gen_label_rtx ();
23658 emit_label (label);
23659 label = gen_rtx_LABEL_REF (VOIDmode, label);
23660
23661 if (before == NULL_RTX)
23662 before = gen_reg_rtx (mode);
23663
23664 emit_load_locked (mode, before, mem);
23665
23666 if (code == NOT)
23667 {
23668 x = expand_simple_binop (mode, AND, before, val,
23669 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23670 after = expand_simple_unop (mode, NOT, x, after, 1);
23671 }
23672 else
23673 {
23674 after = expand_simple_binop (mode, code, before, val,
23675 after, 1, OPTAB_LIB_WIDEN);
23676 }
23677
23678 x = after;
23679 if (mask)
23680 {
23681 x = expand_simple_binop (SImode, AND, after, mask,
23682 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23683 x = rs6000_mask_atomic_subword (before, x, mask);
23684 }
23685 else if (store_mode != mode)
23686 x = convert_modes (store_mode, mode, x, 1);
23687
23688 cond = gen_reg_rtx (CCmode);
23689 emit_store_conditional (store_mode, cond, mem, x);
23690
23691 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23692 emit_unlikely_jump (x, label);
23693
23694 rs6000_post_atomic_barrier (model);
23695
23696 if (shift)
23697 {
23698 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23699 then do the calculations in an SImode register. */
23700 if (orig_before)
23701 rs6000_finish_atomic_subword (orig_before, before, shift);
23702 if (orig_after)
23703 rs6000_finish_atomic_subword (orig_after, after, shift);
23704 }
23705 else if (store_mode != mode)
23706 {
23707 /* QImode/HImode on machines with lbarx/lharx where we do the native
23708 operation and then do the calculations in an SImode register. */
23709 if (orig_before)
23710 convert_move (orig_before, before, 1);
23711 if (orig_after)
23712 convert_move (orig_after, after, 1);
23713 }
23714 else if (orig_after && after != orig_after)
23715 emit_move_insn (orig_after, after);
23716 }
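
/* Usage sketch: the fetch-and-op builtins land here; a seq-cst fetch-add
   becomes a hwsync, a lwarx/add/stwcx. loop, and a trailing isync. */

int fetch_add (int *p, int v)
{
  return __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST);
}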
23717
23718 /* Emit instructions to move SRC to DST. Called by splitters for
23719 multi-register moves. It will emit at most one instruction for
23720 each register that is accessed; that is, it won't emit li/lis pairs
23721 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23722 register. */
23723
23724 void
23725 rs6000_split_multireg_move (rtx dst, rtx src)
23726 {
23727 /* The register number of the first register being moved. */
23728 int reg;
23729 /* The mode that is to be moved. */
23730 machine_mode mode;
23731 /* The mode that the move is being done in, and its size. */
23732 machine_mode reg_mode;
23733 int reg_mode_size;
23734 /* The number of registers that will be moved. */
23735 int nregs;
23736
23737 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23738 mode = GET_MODE (dst);
23739 nregs = hard_regno_nregs (reg, mode);
23740 if (FP_REGNO_P (reg))
23741 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23742 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23743 else if (ALTIVEC_REGNO_P (reg))
23744 reg_mode = V16QImode;
23745 else
23746 reg_mode = word_mode;
23747 reg_mode_size = GET_MODE_SIZE (reg_mode);
23748
23749 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23750
23751 /* TDmode residing in FP registers is special, since the ISA requires that
23752 the lower-numbered word of a register pair is always the most significant
23753 word, even in little-endian mode. This does not match the usual subreg
23754 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23755 the appropriate constituent registers "by hand" in little-endian mode.
23756
23757 Note we do not need to check for destructive overlap here since TDmode
23758 can only reside in even/odd register pairs. */
23759 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23760 {
23761 rtx p_src, p_dst;
23762 int i;
23763
23764 for (i = 0; i < nregs; i++)
23765 {
23766 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23767 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23768 else
23769 p_src = simplify_gen_subreg (reg_mode, src, mode,
23770 i * reg_mode_size);
23771
23772 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23773 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23774 else
23775 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23776 i * reg_mode_size);
23777
23778 emit_insn (gen_rtx_SET (p_dst, p_src));
23779 }
23780
23781 return;
23782 }
23783
23784 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23785 {
23786 /* Move register range backwards, if we might have destructive
23787 overlap. */
23788 int i;
23789 for (i = nregs - 1; i >= 0; i--)
23790 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23791 i * reg_mode_size),
23792 simplify_gen_subreg (reg_mode, src, mode,
23793 i * reg_mode_size)));
23794 }
23795 else
23796 {
23797 int i;
23798 int j = -1;
23799 bool used_update = false;
23800 rtx restore_basereg = NULL_RTX;
23801
23802 if (MEM_P (src) && INT_REGNO_P (reg))
23803 {
23804 rtx breg;
23805
23806 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23807 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23808 {
23809 rtx delta_rtx;
23810 breg = XEXP (XEXP (src, 0), 0);
23811 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23812 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23813 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23814 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23815 src = replace_equiv_address (src, breg);
23816 }
23817 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23818 {
23819 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23820 {
23821 rtx basereg = XEXP (XEXP (src, 0), 0);
23822 if (TARGET_UPDATE)
23823 {
23824 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23825 emit_insn (gen_rtx_SET (ndst,
23826 gen_rtx_MEM (reg_mode,
23827 XEXP (src, 0))));
23828 used_update = true;
23829 }
23830 else
23831 emit_insn (gen_rtx_SET (basereg,
23832 XEXP (XEXP (src, 0), 1)));
23833 src = replace_equiv_address (src, basereg);
23834 }
23835 else
23836 {
23837 rtx basereg = gen_rtx_REG (Pmode, reg);
23838 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23839 src = replace_equiv_address (src, basereg);
23840 }
23841 }
23842
23843 breg = XEXP (src, 0);
23844 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23845 breg = XEXP (breg, 0);
23846
23847 /* If the base register we are using to address memory is
23848 also a destination reg, then change that register last. */
23849 if (REG_P (breg)
23850 && REGNO (breg) >= REGNO (dst)
23851 && REGNO (breg) < REGNO (dst) + nregs)
23852 j = REGNO (breg) - REGNO (dst);
23853 }
23854 else if (MEM_P (dst) && INT_REGNO_P (reg))
23855 {
23856 rtx breg;
23857
23858 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23859 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23860 {
23861 rtx delta_rtx;
23862 breg = XEXP (XEXP (dst, 0), 0);
23863 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23864 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23865 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23866
23867 /* We have to update the breg before doing the store.
23868 Use store with update, if available. */
23869
23870 if (TARGET_UPDATE)
23871 {
23872 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23873 emit_insn (TARGET_32BIT
23874 ? (TARGET_POWERPC64
23875 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23876 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23877 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23878 used_update = true;
23879 }
23880 else
23881 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23882 dst = replace_equiv_address (dst, breg);
23883 }
23884 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23885 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23886 {
23887 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23888 {
23889 rtx basereg = XEXP (XEXP (dst, 0), 0);
23890 if (TARGET_UPDATE)
23891 {
23892 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23893 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23894 XEXP (dst, 0)),
23895 nsrc));
23896 used_update = true;
23897 }
23898 else
23899 emit_insn (gen_rtx_SET (basereg,
23900 XEXP (XEXP (dst, 0), 1)));
23901 dst = replace_equiv_address (dst, basereg);
23902 }
23903 else
23904 {
23905 rtx basereg = XEXP (XEXP (dst, 0), 0);
23906 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23907 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23908 && REG_P (basereg)
23909 && REG_P (offsetreg)
23910 && REGNO (basereg) != REGNO (offsetreg));
23911 if (REGNO (basereg) == 0)
23912 {
23913 rtx tmp = offsetreg;
23914 offsetreg = basereg;
23915 basereg = tmp;
23916 }
23917 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23918 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23919 dst = replace_equiv_address (dst, basereg);
23920 }
23921 }
23922 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23923 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23924 }
23925
23926 for (i = 0; i < nregs; i++)
23927 {
23928 /* Calculate index to next subword. */
23929 ++j;
23930 if (j == nregs)
23931 j = 0;
23932
23933 	  /* If the compiler already emitted the move of the first word
23934 	     by store with update, there is no need to do anything.  */
23935 if (j == 0 && used_update)
23936 continue;
23937
23938 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23939 j * reg_mode_size),
23940 simplify_gen_subreg (reg_mode, src, mode,
23941 j * reg_mode_size)));
23942 }
23943 if (restore_basereg != NULL_RTX)
23944 emit_insn (restore_basereg);
23945 }
23946 }
23947
23948 \f
23949 /* This page contains routines that are used to determine what the
23950 function prologue and epilogue code will do and write them out. */
23951
23952 /* Determine whether REG really needs to be saved and restored.  */
23953
23954 static bool
23955 save_reg_p (int reg)
23956 {
23957 /* We need to mark the PIC offset register live for the same conditions
23958 as it is set up, or otherwise it won't be saved before we clobber it. */
23959
23960 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23961 {
23962 /* When calling eh_return, we must return true for all the cases
23963 where conditional_register_usage marks the PIC offset reg
23964 call used. */
23965 if (TARGET_TOC && TARGET_MINIMAL_TOC
23966 && (crtl->calls_eh_return
23967 || df_regs_ever_live_p (reg)
23968 || !constant_pool_empty_p ()))
23969 return true;
23970
23971 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
23972 && flag_pic && crtl->uses_pic_offset_table)
23973 return true;
23974 }
23975
23976 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23977 }
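
/* Illustration (not part of the build): the first_*_to_save scans
   below simply walk the callee-saved ranges and stop at the first
   register for which save_reg_p returns true.  For a function whose
   only callee-saved GPR use is r31, that gives

     first_reg_to_save ()    == 31      (save r31 only)
     first_fp_reg_to_save () == 64      (no FPRs need saving)

   using this file's numbering, where GPRs are registers 0-31 and
   FPRs are 32-63.  */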
23978
23979 /* Return the first fixed-point register that is required to be
23980 saved. 32 if none. */
23981
23982 int
23983 first_reg_to_save (void)
23984 {
23985 int first_reg;
23986
23987 /* Find lowest numbered live register. */
23988 for (first_reg = 13; first_reg <= 31; first_reg++)
23989 if (save_reg_p (first_reg))
23990 break;
23991
23992 return first_reg;
23993 }
23994
23995 /* Similar, for FP regs. */
23996
23997 int
23998 first_fp_reg_to_save (void)
23999 {
24000 int first_reg;
24001
24002 /* Find lowest numbered live register. */
24003 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24004 if (save_reg_p (first_reg))
24005 break;
24006
24007 return first_reg;
24008 }
24009
24010 /* Similar, for AltiVec regs. */
24011
24012 static int
24013 first_altivec_reg_to_save (void)
24014 {
24015 int i;
24016
24017 /* Stack frame remains as is unless we are in AltiVec ABI. */
24018 if (! TARGET_ALTIVEC_ABI)
24019 return LAST_ALTIVEC_REGNO + 1;
24020
24021 /* On Darwin, the unwind routines are compiled without
24022 TARGET_ALTIVEC, and use save_world to save/restore the
24023 altivec registers when necessary. */
24024 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24025 && ! TARGET_ALTIVEC)
24026 return FIRST_ALTIVEC_REGNO + 20;
24027
24028 /* Find lowest numbered live register. */
24029 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24030 if (save_reg_p (i))
24031 break;
24032
24033 return i;
24034 }
24035
24036 /* Return a 32-bit mask of the AltiVec registers we need to set in
24037 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
24038 the 32-bit word is 0. */
24039
24040 static unsigned int
24041 compute_vrsave_mask (void)
24042 {
24043 unsigned int i, mask = 0;
24044
24045 /* On Darwin, the unwind routines are compiled without
24046 TARGET_ALTIVEC, and use save_world to save/restore the
24047 call-saved altivec registers when necessary. */
24048 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24049 && ! TARGET_ALTIVEC)
24050 mask |= 0xFFF;
24051
24052 /* First, find out if we use _any_ altivec registers. */
24053 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24054 if (df_regs_ever_live_p (i))
24055 mask |= ALTIVEC_REG_BIT (i);
24056
24057 if (mask == 0)
24058 return mask;
24059
24060 /* Next, remove the argument registers from the set. These must
24061 be in the VRSAVE mask set by the caller, so we don't need to add
24062 them in again. More importantly, the mask we compute here is
24063 used to generate CLOBBERs in the set_vrsave insn, and we do not
24064 wish the argument registers to die. */
24065 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24066 mask &= ~ALTIVEC_REG_BIT (i);
24067
24068 /* Similarly, remove the return value from the set. */
24069 {
24070 bool yes = false;
24071 diddle_return_value (is_altivec_return_reg, &yes);
24072 if (yes)
24073 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24074 }
24075
24076 return mask;
24077 }
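
/* Worked example (illustration only): with V0 mapped to the most
   significant bit as described above, a function in which only V20
   and V21 are live computes

     mask = ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 20)
	  | ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 21)
	  = 0x00000800 | 0x00000400 = 0x00000c00

   and that is the value later loaded into VRSAVE so the operating
   system knows which vector registers to preserve.  */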
24078
24079 /* For a very restricted set of circumstances, we can cut down the
24080 size of prologues/epilogues by calling our own save/restore-the-world
24081 routines. */
24082
24083 static void
24084 compute_save_world_info (rs6000_stack_t *info)
24085 {
24086 info->world_save_p = 1;
24087 info->world_save_p
24088 = (WORLD_SAVE_P (info)
24089 && DEFAULT_ABI == ABI_DARWIN
24090 && !cfun->has_nonlocal_label
24091 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24092 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24093 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24094 && info->cr_save_p);
24095
24096 /* This will not work in conjunction with sibcalls. Make sure there
24097 are none. (This check is expensive, but seldom executed.) */
24098 if (WORLD_SAVE_P (info))
24099 {
24100 rtx_insn *insn;
24101 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24102 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24103 {
24104 info->world_save_p = 0;
24105 break;
24106 }
24107 }
24108
24109 if (WORLD_SAVE_P (info))
24110 {
24111 /* Even if we're not touching VRsave, make sure there's room on the
24112 stack for it, if it looks like we're calling SAVE_WORLD, which
24113 will attempt to save it. */
24114 info->vrsave_size = 4;
24115
24116 /* If we are going to save the world, we need to save the link register too. */
24117 info->lr_save_p = 1;
24118
24119 /* "Save" the VRsave register too if we're saving the world. */
24120 if (info->vrsave_mask == 0)
24121 info->vrsave_mask = compute_vrsave_mask ();
24122
24123 /* Because the Darwin register save/restore routines only handle
24124 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24125 check. */
24126 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24127 && (info->first_altivec_reg_save
24128 >= FIRST_SAVED_ALTIVEC_REGNO));
24129 }
24130
24131 return;
24132 }
24133
24134
24135 static void
24136 is_altivec_return_reg (rtx reg, void *xyes)
24137 {
24138 bool *yes = (bool *) xyes;
24139 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24140 *yes = true;
24141 }
24142
24143 \f
24144 /* Return whether REG is a global user reg or has been specified by
24145 -ffixed-REG. We should not restore these, and so cannot use
24146 lmw or out-of-line restore functions if there are any. We also
24147 can't save them (well, emit frame notes for them), because frame
24148 unwinding during exception handling will restore saved registers. */
24149
24150 static bool
24151 fixed_reg_p (int reg)
24152 {
24153 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24154 backend sets it, overriding anything the user might have given. */
24155 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24156 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24157 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24158 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24159 return false;
24160
24161 return fixed_regs[reg];
24162 }
24163
24164 /* Determine the strategy for saving/restoring registers. */
24165
24166 enum {
24167 SAVE_MULTIPLE = 0x1,
24168 SAVE_INLINE_GPRS = 0x2,
24169 SAVE_INLINE_FPRS = 0x4,
24170 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24171 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24172 SAVE_INLINE_VRS = 0x20,
24173 REST_MULTIPLE = 0x100,
24174 REST_INLINE_GPRS = 0x200,
24175 REST_INLINE_FPRS = 0x400,
24176 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24177 REST_INLINE_VRS = 0x1000
24178 };
24179
24180 static int
24181 rs6000_savres_strategy (rs6000_stack_t *info,
24182 bool using_static_chain_p)
24183 {
24184 int strategy = 0;
24185
24186 /* Select between in-line and out-of-line save and restore of regs.
24187 First, all the obvious cases where we don't use out-of-line. */
24188 if (crtl->calls_eh_return
24189 || cfun->machine->ra_need_lr)
24190 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24191 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24192 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24193
24194 if (info->first_gp_reg_save == 32)
24195 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24196
24197 if (info->first_fp_reg_save == 64)
24198 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24199
24200 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24201 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24202
24203 /* Define cutoff for using out-of-line functions to save registers. */
24204 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24205 {
24206 if (!optimize_size)
24207 {
24208 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24209 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24210 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24211 }
24212 else
24213 {
24214 /* Prefer out-of-line restore if it will exit. */
24215 if (info->first_fp_reg_save > 61)
24216 strategy |= SAVE_INLINE_FPRS;
24217 if (info->first_gp_reg_save > 29)
24218 {
24219 if (info->first_fp_reg_save == 64)
24220 strategy |= SAVE_INLINE_GPRS;
24221 else
24222 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24223 }
24224 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24225 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24226 }
24227 }
24228 else if (DEFAULT_ABI == ABI_DARWIN)
24229 {
24230 if (info->first_fp_reg_save > 60)
24231 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24232 if (info->first_gp_reg_save > 29)
24233 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24234 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24235 }
24236 else
24237 {
24238 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24239 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24240 || info->first_fp_reg_save > 61)
24241 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24242 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24243 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24244 }
24245
24246 /* Don't bother to try to save things out-of-line if r11 is occupied
24247 by the static chain. It would require too much fiddling and the
24248 static chain is rarely used anyway. FPRs are saved w.r.t the stack
24249 pointer on Darwin, and AIX uses r1 or r12. */
24250 if (using_static_chain_p
24251 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24252 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24253 | SAVE_INLINE_GPRS
24254 | SAVE_INLINE_VRS);
24255
24256 /* Don't ever restore fixed regs. That means we can't use the
24257 out-of-line register restore functions if a fixed reg is in the
24258 range of regs restored. */
24259 if (!(strategy & REST_INLINE_FPRS))
24260 for (int i = info->first_fp_reg_save; i < 64; i++)
24261 if (fixed_regs[i])
24262 {
24263 strategy |= REST_INLINE_FPRS;
24264 break;
24265 }
24266
24267 /* We can only use the out-of-line routines to restore fprs if we've
24268 saved all the registers from first_fp_reg_save in the prologue.
24269 Otherwise, we risk loading garbage. Of course, if we have saved
24270 out-of-line then we know we haven't skipped any fprs. */
24271 if ((strategy & SAVE_INLINE_FPRS)
24272 && !(strategy & REST_INLINE_FPRS))
24273 for (int i = info->first_fp_reg_save; i < 64; i++)
24274 if (!save_reg_p (i))
24275 {
24276 strategy |= REST_INLINE_FPRS;
24277 break;
24278 }
24279
24280 /* Similarly, for altivec regs. */
24281 if (!(strategy & REST_INLINE_VRS))
24282 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24283 if (fixed_regs[i])
24284 {
24285 strategy |= REST_INLINE_VRS;
24286 break;
24287 }
24288
24289 if ((strategy & SAVE_INLINE_VRS)
24290 && !(strategy & REST_INLINE_VRS))
24291 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24292 if (!save_reg_p (i))
24293 {
24294 strategy |= REST_INLINE_VRS;
24295 break;
24296 }
24297
24298 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24299 saved is an out-of-line save or restore. Set up the value for
24300 the next test (excluding out-of-line gprs). */
24301 bool lr_save_p = (info->lr_save_p
24302 || !(strategy & SAVE_INLINE_FPRS)
24303 || !(strategy & SAVE_INLINE_VRS)
24304 || !(strategy & REST_INLINE_FPRS)
24305 || !(strategy & REST_INLINE_VRS));
24306
24307 if (TARGET_MULTIPLE
24308 && !TARGET_POWERPC64
24309 && info->first_gp_reg_save < 31
24310 && !(flag_shrink_wrap
24311 && flag_shrink_wrap_separate
24312 && optimize_function_for_speed_p (cfun)))
24313 {
24314 int count = 0;
24315 for (int i = info->first_gp_reg_save; i < 32; i++)
24316 if (save_reg_p (i))
24317 count++;
24318
24319 if (count <= 1)
24320 /* Don't use store multiple if only one reg needs to be
24321 saved. This can occur for example when the ABI_V4 pic reg
24322 (r30) needs to be saved to make calls, but r31 is not
24323 used. */
24324 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24325 else
24326 {
24327 /* Prefer store multiple for saves over out-of-line
24328 routines, since the store-multiple instruction will
24329 always be smaller. */
24330 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24331
24332 /* The situation is more complicated with load multiple.
24333 We'd prefer to use the out-of-line routines for restores,
24334 since the "exit" out-of-line routines can handle the
24335 restore of LR and the frame teardown. However it doesn't
24336 make sense to use the out-of-line routine if that is the
24337 only reason we'd need to save LR, and we can't use the
24338 "exit" out-of-line gpr restore if we have saved some
24339 fprs; in those cases it is advantageous to use load
24340 multiple when available. */
24341 if (info->first_fp_reg_save != 64 || !lr_save_p)
24342 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24343 }
24344 }
24345
24346 /* Using the "exit" out-of-line routine does not improve code size
24347 if using it would require lr to be saved and if only saving one
24348 or two gprs. */
24349 else if (!lr_save_p && info->first_gp_reg_save > 29)
24350 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24351
24352 /* Don't ever restore fixed regs. */
24353 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24354 for (int i = info->first_gp_reg_save; i < 32; i++)
24355 if (fixed_reg_p (i))
24356 {
24357 strategy |= REST_INLINE_GPRS;
24358 strategy &= ~REST_MULTIPLE;
24359 break;
24360 }
24361
24362 /* We can only use load multiple or the out-of-line routines to
24363 restore gprs if we've saved all the registers from
24364 first_gp_reg_save. Otherwise, we risk loading garbage.
24365 Of course, if we have saved out-of-line or used stmw then we know
24366 we haven't skipped any gprs. */
24367 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24368 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24369 for (int i = info->first_gp_reg_save; i < 32; i++)
24370 if (!save_reg_p (i))
24371 {
24372 strategy |= REST_INLINE_GPRS;
24373 strategy &= ~REST_MULTIPLE;
24374 break;
24375 }
24376
24377 if (TARGET_ELF && TARGET_64BIT)
24378 {
24379 if (!(strategy & SAVE_INLINE_FPRS))
24380 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24381 else if (!(strategy & SAVE_INLINE_GPRS)
24382 && info->first_fp_reg_save == 64)
24383 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24384 }
24385 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24386 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24387
24388 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24389 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24390
24391 return strategy;
24392 }
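
/* Sketch of how callers consume the returned mask (names as defined
   in the enum above; out-of-line routine names vary by ABI and are
   omitted):

     int strategy = rs6000_savres_strategy (info, false);
     if (strategy & SAVE_MULTIPLE)
       ... emit one stmw covering the whole GPR save area ...
     else if (!(strategy & SAVE_INLINE_GPRS))
       ... call the ABI's out-of-line GPR save routine ...
     else
       ... emit individual inline stores ...

   Note the polarity: a set SAVE_INLINE_* bit means "do it inline",
   a clear bit means "use the out-of-line routine".  */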
24393
24394 /* Calculate the stack information for the current function. This is
24395 complicated by having two separate calling sequences, the AIX calling
24396 sequence and the V.4 calling sequence.
24397
24398 AIX (and Darwin/Mac OS X) stack frames look like:
24399 32-bit 64-bit
24400 SP----> +---------------------------------------+
24401 | back chain to caller | 0 0
24402 +---------------------------------------+
24403 | saved CR | 4 8 (8-11)
24404 +---------------------------------------+
24405 | saved LR | 8 16
24406 +---------------------------------------+
24407 | reserved for compilers | 12 24
24408 +---------------------------------------+
24409 | reserved for binders | 16 32
24410 +---------------------------------------+
24411 | saved TOC pointer | 20 40
24412 +---------------------------------------+
24413 | Parameter save area (+padding*) (P) | 24 48
24414 +---------------------------------------+
24415 | Alloca space (A) | 24+P etc.
24416 +---------------------------------------+
24417 | Local variable space (L) | 24+P+A
24418 +---------------------------------------+
24419 | Float/int conversion temporary (X) | 24+P+A+L
24420 +---------------------------------------+
24421 | Save area for AltiVec registers (W) | 24+P+A+L+X
24422 +---------------------------------------+
24423 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24424 +---------------------------------------+
24425 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24426 +---------------------------------------+
24427 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24428 +---------------------------------------+
24429 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24430 +---------------------------------------+
24431 old SP->| back chain to caller's caller |
24432 +---------------------------------------+
24433
24434 * If the alloca area is present, the parameter save area is
24435 padded so that the former starts 16-byte aligned.
24436
24437 The required alignment for AIX configurations is two words (i.e., 8
24438 or 16 bytes).
24439
24440 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24441
24442 SP----> +---------------------------------------+
24443 | Back chain to caller | 0
24444 +---------------------------------------+
24445 | Save area for CR | 8
24446 +---------------------------------------+
24447 | Saved LR | 16
24448 +---------------------------------------+
24449 | Saved TOC pointer | 24
24450 +---------------------------------------+
24451 | Parameter save area (+padding*) (P) | 32
24452 +---------------------------------------+
24453 | Alloca space (A) | 32+P
24454 +---------------------------------------+
24455 | Local variable space (L) | 32+P+A
24456 +---------------------------------------+
24457 | Save area for AltiVec registers (W) | 32+P+A+L
24458 +---------------------------------------+
24459 | AltiVec alignment padding (Y) | 32+P+A+L+W
24460 +---------------------------------------+
24461 | Save area for GP registers (G) | 32+P+A+L+W+Y
24462 +---------------------------------------+
24463 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24464 +---------------------------------------+
24465 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24466 +---------------------------------------+
24467
24468 * If the alloca area is present, the parameter save area is
24469 padded so that the former starts 16-byte aligned.
24470
24471 V.4 stack frames look like:
24472
24473 SP----> +---------------------------------------+
24474 | back chain to caller | 0
24475 +---------------------------------------+
24476 | caller's saved LR | 4
24477 +---------------------------------------+
24478 | Parameter save area (+padding*) (P) | 8
24479 +---------------------------------------+
24480 | Alloca space (A) | 8+P
24481 +---------------------------------------+
24482 | Varargs save area (V) | 8+P+A
24483 +---------------------------------------+
24484 | Local variable space (L) | 8+P+A+V
24485 +---------------------------------------+
24486 | Float/int conversion temporary (X) | 8+P+A+V+L
24487 +---------------------------------------+
24488 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24489 +---------------------------------------+
24490 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24491 +---------------------------------------+
24492 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24493 +---------------------------------------+
24494 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24495 +---------------------------------------+
24496 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24497 +---------------------------------------+
24498 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24499 +---------------------------------------+
24500 old SP->| back chain to caller's caller |
24501 +---------------------------------------+
24502
24503 * If the alloca area is present and the required alignment is
24504 16 bytes, the parameter save area is padded so that the
24505 alloca area starts 16-byte aligned.
24506
24507 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24508 given. (But note below and in sysv4.h that we require only 8 and
24509 may round up the size of our stack frame anyways. The historical
24510 reason is early versions of powerpc-linux which didn't properly
24511 align the stack at program startup. A happy side-effect is that
24512 -mno-eabi libraries can be used with -meabi programs.)
24513
24514 The EABI configuration defaults to the V.4 layout. However,
24515 the stack alignment requirements may differ. If -mno-eabi is not
24516 given, the required stack alignment is 8 bytes; if -mno-eabi is
24517 given, the required alignment is 16 bytes. (But see V.4 comment
24518 above.) */
24519
24520 #ifndef ABI_STACK_BOUNDARY
24521 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24522 #endif
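
/* Worked example for the ELFv2 diagram above (illustration only):
   with no parameter save area or alloca (P = A = 0), 16 bytes of
   locals (L = 16) and no vector area (W = Y = 0), the GP save area
   starts at SP + 32 + 16 = SP + 48 and the FP save area follows it.
   rs6000_stack_info below records the same layout as negative
   offsets from the frame top (the incoming SP) instead.  */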
24523
24524 static rs6000_stack_t *
24525 rs6000_stack_info (void)
24526 {
24527 /* We should never be called for thunks, we are not set up for that. */
24528 gcc_assert (!cfun->is_thunk);
24529
24530 rs6000_stack_t *info = &stack_info;
24531 int reg_size = TARGET_32BIT ? 4 : 8;
24532 int ehrd_size;
24533 int ehcr_size;
24534 int save_align;
24535 int first_gp;
24536 HOST_WIDE_INT non_fixed_size;
24537 bool using_static_chain_p;
24538
24539 if (reload_completed && info->reload_completed)
24540 return info;
24541
24542 memset (info, 0, sizeof (*info));
24543 info->reload_completed = reload_completed;
24544
24545 /* Select which calling sequence. */
24546 info->abi = DEFAULT_ABI;
24547
24548 /* Calculate which registers need to be saved & save area size. */
24549 info->first_gp_reg_save = first_reg_to_save ();
24550 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24551 even if it currently looks like we won't. Reload may need it to
24552 get at a constant; if so, it will have already created a constant
24553 pool entry for it. */
24554 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24555 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24556 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24557 && crtl->uses_const_pool
24558 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24559 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24560 else
24561 first_gp = info->first_gp_reg_save;
24562
24563 info->gp_size = reg_size * (32 - first_gp);
24564
24565 info->first_fp_reg_save = first_fp_reg_to_save ();
24566 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24567
24568 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24569 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24570 - info->first_altivec_reg_save);
24571
24572 /* Does this function call anything? */
24573 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24574
24575 /* Determine if we need to save the condition code registers. */
24576 if (save_reg_p (CR2_REGNO)
24577 || save_reg_p (CR3_REGNO)
24578 || save_reg_p (CR4_REGNO))
24579 {
24580 info->cr_save_p = 1;
24581 if (DEFAULT_ABI == ABI_V4)
24582 info->cr_size = reg_size;
24583 }
24584
24585 /* If the current function calls __builtin_eh_return, then we need
24586 to allocate stack space for registers that will hold data for
24587 the exception handler. */
24588 if (crtl->calls_eh_return)
24589 {
24590 unsigned int i;
24591 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24592 continue;
24593
24594 ehrd_size = i * UNITS_PER_WORD;
24595 }
24596 else
24597 ehrd_size = 0;
24598
24599 /* In the ELFv2 ABI, we also need to allocate space for separate
24600 CR field save areas if the function calls __builtin_eh_return. */
24601 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24602 {
24603 /* This hard-codes that we have three call-saved CR fields. */
24604 ehcr_size = 3 * reg_size;
24605 /* We do *not* use the regular CR save mechanism. */
24606 info->cr_save_p = 0;
24607 }
24608 else
24609 ehcr_size = 0;
24610
24611 /* Determine various sizes. */
24612 info->reg_size = reg_size;
24613 info->fixed_size = RS6000_SAVE_AREA;
24614 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24615 if (cfun->calls_alloca)
24616 info->parm_size =
24617 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24618 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24619 else
24620 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24621 TARGET_ALTIVEC ? 16 : 8);
24622 if (FRAME_GROWS_DOWNWARD)
24623 info->vars_size
24624 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24625 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24626 - (info->fixed_size + info->vars_size + info->parm_size);
24627
24628 if (TARGET_ALTIVEC_ABI)
24629 info->vrsave_mask = compute_vrsave_mask ();
24630
24631 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24632 info->vrsave_size = 4;
24633
24634 compute_save_world_info (info);
24635
24636 /* Calculate the offsets. */
24637 switch (DEFAULT_ABI)
24638 {
24639 case ABI_NONE:
24640 default:
24641 gcc_unreachable ();
24642
24643 case ABI_AIX:
24644 case ABI_ELFv2:
24645 case ABI_DARWIN:
24646 info->fp_save_offset = -info->fp_size;
24647 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24648
24649 if (TARGET_ALTIVEC_ABI)
24650 {
24651 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24652
24653 /* Align stack so vector save area is on a quadword boundary.
24654 The padding goes above the vectors. */
24655 if (info->altivec_size != 0)
24656 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24657
24658 info->altivec_save_offset = info->vrsave_save_offset
24659 - info->altivec_padding_size
24660 - info->altivec_size;
24661 gcc_assert (info->altivec_size == 0
24662 || info->altivec_save_offset % 16 == 0);
24663
24664 /* Adjust for AltiVec case. */
24665 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24666 }
24667 else
24668 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24669
24670 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24671 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24672 info->lr_save_offset = 2*reg_size;
24673 break;
24674
24675 case ABI_V4:
24676 info->fp_save_offset = -info->fp_size;
24677 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24678 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24679
24680 if (TARGET_ALTIVEC_ABI)
24681 {
24682 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24683
24684 /* Align stack so vector save area is on a quadword boundary. */
24685 if (info->altivec_size != 0)
24686 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24687
24688 info->altivec_save_offset = info->vrsave_save_offset
24689 - info->altivec_padding_size
24690 - info->altivec_size;
24691
24692 /* Adjust for AltiVec case. */
24693 info->ehrd_offset = info->altivec_save_offset;
24694 }
24695 else
24696 info->ehrd_offset = info->cr_save_offset;
24697
24698 info->ehrd_offset -= ehrd_size;
24699 info->lr_save_offset = reg_size;
24700 }
24701
24702 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24703 info->save_size = RS6000_ALIGN (info->fp_size
24704 + info->gp_size
24705 + info->altivec_size
24706 + info->altivec_padding_size
24707 + ehrd_size
24708 + ehcr_size
24709 + info->cr_size
24710 + info->vrsave_size,
24711 save_align);
24712
24713 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24714
24715 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24716 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24717
24718 /* Determine if we need to save the link register. */
24719 if (info->calls_p
24720 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24721 && crtl->profile
24722 && !TARGET_PROFILE_KERNEL)
24723 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24724 #ifdef TARGET_RELOCATABLE
24725 || (DEFAULT_ABI == ABI_V4
24726 && (TARGET_RELOCATABLE || flag_pic > 1)
24727 && !constant_pool_empty_p ())
24728 #endif
24729 || rs6000_ra_ever_killed ())
24730 info->lr_save_p = 1;
24731
24732 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24733 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24734 && call_used_regs[STATIC_CHAIN_REGNUM]);
24735 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24736
24737 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24738 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24739 || !(info->savres_strategy & SAVE_INLINE_VRS)
24740 || !(info->savres_strategy & REST_INLINE_GPRS)
24741 || !(info->savres_strategy & REST_INLINE_FPRS)
24742 || !(info->savres_strategy & REST_INLINE_VRS))
24743 info->lr_save_p = 1;
24744
24745 if (info->lr_save_p)
24746 df_set_regs_ever_live (LR_REGNO, true);
24747
24748 /* Determine if we need to allocate any stack frame:
24749
24750 For AIX we need to push the stack if a frame pointer is needed
24751 (because the stack might be dynamically adjusted), if we are
24752 debugging, if we make calls, or if the sum of fp_save, gp_save,
24753 and local variables are more than the space needed to save all
24754 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24755 + 18*8 = 288 (GPR13 reserved).
24756
24757 For V.4 we don't have the stack cushion that AIX uses, but assume
24758 that the debugger can handle stackless frames. */
24759
24760 if (info->calls_p)
24761 info->push_p = 1;
24762
24763 else if (DEFAULT_ABI == ABI_V4)
24764 info->push_p = non_fixed_size != 0;
24765
24766 else if (frame_pointer_needed)
24767 info->push_p = 1;
24768
24769 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24770 info->push_p = 1;
24771
24772 else
24773 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24774
24775 return info;
24776 }
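
/* Example of the size arithmetic above (a sketch; RS6000_ALIGN
   rounds its first argument up to a multiple of its second): a
   64-bit ELFv2 function with no vector or CR save areas that must
   save its last three GPRs and last two FPRs gets

     first_gp_reg_save == 29  ->  gp_size = 8 * (32 - 29) = 24
     first_fp_reg_save == 62  ->  fp_size = 8 * (64 - 62) = 16
     save_size = RS6000_ALIGN (24 + 16, 16) = 48

   and total_size is likewise rounded up to the ABI stack boundary.  */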
24777
/* Print the stack frame layout computed by rs6000_stack_info to
   stderr, for debugging.  */
24778 static void
24779 debug_stack_info (rs6000_stack_t *info)
24780 {
24781 const char *abi_string;
24782
24783 if (! info)
24784 info = rs6000_stack_info ();
24785
24786 fprintf (stderr, "\nStack information for function %s:\n",
24787 ((current_function_decl && DECL_NAME (current_function_decl))
24788 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24789 : "<unknown>"));
24790
24791 switch (info->abi)
24792 {
24793 default: abi_string = "Unknown"; break;
24794 case ABI_NONE: abi_string = "NONE"; break;
24795 case ABI_AIX: abi_string = "AIX"; break;
24796 case ABI_ELFv2: abi_string = "ELFv2"; break;
24797 case ABI_DARWIN: abi_string = "Darwin"; break;
24798 case ABI_V4: abi_string = "V.4"; break;
24799 }
24800
24801 fprintf (stderr, "\tABI = %5s\n", abi_string);
24802
24803 if (TARGET_ALTIVEC_ABI)
24804 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24805
24806 if (info->first_gp_reg_save != 32)
24807 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24808
24809 if (info->first_fp_reg_save != 64)
24810 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24811
24812 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24813 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24814 info->first_altivec_reg_save);
24815
24816 if (info->lr_save_p)
24817 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24818
24819 if (info->cr_save_p)
24820 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24821
24822 if (info->vrsave_mask)
24823 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24824
24825 if (info->push_p)
24826 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24827
24828 if (info->calls_p)
24829 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24830
24831 if (info->gp_size)
24832 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24833
24834 if (info->fp_size)
24835 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24836
24837 if (info->altivec_size)
24838 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24839 info->altivec_save_offset);
24840
24841 if (info->vrsave_size)
24842 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24843 info->vrsave_save_offset);
24844
24845 if (info->lr_save_p)
24846 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24847
24848 if (info->cr_save_p)
24849 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24850
24851 if (info->varargs_save_offset)
24852 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24853
24854 if (info->total_size)
24855 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24856 info->total_size);
24857
24858 if (info->vars_size)
24859 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24860 info->vars_size);
24861
24862 if (info->parm_size)
24863 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24864
24865 if (info->fixed_size)
24866 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24867
24868 if (info->gp_size)
24869 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24870
24871 if (info->fp_size)
24872 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24873
24874 if (info->altivec_size)
24875 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24876
24877 if (info->vrsave_size)
24878 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24879
24880 if (info->altivec_padding_size)
24881 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24882 info->altivec_padding_size);
24883
24884 if (info->cr_size)
24885 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24886
24887 if (info->save_size)
24888 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24889
24890 if (info->reg_size != 4)
24891 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24892
24893 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24894
24895 fprintf (stderr, "\n");
24896 }
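
/* For reference, the dump above produces output of this shape (all
   values invented for illustration; only fields with interesting
   values are printed):

     Stack information for function foo:
	     ABI                 = ELFv2
	     first_gp_reg_save   =    29
	     lr_save_p           =     1
	     total_size          =   128
	     save-strategy       = 0666
 */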
24897
24898 rtx
24899 rs6000_return_addr (int count, rtx frame)
24900 {
24901 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24902 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24903 if (count != 0
24904 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24905 {
24906 cfun->machine->ra_needs_full_frame = 1;
24907
24908 if (count == 0)
24909 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24910 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24911 frame = stack_pointer_rtx;
24912 rtx prev_frame_addr = memory_address (Pmode, frame);
24913 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24914 rtx lr_save_off = plus_constant (Pmode,
24915 prev_frame, RETURN_ADDRESS_OFFSET);
24916 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24917 return gen_rtx_MEM (Pmode, lr_save_addr);
24918 }
24919
24920 cfun->machine->ra_need_lr = 1;
24921 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24922 }
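
/* This is the routine behind __builtin_return_address on rs6000,
   e.g.

     void *
     caller_pc (void)
     {
       return __builtin_return_address (0);
     }

   With count == 0 and no PIC prologue the value comes straight from
   the saved-LR pseudo; otherwise the code above follows the back
   chain and loads the saved LR from the caller's frame at
   RETURN_ADDRESS_OFFSET.  */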
24923
24924 /* Say whether a function is a candidate for sibcall handling or not. */
24925
24926 static bool
24927 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24928 {
24929 tree fntype;
24930
24931 /* The sibcall epilogue may clobber the static chain register.
24932 ??? We could work harder and avoid that, but it's probably
24933 not worth the hassle in practice. */
24934 if (CALL_EXPR_STATIC_CHAIN (exp))
24935 return false;
24936
24937 if (decl)
24938 fntype = TREE_TYPE (decl);
24939 else
24940 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24941
24942 /* We can't do it if the called function has more vector parameters
24943 than the current function; there's nowhere to put the VRsave code. */
24944 if (TARGET_ALTIVEC_ABI
24945 && TARGET_ALTIVEC_VRSAVE
24946 && !(decl && decl == current_function_decl))
24947 {
24948 function_args_iterator args_iter;
24949 tree type;
24950 int nvreg = 0;
24951
24952 /* Functions with vector parameters are required to have a
24953 prototype, so the argument type info must be available
24954 here. */
24955 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
24956 if (TREE_CODE (type) == VECTOR_TYPE
24957 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24958 nvreg++;
24959
24960 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
24961 if (TREE_CODE (type) == VECTOR_TYPE
24962 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24963 nvreg--;
24964
24965 if (nvreg > 0)
24966 return false;
24967 }
24968
24969 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24970 functions, because the callee may have a different TOC pointer to
24971 the caller and there's no way to ensure we restore the TOC when
24972 we return. With the secure-plt SYSV ABI we can't make non-local
24973 calls when -fpic/PIC because the plt call stubs use r30. */
24974 if (DEFAULT_ABI == ABI_DARWIN
24975 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24976 && decl
24977 && !DECL_EXTERNAL (decl)
24978 && !DECL_WEAK (decl)
24979 && (*targetm.binds_local_p) (decl))
24980 || (DEFAULT_ABI == ABI_V4
24981 && (!TARGET_SECURE_PLT
24982 || !flag_pic
24983 || (decl
24984 && (*targetm.binds_local_p) (decl)))))
24985 {
24986 tree attr_list = TYPE_ATTRIBUTES (fntype);
24987
24988 if (!lookup_attribute ("longcall", attr_list)
24989 || lookup_attribute ("shortcall", attr_list))
24990 return true;
24991 }
24992
24993 return false;
24994 }
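
/* Example of what the predicate above accepts and rejects (a sketch;
   external_fn stands for any function the compiler cannot prove
   binds locally):

     static int local_fn (int x) { return x + 1; }
     extern int external_fn (int);

     int ok (int x) { return local_fn (x); }	    (may sibcall)
     int not_ok (int x) { return external_fn (x); } (no sibcall on
						     AIX/ELFv2)

   The second case is rejected because the callee may use a different
   TOC pointer, and a sibcall leaves no point at which to restore it.  */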
24995
/* Return nonzero if the link register may be clobbered by something
   other than a sibling call or the prologue/epilogue code itself,
   i.e. if the return address needs to be saved.  */
24996 static int
24997 rs6000_ra_ever_killed (void)
24998 {
24999 rtx_insn *top;
25000 rtx reg;
25001 rtx_insn *insn;
25002
25003 if (cfun->is_thunk)
25004 return 0;
25005
25006 if (cfun->machine->lr_save_state)
25007 return cfun->machine->lr_save_state - 1;
25008
25009 /* regs_ever_live has LR marked as used if any sibcalls are present,
25010 but this should not force saving and restoring in the
25011 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25012 clobbers LR, so that is inappropriate. */
25013
25014 /* Also, the prologue can generate a store into LR that
25015 doesn't really count, like this:
25016
25017 move LR->R0
25018 bcl to set PIC register
25019 move LR->R31
25020 move R0->LR
25021
25022 When we're called from the epilogue, we need to avoid counting
25023 this as a store. */
25024
25025 push_topmost_sequence ();
25026 top = get_insns ();
25027 pop_topmost_sequence ();
25028 reg = gen_rtx_REG (Pmode, LR_REGNO);
25029
25030 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25031 {
25032 if (INSN_P (insn))
25033 {
25034 if (CALL_P (insn))
25035 {
25036 if (!SIBLING_CALL_P (insn))
25037 return 1;
25038 }
25039 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25040 return 1;
25041 else if (set_of (reg, insn) != NULL_RTX
25042 && !prologue_epilogue_contains (insn))
25043 return 1;
25044 }
25045 }
25046 return 0;
25047 }
25048 \f
25049 /* Emit instructions needed to load the TOC register.
25050 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25051 a constant pool; or for SVR4 -fpic. */
25052
25053 void
25054 rs6000_emit_load_toc_table (int fromprolog)
25055 {
25056 rtx dest;
25057 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25058
25059 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25060 {
25061 char buf[30];
25062 rtx lab, tmp1, tmp2, got;
25063
25064 lab = gen_label_rtx ();
25065 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25066 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25067 if (flag_pic == 2)
25068 {
25069 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25070 need_toc_init = 1;
25071 }
25072 else
25073 got = rs6000_got_sym ();
25074 tmp1 = tmp2 = dest;
25075 if (!fromprolog)
25076 {
25077 tmp1 = gen_reg_rtx (Pmode);
25078 tmp2 = gen_reg_rtx (Pmode);
25079 }
25080 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25081 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25082 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25083 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25084 }
25085 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25086 {
25087 emit_insn (gen_load_toc_v4_pic_si ());
25088 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25089 }
25090 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25091 {
25092 char buf[30];
25093 rtx temp0 = (fromprolog
25094 ? gen_rtx_REG (Pmode, 0)
25095 : gen_reg_rtx (Pmode));
25096
25097 if (fromprolog)
25098 {
25099 rtx symF, symL;
25100
25101 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25102 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25103
25104 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25105 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25106
25107 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25108 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25109 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25110 }
25111 else
25112 {
25113 rtx tocsym, lab;
25114
25115 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25116 need_toc_init = 1;
25117 lab = gen_label_rtx ();
25118 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25119 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25120 if (TARGET_LINK_STACK)
25121 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25122 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25123 }
25124 emit_insn (gen_addsi3 (dest, temp0, dest));
25125 }
25126 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25127 {
25128 /* This is for AIX code running in non-PIC ELF32. */
25129 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25130
25131 need_toc_init = 1;
25132 emit_insn (gen_elf_high (dest, realsym));
25133 emit_insn (gen_elf_low (dest, dest, realsym));
25134 }
25135 else
25136 {
25137 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25138
25139 if (TARGET_32BIT)
25140 emit_insn (gen_load_toc_aix_si (dest));
25141 else
25142 emit_insn (gen_load_toc_aix_di (dest));
25143 }
25144 }
25145
25146 /* Emit instructions to restore the link register after determining where
25147 its value has been stored. */
25148
25149 void
25150 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25151 {
25152 rs6000_stack_t *info = rs6000_stack_info ();
25153 rtx operands[2];
25154
25155 operands[0] = source;
25156 operands[1] = scratch;
25157
25158 if (info->lr_save_p)
25159 {
25160 rtx frame_rtx = stack_pointer_rtx;
25161 HOST_WIDE_INT sp_offset = 0;
25162 rtx tmp;
25163
25164 if (frame_pointer_needed
25165 || cfun->calls_alloca
25166 || info->total_size > 32767)
25167 {
25168 tmp = gen_frame_mem (Pmode, frame_rtx);
25169 emit_move_insn (operands[1], tmp);
25170 frame_rtx = operands[1];
25171 }
25172 else if (info->push_p)
25173 sp_offset = info->total_size;
25174
25175 tmp = plus_constant (Pmode, frame_rtx,
25176 info->lr_save_offset + sp_offset);
25177 tmp = gen_frame_mem (Pmode, tmp);
25178 emit_move_insn (tmp, operands[0]);
25179 }
25180 else
25181 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25182
25183 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25184 state of lr_save_p so any change from here on would be a bug. In
25185 particular, stop rs6000_ra_ever_killed from considering the SET
25186 of lr we may have added just above. */
25187 cfun->machine->lr_save_state = info->lr_save_p + 1;
25188 }
25189
/* Alias set used for TOC references; lazily created below.  */
25190 static GTY(()) alias_set_type set = -1;
25191
25192 alias_set_type
25193 get_TOC_alias_set (void)
25194 {
25195 if (set == -1)
25196 set = new_alias_set ();
25197 return set;
25198 }
25199
25200 /* This returns nonzero if the current function uses the TOC. This is
25201 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25202 is generated by the ABI_V4 load_toc_* patterns.
25203 Return 2 instead of 1 if the load_toc_* pattern is in the function
25204 partition that doesn't start the function. */
25205 #if TARGET_ELF
25206 static int
25207 uses_TOC (void)
25208 {
25209 rtx_insn *insn;
25210 int ret = 1;
25211
25212 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25213 {
25214 if (INSN_P (insn))
25215 {
25216 rtx pat = PATTERN (insn);
25217 int i;
25218
25219 if (GET_CODE (pat) == PARALLEL)
25220 for (i = 0; i < XVECLEN (pat, 0); i++)
25221 {
25222 rtx sub = XVECEXP (pat, 0, i);
25223 if (GET_CODE (sub) == USE)
25224 {
25225 sub = XEXP (sub, 0);
25226 if (GET_CODE (sub) == UNSPEC
25227 && XINT (sub, 1) == UNSPEC_TOC)
25228 return ret;
25229 }
25230 }
25231 }
25232 else if (crtl->has_bb_partition
25233 && NOTE_P (insn)
25234 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25235 ret = 2;
25236 }
25237 return 0;
25238 }
25239 #endif
25240
25241 rtx
25242 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25243 {
25244 rtx tocrel, tocreg, hi;
25245
25246 if (TARGET_DEBUG_ADDR)
25247 {
25248 if (GET_CODE (symbol) == SYMBOL_REF)
25249 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25250 XSTR (symbol, 0));
25251 else
25252 {
25253 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25254 GET_RTX_NAME (GET_CODE (symbol)));
25255 debug_rtx (symbol);
25256 }
25257 }
25258
25259 if (!can_create_pseudo_p ())
25260 df_set_regs_ever_live (TOC_REGISTER, true);
25261
25262 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25263 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25264 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25265 return tocrel;
25266
25267 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25268 if (largetoc_reg != NULL)
25269 {
25270 emit_move_insn (largetoc_reg, hi);
25271 hi = largetoc_reg;
25272 }
25273 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25274 }
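
/* Sketch of the two shapes returned above.  For -mcmodel=small, or
   while pseudos may still be created, the whole address is a single
   tocrel:

     (unspec [(symbol_ref "x") (reg 2)] UNSPEC_TOCREL)

   For the larger code models after reload it is split into a
   high/lo_sum pair,

     (lo_sum (high (unspec ...)) (unspec ...))

   with the HIGH part optionally moved into LARGETOC_REG first; this
   is where the addis/ld "@toc@ha"/"@toc@l" asm sequences come from.  */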
25275
25276 /* Issue assembly directives that create a reference to the given DWARF
25277 FRAME_TABLE_LABEL from the current function section. */
25278 void
25279 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25280 {
25281 fprintf (asm_out_file, "\t.ref %s\n",
25282 (* targetm.strip_name_encoding) (frame_table_label));
25283 }
25284 \f
25285 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25286 and the change to the stack pointer. */
25287
25288 static void
25289 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25290 {
25291 rtvec p;
25292 int i;
25293 rtx regs[3];
25294
25295 i = 0;
25296 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25297 if (hard_frame_needed)
25298 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25299 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25300 || (hard_frame_needed
25301 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25302 regs[i++] = fp;
25303
25304 p = rtvec_alloc (i);
25305 while (--i >= 0)
25306 {
25307 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25308 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25309 }
25310
25311 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25312 }
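
/* The "tie" built above has no runtime effect; for the common case
   (FP is the stack pointer and no hard frame pointer is needed) it
   is a single-element parallel,

     (parallel [(set (mem:BLK (reg 1)) (const_int 0))])

   whose only purpose is to make the scheduler treat frame memory
   accesses and the stack pointer update as dependent, so neither can
   be reordered past the other.  */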
25313
25314 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25315 and set the appropriate attributes for the generated insn. Return the
25316 first insn which adjusts the stack pointer or the last insn before
25317 the stack adjustment loop.
25318
25319 SIZE_INT is used to create the CFI note for the allocation.
25320
25321 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25322 since stacks grow to lower addresses its runtime value is -SIZE_INT.
25323
25324 ORIG_SP contains the backchain value that must be stored at *sp. */
25325
25326 static rtx_insn *
25327 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25328 {
25329 rtx_insn *insn;
25330
25331 rtx size_rtx = GEN_INT (-size_int);
25332 if (size_int > 32767)
25333 {
25334 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25335 /* Need a note here so that try_split doesn't get confused. */
25336 if (get_last_insn () == NULL_RTX)
25337 emit_note (NOTE_INSN_DELETED);
25338 insn = emit_move_insn (tmp_reg, size_rtx);
25339 try_split (PATTERN (insn), insn, 0);
25340 size_rtx = tmp_reg;
25341 }
25342
25343 if (Pmode == SImode)
25344 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25345 stack_pointer_rtx,
25346 size_rtx,
25347 orig_sp));
25348 else
25349 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25350 stack_pointer_rtx,
25351 size_rtx,
25352 orig_sp));
25353 rtx par = PATTERN (insn);
25354 gcc_assert (GET_CODE (par) == PARALLEL);
25355 rtx set = XVECEXP (par, 0, 0);
25356 gcc_assert (GET_CODE (set) == SET);
25357 rtx mem = SET_DEST (set);
25358 gcc_assert (MEM_P (mem));
25359 MEM_NOTRAP_P (mem) = 1;
25360 set_mem_alias_set (mem, get_frame_alias_set ());
25361
25362 RTX_FRAME_RELATED_P (insn) = 1;
25363 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25364 gen_rtx_SET (stack_pointer_rtx,
25365 gen_rtx_PLUS (Pmode,
25366 stack_pointer_rtx,
25367 GEN_INT (-size_int))));
25368
25369 /* Emit a blockage to ensure the allocation/probing insns are
25370 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25371 note for similar reasons. */
25372 if (flag_stack_clash_protection)
25373 {
25374 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25375 emit_insn (gen_blockage ());
25376 }
25377
25378 return insn;
25379 }
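
/* The store-with-update insn emitted above maps to one machine
   instruction; for example, on a 64-bit target with SIZE_INT == 256
   and ORIG_SP == r1 it is simply

     stdu r1,-256(r1)

   which decrements the stack pointer and stores the old value (the
   back chain) at the new top of stack in one step, so every
   allocation doubles as a probe of the newly exposed memory.  */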
25380
25381 static HOST_WIDE_INT
25382 get_stack_clash_protection_probe_interval (void)
25383 {
25384 return (HOST_WIDE_INT_1U
25385 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25386 }
25387
25388 static HOST_WIDE_INT
25389 get_stack_clash_protection_guard_size (void)
25390 {
25391 return (HOST_WIDE_INT_1U
25392 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25393 }
25394
25395 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25396 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25397
25398 COPY_REG, if non-null, should contain a copy of the original
25399 stack pointer at exit from this function.
25400
25401 This is subtly different from the Ada probing in that it tries hard to
25402 prevent attacks that jump the stack guard. Thus it is never allowed to
25403 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25404 space without a suitable probe. */
25405 static rtx_insn *
25406 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25407 rtx copy_reg)
25408 {
25409 rtx orig_sp = copy_reg;
25410
25411 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25412
25413 /* Round the size down to a multiple of PROBE_INTERVAL. */
25414 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25415
25416 /* If explicitly requested,
25417 or the rounded size is not the same as the original size,
25418 or the rounded size is greater than a page,
25419 then we will need a copy of the original stack pointer. */
25420 if (rounded_size != orig_size
25421 || rounded_size > probe_interval
25422 || copy_reg)
25423 {
25424 /* If the caller did not request a copy of the incoming stack
25425 pointer, then we use r0 to hold the copy. */
25426 if (!copy_reg)
25427 orig_sp = gen_rtx_REG (Pmode, 0);
25428 emit_move_insn (orig_sp, stack_pointer_rtx);
25429 }
25430
25431 /* There are three cases here.
25432
25433 One is a single probe which is the most common and most efficiently
25434 implemented as it does not have to have a copy of the original
25435 stack pointer if there are no residuals.
25436
25437 Second is unrolled allocation/probes which we use if there's just
25438 a few of them. It needs to save the original stack pointer into a
25439 temporary for use as a source register in the allocation/probe.
25440
25441 Last is a loop. This is the most uncommon case and least efficient. */
25442 rtx_insn *retval = NULL;
25443 if (rounded_size == probe_interval)
25444 {
25445 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25446
25447 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25448 }
25449 else if (rounded_size <= 8 * probe_interval)
25450 {
25451 /* The ABI requires using the store with update insns to allocate
25452 space and store the backchain into the stack.
25453
25454 So we save the current stack pointer into a temporary, then
25455 emit the store-with-update insns to store the saved stack pointer
25456 into the right location in each new page. */
25457 for (int i = 0; i < rounded_size; i += probe_interval)
25458 {
25459 rtx_insn *insn
25460 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25461
25462 /* Save the first stack adjustment in RETVAL. */
25463 if (i == 0)
25464 retval = insn;
25465 }
25466
25467 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25468 }
25469 else
25470 {
25471 /* Compute the ending address. */
25472 rtx end_addr
25473 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25474 rtx rs = GEN_INT (-rounded_size);
25475 rtx_insn *insn;
25476 if (add_operand (rs, Pmode))
25477 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25478 else
25479 {
25480 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25481 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25482 stack_pointer_rtx));
25483 /* Describe the effect of INSN to the CFI engine. */
25484 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25485 gen_rtx_SET (end_addr,
25486 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25487 rs)));
25488 }
25489 RTX_FRAME_RELATED_P (insn) = 1;
25490
25491 /* Emit the loop. */
25492 if (TARGET_64BIT)
25493 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25494 stack_pointer_rtx, orig_sp,
25495 end_addr));
25496 else
25497 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25498 stack_pointer_rtx, orig_sp,
25499 end_addr));
25500 RTX_FRAME_RELATED_P (retval) = 1;
25501 /* Describe the effect of INSN to the CFI engine. */
25502 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25503 gen_rtx_SET (stack_pointer_rtx, end_addr));
25504
25505 /* Emit a blockage to ensure the allocation/probing insns are
25506 not optimized, combined, removed, etc. Other cases handle this
25507 within their call to rs6000_emit_allocate_stack_1. */
25508 emit_insn (gen_blockage ());
25509
25510 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25511 }
25512
25513 if (orig_size != rounded_size)
25514 {
25515 /* Allocate (and implicitly probe) any residual space. */
25516 HOST_WIDE_INT residual = orig_size - rounded_size;
25517
25518 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25519
25520 /* If the residual was the only allocation, then we can return the
25521 allocating insn. */
25522 if (!retval)
25523 retval = insn;
25524 }
25525
25526 return retval;
25527 }
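
/* Worked example of the case selection above (a sketch, assuming the
   default 4 KiB probe interval): for orig_size == 10000,

     rounded_size = ROUND_DOWN (10000, 4096) = 8192
     8192 <= 8 * 4096  ->  unrolled case: two allocating probes
     residual = 10000 - 8192 = 1808  ->  one final allocating store

   Anything with rounded_size > 8 * probe_interval falls through to
   the probe_stack_range loop instead.  */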
25528
25529 /* Emit the correct code for allocating stack space, as insns.
25530 If COPY_REG, make sure a copy of the old frame is left there.
25531 The generated code may use hard register 0 as a temporary. */
25532
25533 static rtx_insn *
25534 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25535 {
25536 rtx_insn *insn;
25537 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25538 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25539 rtx todec = gen_int_mode (-size, Pmode);
25540
25541 if (INTVAL (todec) != -size)
25542 {
25543 warning (0, "stack frame too large");
25544 emit_insn (gen_trap ());
25545 return 0;
25546 }
25547
25548 if (crtl->limit_stack)
25549 {
25550 if (REG_P (stack_limit_rtx)
25551 && REGNO (stack_limit_rtx) > 1
25552 && REGNO (stack_limit_rtx) <= 31)
25553 {
25554 rtx_insn *insn
25555 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25556 gcc_assert (insn);
25557 emit_insn (insn);
25558 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25559 }
25560 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25561 && TARGET_32BIT
25562 && DEFAULT_ABI == ABI_V4
25563 && !flag_pic)
25564 {
25565 rtx toload = gen_rtx_CONST (VOIDmode,
25566 gen_rtx_PLUS (Pmode,
25567 stack_limit_rtx,
25568 GEN_INT (size)));
25569
25570 emit_insn (gen_elf_high (tmp_reg, toload));
25571 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25572 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25573 const0_rtx));
25574 }
25575 else
25576 warning (0, "stack limit expression is not supported");
25577 }
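/* As a sketch of the register case above, with the hypothetical
   -fstack-limit-register=r14 and a 4 KiB frame, the emitted code is
   roughly:

	addi r0,r14,4096
	twllt r1,r0

   i.e. trap if allocating the frame would take the stack pointer
   below the limit (the exact trap form is whatever gen_cond_trap
   produces for LTU). */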
25578
25579 if (flag_stack_clash_protection)
25580 {
25581 if (size < get_stack_clash_protection_guard_size ())
25582 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25583 else
25584 {
25585 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25586 copy_reg);
25587
25588 /* If we asked for a copy with an offset, then we still need to
25589 add in the offset. */
25590 if (copy_reg && copy_off)
25591 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25592 return insn;
25593 }
25594 }
25595
25596 if (copy_reg)
25597 {
25598 if (copy_off != 0)
25599 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25600 else
25601 emit_move_insn (copy_reg, stack_reg);
25602 }
25603
25604 /* Emit the allocation itself. rs6000_emit_allocate_stack_1 issues
25605 the store-with-update insn, then grabs the MEM it creates and
25606 sets the alias set/attributes on it, since gen_frame_mem was not
25607 used to generate that MEM. */
25608 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25609 return insn;
25610 }
25611
25612 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25613
25614 #if PROBE_INTERVAL > 32768
25615 #error Cannot use indexed addressing mode for stack probing
25616 #endif
25617
25618 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25619 inclusive. These are offsets from the current stack pointer. */
25620
25621 static void
25622 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25623 {
25624 /* See if we have a constant small number of probes to generate. If so,
25625 that's the easy case. */
25626 if (first + size <= 32768)
25627 {
25628 HOST_WIDE_INT i;
25629
25630 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25631 it exceeds SIZE. If only one probe is needed, this will not
25632 generate any code. Then probe at FIRST + SIZE. */
25633 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25634 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25635 -(first + i)));
25636
25637 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25638 -(first + size)));
25639 }
25640
25641 /* Otherwise, do the same as above, but in a loop. Note that we must be
25642 extra careful with variables wrapping around because we might be at
25643 the very top (or the very bottom) of the address space and we have
25644 to be able to handle this case properly; in particular, we use an
25645 equality test for the loop condition. */
25646 else
25647 {
25648 HOST_WIDE_INT rounded_size;
25649 rtx r12 = gen_rtx_REG (Pmode, 12);
25650 rtx r0 = gen_rtx_REG (Pmode, 0);
25651
25652 /* Sanity check for the addressing mode we're going to use. */
25653 gcc_assert (first <= 32768);
25654
25655 /* Step 1: round SIZE to the previous multiple of the interval. */
25656
25657 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25658
25659
25660 /* Step 2: compute initial and final value of the loop counter. */
25661
25662 /* TEST_ADDR = SP + FIRST. */
25663 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25664 -first)));
25665
25666 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25667 if (rounded_size > 32768)
25668 {
25669 emit_move_insn (r0, GEN_INT (-rounded_size));
25670 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25671 }
25672 else
25673 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25674 -rounded_size)));
25675
25676
25677 /* Step 3: the loop
25678
25679 do
25680 {
25681 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25682 probe at TEST_ADDR
25683 }
25684 while (TEST_ADDR != LAST_ADDR)
25685
25686 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25687 until it is equal to ROUNDED_SIZE. */
25688
25689 if (TARGET_64BIT)
25690 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25691 else
25692 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25693
25694
25695 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25696 that SIZE is equal to ROUNDED_SIZE. */
25697
25698 if (size != rounded_size)
25699 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25700 }
25701 }
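/* A concrete example of the small-count path above (values
   illustrative): FIRST == 16384, SIZE == 12000 and a 4 KiB
   PROBE_INTERVAL yield probes at sp-20480, sp-24576 and finally
   sp-28384, which is sp-(FIRST+SIZE). */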
25702
25703 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25704 addresses, not offsets. */
25705
25706 static const char *
25707 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25708 {
25709 static int labelno = 0;
25710 char loop_lab[32];
25711 rtx xops[2];
25712
25713 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25714
25715 /* Loop. */
25716 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25717
25718 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25719 xops[0] = reg1;
25720 xops[1] = GEN_INT (-PROBE_INTERVAL);
25721 output_asm_insn ("addi %0,%0,%1", xops);
25722
25723 /* Probe at TEST_ADDR. */
25724 xops[1] = gen_rtx_REG (Pmode, 0);
25725 output_asm_insn ("stw %1,0(%0)", xops);
25726
25727 /* Test if TEST_ADDR == LAST_ADDR. */
25728 xops[1] = reg2;
25729 if (TARGET_64BIT)
25730 output_asm_insn ("cmpd 0,%0,%1", xops);
25731 else
25732 output_asm_insn ("cmpw 0,%0,%1", xops);
25733
25734 /* Branch. */
25735 fputs ("\tbne 0,", asm_out_file);
25736 assemble_name_raw (asm_out_file, loop_lab);
25737 fputc ('\n', asm_out_file);
25738
25739 return "";
25740 }
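/* Putting the pieces above together, the 32-bit loop emitted for a
   4 KiB PROBE_INTERVAL, with r12/r0 as the TEST_ADDR/LAST_ADDR
   registers chosen by rs6000_emit_probe_stack_range, reads:

	.LPSRL0:
		addi r12,r12,-4096
		stw r0,0(r12)
		cmpw 0,r12,r0
		bne 0,.LPSRL0
*/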
25741
25742 /* This function is called when rs6000_frame_related is processing
25743 SETs within a PARALLEL, and returns whether the REGNO save ought to
25744 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25745 for out-of-line register save functions, store multiple, and the
25746 Darwin world_save. They may contain registers that don't really
25747 need saving. */
25748
25749 static bool
25750 interesting_frame_related_regno (unsigned int regno)
25751 {
25752 /* Apparent saves of r0 are actually saves of LR. It doesn't make
25753 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25754 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25755 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25756 as frame related. */
25757 if (regno == 0)
25758 return true;
25759 /* If we see CR2 then we are here on a Darwin world save. Saves of
25760 CR2 signify the whole CR is being saved. This is a long-standing
25761 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25762 that CR needs to be saved. */
25763 if (regno == CR2_REGNO)
25764 return true;
25765 /* Omit frame info for any user-defined global regs. If frame info
25766 is supplied for them, frame unwinding will restore a user reg.
25767 Also omit frame info for any reg we don't need to save, as that
25768 bloats frame info and can cause problems with shrink wrapping.
25769 Since global regs won't be seen as needing to be saved, both of
25770 these conditions are covered by save_reg_p. */
25771 return save_reg_p (regno);
25772 }
25773
25774 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25775 addresses, not offsets.
25776
25777 REG2 contains the backchain that must be stored into *sp at each allocation.
25778
25779 This is subtly different from the Ada probing above in that it tries hard
25780 to prevent attacks that jump the stack guard. Thus, it is never allowed
25781 to allocate more than PROBE_INTERVAL bytes of stack space without a
25782 suitable probe. */
25783
25784 static const char *
25785 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25786 {
25787 static int labelno = 0;
25788 char loop_lab[32];
25789 rtx xops[3];
25790
25791 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25792
25793 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25794
25795 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25796
25797 /* This allocates and probes. */
25798 xops[0] = reg1;
25799 xops[1] = reg2;
25800 xops[2] = GEN_INT (-probe_interval);
25801 if (TARGET_64BIT)
25802 output_asm_insn ("stdu %1,%2(%0)", xops);
25803 else
25804 output_asm_insn ("stwu %1,%2(%0)", xops);
25805
25806 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25807 xops[0] = reg1;
25808 xops[1] = reg3;
25809 if (TARGET_64BIT)
25810 output_asm_insn ("cmpd 0,%0,%1", xops);
25811 else
25812 output_asm_insn ("cmpw 0,%0,%1", xops);
25813
25814 fputs ("\tbne 0,", asm_out_file);
25815 assemble_name_raw (asm_out_file, loop_lab);
25816 fputc ('\n', asm_out_file);
25817
25818 return "";
25819 }
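/* With a 4 KiB probe interval, the 64-bit loop above comes out as
   (register and interval choices illustrative):

	.LPSRL1:
		stdu r0,-4096(r1)	# allocate a page, storing the backchain
		cmpd 0,r1,r12		# reached LAST_ADDR yet?
		bne 0,.LPSRL1

   Unlike the Ada-style loop, every allocation is itself the probe. */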
25820
25821 /* Wrapper around the output_probe_stack_range routines. */
25822 const char *
25823 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25824 {
25825 if (flag_stack_clash_protection)
25826 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25827 else
25828 return output_probe_stack_range_1 (reg1, reg3);
25829 }
25830
25831 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25832 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25833 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25834 deduce these equivalences by itself so it wasn't necessary to hold
25835 its hand so much. Don't be tempted to always supply d2_f_d_e with
25836 the actual cfa register, ie. r31 when we are using a hard frame
25837 pointer. That fails when saving regs off r1, and sched moves the
25838 r31 setup past the reg saves. */
25839
25840 static rtx_insn *
25841 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25842 rtx reg2, rtx repl2)
25843 {
25844 rtx repl;
25845
25846 if (REGNO (reg) == STACK_POINTER_REGNUM)
25847 {
25848 gcc_checking_assert (val == 0);
25849 repl = NULL_RTX;
25850 }
25851 else
25852 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25853 GEN_INT (val));
25854
25855 rtx pat = PATTERN (insn);
25856 if (!repl && !reg2)
25857 {
25858 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25859 if (GET_CODE (pat) == PARALLEL)
25860 for (int i = 0; i < XVECLEN (pat, 0); i++)
25861 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25862 {
25863 rtx set = XVECEXP (pat, 0, i);
25864
25865 if (!REG_P (SET_SRC (set))
25866 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25867 RTX_FRAME_RELATED_P (set) = 1;
25868 }
25869 RTX_FRAME_RELATED_P (insn) = 1;
25870 return insn;
25871 }
25872
25873 /* We expect that 'pat' is either a SET or a PARALLEL containing
25874 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25875 are important so they all have to be marked RTX_FRAME_RELATED_P.
25876 Call simplify_replace_rtx on the SETs rather than the whole insn
25877 so as to leave the other stuff alone (for example USE of r12). */
25878
25879 set_used_flags (pat);
25880 if (GET_CODE (pat) == SET)
25881 {
25882 if (repl)
25883 pat = simplify_replace_rtx (pat, reg, repl);
25884 if (reg2)
25885 pat = simplify_replace_rtx (pat, reg2, repl2);
25886 }
25887 else if (GET_CODE (pat) == PARALLEL)
25888 {
25889 pat = shallow_copy_rtx (pat);
25890 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25891
25892 for (int i = 0; i < XVECLEN (pat, 0); i++)
25893 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25894 {
25895 rtx set = XVECEXP (pat, 0, i);
25896
25897 if (repl)
25898 set = simplify_replace_rtx (set, reg, repl);
25899 if (reg2)
25900 set = simplify_replace_rtx (set, reg2, repl2);
25901 XVECEXP (pat, 0, i) = set;
25902
25903 if (!REG_P (SET_SRC (set))
25904 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25905 RTX_FRAME_RELATED_P (set) = 1;
25906 }
25907 }
25908 else
25909 gcc_unreachable ();
25910
25911 RTX_FRAME_RELATED_P (insn) = 1;
25912 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25913
25914 return insn;
25915 }
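/* An example of the substitution above: if INSN stores r28 at 8(r11)
   while r11 holds sp+32, then calling
   rs6000_frame_related (insn, r11, 32, NULL_RTX, NULL_RTX) attaches a
   REG_FRAME_RELATED_EXPR note of the form

	(set (mem (plus (reg 1) (const_int 40))) (reg 28))

   so the CFI engine sees the save at its sp-relative offset. */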
25916
25917 /* Returns an insn that has a vrsave set operation with the
25918 appropriate CLOBBERs. */
25919
25920 static rtx
25921 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25922 {
25923 int nclobs, i;
25924 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25925 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25926
25927 clobs[0]
25928 = gen_rtx_SET (vrsave,
25929 gen_rtx_UNSPEC_VOLATILE (SImode,
25930 gen_rtvec (2, reg, vrsave),
25931 UNSPECV_SET_VRSAVE));
25932
25933 nclobs = 1;
25934
25935 /* We need to clobber the registers in the mask so the scheduler
25936 does not move sets to VRSAVE before sets of AltiVec registers.
25937
25938 However, if the function receives nonlocal gotos, reload will set
25939 all call saved registers live. We will end up with:
25940
25941 (set (reg 999) (mem))
25942 (parallel [ (set (reg vrsave) (unspec blah))
25943 (clobber (reg 999))])
25944
25945 The clobber will cause the store into reg 999 to be dead, and
25946 flow will attempt to delete an epilogue insn. In this case, we
25947 need an unspec use/set of the register. */
25948
25949 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25950 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25951 {
25952 if (!epiloguep || call_used_regs [i])
25953 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25954 gen_rtx_REG (V4SImode, i));
25955 else
25956 {
25957 rtx reg = gen_rtx_REG (V4SImode, i);
25958
25959 clobs[nclobs++]
25960 = gen_rtx_SET (reg,
25961 gen_rtx_UNSPEC (V4SImode,
25962 gen_rtvec (1, reg), 27));
25963 }
25964 }
25965
25966 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25967
25968 for (i = 0; i < nclobs; ++i)
25969 XVECEXP (insn, 0, i) = clobs[i];
25970
25971 return insn;
25972 }
25973
25974 static rtx
25975 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25976 {
25977 rtx addr, mem;
25978
25979 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25980 mem = gen_frame_mem (GET_MODE (reg), addr);
25981 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25982 }
25983
25984 static rtx
25985 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25986 {
25987 return gen_frame_set (reg, frame_reg, offset, false);
25988 }
25989
25990 static rtx
25991 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25992 {
25993 return gen_frame_set (reg, frame_reg, offset, true);
25994 }
25995
25996 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25997 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25998
25999 static rtx_insn *
26000 emit_frame_save (rtx frame_reg, machine_mode mode,
26001 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26002 {
26003 rtx reg;
26004
26005 /* Rule out cases that would need register-indexed addressing. */
26006 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26007 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26008
26009 reg = gen_rtx_REG (mode, regno);
26010 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26011 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26012 NULL_RTX, NULL_RTX);
26013 }
26014
26015 /* Emit an offset memory reference suitable for a frame store, while
26016 converting to a valid addressing mode. */
26017
26018 static rtx
26019 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26020 {
26021 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26022 }
26023
26024 #ifndef TARGET_FIX_AND_CONTINUE
26025 #define TARGET_FIX_AND_CONTINUE 0
26026 #endif
26027
26028 /* It's really GPR 13 or 14 (depending on the ABI), FPR 14, and VR 20. We need the smallest. */
26029 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26030 #define LAST_SAVRES_REGISTER 31
26031 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26032
26033 enum {
26034 SAVRES_LR = 0x1,
26035 SAVRES_SAVE = 0x2,
26036 SAVRES_REG = 0x0c,
26037 SAVRES_GPR = 0,
26038 SAVRES_FPR = 4,
26039 SAVRES_VR = 8
26040 };
26041
26042 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26043
26044 /* Temporary holding space for an out-of-line register save/restore
26045 routine name. */
26046 static char savres_routine_name[30];
26047
26048 /* Return the name for an out-of-line register save/restore routine.
26049 SEL chooses the register class and save/restore variant. */
26050
26051 static char *
26052 rs6000_savres_routine_name (int regno, int sel)
26053 {
26054 const char *prefix = "";
26055 const char *suffix = "";
26056
26057 /* Different targets are supposed to define
26058 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26059 routine name could be defined with:
26060
26061 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26062
26063 This is a nice idea in theory, but in reality, things are
26064 complicated in several ways:
26065
26066 - ELF targets have save/restore routines for GPRs.
26067
26068 - PPC64 ELF targets have routines for save/restore of GPRs that
26069 differ in what they do with the link register, so having a set
26070 prefix doesn't work. (We only use one of the save routines at
26071 the moment, though.)
26072
26073 - PPC32 elf targets have "exit" versions of the restore routines
26074 that restore the link register and can save some extra space.
26075 These require an extra suffix. (There are also "tail" versions
26076 of the restore routines and "GOT" versions of the save routines,
26077 but we don't generate those at present. Same problems apply,
26078 though.)
26079
26080 We deal with all this by synthesizing our own prefix/suffix and
26081 using that for the simple sprintf call shown above. */
26082 if (DEFAULT_ABI == ABI_V4)
26083 {
26084 if (TARGET_64BIT)
26085 goto aix_names;
26086
26087 if ((sel & SAVRES_REG) == SAVRES_GPR)
26088 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26089 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26090 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26091 else if ((sel & SAVRES_REG) == SAVRES_VR)
26092 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26093 else
26094 abort ();
26095
26096 if ((sel & SAVRES_LR))
26097 suffix = "_x";
26098 }
26099 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26100 {
26101 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26102 /* No out-of-line save/restore routines for GPRs on AIX. */
26103 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26104 #endif
26105
26106 aix_names:
26107 if ((sel & SAVRES_REG) == SAVRES_GPR)
26108 prefix = ((sel & SAVRES_SAVE)
26109 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26110 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26111 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26112 {
26113 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26114 if ((sel & SAVRES_LR))
26115 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26116 else
26117 #endif
26118 {
26119 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26120 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26121 }
26122 }
26123 else if ((sel & SAVRES_REG) == SAVRES_VR)
26124 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26125 else
26126 abort ();
26127 }
26128
26129 if (DEFAULT_ABI == ABI_DARWIN)
26130 {
26131 /* The Darwin approach is (slightly) different, in order to be
26132 compatible with code generated by the system toolchain. There is a
26133 single symbol for the start of the save sequence, and the code here
26134 embeds an offset into that code on the basis of the first register
26135 to be saved. */
26136 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26137 if ((sel & SAVRES_REG) == SAVRES_GPR)
26138 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26139 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26140 (regno - 13) * 4, prefix, regno);
26141 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26142 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26143 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26144 else if ((sel & SAVRES_REG) == SAVRES_VR)
26145 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26146 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26147 else
26148 abort ();
26149 }
26150 else
26151 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26152
26153 return savres_routine_name;
26154 }
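/* For example, the 32-bit SVR4 routine restoring GPRs from r29 up
   that also restores the link register is named "_restgpr_29_x",
   while on AIX/ELFv2 the LR-saving GPR save routine for r29 is
   "_savegpr0_29" and the non-LR variant is "_savegpr1_29". */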
26155
26156 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26157 SEL chooses the register class and save/restore variant. */
26158
26159 static rtx
26160 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26161 {
26162 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26163 ? info->first_gp_reg_save
26164 : (sel & SAVRES_REG) == SAVRES_FPR
26165 ? info->first_fp_reg_save - 32
26166 : (sel & SAVRES_REG) == SAVRES_VR
26167 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26168 : -1);
26169 rtx sym;
26170 int select = sel;
26171
26172 /* Don't generate bogus routine names. */
26173 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26174 && regno <= LAST_SAVRES_REGISTER
26175 && select >= 0 && select < 12);
26176
26177 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26178
26179 if (sym == NULL)
26180 {
26181 char *name;
26182
26183 name = rs6000_savres_routine_name (regno, sel);
26184
26185 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26186 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26187 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26188 }
26189
26190 return sym;
26191 }
26192
26193 /* Emit a sequence of insns, including a stack tie if needed, for
26194 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26195 reset the stack pointer, but move the base of the frame into
26196 reg UPDT_REGNO for use by out-of-line register restore routines. */
26197
26198 static rtx
26199 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26200 unsigned updt_regno)
26201 {
26202 /* If there is nothing to do, don't do anything. */
26203 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26204 return NULL_RTX;
26205
26206 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26207
26208 /* This blockage is needed so that sched doesn't decide to move
26209 the sp change before the register restores. */
26210 if (DEFAULT_ABI == ABI_V4)
26211 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26212 GEN_INT (frame_off)));
26213
26214 /* If we are restoring registers out-of-line, we will be using the
26215 "exit" variants of the restore routines, which will reset the
26216 stack for us. But we do need to point updt_reg into the
26217 right place for those routines. */
26218 if (frame_off != 0)
26219 return emit_insn (gen_add3_insn (updt_reg_rtx,
26220 frame_reg_rtx, GEN_INT (frame_off)));
26221 else
26222 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26225 }
26226
26227 /* Return the register number used as a pointer by out-of-line
26228 save/restore functions. */
26229
26230 static inline unsigned
26231 ptr_regno_for_savres (int sel)
26232 {
26233 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26234 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26235 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26236 }
26237
26238 /* Construct a parallel rtx describing the effect of a call to an
26239 out-of-line register save/restore routine, and emit the insn
26240 or jump_insn as appropriate. */
26241
26242 static rtx_insn *
26243 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26244 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26245 machine_mode reg_mode, int sel)
26246 {
26247 int i;
26248 int offset, start_reg, end_reg, n_regs, use_reg;
26249 int reg_size = GET_MODE_SIZE (reg_mode);
26250 rtx sym;
26251 rtvec p;
26252 rtx par;
26253 rtx_insn *insn;
26254
26255 offset = 0;
26256 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26257 ? info->first_gp_reg_save
26258 : (sel & SAVRES_REG) == SAVRES_FPR
26259 ? info->first_fp_reg_save
26260 : (sel & SAVRES_REG) == SAVRES_VR
26261 ? info->first_altivec_reg_save
26262 : -1);
26263 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26264 ? 32
26265 : (sel & SAVRES_REG) == SAVRES_FPR
26266 ? 64
26267 : (sel & SAVRES_REG) == SAVRES_VR
26268 ? LAST_ALTIVEC_REGNO + 1
26269 : -1);
26270 n_regs = end_reg - start_reg;
26271 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26272 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26273 + n_regs);
26274
26275 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26276 RTVEC_ELT (p, offset++) = ret_rtx;
26277
26278 RTVEC_ELT (p, offset++)
26279 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26280
26281 sym = rs6000_savres_routine_sym (info, sel);
26282 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26283
26284 use_reg = ptr_regno_for_savres (sel);
26285 if ((sel & SAVRES_REG) == SAVRES_VR)
26286 {
26287 /* Vector regs are saved/restored using [reg+reg] addressing. */
26288 RTVEC_ELT (p, offset++)
26289 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26290 RTVEC_ELT (p, offset++)
26291 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26292 }
26293 else
26294 RTVEC_ELT (p, offset++)
26295 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26296
26297 for (i = 0; i < end_reg - start_reg; i++)
26298 RTVEC_ELT (p, i + offset)
26299 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26300 frame_reg_rtx, save_area_offset + reg_size * i,
26301 (sel & SAVRES_SAVE) != 0);
26302
26303 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26304 RTVEC_ELT (p, i + offset)
26305 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26306
26307 par = gen_rtx_PARALLEL (VOIDmode, p);
26308
26309 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26310 {
26311 insn = emit_jump_insn (par);
26312 JUMP_LABEL (insn) = ret_rtx;
26313 }
26314 else
26315 insn = emit_insn (par);
26316 return insn;
26317 }
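/* The PARALLEL built above for, say, an ELFv2 GPR save with LR has
   roughly this shape (register numbers and offsets illustrative):

	(parallel [(clobber (reg:DI LR_REGNO))
		   (use (symbol_ref:DI "_savegpr0_29"))
		   (use (reg:DI 1))
		   (set (mem:DI (plus (reg:DI 1) (const_int -24))) (reg:DI 29))
		   (set (mem:DI (plus (reg:DI 1) (const_int -16))) (reg:DI 30))
		   (set (mem:DI (plus (reg:DI 1) (const_int -8))) (reg:DI 31))
		   (set (mem:DI (plus (reg:DI 1) (const_int 16))) (reg:DI 0))])

   where the final element is the LR store at LR_OFFSET. */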
26318
26319 /* Emit prologue code to store CR fields that need to be saved into REG. This
26320 function should only be called when moving the non-volatile CRs to REG, it
26321 is not a general purpose routine to move the entire set of CRs to REG.
26322 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26323 volatile CRs. */
26324
26325 static void
26326 rs6000_emit_prologue_move_from_cr (rtx reg)
26327 {
26328 /* Only the ELFv2 ABI allows storing only selected fields. */
26329 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26330 {
26331 int i, cr_reg[8], count = 0;
26332
26333 /* Collect CR fields that must be saved. */
26334 for (i = 0; i < 8; i++)
26335 if (save_reg_p (CR0_REGNO + i))
26336 cr_reg[count++] = i;
26337
26338 /* If it's just a single one, use mfcrf. */
26339 if (count == 1)
26340 {
26341 rtvec p = rtvec_alloc (1);
26342 rtvec r = rtvec_alloc (2);
26343 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26344 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26345 RTVEC_ELT (p, 0)
26346 = gen_rtx_SET (reg,
26347 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26348
26349 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26350 return;
26351 }
26352
26353 /* ??? It might be better to handle count == 2 / 3 cases here
26354 as well, using logical operations to combine the values. */
26355 }
26356
26357 emit_insn (gen_prologue_movesi_from_cr (reg));
26358 }
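/* For example, if only CR2 must be saved, the single-field path above
   emits one field move (mfocrf) with field mask 1 << (7 - 2) == 0x20,
   rather than copying all eight fields with a full mfcr. */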
26359
26360 /* Return whether the split-stack arg pointer (r12) is used. */
26361
26362 static bool
26363 split_stack_arg_pointer_used_p (void)
26364 {
26365 /* If the pseudo holding the arg pointer is no longer a pseudo,
26366 then the arg pointer is used. */
26367 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26368 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26369 || (REGNO (cfun->machine->split_stack_arg_pointer)
26370 < FIRST_PSEUDO_REGISTER)))
26371 return true;
26372
26373 /* Unfortunately we also need to do some code scanning, since
26374 r12 may have been substituted for the pseudo. */
26375 rtx_insn *insn;
26376 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26377 FOR_BB_INSNS (bb, insn)
26378 if (NONDEBUG_INSN_P (insn))
26379 {
26380 /* A call destroys r12. */
26381 if (CALL_P (insn))
26382 return false;
26383
26384 df_ref use;
26385 FOR_EACH_INSN_USE (use, insn)
26386 {
26387 rtx x = DF_REF_REG (use);
26388 if (REG_P (x) && REGNO (x) == 12)
26389 return true;
26390 }
26391 df_ref def;
26392 FOR_EACH_INSN_DEF (def, insn)
26393 {
26394 rtx x = DF_REF_REG (def);
26395 if (REG_P (x) && REGNO (x) == 12)
26396 return false;
26397 }
26398 }
26399 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26400 }
26401
26402 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26403
26404 static bool
26405 rs6000_global_entry_point_needed_p (void)
26406 {
26407 /* Only needed for the ELFv2 ABI. */
26408 if (DEFAULT_ABI != ABI_ELFv2)
26409 return false;
26410
26411 /* With -msingle-pic-base, we assume the whole program shares the same
26412 TOC, so no global entry point prologues are needed anywhere. */
26413 if (TARGET_SINGLE_PIC_BASE)
26414 return false;
26415
26416 /* Ensure we have a global entry point for thunks. ??? We could
26417 avoid that if the target routine doesn't need a global entry point,
26418 but we do not know whether this is the case at this point. */
26419 if (cfun->is_thunk)
26420 return true;
26421
26422 /* For regular functions, rs6000_emit_prologue sets this flag if the
26423 routine ever uses the TOC pointer. */
26424 return cfun->machine->r2_setup_needed;
26425 }
26426
26427 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26428 static sbitmap
26429 rs6000_get_separate_components (void)
26430 {
26431 rs6000_stack_t *info = rs6000_stack_info ();
26432
26433 if (WORLD_SAVE_P (info))
26434 return NULL;
26435
26436 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26437 && !(info->savres_strategy & REST_MULTIPLE));
26438
26439 /* Component 0 is the save/restore of LR (done via GPR0).
26440 Component 2 is the save of the TOC (GPR2).
26441 Components 13..31 are the save/restore of GPR13..GPR31.
26442 Components 46..63 are the save/restore of FPR14..FPR31. */
26443
26444 cfun->machine->n_components = 64;
26445
26446 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26447 bitmap_clear (components);
26448
26449 int reg_size = TARGET_32BIT ? 4 : 8;
26450 int fp_reg_size = 8;
26451
26452 /* The GPRs we need saved to the frame. */
26453 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26454 && (info->savres_strategy & REST_INLINE_GPRS))
26455 {
26456 int offset = info->gp_save_offset;
26457 if (info->push_p)
26458 offset += info->total_size;
26459
26460 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26461 {
26462 if (IN_RANGE (offset, -0x8000, 0x7fff)
26463 && save_reg_p (regno))
26464 bitmap_set_bit (components, regno);
26465
26466 offset += reg_size;
26467 }
26468 }
26469
26470 /* Don't mess with the hard frame pointer. */
26471 if (frame_pointer_needed)
26472 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26473
26474 /* Don't mess with the fixed TOC register. */
26475 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26476 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26477 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26478 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26479
26480 /* The FPRs we need saved to the frame. */
26481 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26482 && (info->savres_strategy & REST_INLINE_FPRS))
26483 {
26484 int offset = info->fp_save_offset;
26485 if (info->push_p)
26486 offset += info->total_size;
26487
26488 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26489 {
26490 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26491 bitmap_set_bit (components, regno);
26492
26493 offset += fp_reg_size;
26494 }
26495 }
26496
26497 /* Optimize LR save and restore if we can. This is component 0. Any
26498 out-of-line register save/restore routines need LR. */
26499 if (info->lr_save_p
26500 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26501 && (info->savres_strategy & SAVE_INLINE_GPRS)
26502 && (info->savres_strategy & REST_INLINE_GPRS)
26503 && (info->savres_strategy & SAVE_INLINE_FPRS)
26504 && (info->savres_strategy & REST_INLINE_FPRS)
26505 && (info->savres_strategy & SAVE_INLINE_VRS)
26506 && (info->savres_strategy & REST_INLINE_VRS))
26507 {
26508 int offset = info->lr_save_offset;
26509 if (info->push_p)
26510 offset += info->total_size;
26511 if (IN_RANGE (offset, -0x8000, 0x7fff))
26512 bitmap_set_bit (components, 0);
26513 }
26514
26515 /* Optimize saving the TOC. This is component 2. */
26516 if (cfun->machine->save_toc_in_prologue)
26517 bitmap_set_bit (components, 2);
26518
26519 return components;
26520 }
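/* So, e.g., the save/restore of GPR30 is component 30 and that of
   FPR31 is component 63 (the register numbers themselves), while LR
   and the TOC take the otherwise unused component numbers 0 and 2.
   Registers whose save slot falls outside the 16-bit displacement
   range are simply not offered as separate components. */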
26521
26522 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26523 static sbitmap
26524 rs6000_components_for_bb (basic_block bb)
26525 {
26526 rs6000_stack_t *info = rs6000_stack_info ();
26527
26528 bitmap in = DF_LIVE_IN (bb);
26529 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26530 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26531
26532 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26533 bitmap_clear (components);
26534
26535 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26536
26537 /* GPRs. */
26538 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26539 if (bitmap_bit_p (in, regno)
26540 || bitmap_bit_p (gen, regno)
26541 || bitmap_bit_p (kill, regno))
26542 bitmap_set_bit (components, regno);
26543
26544 /* FPRs. */
26545 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26546 if (bitmap_bit_p (in, regno)
26547 || bitmap_bit_p (gen, regno)
26548 || bitmap_bit_p (kill, regno))
26549 bitmap_set_bit (components, regno);
26550
26551 /* The link register. */
26552 if (bitmap_bit_p (in, LR_REGNO)
26553 || bitmap_bit_p (gen, LR_REGNO)
26554 || bitmap_bit_p (kill, LR_REGNO))
26555 bitmap_set_bit (components, 0);
26556
26557 /* The TOC save. */
26558 if (bitmap_bit_p (in, TOC_REGNUM)
26559 || bitmap_bit_p (gen, TOC_REGNUM)
26560 || bitmap_bit_p (kill, TOC_REGNUM))
26561 bitmap_set_bit (components, 2);
26562
26563 return components;
26564 }
26565
26566 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26567 static void
26568 rs6000_disqualify_components (sbitmap components, edge e,
26569 sbitmap edge_components, bool /*is_prologue*/)
26570 {
26571 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26572 live where we want to place that code. */
26573 if (bitmap_bit_p (edge_components, 0)
26574 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26575 {
26576 if (dump_file)
26577 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26578 "on entry to bb %d\n", e->dest->index);
26579 bitmap_clear_bit (components, 0);
26580 }
26581 }
26582
26583 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26584 static void
26585 rs6000_emit_prologue_components (sbitmap components)
26586 {
26587 rs6000_stack_t *info = rs6000_stack_info ();
26588 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26589 ? HARD_FRAME_POINTER_REGNUM
26590 : STACK_POINTER_REGNUM);
26591
26592 machine_mode reg_mode = Pmode;
26593 int reg_size = TARGET_32BIT ? 4 : 8;
26594 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26595 int fp_reg_size = 8;
26596
26597 /* Prologue for LR. */
26598 if (bitmap_bit_p (components, 0))
26599 {
26600 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26601 rtx reg = gen_rtx_REG (reg_mode, 0);
26602 rtx_insn *insn = emit_move_insn (reg, lr);
26603 RTX_FRAME_RELATED_P (insn) = 1;
26604 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26605
26606 int offset = info->lr_save_offset;
26607 if (info->push_p)
26608 offset += info->total_size;
26609
26610 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26611 RTX_FRAME_RELATED_P (insn) = 1;
26612 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26613 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26614 }
26615
26616 /* Prologue for TOC. */
26617 if (bitmap_bit_p (components, 2))
26618 {
26619 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26620 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26621 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26622 }
26623
26624 /* Prologue for the GPRs. */
26625 int offset = info->gp_save_offset;
26626 if (info->push_p)
26627 offset += info->total_size;
26628
26629 for (int i = info->first_gp_reg_save; i < 32; i++)
26630 {
26631 if (bitmap_bit_p (components, i))
26632 {
26633 rtx reg = gen_rtx_REG (reg_mode, i);
26634 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26635 RTX_FRAME_RELATED_P (insn) = 1;
26636 rtx set = copy_rtx (single_set (insn));
26637 add_reg_note (insn, REG_CFA_OFFSET, set);
26638 }
26639
26640 offset += reg_size;
26641 }
26642
26643 /* Prologue for the FPRs. */
26644 offset = info->fp_save_offset;
26645 if (info->push_p)
26646 offset += info->total_size;
26647
26648 for (int i = info->first_fp_reg_save; i < 64; i++)
26649 {
26650 if (bitmap_bit_p (components, i))
26651 {
26652 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26653 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26654 RTX_FRAME_RELATED_P (insn) = 1;
26655 rtx set = copy_rtx (single_set (insn));
26656 add_reg_note (insn, REG_CFA_OFFSET, set);
26657 }
26658
26659 offset += fp_reg_size;
26660 }
26661 }
26662
26663 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26664 static void
26665 rs6000_emit_epilogue_components (sbitmap components)
26666 {
26667 rs6000_stack_t *info = rs6000_stack_info ();
26668 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26669 ? HARD_FRAME_POINTER_REGNUM
26670 : STACK_POINTER_REGNUM);
26671
26672 machine_mode reg_mode = Pmode;
26673 int reg_size = TARGET_32BIT ? 4 : 8;
26674
26675 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26676 int fp_reg_size = 8;
26677
26678 /* Epilogue for the FPRs. */
26679 int offset = info->fp_save_offset;
26680 if (info->push_p)
26681 offset += info->total_size;
26682
26683 for (int i = info->first_fp_reg_save; i < 64; i++)
26684 {
26685 if (bitmap_bit_p (components, i))
26686 {
26687 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26688 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26689 RTX_FRAME_RELATED_P (insn) = 1;
26690 add_reg_note (insn, REG_CFA_RESTORE, reg);
26691 }
26692
26693 offset += fp_reg_size;
26694 }
26695
26696 /* Epilogue for the GPRs. */
26697 offset = info->gp_save_offset;
26698 if (info->push_p)
26699 offset += info->total_size;
26700
26701 for (int i = info->first_gp_reg_save; i < 32; i++)
26702 {
26703 if (bitmap_bit_p (components, i))
26704 {
26705 rtx reg = gen_rtx_REG (reg_mode, i);
26706 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26707 RTX_FRAME_RELATED_P (insn) = 1;
26708 add_reg_note (insn, REG_CFA_RESTORE, reg);
26709 }
26710
26711 offset += reg_size;
26712 }
26713
26714 /* Epilogue for LR. */
26715 if (bitmap_bit_p (components, 0))
26716 {
26717 int offset = info->lr_save_offset;
26718 if (info->push_p)
26719 offset += info->total_size;
26720
26721 rtx reg = gen_rtx_REG (reg_mode, 0);
26722 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26723
26724 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26725 insn = emit_move_insn (lr, reg);
26726 RTX_FRAME_RELATED_P (insn) = 1;
26727 add_reg_note (insn, REG_CFA_RESTORE, lr);
26728 }
26729 }
26730
26731 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26732 static void
26733 rs6000_set_handled_components (sbitmap components)
26734 {
26735 rs6000_stack_t *info = rs6000_stack_info ();
26736
26737 for (int i = info->first_gp_reg_save; i < 32; i++)
26738 if (bitmap_bit_p (components, i))
26739 cfun->machine->gpr_is_wrapped_separately[i] = true;
26740
26741 for (int i = info->first_fp_reg_save; i < 64; i++)
26742 if (bitmap_bit_p (components, i))
26743 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26744
26745 if (bitmap_bit_p (components, 0))
26746 cfun->machine->lr_is_wrapped_separately = true;
26747
26748 if (bitmap_bit_p (components, 2))
26749 cfun->machine->toc_is_wrapped_separately = true;
26750 }
26751
26752 /* VRSAVE is a bit vector representing which AltiVec registers
26753 are used. The OS uses this to determine which vector
26754 registers to save on a context switch. We need to save
26755 VRSAVE on the stack frame, add whatever AltiVec registers we
26756 used in this function, and do the corresponding magic in the
26757 epilogue. */
26758 static void
26759 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26760 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26761 {
26762 /* Get VRSAVE into a GPR. */
26763 rtx reg = gen_rtx_REG (SImode, save_regno);
26764 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26765 if (TARGET_MACHO)
26766 emit_insn (gen_get_vrsave_internal (reg));
26767 else
26768 emit_insn (gen_rtx_SET (reg, vrsave));
26769
26770 /* Save VRSAVE. */
26771 int offset = info->vrsave_save_offset + frame_off;
26772 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26773
26774 /* Include the registers in the mask. */
26775 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26776
26777 emit_insn (generate_set_vrsave (reg, info, 0));
26778 }
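/* The sequence above amounts to something like the following, with
   the save offset and mask purely illustrative:

	mfspr r11,256		# read VRSAVE
	stw r11,-224(r1)	# save the old value in the frame
	ori r11,r11,0x0fff	# add the AltiVec regs this function uses
	mtspr 256,r11		# and make them live
*/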
26779
26780 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26781 called, it left the arg pointer to the old stack in r29. Otherwise, the
26782 arg pointer is the top of the current frame. */
26783 static void
26784 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26785 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26786 {
26787 cfun->machine->split_stack_argp_used = true;
26788
26789 if (sp_adjust)
26790 {
26791 rtx r12 = gen_rtx_REG (Pmode, 12);
26792 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26793 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26794 emit_insn_before (set_r12, sp_adjust);
26795 }
26796 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26797 {
26798 rtx r12 = gen_rtx_REG (Pmode, 12);
26799 if (frame_off == 0)
26800 emit_move_insn (r12, frame_reg_rtx);
26801 else
26802 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26803 }
26804
26805 if (info->push_p)
26806 {
26807 rtx r12 = gen_rtx_REG (Pmode, 12);
26808 rtx r29 = gen_rtx_REG (Pmode, 29);
26809 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26810 rtx not_more = gen_label_rtx ();
26811 rtx jump;
26812
26813 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26814 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26815 gen_rtx_LABEL_REF (VOIDmode, not_more),
26816 pc_rtx);
26817 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26818 JUMP_LABEL (jump) = not_more;
26819 LABEL_NUSES (not_more) += 1;
26820 emit_move_insn (r12, r29);
26821 emit_label (not_more);
26822 }
26823 }
26824
26825 /* Emit function prologue as insns. */
26826
26827 void
26828 rs6000_emit_prologue (void)
26829 {
26830 rs6000_stack_t *info = rs6000_stack_info ();
26831 machine_mode reg_mode = Pmode;
26832 int reg_size = TARGET_32BIT ? 4 : 8;
26833 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26834 int fp_reg_size = 8;
26835 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26836 rtx frame_reg_rtx = sp_reg_rtx;
26837 unsigned int cr_save_regno;
26838 rtx cr_save_rtx = NULL_RTX;
26839 rtx_insn *insn;
26840 int strategy;
26841 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26842 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26843 && call_used_regs[STATIC_CHAIN_REGNUM]);
26844 int using_split_stack = (flag_split_stack
26845 && (lookup_attribute ("no_split_stack",
26846 DECL_ATTRIBUTES (cfun->decl))
26847 == NULL));
26848
26849 /* Offset to top of frame for frame_reg and sp respectively. */
26850 HOST_WIDE_INT frame_off = 0;
26851 HOST_WIDE_INT sp_off = 0;
26852 /* sp_adjust is the stack adjusting instruction, tracked so that the
26853 insn setting up the split-stack arg pointer can be emitted just
26854 prior to it, when r12 is not used here for other purposes. */
26855 rtx_insn *sp_adjust = 0;
26856
26857 #if CHECKING_P
26858 /* Track and check usage of r0, r11, r12. */
26859 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26860 #define START_USE(R) do \
26861 { \
26862 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26863 reg_inuse |= 1 << (R); \
26864 } while (0)
26865 #define END_USE(R) do \
26866 { \
26867 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26868 reg_inuse &= ~(1 << (R)); \
26869 } while (0)
26870 #define NOT_INUSE(R) do \
26871 { \
26872 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26873 } while (0)
26874 #else
26875 #define START_USE(R) do {} while (0)
26876 #define END_USE(R) do {} while (0)
26877 #define NOT_INUSE(R) do {} while (0)
26878 #endif
26879
26880 if (DEFAULT_ABI == ABI_ELFv2
26881 && !TARGET_SINGLE_PIC_BASE)
26882 {
26883 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26884
26885 /* With -mminimal-toc we may generate an extra use of r2 below. */
26886 if (TARGET_TOC && TARGET_MINIMAL_TOC
26887 && !constant_pool_empty_p ())
26888 cfun->machine->r2_setup_needed = true;
26889 }
26890
26891
26892 if (flag_stack_usage_info)
26893 current_function_static_stack_size = info->total_size;
26894
26895 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26896 {
26897 HOST_WIDE_INT size = info->total_size;
26898
26899 if (crtl->is_leaf && !cfun->calls_alloca)
26900 {
26901 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26902 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26903 size - get_stack_check_protect ());
26904 }
26905 else if (size > 0)
26906 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26907 }
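  /* For instance, with a 16 KiB protection area, a leaf function with
     a 64 KiB frame is probed from offset 16K up to 64K (the caller is
     assumed to have covered the protection area), whereas a non-leaf
     frame of the same size is probed from 16K up to 80K to also cover
     the calls it makes. */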
26908
26909 if (TARGET_FIX_AND_CONTINUE)
26910 {
26911 /* gdb on darwin arranges to forward a function from the old
26912 address by modifying the first 5 instructions of the function
26913 to branch to the overriding function. This is necessary to
26914 permit function pointers that point to the old function to
26915 actually forward to the new function. */
26916 emit_insn (gen_nop ());
26917 emit_insn (gen_nop ());
26918 emit_insn (gen_nop ());
26919 emit_insn (gen_nop ());
26920 emit_insn (gen_nop ());
26921 }
26922
26923 /* Handle world saves specially here. */
26924 if (WORLD_SAVE_P (info))
26925 {
26926 int i, j, sz;
26927 rtx treg;
26928 rtvec p;
26929 rtx reg0;
26930
26931 /* save_world expects lr in r0. */
26932 reg0 = gen_rtx_REG (Pmode, 0);
26933 if (info->lr_save_p)
26934 {
26935 insn = emit_move_insn (reg0,
26936 gen_rtx_REG (Pmode, LR_REGNO));
26937 RTX_FRAME_RELATED_P (insn) = 1;
26938 }
26939
26940 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26941 assumptions about the offsets of various bits of the stack
26942 frame. */
26943 gcc_assert (info->gp_save_offset == -220
26944 && info->fp_save_offset == -144
26945 && info->lr_save_offset == 8
26946 && info->cr_save_offset == 4
26947 && info->push_p
26948 && info->lr_save_p
26949 && (!crtl->calls_eh_return
26950 || info->ehrd_offset == -432)
26951 && info->vrsave_save_offset == -224
26952 && info->altivec_save_offset == -416);
26953
26954 treg = gen_rtx_REG (SImode, 11);
26955 emit_move_insn (treg, GEN_INT (-info->total_size));
26956
26957 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26958 in R11. It also clobbers R12, so beware! */
26959
26960 /* Preserve CR2 for save_world prologues. */
26961 sz = 5;
26962 sz += 32 - info->first_gp_reg_save;
26963 sz += 64 - info->first_fp_reg_save;
26964 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26965 p = rtvec_alloc (sz);
26966 j = 0;
26967 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26968 gen_rtx_REG (SImode,
26969 LR_REGNO));
26970 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26971 gen_rtx_SYMBOL_REF (Pmode,
26972 "*save_world"));
26973 /* We do floats first so that the instruction pattern matches
26974 properly. */
26975 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26976 RTVEC_ELT (p, j++)
26977 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26978 info->first_fp_reg_save + i),
26979 frame_reg_rtx,
26980 info->fp_save_offset + frame_off + 8 * i);
26981 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26982 RTVEC_ELT (p, j++)
26983 = gen_frame_store (gen_rtx_REG (V4SImode,
26984 info->first_altivec_reg_save + i),
26985 frame_reg_rtx,
26986 info->altivec_save_offset + frame_off + 16 * i);
26987 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26988 RTVEC_ELT (p, j++)
26989 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26990 frame_reg_rtx,
26991 info->gp_save_offset + frame_off + reg_size * i);
26992
26993 /* CR register traditionally saved as CR2. */
26994 RTVEC_ELT (p, j++)
26995 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26996 frame_reg_rtx, info->cr_save_offset + frame_off);
26997 /* Explain the use of R0. */
26998 if (info->lr_save_p)
26999 RTVEC_ELT (p, j++)
27000 = gen_frame_store (reg0,
27001 frame_reg_rtx, info->lr_save_offset + frame_off);
27002 /* Explain what happens to the stack pointer. */
27003 {
27004 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27005 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27006 }
27007
27008 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27009 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27010 treg, GEN_INT (-info->total_size));
27011 sp_off = frame_off = info->total_size;
27012 }
27013
27014 strategy = info->savres_strategy;
27015
27016 /* For V.4, update stack before we do any saving and set back pointer. */
27017 if (! WORLD_SAVE_P (info)
27018 && info->push_p
27019 && (DEFAULT_ABI == ABI_V4
27020 || crtl->calls_eh_return))
27021 {
27022 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27023 || !(strategy & SAVE_INLINE_GPRS)
27024 || !(strategy & SAVE_INLINE_VRS));
27025 int ptr_regno = -1;
27026 rtx ptr_reg = NULL_RTX;
27027 int ptr_off = 0;
27028
27029 if (info->total_size < 32767)
27030 frame_off = info->total_size;
27031 else if (need_r11)
27032 ptr_regno = 11;
27033 else if (info->cr_save_p
27034 || info->lr_save_p
27035 || info->first_fp_reg_save < 64
27036 || info->first_gp_reg_save < 32
27037 || info->altivec_size != 0
27038 || info->vrsave_size != 0
27039 || crtl->calls_eh_return)
27040 ptr_regno = 12;
27041 else
27042 {
27043 /* The prologue won't be saving any regs so there is no need
27044 to set up a frame register to access any frame save area.
27045 We also won't be using frame_off anywhere below, but set
27046 the correct value anyway to protect against future
27047 changes to this function. */
27048 frame_off = info->total_size;
27049 }
27050 if (ptr_regno != -1)
27051 {
27052 /* Set up the frame offset to that needed by the first
27053 out-of-line save function. */
27054 START_USE (ptr_regno);
27055 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27056 frame_reg_rtx = ptr_reg;
27057 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27058 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27059 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27060 ptr_off = info->gp_save_offset + info->gp_size;
27061 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27062 ptr_off = info->altivec_save_offset + info->altivec_size;
27063 frame_off = -ptr_off;
27064 }
27065 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27066 ptr_reg, ptr_off);
27067 if (REGNO (frame_reg_rtx) == 12)
27068 sp_adjust = 0;
27069 sp_off = info->total_size;
27070 if (frame_reg_rtx != sp_reg_rtx)
27071 rs6000_emit_stack_tie (frame_reg_rtx, false);
27072 }
27073
27074 /* If we use the link register, get it into r0. */
27075 if (!WORLD_SAVE_P (info) && info->lr_save_p
27076 && !cfun->machine->lr_is_wrapped_separately)
27077 {
27078 rtx addr, reg, mem;
27079
27080 reg = gen_rtx_REG (Pmode, 0);
27081 START_USE (0);
27082 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27083 RTX_FRAME_RELATED_P (insn) = 1;
27084
27085 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27086 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27087 {
27088 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27089 GEN_INT (info->lr_save_offset + frame_off));
27090 mem = gen_rtx_MEM (Pmode, addr);
27091 /* This should not be of rs6000_sr_alias_set, because of
27092 __builtin_return_address. */
27093
27094 insn = emit_move_insn (mem, reg);
27095 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27096 NULL_RTX, NULL_RTX);
27097 END_USE (0);
27098 }
27099 }
27100
27101 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27102 r12 will be needed by out-of-line gpr save. */
27103 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27104 && !(strategy & (SAVE_INLINE_GPRS
27105 | SAVE_NOINLINE_GPRS_SAVES_LR))
27106 ? 11 : 12);
27107 if (!WORLD_SAVE_P (info)
27108 && info->cr_save_p
27109 && REGNO (frame_reg_rtx) != cr_save_regno
27110 && !(using_static_chain_p && cr_save_regno == 11)
27111 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27112 {
27113 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27114 START_USE (cr_save_regno);
27115 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27116 }
27117
27118 /* Do any required saving of fpr's. If we are saving them inline,
27119 do it here; otherwise call an out-of-line save function. */
27120 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27121 {
27122 int offset = info->fp_save_offset + frame_off;
27123 for (int i = info->first_fp_reg_save; i < 64; i++)
27124 {
27125 if (save_reg_p (i)
27126 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27127 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27128 sp_off - frame_off);
27129
27130 offset += fp_reg_size;
27131 }
27132 }
27133 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27134 {
27135 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27136 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27137 unsigned ptr_regno = ptr_regno_for_savres (sel);
27138 rtx ptr_reg = frame_reg_rtx;
27139
27140 if (REGNO (frame_reg_rtx) == ptr_regno)
27141 gcc_checking_assert (frame_off == 0);
27142 else
27143 {
27144 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27145 NOT_INUSE (ptr_regno);
27146 emit_insn (gen_add3_insn (ptr_reg,
27147 frame_reg_rtx, GEN_INT (frame_off)));
27148 }
27149 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27150 info->fp_save_offset,
27151 info->lr_save_offset,
27152 DFmode, sel);
27153 rs6000_frame_related (insn, ptr_reg, sp_off,
27154 NULL_RTX, NULL_RTX);
27155 if (lr)
27156 END_USE (0);
27157 }
27158
27159 /* Save GPRs. This is done as a PARALLEL if we are using
27160 the store-multiple instructions. */
27161 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27162 {
27163 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27164 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27165 unsigned ptr_regno = ptr_regno_for_savres (sel);
27166 rtx ptr_reg = frame_reg_rtx;
27167 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27168 int end_save = info->gp_save_offset + info->gp_size;
27169 int ptr_off;
27170
27171 if (ptr_regno == 12)
27172 sp_adjust = 0;
27173 if (!ptr_set_up)
27174 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27175
27176 /* Need to adjust r11 (r12) if we saved any FPRs. */
27177 if (end_save + frame_off != 0)
27178 {
27179 rtx offset = GEN_INT (end_save + frame_off);
27180
27181 if (ptr_set_up)
27182 frame_off = -end_save;
27183 else
27184 NOT_INUSE (ptr_regno);
27185 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27186 }
27187 else if (!ptr_set_up)
27188 {
27189 NOT_INUSE (ptr_regno);
27190 emit_move_insn (ptr_reg, frame_reg_rtx);
27191 }
27192 ptr_off = -end_save;
27193 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27194 info->gp_save_offset + ptr_off,
27195 info->lr_save_offset + ptr_off,
27196 reg_mode, sel);
27197 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27198 NULL_RTX, NULL_RTX);
27199 if (lr)
27200 END_USE (0);
27201 }
27202 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27203 {
27204 rtvec p;
27205 int i;
27206 p = rtvec_alloc (32 - info->first_gp_reg_save);
27207 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27208 RTVEC_ELT (p, i)
27209 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27210 frame_reg_rtx,
27211 info->gp_save_offset + frame_off + reg_size * i);
27212 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27213 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27214 NULL_RTX, NULL_RTX);
27215 }
27216 else if (!WORLD_SAVE_P (info))
27217 {
27218 int offset = info->gp_save_offset + frame_off;
27219 for (int i = info->first_gp_reg_save; i < 32; i++)
27220 {
27221 if (save_reg_p (i)
27222 && !cfun->machine->gpr_is_wrapped_separately[i])
27223 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27224 sp_off - frame_off);
27225
27226 offset += reg_size;
27227 }
27228 }
27229
27230 if (crtl->calls_eh_return)
27231 {
27232 unsigned int i;
27233 rtvec p;
27234
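/* First pass: count the EH return data registers.
   EH_RETURN_DATA_REGNO returns INVALID_REGNUM past the last one;
   a second pass below records a frame store for each.  */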
27235 for (i = 0; ; ++i)
27236 {
27237 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27238 if (regno == INVALID_REGNUM)
27239 break;
27240 }
27241
27242 p = rtvec_alloc (i);
27243
27244 for (i = 0; ; ++i)
27245 {
27246 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27247 if (regno == INVALID_REGNUM)
27248 break;
27249
27250 rtx set
27251 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27252 sp_reg_rtx,
27253 info->ehrd_offset + sp_off + reg_size * (int) i);
27254 RTVEC_ELT (p, i) = set;
27255 RTX_FRAME_RELATED_P (set) = 1;
27256 }
27257
27258 insn = emit_insn (gen_blockage ());
27259 RTX_FRAME_RELATED_P (insn) = 1;
27260 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27261 }
27262
27263 /* In the AIX ABI we need to make sure r2 is really saved. */
27264 if (TARGET_AIX && crtl->calls_eh_return)
27265 {
27266 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27267 rtx join_insn, note;
27268 rtx_insn *save_insn;
27269 long toc_restore_insn;
27270
27271 tmp_reg = gen_rtx_REG (Pmode, 11);
27272 tmp_reg_si = gen_rtx_REG (SImode, 11);
27273 if (using_static_chain_p)
27274 {
27275 START_USE (0);
27276 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27277 }
27278 else
27279 START_USE (11);
27280 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27281 /* Peek at the instruction to which this function returns. If it's
27282 restoring r2, then we know we've already saved r2. We can't
27283 unconditionally save r2 because the value we have will already
27284 be updated if we arrived at this function via a PLT call or
27285 TOC-adjusting stub. */
27286 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27287 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27288 + RS6000_TOC_SAVE_SLOT);
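/* This word is the encoding of "lwz 2,20(1)" (32-bit) or
   "ld 2,40(1)" (64-bit), assuming the usual RS6000_TOC_SAVE_SLOT
   values of 20 and 40.  Compare immediates are only 16 bits wide,
   so the test is split in two: the xor below clears the high half
   iff it matches, and the compare then checks the low half.  */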
27289 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27290 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27291 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27292 validate_condition_mode (EQ, CCUNSmode);
27293 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27294 emit_insn (gen_rtx_SET (compare_result,
27295 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27296 toc_save_done = gen_label_rtx ();
27297 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27298 gen_rtx_EQ (VOIDmode, compare_result,
27299 const0_rtx),
27300 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27301 pc_rtx);
27302 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27303 JUMP_LABEL (jump) = toc_save_done;
27304 LABEL_NUSES (toc_save_done) += 1;
27305
27306 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27307 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27308 sp_off - frame_off);
27309
27310 emit_label (toc_save_done);
27311
27312 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
27313 have a CFG that has different saves along different paths.
27314 Move the note to a dummy blockage insn, which describes that
27315 R2 is unconditionally saved after the label. */
27316 /* ??? An alternate representation might be a special insn pattern
27317 containing both the branch and the store. That might give the
27318 code that minimizes the number of DW_CFA_advance opcodes more
27319 freedom in placing the annotations. */
27320 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27321 if (note)
27322 remove_note (save_insn, note);
27323 else
27324 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27325 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27326 RTX_FRAME_RELATED_P (save_insn) = 0;
27327
27328 join_insn = emit_insn (gen_blockage ());
27329 REG_NOTES (join_insn) = note;
27330 RTX_FRAME_RELATED_P (join_insn) = 1;
27331
27332 if (using_static_chain_p)
27333 {
27334 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27335 END_USE (0);
27336 }
27337 else
27338 END_USE (11);
27339 }
27340
27341 /* Save CR if we use any that must be preserved. */
27342 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27343 {
27344 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27345 GEN_INT (info->cr_save_offset + frame_off));
27346 rtx mem = gen_frame_mem (SImode, addr);
27347
27348 /* If we didn't copy cr before, do so now using r0. */
27349 if (cr_save_rtx == NULL_RTX)
27350 {
27351 START_USE (0);
27352 cr_save_rtx = gen_rtx_REG (SImode, 0);
27353 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27354 }
27355
27356 /* Saving CR requires a two-instruction sequence: one instruction
27357 to move the CR to a general-purpose register, and a second
27358 instruction that stores the GPR to memory.
27359
27360 We do not emit any DWARF CFI records for the first of these,
27361 because we cannot properly represent the fact that CR is saved in
27362 a register. One reason is that we cannot express that multiple
27363 CR fields are saved; another reason is that on 64-bit, the size
27364 of the CR register in DWARF (4 bytes) differs from the size of
27365 a general-purpose register.
27366
27367 This means if any intervening instruction were to clobber one of
27368 the call-saved CR fields, we'd have incorrect CFI. To prevent
27369 this from happening, we mark the store to memory as a use of
27370 those CR fields, which prevents any such instruction from being
27371 scheduled in between the two instructions. */
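/* For example, on 32-bit targets the sequence is typically
   "mfcr rN ; stw rN,<cr_save_offset>(r1)", with the store wrapped
   in a PARALLEL that also lists USEs of the saved CR fields.  */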
27372 rtx crsave_v[9];
27373 int n_crsave = 0;
27374 int i;
27375
27376 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27377 for (i = 0; i < 8; i++)
27378 if (save_reg_p (CR0_REGNO + i))
27379 crsave_v[n_crsave++]
27380 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27381
27382 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27383 gen_rtvec_v (n_crsave, crsave_v)));
27384 END_USE (REGNO (cr_save_rtx));
27385
27386 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27387 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27388 so we need to construct a frame expression manually. */
27389 RTX_FRAME_RELATED_P (insn) = 1;
27390
27391 /* Update address to be stack-pointer relative, like
27392 rs6000_frame_related would do. */
27393 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27394 GEN_INT (info->cr_save_offset + sp_off));
27395 mem = gen_frame_mem (SImode, addr);
27396
27397 if (DEFAULT_ABI == ABI_ELFv2)
27398 {
27399 /* In the ELFv2 ABI we generate separate CFI records for each
27400 CR field that was actually saved. They all point to the
27401 same 32-bit stack slot. */
27402 rtx crframe[8];
27403 int n_crframe = 0;
27404
27405 for (i = 0; i < 8; i++)
27406 if (save_reg_p (CR0_REGNO + i))
27407 {
27408 crframe[n_crframe]
27409 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27410
27411 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27412 n_crframe++;
27413 }
27414
27415 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27416 gen_rtx_PARALLEL (VOIDmode,
27417 gen_rtvec_v (n_crframe, crframe)));
27418 }
27419 else
27420 {
27421 /* In other ABIs, by convention, we use a single CR regnum to
27422 represent the fact that all call-saved CR fields are saved.
27423 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27424 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27425 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27426 }
27427 }
27428
27429 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27430 *separate* slots if the routine calls __builtin_eh_return, so
27431 that they can be independently restored by the unwinder. */
27432 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27433 {
27434 int i, cr_off = info->ehcr_offset;
27435 rtx crsave;
27436
27437 /* ??? We might get better performance by using multiple mfocrf
27438 instructions. */
27439 crsave = gen_rtx_REG (SImode, 0);
27440 emit_insn (gen_prologue_movesi_from_cr (crsave));
27441
27442 for (i = 0; i < 8; i++)
27443 if (!call_used_regs[CR0_REGNO + i])
27444 {
27445 rtvec p = rtvec_alloc (2);
27446 RTVEC_ELT (p, 0)
27447 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27448 RTVEC_ELT (p, 1)
27449 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27450
27451 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27452
27453 RTX_FRAME_RELATED_P (insn) = 1;
27454 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27455 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27456 sp_reg_rtx, cr_off + sp_off));
27457
27458 cr_off += reg_size;
27459 }
27460 }
27461
27462 /* If we are emitting stack probes but allocate no stack, then
27463 just note that in the dump file. */
27464 if (flag_stack_clash_protection
27465 && dump_file
27466 && !info->push_p)
27467 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27468
27469 /* Update stack and set back pointer unless this is V.4,
27470 for which it was done previously. */
27471 if (!WORLD_SAVE_P (info) && info->push_p
27472 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27473 {
27474 rtx ptr_reg = NULL;
27475 int ptr_off = 0;
27476
27477 /* If saving altivec regs we need to be able to address all save
27478 locations using a 16-bit offset. */
27479 if ((strategy & SAVE_INLINE_VRS) == 0
27480 || (info->altivec_size != 0
27481 && (info->altivec_save_offset + info->altivec_size - 16
27482 + info->total_size - frame_off) > 32767)
27483 || (info->vrsave_size != 0
27484 && (info->vrsave_save_offset
27485 + info->total_size - frame_off) > 32767))
27486 {
27487 int sel = SAVRES_SAVE | SAVRES_VR;
27488 unsigned ptr_regno = ptr_regno_for_savres (sel);
27489
27490 if (using_static_chain_p
27491 && ptr_regno == STATIC_CHAIN_REGNUM)
27492 ptr_regno = 12;
27493 if (REGNO (frame_reg_rtx) != ptr_regno)
27494 START_USE (ptr_regno);
27495 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27496 frame_reg_rtx = ptr_reg;
27497 ptr_off = info->altivec_save_offset + info->altivec_size;
27498 frame_off = -ptr_off;
27499 }
27500 else if (REGNO (frame_reg_rtx) == 1)
27501 frame_off = info->total_size;
27502 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27503 ptr_reg, ptr_off);
27504 if (REGNO (frame_reg_rtx) == 12)
27505 sp_adjust = 0;
27506 sp_off = info->total_size;
27507 if (frame_reg_rtx != sp_reg_rtx)
27508 rs6000_emit_stack_tie (frame_reg_rtx, false);
27509 }
27510
27511 /* Set frame pointer, if needed. */
27512 if (frame_pointer_needed)
27513 {
27514 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27515 sp_reg_rtx);
27516 RTX_FRAME_RELATED_P (insn) = 1;
27517 }
27518
27519 /* Save AltiVec registers if needed. Save here because the red zone does
27520 not always include AltiVec registers. */
27521 if (!WORLD_SAVE_P (info)
27522 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27523 {
27524 int end_save = info->altivec_save_offset + info->altivec_size;
27525 int ptr_off;
27526 /* Oddly, the vector save/restore functions point r0 at the end
27527 of the save area, then use r11 or r12 to load offsets for
27528 [reg+reg] addressing. */
27529 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27530 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27531 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27532
27533 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27534 NOT_INUSE (0);
27535 if (scratch_regno == 12)
27536 sp_adjust = 0;
27537 if (end_save + frame_off != 0)
27538 {
27539 rtx offset = GEN_INT (end_save + frame_off);
27540
27541 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27542 }
27543 else
27544 emit_move_insn (ptr_reg, frame_reg_rtx);
27545
27546 ptr_off = -end_save;
27547 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27548 info->altivec_save_offset + ptr_off,
27549 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27550 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27551 NULL_RTX, NULL_RTX);
27552 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27553 {
27554 /* The oddity mentioned above clobbered our frame reg. */
27555 emit_move_insn (frame_reg_rtx, ptr_reg);
27556 frame_off = ptr_off;
27557 }
27558 }
27559 else if (!WORLD_SAVE_P (info)
27560 && info->altivec_size != 0)
27561 {
27562 int i;
27563
27564 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27565 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27566 {
27567 rtx areg, savereg, mem;
27568 HOST_WIDE_INT offset;
27569
27570 offset = (info->altivec_save_offset + frame_off
27571 + 16 * (i - info->first_altivec_reg_save));
27572
27573 savereg = gen_rtx_REG (V4SImode, i);
27574
27575 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27576 {
27577 mem = gen_frame_mem (V4SImode,
27578 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27579 GEN_INT (offset)));
27580 insn = emit_insn (gen_rtx_SET (mem, savereg));
27581 areg = NULL_RTX;
27582 }
27583 else
27584 {
27585 NOT_INUSE (0);
27586 areg = gen_rtx_REG (Pmode, 0);
27587 emit_move_insn (areg, GEN_INT (offset));
27588
27589 /* AltiVec addressing mode is [reg+reg]. */
27590 mem = gen_frame_mem (V4SImode,
27591 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27592
27593 /* Rather than emitting a generic move, force use of the stvx
27594 instruction, which we always want on ISA 2.07 (power8) systems.
27595 In particular we don't want xxpermdi/stxvd2x for little
27596 endian. */
27597 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27598 }
27599
27600 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27601 areg, GEN_INT (offset));
27602 }
27603 }
27604
27605 /* VRSAVE is a bit vector representing which AltiVec registers
27606 are used. The OS uses this to determine which vector
27607 registers to save on a context switch. We need to save
27608 VRSAVE on the stack frame, add whatever AltiVec registers we
27609 used in this function, and do the corresponding magic in the
27610 epilogue. */
27611
27612 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27613 {
27614 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27615 be using r12 as frame_reg_rtx and r11 as the static chain
27616 pointer for nested functions. */
27617 int save_regno = 12;
27618 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27619 && !using_static_chain_p)
27620 save_regno = 11;
27621 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27622 {
27623 save_regno = 11;
27624 if (using_static_chain_p)
27625 save_regno = 0;
27626 }
27627 NOT_INUSE (save_regno);
27628
27629 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27630 }
27631
27632 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27633 if (!TARGET_SINGLE_PIC_BASE
27634 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27635 && !constant_pool_empty_p ())
27636 || (DEFAULT_ABI == ABI_V4
27637 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27638 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27639 {
27640 /* If emit_load_toc_table will use the link register, we need to save
27641 it. We use R12 for this purpose because emit_load_toc_table
27642 can use register 0. This allows us to use a plain 'blr' to return
27643 from the procedure more often. */
27644 int save_LR_around_toc_setup = (TARGET_ELF
27645 && DEFAULT_ABI == ABI_V4
27646 && flag_pic
27647 && ! info->lr_save_p
27648 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27649 if (save_LR_around_toc_setup)
27650 {
27651 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27652 rtx tmp = gen_rtx_REG (Pmode, 12);
27653
27654 sp_adjust = 0;
27655 insn = emit_move_insn (tmp, lr);
27656 RTX_FRAME_RELATED_P (insn) = 1;
27657
27658 rs6000_emit_load_toc_table (TRUE);
27659
27660 insn = emit_move_insn (lr, tmp);
27661 add_reg_note (insn, REG_CFA_RESTORE, lr);
27662 RTX_FRAME_RELATED_P (insn) = 1;
27663 }
27664 else
27665 rs6000_emit_load_toc_table (TRUE);
27666 }
27667
27668 #if TARGET_MACHO
27669 if (!TARGET_SINGLE_PIC_BASE
27670 && DEFAULT_ABI == ABI_DARWIN
27671 && flag_pic && crtl->uses_pic_offset_table)
27672 {
27673 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27674 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27675
27676 /* Save and restore LR locally around this call (in R0). */
27677 if (!info->lr_save_p)
27678 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27679
27680 emit_insn (gen_load_macho_picbase (src));
27681
27682 emit_move_insn (gen_rtx_REG (Pmode,
27683 RS6000_PIC_OFFSET_TABLE_REGNUM),
27684 lr);
27685
27686 if (!info->lr_save_p)
27687 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27688 }
27689 #endif
27690
27691 /* If we need to, save the TOC register after doing the stack setup.
27692 Do not emit eh frame info for this save. The unwinder wants info,
27693 conceptually attached to instructions in this function, about
27694 register values in the caller of this function. This R2 may have
27695 already been changed from the value in the caller.
27696 We don't attempt to write accurate DWARF EH frame info for R2
27697 because code emitted by gcc for a (non-pointer) function call
27698 doesn't save and restore R2. Instead, R2 is managed out-of-line
27699 by a linker generated plt call stub when the function resides in
27700 a shared library. This behavior is costly to describe in DWARF,
27701 both in terms of the size of DWARF info and the time taken in the
27702 unwinder to interpret it. R2 changes, apart from the
27703 calls_eh_return case earlier in this function, are handled by
27704 linux-unwind.h frob_update_context. */
27705 if (rs6000_save_toc_in_prologue_p ()
27706 && !cfun->machine->toc_is_wrapped_separately)
27707 {
27708 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27709 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27710 }
27711
27712 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27713 if (using_split_stack && split_stack_arg_pointer_used_p ())
27714 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27715 }
27716
27717 /* Output .extern statements for the save/restore routines we use. */
27718
27719 static void
27720 rs6000_output_savres_externs (FILE *file)
27721 {
27722 rs6000_stack_t *info = rs6000_stack_info ();
27723
27724 if (TARGET_DEBUG_STACK)
27725 debug_stack_info (info);
27726
27727 /* Write .extern for any function we will call to save and restore
27728 fp values. */
27729 if (info->first_fp_reg_save < 64
27730 && !TARGET_MACHO
27731 && !TARGET_ELF)
27732 {
27733 char *name;
27734 int regno = info->first_fp_reg_save - 32;
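/* This converts the hard register number (32..63) to the FPR
   number (0..31) used in the out-of-line routine's name.  */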
27735
27736 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27737 {
27738 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27739 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27740 name = rs6000_savres_routine_name (regno, sel);
27741 fprintf (file, "\t.extern %s\n", name);
27742 }
27743 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27744 {
27745 bool lr = (info->savres_strategy
27746 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27747 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27748 name = rs6000_savres_routine_name (regno, sel);
27749 fprintf (file, "\t.extern %s\n", name);
27750 }
27751 }
27752 }
27753
27754 /* Write function prologue. */
27755
27756 static void
27757 rs6000_output_function_prologue (FILE *file)
27758 {
27759 if (!cfun->is_thunk)
27760 rs6000_output_savres_externs (file);
27761
27762 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27763 immediately after the global entry point label. */
27764 if (rs6000_global_entry_point_needed_p ())
27765 {
27766 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27767
27768 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27769
27770 if (TARGET_CMODEL != CMODEL_LARGE)
27771 {
27772 /* In the small and medium code models, we assume the TOC is less
27773 than 2 GB away from the text section, so it can be computed via
27774 the following two-instruction sequence. */
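/* With r12 holding the address of the global entry point (the
   LCF label just emitted), this computes r2 = .TOC. as:
	addis 2,12,.TOC.-LCF@ha
	addi 2,2,.TOC.-LCF@l  */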
27775 char buf[256];
27776
27777 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27778 fprintf (file, "0:\taddis 2,12,.TOC.-");
27779 assemble_name (file, buf);
27780 fprintf (file, "@ha\n");
27781 fprintf (file, "\taddi 2,2,.TOC.-");
27782 assemble_name (file, buf);
27783 fprintf (file, "@l\n");
27784 }
27785 else
27786 {
27787 /* In the large code model, we allow arbitrary offsets between the
27788 TOC and the text section, so we have to load the offset from
27789 memory. The data field is emitted directly before the global
27790 entry point in rs6000_elf_declare_function_name. */
27791 char buf[256];
27792
27793 #ifdef HAVE_AS_ENTRY_MARKERS
27794 /* If supported by the linker, emit a marker relocation. If the
27795 total code size of the final executable or shared library
27796 happens to fit into 2 GB after all, the linker will replace
27797 this code sequence with the sequence for the small or medium
27798 code model. */
27799 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27800 #endif
27801 fprintf (file, "\tld 2,");
27802 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27803 assemble_name (file, buf);
27804 fprintf (file, "-");
27805 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27806 assemble_name (file, buf);
27807 fprintf (file, "(12)\n");
27808 fprintf (file, "\tadd 2,2,12\n");
27809 }
27810
27811 fputs ("\t.localentry\t", file);
27812 assemble_name (file, name);
27813 fputs (",.-", file);
27814 assemble_name (file, name);
27815 fputs ("\n", file);
27816 }
27817
27818 /* Output -mprofile-kernel code. This needs to be done here instead of
27819 in output_function_profile since it must go after the ELFv2 ABI
27820 local entry point. */
27821 if (TARGET_PROFILE_KERNEL && crtl->profile)
27822 {
27823 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27824 gcc_assert (!TARGET_32BIT);
27825
27826 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27827
27828 /* In the ELFv2 ABI we have no compiler stack word. It must be
27829 the responsibility of _mcount to preserve the static chain
27830 register if required. */
27831 if (DEFAULT_ABI != ABI_ELFv2
27832 && cfun->static_chain_decl != NULL)
27833 {
27834 asm_fprintf (file, "\tstd %s,24(%s)\n",
27835 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27836 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27837 asm_fprintf (file, "\tld %s,24(%s)\n",
27838 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27839 }
27840 else
27841 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27842 }
27843
27844 rs6000_pic_labelno++;
27845 }
27846
27847 /* -mprofile-kernel code calls mcount before the function prologue,
27848 so a profiled leaf function should stay a leaf function. */
27849 static bool
27850 rs6000_keep_leaf_when_profiled ()
27851 {
27852 return TARGET_PROFILE_KERNEL;
27853 }
27854
27855 /* Non-zero if vmx regs are restored before the frame pop, zero if
27856 we restore after the pop when possible. */
27857 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27858
27859 /* Restoring cr is a two-step process: loading a reg from the frame
27860 save, then moving the reg to cr. For ABI_V4 we must let the
27861 unwinder know that the stack location is no longer valid at or
27862 before the stack deallocation, but we can't emit a cfa_restore for
27863 cr at the stack deallocation like we do for other registers.
27864 The trouble is that it is possible for the move to cr to be
27865 scheduled after the stack deallocation. So say exactly where cr
27866 is located on each of the two insns. */
27867
27868 static rtx
27869 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27870 {
27871 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27872 rtx reg = gen_rtx_REG (SImode, regno);
27873 rtx_insn *insn = emit_move_insn (reg, mem);
27874
27875 if (!exit_func && DEFAULT_ABI == ABI_V4)
27876 {
27877 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27878 rtx set = gen_rtx_SET (reg, cr);
27879
27880 add_reg_note (insn, REG_CFA_REGISTER, set);
27881 RTX_FRAME_RELATED_P (insn) = 1;
27882 }
27883 return reg;
27884 }
27885
27886 /* Reload CR from REG. */
27887
27888 static void
27889 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27890 {
27891 int count = 0;
27892 int i;
27893
27894 if (using_mfcr_multiple)
27895 {
27896 for (i = 0; i < 8; i++)
27897 if (save_reg_p (CR0_REGNO + i))
27898 count++;
27899 gcc_assert (count);
27900 }
27901
27902 if (using_mfcr_multiple && count > 1)
27903 {
27904 rtx_insn *insn;
27905 rtvec p;
27906 int ndx;
27907
27908 p = rtvec_alloc (count);
27909
27910 ndx = 0;
27911 for (i = 0; i < 8; i++)
27912 if (save_reg_p (CR0_REGNO + i))
27913 {
27914 rtvec r = rtvec_alloc (2);
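/* mtcrf's FXM mask numbers CR fields from the most significant
   bit, so CR field I is selected by 1 << (7 - i).  */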
27915 RTVEC_ELT (r, 0) = reg;
27916 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27917 RTVEC_ELT (p, ndx) =
27918 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27919 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27920 ndx++;
27921 }
27922 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27923 gcc_assert (ndx == count);
27924
27925 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27926 CR field separately. */
27927 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27928 {
27929 for (i = 0; i < 8; i++)
27930 if (save_reg_p (CR0_REGNO + i))
27931 add_reg_note (insn, REG_CFA_RESTORE,
27932 gen_rtx_REG (SImode, CR0_REGNO + i));
27933
27934 RTX_FRAME_RELATED_P (insn) = 1;
27935 }
27936 }
27937 else
27938 for (i = 0; i < 8; i++)
27939 if (save_reg_p (CR0_REGNO + i))
27940 {
27941 rtx insn = emit_insn (gen_movsi_to_cr_one
27942 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27943
27944 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27945 CR field separately, attached to the insn that in fact
27946 restores this particular CR field. */
27947 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27948 {
27949 add_reg_note (insn, REG_CFA_RESTORE,
27950 gen_rtx_REG (SImode, CR0_REGNO + i));
27951
27952 RTX_FRAME_RELATED_P (insn) = 1;
27953 }
27954 }
27955
27956 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27957 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27958 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27959 {
27960 rtx_insn *insn = get_last_insn ();
27961 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27962
27963 add_reg_note (insn, REG_CFA_RESTORE, cr);
27964 RTX_FRAME_RELATED_P (insn) = 1;
27965 }
27966 }
27967
27968 /* As with cr, the move to lr instruction can be scheduled after the
27969 stack deallocation, but unlike cr, its stack frame save is still
27970 valid. So we only need to emit the cfa_restore on the correct
27971 instruction. */
27972
27973 static void
27974 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27975 {
27976 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27977 rtx reg = gen_rtx_REG (Pmode, regno);
27978
27979 emit_move_insn (reg, mem);
27980 }
27981
27982 static void
27983 restore_saved_lr (int regno, bool exit_func)
27984 {
27985 rtx reg = gen_rtx_REG (Pmode, regno);
27986 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27987 rtx_insn *insn = emit_move_insn (lr, reg);
27988
27989 if (!exit_func && flag_shrink_wrap)
27990 {
27991 add_reg_note (insn, REG_CFA_RESTORE, lr);
27992 RTX_FRAME_RELATED_P (insn) = 1;
27993 }
27994 }
27995
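/* Prepend REG_CFA_RESTORE notes for CR (using the per-field or
   CR2 convention matching how it was saved) and for LR to
   CFA_RESTORES, and return the new list.  */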
27996 static rtx
27997 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27998 {
27999 if (DEFAULT_ABI == ABI_ELFv2)
28000 {
28001 int i;
28002 for (i = 0; i < 8; i++)
28003 if (save_reg_p (CR0_REGNO + i))
28004 {
28005 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28006 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28007 cfa_restores);
28008 }
28009 }
28010 else if (info->cr_save_p)
28011 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28012 gen_rtx_REG (SImode, CR2_REGNO),
28013 cfa_restores);
28014
28015 if (info->lr_save_p)
28016 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28017 gen_rtx_REG (Pmode, LR_REGNO),
28018 cfa_restores);
28019 return cfa_restores;
28020 }
28021
28022 /* Return true if OFFSET from the stack pointer can be clobbered by signals.
28023 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
28024 below the stack pointer that are not clobbered by signals. */
28025
28026 static inline bool
28027 offset_below_red_zone_p (HOST_WIDE_INT offset)
28028 {
28029 return offset < (DEFAULT_ABI == ABI_V4
28030 ? 0
28031 : TARGET_32BIT ? -220 : -288);
28032 }
28033
28034 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28035
28036 static void
28037 emit_cfa_restores (rtx cfa_restores)
28038 {
28039 rtx_insn *insn = get_last_insn ();
28040 rtx *loc = &REG_NOTES (insn);
28041
28042 while (*loc)
28043 loc = &XEXP (*loc, 1);
28044 *loc = cfa_restores;
28045 RTX_FRAME_RELATED_P (insn) = 1;
28046 }
28047
28048 /* Emit function epilogue as insns. */
28049
28050 void
28051 rs6000_emit_epilogue (int sibcall)
28052 {
28053 rs6000_stack_t *info;
28054 int restoring_GPRs_inline;
28055 int restoring_FPRs_inline;
28056 int using_load_multiple;
28057 int using_mtcr_multiple;
28058 int use_backchain_to_restore_sp;
28059 int restore_lr;
28060 int strategy;
28061 HOST_WIDE_INT frame_off = 0;
28062 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28063 rtx frame_reg_rtx = sp_reg_rtx;
28064 rtx cfa_restores = NULL_RTX;
28065 rtx insn;
28066 rtx cr_save_reg = NULL_RTX;
28067 machine_mode reg_mode = Pmode;
28068 int reg_size = TARGET_32BIT ? 4 : 8;
28069 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
28070 int fp_reg_size = 8;
28071 int i;
28072 bool exit_func;
28073 unsigned ptr_regno;
28074
28075 info = rs6000_stack_info ();
28076
28077 strategy = info->savres_strategy;
28078 using_load_multiple = strategy & REST_MULTIPLE;
28079 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28080 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28081 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
28082 || rs6000_tune == PROCESSOR_PPC603
28083 || rs6000_tune == PROCESSOR_PPC750
28084 || optimize_size);
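/* On these older cores, and when optimizing for size, a single
   mtcrf updating several fields at once is preferred; elsewhere
   per-field moves are assumed to be cheaper.  */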
28085 /* Restore via the backchain when we have a large frame, since this
28086 is more efficient than an addis, addi pair. The second condition
28087 here will not trigger at the moment; we don't actually need a
28088 frame pointer for alloca, but the generic parts of the compiler
28089 give us one anyway. */
28090 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28091 ? info->lr_save_offset
28092 : 0) > 32767
28093 || (cfun->calls_alloca
28094 && !frame_pointer_needed));
28095 restore_lr = (info->lr_save_p
28096 && (restoring_FPRs_inline
28097 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28098 && (restoring_GPRs_inline
28099 || info->first_fp_reg_save < 64)
28100 && !cfun->machine->lr_is_wrapped_separately);
28101
28102
28103 if (WORLD_SAVE_P (info))
28104 {
28105 int i, j;
28106 char rname[30];
28107 const char *alloc_rname;
28108 rtvec p;
28109
28110 /* eh_rest_world_r10 will return to the location saved in the LR
28111 stack slot (which is not likely to be our caller).
28112 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28113 rest_world is similar, except any R10 parameter is ignored.
28114 The exception-handling stuff that was here in 2.95 is no
28115 longer necessary. */
28116
28117 p = rtvec_alloc (9
28118 + 32 - info->first_gp_reg_save
28119 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28120 + 63 + 1 - info->first_fp_reg_save);
28121
28122 strcpy (rname, ((crtl->calls_eh_return) ?
28123 "*eh_rest_world_r10" : "*rest_world"));
28124 alloc_rname = ggc_strdup (rname);
28125
28126 j = 0;
28127 RTVEC_ELT (p, j++) = ret_rtx;
28128 RTVEC_ELT (p, j++)
28129 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28130 /* The instruction pattern requires a clobber here;
28131 it is shared with the restVEC helper. */
28132 RTVEC_ELT (p, j++)
28133 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
28134
28135 {
28136 /* CR register traditionally saved as CR2. */
28137 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28138 RTVEC_ELT (p, j++)
28139 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28140 if (flag_shrink_wrap)
28141 {
28142 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28143 gen_rtx_REG (Pmode, LR_REGNO),
28144 cfa_restores);
28145 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28146 }
28147 }
28148
28149 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28150 {
28151 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28152 RTVEC_ELT (p, j++)
28153 = gen_frame_load (reg,
28154 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28155 if (flag_shrink_wrap
28156 && save_reg_p (info->first_gp_reg_save + i))
28157 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28158 }
28159 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28160 {
28161 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28162 RTVEC_ELT (p, j++)
28163 = gen_frame_load (reg,
28164 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28165 if (flag_shrink_wrap
28166 && save_reg_p (info->first_altivec_reg_save + i))
28167 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28168 }
28169 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28170 {
28171 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
28172 info->first_fp_reg_save + i);
28173 RTVEC_ELT (p, j++)
28174 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28175 if (flag_shrink_wrap
28176 && save_reg_p (info->first_fp_reg_save + i))
28177 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28178 }
28179 RTVEC_ELT (p, j++)
28180 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
28181 RTVEC_ELT (p, j++)
28182 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
28183 RTVEC_ELT (p, j++)
28184 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
28185 RTVEC_ELT (p, j++)
28186 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
28187 RTVEC_ELT (p, j++)
28188 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28189 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28190
28191 if (flag_shrink_wrap)
28192 {
28193 REG_NOTES (insn) = cfa_restores;
28194 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28195 RTX_FRAME_RELATED_P (insn) = 1;
28196 }
28197 return;
28198 }
28199
28200 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28201 if (info->push_p)
28202 frame_off = info->total_size;
28203
28204 /* Restore AltiVec registers if we must do so before adjusting the
28205 stack. */
28206 if (info->altivec_size != 0
28207 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28208 || (DEFAULT_ABI != ABI_V4
28209 && offset_below_red_zone_p (info->altivec_save_offset))))
28210 {
28211 int i;
28212 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28213
28214 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28215 if (use_backchain_to_restore_sp)
28216 {
28217 int frame_regno = 11;
28218
28219 if ((strategy & REST_INLINE_VRS) == 0)
28220 {
28221 /* For the frame register, select whichever of r11 and r12
28222 is not clobbered by an out-of-line restore function. */
28223 frame_regno = 11 + 12 - scratch_regno;
28224 }
28225 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28226 emit_move_insn (frame_reg_rtx,
28227 gen_rtx_MEM (Pmode, sp_reg_rtx));
28228 frame_off = 0;
28229 }
28230 else if (frame_pointer_needed)
28231 frame_reg_rtx = hard_frame_pointer_rtx;
28232
28233 if ((strategy & REST_INLINE_VRS) == 0)
28234 {
28235 int end_save = info->altivec_save_offset + info->altivec_size;
28236 int ptr_off;
28237 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28238 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28239
28240 if (end_save + frame_off != 0)
28241 {
28242 rtx offset = GEN_INT (end_save + frame_off);
28243
28244 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28245 }
28246 else
28247 emit_move_insn (ptr_reg, frame_reg_rtx);
28248
28249 ptr_off = -end_save;
28250 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28251 info->altivec_save_offset + ptr_off,
28252 0, V4SImode, SAVRES_VR);
28253 }
28254 else
28255 {
28256 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28257 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28258 {
28259 rtx addr, areg, mem, insn;
28260 rtx reg = gen_rtx_REG (V4SImode, i);
28261 HOST_WIDE_INT offset
28262 = (info->altivec_save_offset + frame_off
28263 + 16 * (i - info->first_altivec_reg_save));
28264
28265 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28266 {
28267 mem = gen_frame_mem (V4SImode,
28268 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28269 GEN_INT (offset)));
28270 insn = gen_rtx_SET (reg, mem);
28271 }
28272 else
28273 {
28274 areg = gen_rtx_REG (Pmode, 0);
28275 emit_move_insn (areg, GEN_INT (offset));
28276
28277 /* AltiVec addressing mode is [reg+reg]. */
28278 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28279 mem = gen_frame_mem (V4SImode, addr);
28280
28281 /* Rather than emitting a generic move, force use of the
28282 lvx instruction, which we always want. In particular we
28283 don't want lxvd2x/xxpermdi for little endian. */
28284 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28285 }
28286
28287 (void) emit_insn (insn);
28288 }
28289 }
28290
28291 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28292 if (((strategy & REST_INLINE_VRS) == 0
28293 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28294 && (flag_shrink_wrap
28295 || (offset_below_red_zone_p
28296 (info->altivec_save_offset
28297 + 16 * (i - info->first_altivec_reg_save))))
28298 && save_reg_p (i))
28299 {
28300 rtx reg = gen_rtx_REG (V4SImode, i);
28301 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28302 }
28303 }
28304
28305 /* Restore VRSAVE if we must do so before adjusting the stack. */
28306 if (info->vrsave_size != 0
28307 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28308 || (DEFAULT_ABI != ABI_V4
28309 && offset_below_red_zone_p (info->vrsave_save_offset))))
28310 {
28311 rtx reg;
28312
28313 if (frame_reg_rtx == sp_reg_rtx)
28314 {
28315 if (use_backchain_to_restore_sp)
28316 {
28317 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28318 emit_move_insn (frame_reg_rtx,
28319 gen_rtx_MEM (Pmode, sp_reg_rtx));
28320 frame_off = 0;
28321 }
28322 else if (frame_pointer_needed)
28323 frame_reg_rtx = hard_frame_pointer_rtx;
28324 }
28325
28326 reg = gen_rtx_REG (SImode, 12);
28327 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28328 info->vrsave_save_offset + frame_off));
28329
28330 emit_insn (generate_set_vrsave (reg, info, 1));
28331 }
28332
28333 insn = NULL_RTX;
28334 /* If we have a large stack frame, restore the old stack pointer
28335 using the backchain. */
28336 if (use_backchain_to_restore_sp)
28337 {
28338 if (frame_reg_rtx == sp_reg_rtx)
28339 {
28340 /* Under V.4, don't reset the stack pointer until after we're done
28341 loading the saved registers. */
28342 if (DEFAULT_ABI == ABI_V4)
28343 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28344
28345 insn = emit_move_insn (frame_reg_rtx,
28346 gen_rtx_MEM (Pmode, sp_reg_rtx));
28347 frame_off = 0;
28348 }
28349 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28350 && DEFAULT_ABI == ABI_V4)
28351 /* frame_reg_rtx has been set up by the altivec restore. */
28352 ;
28353 else
28354 {
28355 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28356 frame_reg_rtx = sp_reg_rtx;
28357 }
28358 }
28359 /* If we have a frame pointer, we can restore the old stack pointer
28360 from it. */
28361 else if (frame_pointer_needed)
28362 {
28363 frame_reg_rtx = sp_reg_rtx;
28364 if (DEFAULT_ABI == ABI_V4)
28365 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28366 /* Prevent reordering memory accesses against stack pointer restore. */
28367 else if (cfun->calls_alloca
28368 || offset_below_red_zone_p (-info->total_size))
28369 rs6000_emit_stack_tie (frame_reg_rtx, true);
28370
28371 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28372 GEN_INT (info->total_size)));
28373 frame_off = 0;
28374 }
28375 else if (info->push_p
28376 && DEFAULT_ABI != ABI_V4
28377 && !crtl->calls_eh_return)
28378 {
28379 /* Prevent reordering memory accesses against stack pointer restore. */
28380 if (cfun->calls_alloca
28381 || offset_below_red_zone_p (-info->total_size))
28382 rs6000_emit_stack_tie (frame_reg_rtx, false);
28383 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28384 GEN_INT (info->total_size)));
28385 frame_off = 0;
28386 }
28387 if (insn && frame_reg_rtx == sp_reg_rtx)
28388 {
28389 if (cfa_restores)
28390 {
28391 REG_NOTES (insn) = cfa_restores;
28392 cfa_restores = NULL_RTX;
28393 }
28394 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28395 RTX_FRAME_RELATED_P (insn) = 1;
28396 }
28397
28398 /* Restore AltiVec registers if we have not done so already. */
28399 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28400 && info->altivec_size != 0
28401 && (DEFAULT_ABI == ABI_V4
28402 || !offset_below_red_zone_p (info->altivec_save_offset)))
28403 {
28404 int i;
28405
28406 if ((strategy & REST_INLINE_VRS) == 0)
28407 {
28408 int end_save = info->altivec_save_offset + info->altivec_size;
28409 int ptr_off;
28410 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28411 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28412 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28413
28414 if (end_save + frame_off != 0)
28415 {
28416 rtx offset = GEN_INT (end_save + frame_off);
28417
28418 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28419 }
28420 else
28421 emit_move_insn (ptr_reg, frame_reg_rtx);
28422
28423 ptr_off = -end_save;
28424 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28425 info->altivec_save_offset + ptr_off,
28426 0, V4SImode, SAVRES_VR);
28427 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28428 {
28429 /* Frame reg was clobbered by out-of-line save. Restore it
28430 from ptr_reg, and if we are calling an out-of-line gpr or
28431 fpr restore, set up the correct pointer and offset. */
28432 unsigned newptr_regno = 1;
28433 if (!restoring_GPRs_inline)
28434 {
28435 bool lr = info->gp_save_offset + info->gp_size == 0;
28436 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28437 newptr_regno = ptr_regno_for_savres (sel);
28438 end_save = info->gp_save_offset + info->gp_size;
28439 }
28440 else if (!restoring_FPRs_inline)
28441 {
28442 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28443 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28444 newptr_regno = ptr_regno_for_savres (sel);
28445 end_save = info->fp_save_offset + info->fp_size;
28446 }
28447
28448 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28449 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28450
28451 if (end_save + ptr_off != 0)
28452 {
28453 rtx offset = GEN_INT (end_save + ptr_off);
28454
28455 frame_off = -end_save;
28456 if (TARGET_32BIT)
28457 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28458 ptr_reg, offset));
28459 else
28460 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28461 ptr_reg, offset));
28462 }
28463 else
28464 {
28465 frame_off = ptr_off;
28466 emit_move_insn (frame_reg_rtx, ptr_reg);
28467 }
28468 }
28469 }
28470 else
28471 {
28472 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28473 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28474 {
28475 rtx addr, areg, mem, insn;
28476 rtx reg = gen_rtx_REG (V4SImode, i);
28477 HOST_WIDE_INT offset
28478 = (info->altivec_save_offset + frame_off
28479 + 16 * (i - info->first_altivec_reg_save));
28480
28481 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28482 {
28483 mem = gen_frame_mem (V4SImode,
28484 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28485 GEN_INT (offset)));
28486 insn = gen_rtx_SET (reg, mem);
28487 }
28488 else
28489 {
28490 areg = gen_rtx_REG (Pmode, 0);
28491 emit_move_insn (areg, GEN_INT (offset));
28492
28493 /* AltiVec addressing mode is [reg+reg]. */
28494 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28495 mem = gen_frame_mem (V4SImode, addr);
28496
28497 /* Rather than emitting a generic move, force use of the
28498 lvx instruction, which we always want. In particular we
28499 don't want lxvd2x/xxpermdi for little endian. */
28500 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28501 }
28502
28503 (void) emit_insn (insn);
28504 }
28505 }
28506
28507 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28508 if (((strategy & REST_INLINE_VRS) == 0
28509 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28510 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28511 && save_reg_p (i))
28512 {
28513 rtx reg = gen_rtx_REG (V4SImode, i);
28514 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28515 }
28516 }
28517
28518 /* Restore VRSAVE if we have not done so already. */
28519 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28520 && info->vrsave_size != 0
28521 && (DEFAULT_ABI == ABI_V4
28522 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28523 {
28524 rtx reg;
28525
28526 reg = gen_rtx_REG (SImode, 12);
28527 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28528 info->vrsave_save_offset + frame_off));
28529
28530 emit_insn (generate_set_vrsave (reg, info, 1));
28531 }
28532
28533 /* If we exit by an out-of-line restore function on ABI_V4 then that
28534 function will deallocate the stack, so we don't need to worry
28535 about the unwinder restoring cr from an invalid stack frame
28536 location. */
28537 exit_func = (!restoring_FPRs_inline
28538 || (!restoring_GPRs_inline
28539 && info->first_fp_reg_save == 64));
28540
28541 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28542 *separate* slots if the routine calls __builtin_eh_return, so
28543 that they can be independently restored by the unwinder. */
28544 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28545 {
28546 int i, cr_off = info->ehcr_offset;
28547
28548 for (i = 0; i < 8; i++)
28549 if (!call_used_regs[CR0_REGNO + i])
28550 {
28551 rtx reg = gen_rtx_REG (SImode, 0);
28552 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28553 cr_off + frame_off));
28554
28555 insn = emit_insn (gen_movsi_to_cr_one
28556 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28557
28558 if (!exit_func && flag_shrink_wrap)
28559 {
28560 add_reg_note (insn, REG_CFA_RESTORE,
28561 gen_rtx_REG (SImode, CR0_REGNO + i));
28562
28563 RTX_FRAME_RELATED_P (insn) = 1;
28564 }
28565
28566 cr_off += reg_size;
28567 }
28568 }
28569
28570 /* Get the old lr if we saved it. If we are restoring registers
28571 out-of-line, then the out-of-line routines can do this for us. */
28572 if (restore_lr && restoring_GPRs_inline)
28573 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28574
28575 /* Get the old cr if we saved it. */
28576 if (info->cr_save_p)
28577 {
28578 unsigned cr_save_regno = 12;
28579
28580 if (!restoring_GPRs_inline)
28581 {
28582 /* Ensure we don't use the register needed by the out-of-line
28583 gpr restore below. */
28584 bool lr = info->gp_save_offset + info->gp_size == 0;
28585 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28586 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28587
28588 if (gpr_ptr_regno == 12)
28589 cr_save_regno = 11;
28590 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28591 }
28592 else if (REGNO (frame_reg_rtx) == 12)
28593 cr_save_regno = 11;
28594
28595 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28596 info->cr_save_offset + frame_off,
28597 exit_func);
28598 }
28599
28600 /* Set LR here to try to overlap restores below. */
28601 if (restore_lr && restoring_GPRs_inline)
28602 restore_saved_lr (0, exit_func);
28603
28604 /* Load exception handler data registers, if needed. */
28605 if (crtl->calls_eh_return)
28606 {
28607 unsigned int i, regno;
28608
28609 if (TARGET_AIX)
28610 {
28611 rtx reg = gen_rtx_REG (reg_mode, 2);
28612 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28613 frame_off + RS6000_TOC_SAVE_SLOT));
28614 }
28615
28616 for (i = 0; ; ++i)
28617 {
28618 rtx mem;
28619
28620 regno = EH_RETURN_DATA_REGNO (i);
28621 if (regno == INVALID_REGNUM)
28622 break;
28623
28624 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28625 info->ehrd_offset + frame_off
28626 + reg_size * (int) i);
28627
28628 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28629 }
28630 }
28631
28632 /* Restore GPRs. This is done as a PARALLEL if we are using
28633 the load-multiple instructions. */
28634 if (!restoring_GPRs_inline)
28635 {
28636 /* We are jumping to an out-of-line function. */
28637 rtx ptr_reg;
28638 int end_save = info->gp_save_offset + info->gp_size;
28639 bool can_use_exit = end_save == 0;
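/* A zero END_SAVE means the GPR save area ends exactly at the
   top of the frame, so the out-of-line "exit" variant, which
   also restores LR and pops the frame, can be used.  */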
28640 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28641 int ptr_off;
28642
28643 /* Emit stack reset code if we need it. */
28644 ptr_regno = ptr_regno_for_savres (sel);
28645 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28646 if (can_use_exit)
28647 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28648 else if (end_save + frame_off != 0)
28649 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28650 GEN_INT (end_save + frame_off)));
28651 else if (REGNO (frame_reg_rtx) != ptr_regno)
28652 emit_move_insn (ptr_reg, frame_reg_rtx);
28653 if (REGNO (frame_reg_rtx) == ptr_regno)
28654 frame_off = -end_save;
28655
28656 if (can_use_exit && info->cr_save_p)
28657 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28658
28659 ptr_off = -end_save;
28660 rs6000_emit_savres_rtx (info, ptr_reg,
28661 info->gp_save_offset + ptr_off,
28662 info->lr_save_offset + ptr_off,
28663 reg_mode, sel);
28664 }
28665 else if (using_load_multiple)
28666 {
28667 rtvec p;
28668 p = rtvec_alloc (32 - info->first_gp_reg_save);
28669 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28670 RTVEC_ELT (p, i)
28671 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28672 frame_reg_rtx,
28673 info->gp_save_offset + frame_off + reg_size * i);
28674 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28675 }
28676 else
28677 {
28678 int offset = info->gp_save_offset + frame_off;
28679 for (i = info->first_gp_reg_save; i < 32; i++)
28680 {
28681 if (save_reg_p (i)
28682 && !cfun->machine->gpr_is_wrapped_separately[i])
28683 {
28684 rtx reg = gen_rtx_REG (reg_mode, i);
28685 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28686 }
28687
28688 offset += reg_size;
28689 }
28690 }
28691
28692 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28693 {
28694 /* If the frame pointer was used then we can't delay emitting
28695 a REG_CFA_DEF_CFA note. This must happen on the insn that
28696 restores the frame pointer, r31. We may have already emitted
28697 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28698 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28699 be harmless if emitted. */
28700 if (frame_pointer_needed)
28701 {
28702 insn = get_last_insn ();
28703 add_reg_note (insn, REG_CFA_DEF_CFA,
28704 plus_constant (Pmode, frame_reg_rtx, frame_off));
28705 RTX_FRAME_RELATED_P (insn) = 1;
28706 }
28707
28708 /* Set up cfa_restores. We always need these when
28709 shrink-wrapping. If not shrink-wrapping then we only need
28710 the cfa_restore when the stack location is no longer valid.
28711 The cfa_restores must be emitted on or before the insn that
28712 invalidates the stack, and of course must not be emitted
28713 before the insn that actually does the restore. The latter
28714 is why it is a bad idea to emit the cfa_restores as a group
28715 on the last instruction here that actually does a restore:
28716 That insn may be reordered with respect to others doing
28717 restores. */
28718 if (flag_shrink_wrap
28719 && !restoring_GPRs_inline
28720 && info->first_fp_reg_save == 64)
28721 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28722
28723 for (i = info->first_gp_reg_save; i < 32; i++)
28724 if (save_reg_p (i)
28725 && !cfun->machine->gpr_is_wrapped_separately[i])
28726 {
28727 rtx reg = gen_rtx_REG (reg_mode, i);
28728 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28729 }
28730 }
28731
28732 if (!restoring_GPRs_inline
28733 && info->first_fp_reg_save == 64)
28734 {
28735 /* We are jumping to an out-of-line function. */
28736 if (cfa_restores)
28737 emit_cfa_restores (cfa_restores);
28738 return;
28739 }
28740
28741 if (restore_lr && !restoring_GPRs_inline)
28742 {
28743 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28744 restore_saved_lr (0, exit_func);
28745 }
28746
28747 /* Restore fpr's if we need to do it without calling a function. */
28748 if (restoring_FPRs_inline)
28749 {
28750 int offset = info->fp_save_offset + frame_off;
28751 for (i = info->first_fp_reg_save; i < 64; i++)
28752 {
28753 if (save_reg_p (i)
28754 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28755 {
28756 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28757 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28758 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28759 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28760 cfa_restores);
28761 }
28762
28763 offset += fp_reg_size;
28764 }
28765 }
28766
28767 /* If we saved cr, restore it here; just those fields that were used. */
28768 if (info->cr_save_p)
28769 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28770
28771 /* If this is V.4, unwind the stack pointer after all of the loads
28772 have been done, or set up r11 if we are restoring fp out of line. */
28773 ptr_regno = 1;
28774 if (!restoring_FPRs_inline)
28775 {
28776 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28777 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28778 ptr_regno = ptr_regno_for_savres (sel);
28779 }
28780
28781 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28782 if (REGNO (frame_reg_rtx) == ptr_regno)
28783 frame_off = 0;
28784
28785 if (insn && restoring_FPRs_inline)
28786 {
28787 if (cfa_restores)
28788 {
28789 REG_NOTES (insn) = cfa_restores;
28790 cfa_restores = NULL_RTX;
28791 }
28792 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28793 RTX_FRAME_RELATED_P (insn) = 1;
28794 }
28795
28796 if (crtl->calls_eh_return)
28797 {
28798 rtx sa = EH_RETURN_STACKADJ_RTX;
28799 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28800 }
28801
28802 if (!sibcall && restoring_FPRs_inline)
28803 {
28804 if (cfa_restores)
28805 {
28806 /* We can't hang the cfa_restores off a simple return,
28807 since the shrink-wrap code sometimes uses an existing
28808 return. This means there might be a path from
28809 pre-prologue code to this return, and dwarf2cfi code
28810 wants the eh_frame unwinder state to be the same on
28811 all paths to any point. So we need to emit the
28812 cfa_restores before the return. For -m64 we really
28813 don't need epilogue cfa_restores at all, except for
28814 this irritating dwarf2cfi-with-shrink-wrap
28815 requirement; the stack red-zone means eh_frame info
28816 from the prologue telling the unwinder to restore
28817 from the stack is perfectly good right to the end of
28818 the function. */
28819 emit_insn (gen_blockage ());
28820 emit_cfa_restores (cfa_restores);
28821 cfa_restores = NULL_RTX;
28822 }
28823
28824 emit_jump_insn (targetm.gen_simple_return ());
28825 }
28826
28827 if (!sibcall && !restoring_FPRs_inline)
28828 {
28829 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28830 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28831 int elt = 0;
28832 RTVEC_ELT (p, elt++) = ret_rtx;
28833 if (lr)
28834 RTVEC_ELT (p, elt++)
28835 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28836
28837 /* We have to restore more than two FP registers, so branch to the
28838 restore function. It will return to our caller. */
28839 int i;
28840 int reg;
28841 rtx sym;
28842
28843 if (flag_shrink_wrap)
28844 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28845
28846 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28847 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28848 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
28849 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28850
28851 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28852 {
28853 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28854
28855 RTVEC_ELT (p, elt++)
28856 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28857 if (flag_shrink_wrap
28858 && save_reg_p (info->first_fp_reg_save + i))
28859 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28860 }
28861
28862 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28863 }
28864
28865 if (cfa_restores)
28866 {
28867 if (sibcall)
28868 /* Ensure the cfa_restores are hung off an insn that won't
28869 be reordered above other restores. */
28870 emit_insn (gen_blockage ());
28871
28872 emit_cfa_restores (cfa_restores);
28873 }
28874 }
28875
28876 /* Write function epilogue. */
28877
28878 static void
28879 rs6000_output_function_epilogue (FILE *file)
28880 {
28881 #if TARGET_MACHO
28882 macho_branch_islands ();
28883
28884 {
28885 rtx_insn *insn = get_last_insn ();
28886 rtx_insn *deleted_debug_label = NULL;
28887
28888 /* Mach-O doesn't support labels at the end of objects, so if
28889 it looks like we might want one, take special action.
28890
28891 First, collect any sequence of deleted debug labels. */
28892 while (insn
28893 && NOTE_P (insn)
28894 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28895 {
28896 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28897 notes; instead set their CODE_LABEL_NUMBER to -1,
28898 otherwise there would be code generation differences
28899 between -g and -g0. */
28900 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28901 deleted_debug_label = insn;
28902 insn = PREV_INSN (insn);
28903 }
28904
28905 /* Second, if we have:
28906 label:
28907 barrier
28908 then this needs to be detected, so skip past the barrier. */
28909
28910 if (insn && BARRIER_P (insn))
28911 insn = PREV_INSN (insn);
28912
28913 /* Up to now we've only seen notes or barriers. */
28914 if (insn)
28915 {
28916 if (LABEL_P (insn)
28917 || (NOTE_P (insn)
28918 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28919 /* Trailing label: <barrier>. */
28920 fputs ("\tnop\n", file);
28921 else
28922 {
28923 /* Lastly, see if we have a completely empty function body. */
28924 while (insn && ! INSN_P (insn))
28925 insn = PREV_INSN (insn);
28926 /* If we don't find any insns, we've got an empty function body;
28927 i.e. completely empty, without a return or branch. This is
28928 taken as the case where a function body has been removed
28929 because it contains an inline __builtin_unreachable(). GCC
28930 states that reaching __builtin_unreachable() means UB so we're
28931 not obliged to do anything special; however, we want
28932 non-zero-sized function bodies. To meet this, and help the
28933 user out, let's trap the case. */
28934 if (insn == NULL)
28935 fputs ("\ttrap\n", file);
28936 }
28937 }
28938 else if (deleted_debug_label)
28939 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28940 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28941 CODE_LABEL_NUMBER (insn) = -1;
28942 }
28943 #endif
28944
28945 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28946 on its format.
28947
28948 We don't output a traceback table if -finhibit-size-directive was
28949 used. The documentation for -finhibit-size-directive reads
28950 ``don't output a @code{.size} assembler directive, or anything
28951 else that would cause trouble if the function is split in the
28952 middle, and the two halves are placed at locations far apart in
28953 memory.'' The traceback table has this property, since it
28954 includes the offset from the start of the function to the
28955 traceback table itself.
28956
28957 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28958 different traceback table. */
28959 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28960 && ! flag_inhibit_size_directive
28961 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28962 {
28963 const char *fname = NULL;
28964 const char *language_string = lang_hooks.name;
28965 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28966 int i;
28967 int optional_tbtab;
28968 rs6000_stack_t *info = rs6000_stack_info ();
28969
28970 if (rs6000_traceback == traceback_full)
28971 optional_tbtab = 1;
28972 else if (rs6000_traceback == traceback_part)
28973 optional_tbtab = 0;
28974 else
28975 optional_tbtab = !optimize_size && !TARGET_ELF;
28976
28977 if (optional_tbtab)
28978 {
28979 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28980 while (*fname == '.') /* V.4 encodes . in the name */
28981 fname++;
28982
28983 /* Need label immediately before tbtab, so we can compute
28984 its offset from the function start. */
28985 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28986 ASM_OUTPUT_LABEL (file, fname);
28987 }
28988
28989 /* The .tbtab pseudo-op can only be used for the first eight
28990 expressions, since it can't handle the possibly variable
28991 length fields that follow. However, if you omit the optional
28992 fields, the assembler outputs zeros for all optional fields
28993 anyway, giving each variable length field its minimum length
28994 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28995 pseudo-op at all. */
28996
28997 /* An all-zero word flags the start of the tbtab, for debuggers
28998 that have to find it by searching forward from the entry
28999 point or from the current pc. */
29000 fputs ("\t.long 0\n", file);
29001
29002 /* Tbtab format type. Use format type 0. */
29003 fputs ("\t.byte 0,", file);
29004
29005 /* Language type. Unfortunately, there does not seem to be any
29006 official way to discover the language being compiled, so we
29007 use language_string.
29008 C is 0. Fortran is 1. Ada is 3. C++ is 9.
29009 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29010 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
29011 numbers either, so for now use 0. */
29012 if (lang_GNU_C ()
29013 || ! strcmp (language_string, "GNU GIMPLE")
29014 || ! strcmp (language_string, "GNU Go")
29015 || ! strcmp (language_string, "GNU D")
29016 || ! strcmp (language_string, "libgccjit"))
29017 i = 0;
29018 else if (! strcmp (language_string, "GNU F77")
29019 || lang_GNU_Fortran ())
29020 i = 1;
29021 else if (! strcmp (language_string, "GNU Ada"))
29022 i = 3;
29023 else if (lang_GNU_CXX ()
29024 || ! strcmp (language_string, "GNU Objective-C++"))
29025 i = 9;
29026 else if (! strcmp (language_string, "GNU Java"))
29027 i = 13;
29028 else if (! strcmp (language_string, "GNU Objective-C"))
29029 i = 14;
29030 else
29031 gcc_unreachable ();
29032 fprintf (file, "%d,", i);
29033
29034 /* 8 single bit fields: global linkage (not set for C extern linkage,
29035 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29036 from start of procedure stored in tbtab, internal function, function
29037 has controlled storage, function has no toc, function uses fp,
29038 function logs/aborts fp operations. */
29039 /* Assume that fp operations are used if any fp reg must be saved. */
29040 fprintf (file, "%d,",
29041 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
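/* Editor's note: a worked example with hypothetical values.  With
   optional_tbtab == 1 and info->first_fp_reg_save == 58 (some FPRs
   are saved), the byte above is (1 << 5) | (1 << 1) == 0x22.  */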
29042
29043 /* 6 bitfields: function is interrupt handler, name present in
29044 proc table, function calls alloca, on condition directives
29045 (controls stack walks, 3 bits), saves condition reg, saves
29046 link reg. */
29047 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29048 set up as a frame pointer, even when there is no alloca call. */
29049 fprintf (file, "%d,",
29050 ((optional_tbtab << 6)
29051 | ((optional_tbtab & frame_pointer_needed) << 5)
29052 | (info->cr_save_p << 1)
29053 | (info->lr_save_p)));
29054
29055 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29056 (6 bits). */
29057 fprintf (file, "%d,",
29058 (info->push_p << 7) | (64 - info->first_fp_reg_save));
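/* Editor's note: continuing the hypothetical example (optional_tbtab
   == 1, frame_pointer_needed == 1, cr_save_p == 0, lr_save_p == 1,
   push_p == 1, first_fp_reg_save == 58), the two bytes above are
   (1 << 6) | (1 << 5) | (0 << 1) | 1 == 0x61 and
   (1 << 7) | (64 - 58) == 0x86.  */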
29059
29060 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29061 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29062
29063 if (optional_tbtab)
29064 {
29065 /* Compute the parameter info from the function decl argument
29066 list. */
29067 tree decl;
29068 int next_parm_info_bit = 31;
29069
29070 for (decl = DECL_ARGUMENTS (current_function_decl);
29071 decl; decl = DECL_CHAIN (decl))
29072 {
29073 rtx parameter = DECL_INCOMING_RTL (decl);
29074 machine_mode mode = GET_MODE (parameter);
29075
29076 if (GET_CODE (parameter) == REG)
29077 {
29078 if (SCALAR_FLOAT_MODE_P (mode))
29079 {
29080 int bits;
29081
29082 float_parms++;
29083
29084 switch (mode)
29085 {
29086 case E_SFmode:
29087 case E_SDmode:
29088 bits = 0x2;
29089 break;
29090
29091 case E_DFmode:
29092 case E_DDmode:
29093 case E_TFmode:
29094 case E_TDmode:
29095 case E_IFmode:
29096 case E_KFmode:
29097 bits = 0x3;
29098 break;
29099
29100 default:
29101 gcc_unreachable ();
29102 }
29103
29104 /* If only one bit will fit, don't OR in this entry. */
29105 if (next_parm_info_bit > 0)
29106 parm_info |= (bits << (next_parm_info_bit - 1));
29107 next_parm_info_bit -= 2;
29108 }
29109 else
29110 {
29111 fixed_parms += ((GET_MODE_SIZE (mode)
29112 + (UNITS_PER_WORD - 1))
29113 / UNITS_PER_WORD);
29114 next_parm_info_bit -= 1;
29115 }
29116 }
29117 }
29118 }
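/* Editor's note: an illustrative packing, assuming a hypothetical
   function taking (int, double) with both parameters in registers.
   The int consumes one bit (left 0) and moves next_parm_info_bit
   from 31 to 30; the double then ORs 0x3 into bits 30-29, giving
   parm_info == 0x60000000, fixed_parms == 1 and float_parms == 1.  */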
29119
29120 /* Number of fixed point parameters. */
29121 /* This is actually the number of words of fixed point parameters; thus
29122 an 8-byte struct counts as 2, and the maximum value is 8. */
29123 fprintf (file, "%d,", fixed_parms);
29124
29125 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29126 all on stack. */
29127 /* This is actually the number of fp registers that hold parameters;
29128 and thus the maximum value is 13. */
29129 /* Set parameters on stack bit if parameters are not in their original
29130 registers, regardless of whether they are on the stack? Xlc
29131 seems to set the bit when not optimizing. */
29132 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29133
29134 if (optional_tbtab)
29135 {
29136 /* Optional fields follow. Some are variable length. */
29137
29138 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29139 float, 11 double float. */
29140 /* There is an entry for each parameter in a register, in the order
29141 that they occur in the parameter list. Any intervening arguments
29142 on the stack are ignored. If the list overflows a long (max
29143 possible length 34 bits) then completely leave off all elements
29144 that don't fit. */
29145 /* Only emit this long if there was at least one parameter. */
29146 if (fixed_parms || float_parms)
29147 fprintf (file, "\t.long %d\n", parm_info);
29148
29149 /* Offset from start of code to tb table. */
29150 fputs ("\t.long ", file);
29151 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29152 RS6000_OUTPUT_BASENAME (file, fname);
29153 putc ('-', file);
29154 rs6000_output_function_entry (file, fname);
29155 putc ('\n', file);
29156
29157 /* Interrupt handler mask. */
29158 /* Omit this long, since we never set the interrupt handler bit
29159 above. */
29160
29161 /* Number of CTL (controlled storage) anchors. */
29162 /* Omit this long, since the has_ctl bit is never set above. */
29163
29164 /* Displacement into stack of each CTL anchor. */
29165 /* Omit this list of longs, because there are no CTL anchors. */
29166
29167 /* Length of function name. */
29168 if (*fname == '*')
29169 ++fname;
29170 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29171
29172 /* Function name. */
29173 assemble_string (fname, strlen (fname));
29174
29175 /* Register for alloca automatic storage; this is always reg 31.
29176 Only emit this if the alloca bit was set above. */
29177 if (frame_pointer_needed)
29178 fputs ("\t.byte 31\n", file);
29179
29180 fputs ("\t.align 2\n", file);
29181 }
29182 }
29183
29184 /* Arrange to define .LCTOC1 label, if not already done. */
29185 if (need_toc_init)
29186 {
29187 need_toc_init = 0;
29188 if (!toc_initialized)
29189 {
29190 switch_to_section (toc_section);
29191 switch_to_section (current_function_section ());
29192 }
29193 }
29194 }
29195
29196 /* -fsplit-stack support. */
29197
29198 /* A SYMBOL_REF for __morestack. */
29199 static GTY(()) rtx morestack_ref;
29200
29201 static rtx
29202 gen_add3_const (rtx rt, rtx ra, long c)
29203 {
29204 if (TARGET_64BIT)
29205 return gen_adddi3 (rt, ra, GEN_INT (c));
29206 else
29207 return gen_addsi3 (rt, ra, GEN_INT (c));
29208 }
29209
29210 /* Emit -fsplit-stack prologue, which goes before the regular function
29211 prologue (at the local entry point in the case of ELFv2). */
29212
29213 void
29214 rs6000_expand_split_stack_prologue (void)
29215 {
29216 rs6000_stack_t *info = rs6000_stack_info ();
29217 unsigned HOST_WIDE_INT allocate;
29218 long alloc_hi, alloc_lo;
29219 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29220 rtx_insn *insn;
29221
29222 gcc_assert (flag_split_stack && reload_completed);
29223
29224 if (!info->push_p)
29225 return;
29226
29227 if (global_regs[29])
29228 {
29229 error ("%qs uses register r29", "-fsplit-stack");
29230 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29231 "conflicts with %qD", global_regs_decl[29]);
29232 }
29233
29234 allocate = info->total_size;
29235 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29236 {
29237 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29238 return;
29239 }
29240 if (morestack_ref == NULL_RTX)
29241 {
29242 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29243 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29244 | SYMBOL_FLAG_FUNCTION);
29245 }
29246
29247 r0 = gen_rtx_REG (Pmode, 0);
29248 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29249 r12 = gen_rtx_REG (Pmode, 12);
29250 emit_insn (gen_load_split_stack_limit (r0));
29251 /* Always emit two insns here to calculate the requested stack,
29252 so that the linker can edit them when adjusting size for calling
29253 non-split-stack code. */
29254 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29255 alloc_lo = -allocate - alloc_hi;
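/* Editor's note: a worked example with the hypothetical frame size
   allocate == 0x12345.  Then -allocate == -0x12345, so
   alloc_hi == (-0x12345 + 0x8000) & ~0xffff == -0x10000 and
   alloc_lo == -0x12345 - (-0x10000) == -0x2345, which fits in a
   signed 16-bit immediate; alloc_hi + alloc_lo == -allocate.  */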
29256 if (alloc_hi != 0)
29257 {
29258 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29259 if (alloc_lo != 0)
29260 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29261 else
29262 emit_insn (gen_nop ());
29263 }
29264 else
29265 {
29266 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29267 emit_insn (gen_nop ());
29268 }
29269
29270 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29271 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29272 ok_label = gen_label_rtx ();
29273 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29274 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29275 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29276 pc_rtx);
29277 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29278 JUMP_LABEL (insn) = ok_label;
29279 /* Mark the jump as very likely to be taken. */
29280 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29281
29282 lr = gen_rtx_REG (Pmode, LR_REGNO);
29283 insn = emit_move_insn (r0, lr);
29284 RTX_FRAME_RELATED_P (insn) = 1;
29285 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29286 RTX_FRAME_RELATED_P (insn) = 1;
29287
29288 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29289 const0_rtx, const0_rtx));
29290 call_fusage = NULL_RTX;
29291 use_reg (&call_fusage, r12);
29292 /* Say the call uses r0, even though it doesn't, to stop regrename
29293 from twiddling with the insns saving lr, trashing args for cfun.
29294 The insns restoring lr are similarly protected by making
29295 split_stack_return use r0. */
29296 use_reg (&call_fusage, r0);
29297 add_function_usage_to (insn, call_fusage);
29298 /* Indicate that this function can't jump to non-local gotos. */
29299 make_reg_eh_region_note_nothrow_nononlocal (insn);
29300 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29301 insn = emit_move_insn (lr, r0);
29302 add_reg_note (insn, REG_CFA_RESTORE, lr);
29303 RTX_FRAME_RELATED_P (insn) = 1;
29304 emit_insn (gen_split_stack_return ());
29305
29306 emit_label (ok_label);
29307 LABEL_NUSES (ok_label) = 1;
29308 }
29309
29310 /* Return the internal arg pointer used for function incoming
29311 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29312 to copy it to a pseudo in order for it to be preserved over calls
29313 and suchlike. We'd really like to use a pseudo here for the
29314 internal arg pointer but data-flow analysis is not prepared to
29315 accept pseudos as live at the beginning of a function. */
29316
29317 static rtx
29318 rs6000_internal_arg_pointer (void)
29319 {
29320 if (flag_split_stack
29321 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29322 == NULL))
29324 {
29325 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29326 {
29327 rtx pat;
29328
29329 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29330 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29331
29332 /* Put the pseudo initialization right after the note at the
29333 beginning of the function. */
29334 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29335 gen_rtx_REG (Pmode, 12));
29336 push_topmost_sequence ();
29337 emit_insn_after (pat, get_insns ());
29338 pop_topmost_sequence ();
29339 }
29340 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29341 FIRST_PARM_OFFSET (current_function_decl));
29342 return copy_to_reg (ret);
29343 }
29344 return virtual_incoming_args_rtx;
29345 }
29346
29347 /* We may have to tell the dataflow pass that the split stack prologue
29348 is initializing a register. */
29349
29350 static void
29351 rs6000_live_on_entry (bitmap regs)
29352 {
29353 if (flag_split_stack)
29354 bitmap_set_bit (regs, 12);
29355 }
29356
29357 /* Emit -fsplit-stack dynamic stack allocation space check. */
29358
29359 void
29360 rs6000_split_stack_space_check (rtx size, rtx label)
29361 {
29362 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29363 rtx limit = gen_reg_rtx (Pmode);
29364 rtx requested = gen_reg_rtx (Pmode);
29365 rtx cmp = gen_reg_rtx (CCUNSmode);
29366 rtx jump;
29367
29368 emit_insn (gen_load_split_stack_limit (limit));
29369 if (CONST_INT_P (size))
29370 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29371 else
29372 {
29373 size = force_reg (Pmode, size);
29374 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29375 }
29376 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29377 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29378 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29379 gen_rtx_LABEL_REF (VOIDmode, label),
29380 pc_rtx);
29381 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29382 JUMP_LABEL (jump) = label;
29383 }
29384 \f
29385 /* A C compound statement that outputs the assembler code for a thunk
29386 function, used to implement C++ virtual function calls with
29387 multiple inheritance. The thunk acts as a wrapper around a virtual
29388 function, adjusting the implicit object parameter before handing
29389 control off to the real function.
29390
29391 First, emit code to add the integer DELTA to the location that
29392 contains the incoming first argument. Assume that this argument
29393 contains a pointer, and is the one used to pass the `this' pointer
29394 in C++. This is the incoming argument *before* the function
29395 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29396 values of all other incoming arguments.
29397
29398 After the addition, emit code to jump to FUNCTION, which is a
29399 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29400 not touch the return address. Hence returning from FUNCTION will
29401 return to whoever called the current `thunk'.
29402
29403 The effect must be as if FUNCTION had been called directly with the
29404 adjusted first argument. This macro is responsible for emitting
29405 all of the code for a thunk function; output_function_prologue()
29406 and output_function_epilogue() are not invoked.
29407
29408 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29409 been extracted from it.) It might possibly be useful on some
29410 targets, but probably not.
29411
29412 If you do not define this macro, the target-independent code in the
29413 C++ frontend will generate a less efficient heavyweight thunk that
29414 calls FUNCTION instead of jumping to it. The generic approach does
29415 not support varargs. */
29416
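/* Editor's sketch of what the emitted thunk computes, as C-like
   pseudo-code (illustrative only, not part of the implementation):

     this += delta;
     if (vcall_offset != 0)
       this += *(ptrdiff_t *) (*(char **) this + vcall_offset);
     tail-jump to FUNCTION;   -- the return address is left untouched

   i.e. a constant adjustment, an optional further adjustment loaded
   via the vtable pointer, then a sibling jump.  */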
29417 static void
29418 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29419 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29420 tree function)
29421 {
29422 rtx this_rtx, funexp;
29423 rtx_insn *insn;
29424
29425 reload_completed = 1;
29426 epilogue_completed = 1;
29427
29428 /* Mark the end of the (empty) prologue. */
29429 emit_note (NOTE_INSN_PROLOGUE_END);
29430
29431 /* Find the "this" pointer. If the function returns a structure,
29432 the structure return pointer is in r3. */
29433 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29434 this_rtx = gen_rtx_REG (Pmode, 4);
29435 else
29436 this_rtx = gen_rtx_REG (Pmode, 3);
29437
29438 /* Apply the constant offset, if required. */
29439 if (delta)
29440 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29441
29442 /* Apply the offset from the vtable, if required. */
29443 if (vcall_offset)
29444 {
29445 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29446 rtx tmp = gen_rtx_REG (Pmode, 12);
29447
29448 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29449 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29450 {
29451 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29452 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29453 }
29454 else
29455 {
29456 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29457
29458 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29459 }
29460 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29461 }
29462
29463 /* Generate a tail call to the target function. */
29464 if (!TREE_USED (function))
29465 {
29466 assemble_external (function);
29467 TREE_USED (function) = 1;
29468 }
29469 funexp = XEXP (DECL_RTL (function), 0);
29470 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29471
29472 #if TARGET_MACHO
29473 if (MACHOPIC_INDIRECT)
29474 funexp = machopic_indirect_call_target (funexp);
29475 #endif
29476
29477 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29478 generate sibcall RTL explicitly. */
29479 insn = emit_call_insn (
29480 gen_rtx_PARALLEL (VOIDmode,
29481 gen_rtvec (3,
29482 gen_rtx_CALL (VOIDmode,
29483 funexp, const0_rtx),
29484 gen_rtx_USE (VOIDmode, const0_rtx),
29485 simple_return_rtx)));
29486 SIBLING_CALL_P (insn) = 1;
29487 emit_barrier ();
29488
29489 /* Run just enough of rest_of_compilation to get the insns emitted.
29490 There's not really enough bulk here to make other passes such as
29491 instruction scheduling worthwhile. Note that use_thunk calls
29492 assemble_start_function and assemble_end_function. */
29493 insn = get_insns ();
29494 shorten_branches (insn);
29495 final_start_function (insn, file, 1);
29496 final (insn, file, 1);
29497 final_end_function ();
29498
29499 reload_completed = 0;
29500 epilogue_completed = 0;
29501 }
29502 \f
29503 /* A quick summary of the various types of 'constant-pool tables'
29504 under PowerPC:
29505
29506 Target       Flags            Name             One table per
29507 AIX          (none)           AIX TOC          object file
29508 AIX          -mfull-toc       AIX TOC          object file
29509 AIX          -mminimal-toc    AIX minimal TOC  translation unit
29510 SVR4/EABI    (none)           SVR4 SDATA       object file
29511 SVR4/EABI    -fpic            SVR4 pic         object file
29512 SVR4/EABI    -fPIC            SVR4 PIC         translation unit
29513 SVR4/EABI    -mrelocatable    EABI TOC         function
29514 SVR4/EABI    -maix            AIX TOC          object file
29515 SVR4/EABI    -maix -mminimal-toc
29516                               AIX minimal TOC  translation unit
29517
29518 Name             Reg.  Set by  entries  contains:
29519                                made by  addrs?   fp?      sum?
29520
29521 AIX TOC           2    crt0    as       Y        option   option
29522 AIX minimal TOC  30    prolog  gcc      Y        Y        option
29523 SVR4 SDATA       13    crt0    gcc      N        Y        N
29524 SVR4 pic         30    prolog  ld       Y        not yet  N
29525 SVR4 PIC         30    prolog  gcc      Y        option   option
29526 EABI TOC         30    prolog  gcc      Y        option   option
29527
29528 */
29529
29530 /* Hash functions for the hash table. */
29531
29532 static unsigned
29533 rs6000_hash_constant (rtx k)
29534 {
29535 enum rtx_code code = GET_CODE (k);
29536 machine_mode mode = GET_MODE (k);
29537 unsigned result = (code << 3) ^ mode;
29538 const char *format;
29539 int flen, fidx;
29540
29541 format = GET_RTX_FORMAT (code);
29542 flen = strlen (format);
29543 fidx = 0;
29544
29545 switch (code)
29546 {
29547 case LABEL_REF:
29548 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29549
29550 case CONST_WIDE_INT:
29551 {
29552 int i;
29553 flen = CONST_WIDE_INT_NUNITS (k);
29554 for (i = 0; i < flen; i++)
29555 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29556 return result;
29557 }
29558
29559 case CONST_DOUBLE:
29560 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29561
29562 case CODE_LABEL:
29563 fidx = 3;
29564 break;
29565
29566 default:
29567 break;
29568 }
29569
29570 for (; fidx < flen; fidx++)
29571 switch (format[fidx])
29572 {
29573 case 's':
29574 {
29575 unsigned i, len;
29576 const char *str = XSTR (k, fidx);
29577 len = strlen (str);
29578 result = result * 613 + len;
29579 for (i = 0; i < len; i++)
29580 result = result * 613 + (unsigned) str[i];
29581 break;
29582 }
29583 case 'u':
29584 case 'e':
29585 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29586 break;
29587 case 'i':
29588 case 'n':
29589 result = result * 613 + (unsigned) XINT (k, fidx);
29590 break;
29591 case 'w':
29592 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29593 result = result * 613 + (unsigned) XWINT (k, fidx);
29594 else
29595 {
29596 size_t i;
29597 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29598 result = result * 613 + (unsigned) (XWINT (k, fidx)
29599 >> CHAR_BIT * i);
29600 }
29601 break;
29602 case '0':
29603 break;
29604 default:
29605 gcc_unreachable ();
29606 }
29607
29608 return result;
29609 }
29610
29611 hashval_t
29612 toc_hasher::hash (toc_hash_struct *thc)
29613 {
29614 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29615 }
29616
29617 /* Compare H1 and H2 for equivalence. */
29618
29619 bool
29620 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29621 {
29622 rtx r1 = h1->key;
29623 rtx r2 = h2->key;
29624
29625 if (h1->key_mode != h2->key_mode)
29626 return 0;
29627
29628 return rtx_equal_p (r1, r2);
29629 }
29630
29631 /* These are the names given by the C++ front-end to vtables and
29632 vtable-like objects. Ideally, this logic should not be here;
29633 instead, there should be some programmatic way of inquiring as
29634 to whether or not an object is a vtable. */
29635
29636 #define VTABLE_NAME_P(NAME) \
29637 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29638 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29639 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29640 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29641 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
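/* Editor's note: the macro tests the enclosing function's `name'
   local rather than its NAME argument, so it can only be used where
   such a local is in scope.  As an illustration, a hypothetical
   Itanium-ABI mangled vtable symbol "_ZTV4Base" matches the "_ZTV"
   prefix above.  */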
29642
29643 #ifdef NO_DOLLAR_IN_LABEL
29644 /* Return a GGC-allocated character string translating dollar signs in
29645 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29646
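/* Editor's note: for example, a hypothetical input "foo$bar$baz"
   comes back as "foo_bar_baz", while a name whose first character
   is '$' (q == name below) is returned unchanged.  */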
29647 const char *
29648 rs6000_xcoff_strip_dollar (const char *name)
29649 {
29650 char *strip, *p;
29651 const char *q;
29652 size_t len;
29653
29654 q = (const char *) strchr (name, '$');
29655
29656 if (q == 0 || q == name)
29657 return name;
29658
29659 len = strlen (name);
29660 strip = XALLOCAVEC (char, len + 1);
29661 strcpy (strip, name);
29662 p = strip + (q - name);
29663 while (p)
29664 {
29665 *p = '_';
29666 p = strchr (p + 1, '$');
29667 }
29668
29669 return ggc_alloc_string (strip, len);
29670 }
29671 #endif
29672
29673 void
29674 rs6000_output_symbol_ref (FILE *file, rtx x)
29675 {
29676 const char *name = XSTR (x, 0);
29677
29678 /* Currently C++ toc references to vtables can be emitted before it
29679 is decided whether the vtable is public or private. If this is
29680 the case, then the linker will eventually complain that there is
29681 a reference to an unknown section. Thus, for vtables only,
29682 we emit the TOC reference to reference the identifier and not the
29683 symbol. */
29684 if (VTABLE_NAME_P (name))
29685 {
29686 RS6000_OUTPUT_BASENAME (file, name);
29687 }
29688 else
29689 assemble_name (file, name);
29690 }
29691
29692 /* Output a TOC entry. We derive the entry name from what is being
29693 written. */
29694
29695 void
29696 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29697 {
29698 char buf[256];
29699 const char *name = buf;
29700 rtx base = x;
29701 HOST_WIDE_INT offset = 0;
29702
29703 gcc_assert (!TARGET_NO_TOC);
29704
29705 /* When the linker won't eliminate them, don't output duplicate
29706 TOC entries (this happens on AIX if there is any kind of TOC,
29707 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29708 CODE_LABELs. */
29709 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29710 {
29711 struct toc_hash_struct *h;
29712
29713 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29714 time because GGC is not initialized at that point. */
29715 if (toc_hash_table == NULL)
29716 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29717
29718 h = ggc_alloc<toc_hash_struct> ();
29719 h->key = x;
29720 h->key_mode = mode;
29721 h->labelno = labelno;
29722
29723 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29724 if (*found == NULL)
29725 *found = h;
29726 else /* This is indeed a duplicate.
29727 Set this label equal to that label. */
29728 {
29729 fputs ("\t.set ", file);
29730 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29731 fprintf (file, "%d,", labelno);
29732 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29733 fprintf (file, "%d\n", ((*found)->labelno));
29734
29735 #ifdef HAVE_AS_TLS
29736 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29737 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29738 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29739 {
29740 fputs ("\t.set ", file);
29741 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29742 fprintf (file, "%d,", labelno);
29743 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29744 fprintf (file, "%d\n", ((*found)->labelno));
29745 }
29746 #endif
29747 return;
29748 }
29749 }
29750
29751 /* If we're going to put a double constant in the TOC, make sure it's
29752 aligned properly when strict alignment is on. */
29753 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29754 && STRICT_ALIGNMENT
29755 && GET_MODE_BITSIZE (mode) >= 64
29756 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29757 ASM_OUTPUT_ALIGN (file, 3);
29759
29760 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29761
29762 /* Handle FP constants specially. Note that if we have a minimal
29763 TOC, things we put here aren't actually in the TOC, so we can allow
29764 FP constants. */
29765 if (CONST_DOUBLE_P (x)
29766 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29767 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29768 {
29769 long k[4];
29770
29771 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29772 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29773 else
29774 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29775
29776 if (TARGET_64BIT)
29777 {
29778 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29779 fputs (DOUBLE_INT_ASM_OP, file);
29780 else
29781 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29782 k[0] & 0xffffffff, k[1] & 0xffffffff,
29783 k[2] & 0xffffffff, k[3] & 0xffffffff);
29784 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29785 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29786 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29787 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29788 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29789 return;
29790 }
29791 else
29792 {
29793 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29794 fputs ("\t.long ", file);
29795 else
29796 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29797 k[0] & 0xffffffff, k[1] & 0xffffffff,
29798 k[2] & 0xffffffff, k[3] & 0xffffffff);
29799 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29800 k[0] & 0xffffffff, k[1] & 0xffffffff,
29801 k[2] & 0xffffffff, k[3] & 0xffffffff);
29802 return;
29803 }
29804 }
29805 else if (CONST_DOUBLE_P (x)
29806 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29807 {
29808 long k[2];
29809
29810 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29811 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29812 else
29813 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29814
29815 if (TARGET_64BIT)
29816 {
29817 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29818 fputs (DOUBLE_INT_ASM_OP, file);
29819 else
29820 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29821 k[0] & 0xffffffff, k[1] & 0xffffffff);
29822 fprintf (file, "0x%lx%08lx\n",
29823 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29824 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29825 return;
29826 }
29827 else
29828 {
29829 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29830 fputs ("\t.long ", file);
29831 else
29832 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29833 k[0] & 0xffffffff, k[1] & 0xffffffff);
29834 fprintf (file, "0x%lx,0x%lx\n",
29835 k[0] & 0xffffffff, k[1] & 0xffffffff);
29836 return;
29837 }
29838 }
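/* Editor's note: as an illustration, on a hypothetical 64-bit
   big-endian AIX-style target (neither TARGET_ELF nor
   TARGET_MINIMAL_TOC), the DFmode constant 1.0 has
   k[0] == 0x3ff00000 and k[1] == 0, so the branch above emits

     .tc FD_3ff00000_0[TC],0x3ff0000000000000  */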
29839 else if (CONST_DOUBLE_P (x)
29840 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29841 {
29842 long l;
29843
29844 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29845 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29846 else
29847 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29848
29849 if (TARGET_64BIT)
29850 {
29851 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29852 fputs (DOUBLE_INT_ASM_OP, file);
29853 else
29854 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29855 if (WORDS_BIG_ENDIAN)
29856 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29857 else
29858 fprintf (file, "0x%lx\n", l & 0xffffffff);
29859 return;
29860 }
29861 else
29862 {
29863 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29864 fputs ("\t.long ", file);
29865 else
29866 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29867 fprintf (file, "0x%lx\n", l & 0xffffffff);
29868 return;
29869 }
29870 }
29871 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29872 {
29873 unsigned HOST_WIDE_INT low;
29874 HOST_WIDE_INT high;
29875
29876 low = INTVAL (x) & 0xffffffff;
29877 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29878
29879 /* TOC entries are always Pmode-sized, so when big-endian,
29880 smaller integer constants in the TOC need to be padded.
29881 (This is still a win over putting the constants in
29882 a separate constant pool, because then we'd have
29883 to have both a TOC entry _and_ the actual constant.)
29884
29885 For a 32-bit target, CONST_INT values are loaded and shifted
29886 entirely within `low' and can be stored in one TOC entry. */
29887
29888 /* It would be easy to make this work, but it doesn't now. */
29889 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29890
29891 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29892 {
29893 low |= high << 32;
29894 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29895 high = (HOST_WIDE_INT) low >> 32;
29896 low &= 0xffffffff;
29897 }
29898
29899 if (TARGET_64BIT)
29900 {
29901 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29902 fputs (DOUBLE_INT_ASM_OP, file);
29903 else
29904 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29905 (long) high & 0xffffffff, (long) low & 0xffffffff);
29906 fprintf (file, "0x%lx%08lx\n",
29907 (long) high & 0xffffffff, (long) low & 0xffffffff);
29908 return;
29909 }
29910 else
29911 {
29912 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29913 {
29914 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29915 fputs ("\t.long ", file);
29916 else
29917 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29918 (long) high & 0xffffffff, (long) low & 0xffffffff);
29919 fprintf (file, "0x%lx,0x%lx\n",
29920 (long) high & 0xffffffff, (long) low & 0xffffffff);
29921 }
29922 else
29923 {
29924 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29925 fputs ("\t.long ", file);
29926 else
29927 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29928 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29929 }
29930 return;
29931 }
29932 }
29933
29934 if (GET_CODE (x) == CONST)
29935 {
29936 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29937 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29938
29939 base = XEXP (XEXP (x, 0), 0);
29940 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29941 }
29942
29943 switch (GET_CODE (base))
29944 {
29945 case SYMBOL_REF:
29946 name = XSTR (base, 0);
29947 break;
29948
29949 case LABEL_REF:
29950 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29951 CODE_LABEL_NUMBER (XEXP (base, 0)));
29952 break;
29953
29954 case CODE_LABEL:
29955 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29956 break;
29957
29958 default:
29959 gcc_unreachable ();
29960 }
29961
29962 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29963 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29964 else
29965 {
29966 fputs ("\t.tc ", file);
29967 RS6000_OUTPUT_BASENAME (file, name);
29968
29969 if (offset < 0)
29970 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29971 else if (offset)
29972 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29973
29974 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29975 after other TOC symbols, reducing overflow of small TOC access
29976 to [TC] symbols. */
29977 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29978 ? "[TE]," : "[TC],", file);
29979 }
29980
29981 /* Currently C++ toc references to vtables can be emitted before it
29982 is decided whether the vtable is public or private. If this is
29983 the case, then the linker will eventually complain that there is
29984 a TOC reference to an unknown section. Thus, for vtables only,
29985 we emit the TOC reference to reference the symbol and not the
29986 section. */
29987 if (VTABLE_NAME_P (name))
29988 {
29989 RS6000_OUTPUT_BASENAME (file, name);
29990 if (offset < 0)
29991 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29992 else if (offset > 0)
29993 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29994 }
29995 else
29996 output_addr_const (file, x);
29997
29998 #if HAVE_AS_TLS
29999 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
30000 {
30001 switch (SYMBOL_REF_TLS_MODEL (base))
30002 {
30003 case 0:
30004 break;
30005 case TLS_MODEL_LOCAL_EXEC:
30006 fputs ("@le", file);
30007 break;
30008 case TLS_MODEL_INITIAL_EXEC:
30009 fputs ("@ie", file);
30010 break;
30011 /* Use global-dynamic for local-dynamic. */
30012 case TLS_MODEL_GLOBAL_DYNAMIC:
30013 case TLS_MODEL_LOCAL_DYNAMIC:
30014 putc ('\n', file);
30015 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30016 fputs ("\t.tc .", file);
30017 RS6000_OUTPUT_BASENAME (file, name);
30018 fputs ("[TC],", file);
30019 output_addr_const (file, x);
30020 fputs ("@m", file);
30021 break;
30022 default:
30023 gcc_unreachable ();
30024 }
30025 }
30026 #endif
30027
30028 putc ('\n', file);
30029 }
30030 \f
30031 /* Output an assembler pseudo-op to write an ASCII string of N characters
30032 starting at P to FILE.
30033
30034 On the RS/6000, we have to do this using the .byte operation and
30035 write out special characters outside the quoted string.
30036 Also, the assembler is broken; very long strings are truncated,
30037 so we must artificially break them up early. */
30038
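/* Editor's note: as an illustration, output_ascii (file, "hi\n", 3)
   emits

     .byte "hi"
     .byte 10

   printable characters are batched into one quoted .byte string and
   the newline is written as its decimal value on a fresh directive.  */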
30039 void
30040 output_ascii (FILE *file, const char *p, int n)
30041 {
30042 char c;
30043 int i, count_string;
30044 const char *for_string = "\t.byte \"";
30045 const char *for_decimal = "\t.byte ";
30046 const char *to_close = NULL;
30047
30048 count_string = 0;
30049 for (i = 0; i < n; i++)
30050 {
30051 c = *p++;
30052 if (c >= ' ' && c < 0177)
30053 {
30054 if (for_string)
30055 fputs (for_string, file);
30056 putc (c, file);
30057
30058 /* Write two quotes to get one. */
30059 if (c == '"')
30060 {
30061 putc (c, file);
30062 ++count_string;
30063 }
30064
30065 for_string = NULL;
30066 for_decimal = "\"\n\t.byte ";
30067 to_close = "\"\n";
30068 ++count_string;
30069
30070 if (count_string >= 512)
30071 {
30072 fputs (to_close, file);
30073
30074 for_string = "\t.byte \"";
30075 for_decimal = "\t.byte ";
30076 to_close = NULL;
30077 count_string = 0;
30078 }
30079 }
30080 else
30081 {
30082 if (for_decimal)
30083 fputs (for_decimal, file);
30084 fprintf (file, "%d", c);
30085
30086 for_string = "\n\t.byte \"";
30087 for_decimal = ", ";
30088 to_close = "\n";
30089 count_string = 0;
30090 }
30091 }
30092
30093 /* Now close the string if we have written one. Then end the line. */
30094 if (to_close)
30095 fputs (to_close, file);
30096 }
30097 \f
30098 /* Generate a unique section name for FILENAME for a section type
30099 represented by SECTION_DESC. Output goes into BUF.
30100
30101 SECTION_DESC can be any string, as long as it is different for each
30102 possible section type.
30103
30104 We name the section in the same manner as xlc. The name begins with an
30105 underscore followed by the filename (after stripping any leading directory
30106 names) with the last period replaced by the string SECTION_DESC. If
30107 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30108 the name. */
30109
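/* Editor's note: for example, with the hypothetical arguments
   FILENAME == "dir/foo.c" and SECTION_DESC == "data", the generated
   name is "_foodata": the directory part and the '.' are dropped and
   the descriptor splices in at the last period.  */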
30110 void
30111 rs6000_gen_section_name (char **buf, const char *filename,
30112 const char *section_desc)
30113 {
30114 const char *q, *after_last_slash, *last_period = 0;
30115 char *p;
30116 int len;
30117
30118 after_last_slash = filename;
30119 for (q = filename; *q; q++)
30120 {
30121 if (*q == '/')
30122 after_last_slash = q + 1;
30123 else if (*q == '.')
30124 last_period = q;
30125 }
30126
30127 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30128 *buf = (char *) xmalloc (len);
30129
30130 p = *buf;
30131 *p++ = '_';
30132
30133 for (q = after_last_slash; *q; q++)
30134 {
30135 if (q == last_period)
30136 {
30137 strcpy (p, section_desc);
30138 p += strlen (section_desc);
30139 break;
30140 }
30141
30142 else if (ISALNUM (*q))
30143 *p++ = *q;
30144 }
30145
30146 if (last_period == 0)
30147 strcpy (p, section_desc);
30148 else
30149 *p = '\0';
30150 }
30151 \f
30152 /* Emit profile function. */
30153
30154 void
30155 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30156 {
30157 /* Non-standard profiling for kernels, which just saves LR then calls
30158 _mcount without worrying about arg saves. The idea is to change
30159 the function prologue as little as possible as it isn't easy to
30160 account for arg save/restore code added just for _mcount. */
30161 if (TARGET_PROFILE_KERNEL)
30162 return;
30163
30164 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30165 {
30166 #ifndef NO_PROFILE_COUNTERS
30167 # define NO_PROFILE_COUNTERS 0
30168 #endif
30169 if (NO_PROFILE_COUNTERS)
30170 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30171 LCT_NORMAL, VOIDmode);
30172 else
30173 {
30174 char buf[30];
30175 const char *label_name;
30176 rtx fun;
30177
30178 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30179 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30180 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30181
30182 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30183 LCT_NORMAL, VOIDmode, fun, Pmode);
30184 }
30185 }
30186 else if (DEFAULT_ABI == ABI_DARWIN)
30187 {
30188 const char *mcount_name = RS6000_MCOUNT;
30189 int caller_addr_regno = LR_REGNO;
30190
30191 /* Be conservative and always set this, at least for now. */
30192 crtl->uses_pic_offset_table = 1;
30193
30194 #if TARGET_MACHO
30195 /* For PIC code, set up a stub and collect the caller's address
30196 from r0, which is where the prologue puts it. */
30197 if (MACHOPIC_INDIRECT
30198 && crtl->uses_pic_offset_table)
30199 caller_addr_regno = 0;
30200 #endif
30201 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30202 LCT_NORMAL, VOIDmode,
30203 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30204 }
30205 }
30206
30207 /* Write function profiler code. */
30208
30209 void
30210 output_function_profiler (FILE *file, int labelno)
30211 {
30212 char buf[100];
30213
30214 switch (DEFAULT_ABI)
30215 {
30216 default:
30217 gcc_unreachable ();
30218
30219 case ABI_V4:
30220 if (!TARGET_32BIT)
30221 {
30222 warning (0, "no profiling of 64-bit code for this ABI");
30223 return;
30224 }
30225 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30226 fprintf (file, "\tmflr %s\n", reg_names[0]);
30227 if (NO_PROFILE_COUNTERS)
30228 {
30229 asm_fprintf (file, "\tstw %s,4(%s)\n",
30230 reg_names[0], reg_names[1]);
30231 }
30232 else if (TARGET_SECURE_PLT && flag_pic)
30233 {
30234 if (TARGET_LINK_STACK)
30235 {
30236 char name[32];
30237 get_ppc476_thunk_name (name);
30238 asm_fprintf (file, "\tbl %s\n", name);
30239 }
30240 else
30241 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30242 asm_fprintf (file, "\tstw %s,4(%s)\n",
30243 reg_names[0], reg_names[1]);
30244 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30245 asm_fprintf (file, "\taddis %s,%s,",
30246 reg_names[12], reg_names[12]);
30247 assemble_name (file, buf);
30248 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30249 assemble_name (file, buf);
30250 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30251 }
30252 else if (flag_pic == 1)
30253 {
30254 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30255 asm_fprintf (file, "\tstw %s,4(%s)\n",
30256 reg_names[0], reg_names[1]);
30257 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30258 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30259 assemble_name (file, buf);
30260 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30261 }
30262 else if (flag_pic > 1)
30263 {
30264 asm_fprintf (file, "\tstw %s,4(%s)\n",
30265 reg_names[0], reg_names[1]);
30266 /* Now, we need to get the address of the label. */
30267 if (TARGET_LINK_STACK)
30268 {
30269 char name[32];
30270 get_ppc476_thunk_name (name);
30271 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30272 assemble_name (file, buf);
30273 fputs ("-.\n1:", file);
30274 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30275 asm_fprintf (file, "\taddi %s,%s,4\n",
30276 reg_names[11], reg_names[11]);
30277 }
30278 else
30279 {
30280 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30281 assemble_name (file, buf);
30282 fputs ("-.\n1:", file);
30283 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30284 }
30285 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30286 reg_names[0], reg_names[11]);
30287 asm_fprintf (file, "\tadd %s,%s,%s\n",
30288 reg_names[0], reg_names[0], reg_names[11]);
30289 }
30290 else
30291 {
30292 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30293 assemble_name (file, buf);
30294 fputs ("@ha\n", file);
30295 asm_fprintf (file, "\tstw %s,4(%s)\n",
30296 reg_names[0], reg_names[1]);
30297 asm_fprintf (file, "\tla %s,", reg_names[0]);
30298 assemble_name (file, buf);
30299 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30300 }
30301
30302 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30303 fprintf (file, "\tbl %s%s\n",
30304 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30305 break;
30306
30307 case ABI_AIX:
30308 case ABI_ELFv2:
30309 case ABI_DARWIN:
30310 /* Don't do anything, done in output_profile_hook (). */
30311 break;
30312 }
30313 }
30314
30315 \f
30316
30317 /* The following variable holds the last issued insn. */
30318
30319 static rtx_insn *last_scheduled_insn;
30320
30321 /* The following variable helps to balance issuing of load and
30322 store instructions. */
30323
30324 static int load_store_pendulum;
30325
30326 /* The following variable helps pair divide insns during scheduling. */
30327 static int divide_cnt;
30328 /* The following variable helps pair and alternate vector and vector load
30329 insns during scheduling. */
30330 static int vec_pairing;
30331
30332
30333 /* Power4 load update and store update instructions are cracked into a
30334 load or store and an integer insn which are executed in the same cycle.
30335 Branches have their own dispatch slot which does not count against the
30336 GCC issue rate, but it changes the program flow so there are no other
30337 instructions to issue in this cycle. */
30338
30339 static int
30340 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30341 {
30342 last_scheduled_insn = insn;
30343 if (GET_CODE (PATTERN (insn)) == USE
30344 || GET_CODE (PATTERN (insn)) == CLOBBER)
30345 {
30346 cached_can_issue_more = more;
30347 return cached_can_issue_more;
30348 }
30349
30350 if (insn_terminates_group_p (insn, current_group))
30351 {
30352 cached_can_issue_more = 0;
30353 return cached_can_issue_more;
30354 }
30355
30356 /* If the insn has no reservation but we still reach here, return MORE unchanged. */
30357 if (recog_memoized (insn) < 0)
30358 return more;
30359
30360 if (rs6000_sched_groups)
30361 {
30362 if (is_microcoded_insn (insn))
30363 cached_can_issue_more = 0;
30364 else if (is_cracked_insn (insn))
30365 cached_can_issue_more = more > 2 ? more - 2 : 0;
30366 else
30367 cached_can_issue_more = more - 1;
30368
30369 return cached_can_issue_more;
30370 }
30371
30372 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30373 return 0;
30374
30375 cached_can_issue_more = more - 1;
30376 return cached_can_issue_more;
30377 }
30378
30379 static int
30380 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30381 {
30382 int r = rs6000_variable_issue_1 (insn, more);
30383 if (verbose)
30384 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30385 return r;
30386 }
30387
30388 /* Adjust the cost of a scheduling dependency. Return the new cost of
30389 INSN's dependency of kind DEP_TYPE on DEP_INSN. COST is the current cost. */
30390
30391 static int
30392 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30393 unsigned int)
30394 {
30395 enum attr_type attr_type;
30396
30397 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30398 return cost;
30399
30400 switch (dep_type)
30401 {
30402 case REG_DEP_TRUE:
30403 {
30404 /* Data dependency; DEP_INSN writes a register that INSN reads
30405 some cycles later. */
30406
30407 /* Separate a load from a narrower, dependent store. */
30408 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30409 && GET_CODE (PATTERN (insn)) == SET
30410 && GET_CODE (PATTERN (dep_insn)) == SET
30411 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30412 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30413 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30414 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30415 return cost + 14;
30416
30417 attr_type = get_attr_type (insn);
30418
30419 switch (attr_type)
30420 {
30421 case TYPE_JMPREG:
30422 /* Tell the first scheduling pass about the latency between
30423 a mtctr and bctr (and mtlr and br/blr). The first
30424 scheduling pass will not know about this latency since
30425 the mtctr instruction, which has the latency associated
30426 to it, will be generated by reload. */
30427 return 4;
30428 case TYPE_BRANCH:
30429 /* Leave some extra cycles between a compare and its
30430 dependent branch, to inhibit expensive mispredicts. */
30431 if ((rs6000_tune == PROCESSOR_PPC603
30432 || rs6000_tune == PROCESSOR_PPC604
30433 || rs6000_tune == PROCESSOR_PPC604e
30434 || rs6000_tune == PROCESSOR_PPC620
30435 || rs6000_tune == PROCESSOR_PPC630
30436 || rs6000_tune == PROCESSOR_PPC750
30437 || rs6000_tune == PROCESSOR_PPC7400
30438 || rs6000_tune == PROCESSOR_PPC7450
30439 || rs6000_tune == PROCESSOR_PPCE5500
30440 || rs6000_tune == PROCESSOR_PPCE6500
30441 || rs6000_tune == PROCESSOR_POWER4
30442 || rs6000_tune == PROCESSOR_POWER5
30443 || rs6000_tune == PROCESSOR_POWER7
30444 || rs6000_tune == PROCESSOR_POWER8
30445 || rs6000_tune == PROCESSOR_POWER9
30446 || rs6000_tune == PROCESSOR_CELL)
30447 && recog_memoized (dep_insn)
30448 && (INSN_CODE (dep_insn) >= 0))
30449
30450 switch (get_attr_type (dep_insn))
30451 {
30452 case TYPE_CMP:
30453 case TYPE_FPCOMPARE:
30454 case TYPE_CR_LOGICAL:
30455 return cost + 2;
30456 case TYPE_EXTS:
30457 case TYPE_MUL:
30458 if (get_attr_dot (dep_insn) == DOT_YES)
30459 return cost + 2;
30460 else
30461 break;
30462 case TYPE_SHIFT:
30463 if (get_attr_dot (dep_insn) == DOT_YES
30464 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30465 return cost + 2;
30466 else
30467 break;
30468 default:
30469 break;
30470 }
30471 break;
30472
30473 case TYPE_STORE:
30474 case TYPE_FPSTORE:
30475 if ((rs6000_tune == PROCESSOR_POWER6)
30476 && recog_memoized (dep_insn)
30477 && (INSN_CODE (dep_insn) >= 0))
30478 {
30479
30480 if (GET_CODE (PATTERN (insn)) != SET)
30481 /* If this happens, we have to extend this to schedule
30482 optimally. Return default for now. */
30483 return cost;
30484
30485 /* Adjust the cost for the case where the value written
30486 by a fixed point operation is used as the address
30487 gen value on a store. */
30488 switch (get_attr_type (dep_insn))
30489 {
30490 case TYPE_LOAD:
30491 case TYPE_CNTLZ:
30492 {
30493 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30494 return get_attr_sign_extend (dep_insn)
30495 == SIGN_EXTEND_YES ? 6 : 4;
30496 break;
30497 }
30498 case TYPE_SHIFT:
30499 {
30500 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30501 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30502 6 : 3;
30503 break;
30504 }
30505 case TYPE_INTEGER:
30506 case TYPE_ADD:
30507 case TYPE_LOGICAL:
30508 case TYPE_EXTS:
30509 case TYPE_INSERT:
30510 {
30511 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30512 return 3;
30513 break;
30514 }
30515 case TYPE_STORE:
30516 case TYPE_FPLOAD:
30517 case TYPE_FPSTORE:
30518 {
30519 if (get_attr_update (dep_insn) == UPDATE_YES
30520 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30521 return 3;
30522 break;
30523 }
30524 case TYPE_MUL:
30525 {
30526 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30527 return 17;
30528 break;
30529 }
30530 case TYPE_DIV:
30531 {
30532 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30533 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30534 break;
30535 }
30536 default:
30537 break;
30538 }
30539 }
30540 break;
30541
30542 case TYPE_LOAD:
30543 if ((rs6000_tune == PROCESSOR_POWER6)
30544 && recog_memoized (dep_insn)
30545 && (INSN_CODE (dep_insn) >= 0))
30546 {
30547
30548 /* Adjust the cost for the case where the value written
30549 by a fixed point instruction is used within the address
30550 gen portion of a subsequent load(u)(x). */
30551 switch (get_attr_type (dep_insn))
30552 {
30553 case TYPE_LOAD:
30554 case TYPE_CNTLZ:
30555 {
30556 if (set_to_load_agen (dep_insn, insn))
30557 return get_attr_sign_extend (dep_insn)
30558 == SIGN_EXTEND_YES ? 6 : 4;
30559 break;
30560 }
30561 case TYPE_SHIFT:
30562 {
30563 if (set_to_load_agen (dep_insn, insn))
30564 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30565 6 : 3;
30566 break;
30567 }
30568 case TYPE_INTEGER:
30569 case TYPE_ADD:
30570 case TYPE_LOGICAL:
30571 case TYPE_EXTS:
30572 case TYPE_INSERT:
30573 {
30574 if (set_to_load_agen (dep_insn, insn))
30575 return 3;
30576 break;
30577 }
30578 case TYPE_STORE:
30579 case TYPE_FPLOAD:
30580 case TYPE_FPSTORE:
30581 {
30582 if (get_attr_update (dep_insn) == UPDATE_YES
30583 && set_to_load_agen (dep_insn, insn))
30584 return 3;
30585 break;
30586 }
30587 case TYPE_MUL:
30588 {
30589 if (set_to_load_agen (dep_insn, insn))
30590 return 17;
30591 break;
30592 }
30593 case TYPE_DIV:
30594 {
30595 if (set_to_load_agen (dep_insn, insn))
30596 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30597 break;
30598 }
30599 default:
30600 break;
30601 }
30602 }
30603 break;
30604
30605 case TYPE_FPLOAD:
30606 if ((rs6000_tune == PROCESSOR_POWER6)
30607 && get_attr_update (insn) == UPDATE_NO
30608 && recog_memoized (dep_insn)
30609 && (INSN_CODE (dep_insn) >= 0)
30610 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30611 return 2;
30612
30613 default:
30614 break;
30615 }
30616
30617 /* Fall out to return default cost. */
30618 }
30619 break;
30620
30621 case REG_DEP_OUTPUT:
30622 /* Output dependency; DEP_INSN writes a register that INSN writes some
30623 cycles later. */
30624 if ((rs6000_tune == PROCESSOR_POWER6)
30625 && recog_memoized (dep_insn)
30626 && (INSN_CODE (dep_insn) >= 0))
30627 {
30628 attr_type = get_attr_type (insn);
30629
30630 switch (attr_type)
30631 {
30632 case TYPE_FP:
30633 case TYPE_FPSIMPLE:
30634 if (get_attr_type (dep_insn) == TYPE_FP
30635 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30636 return 1;
30637 break;
30638 case TYPE_FPLOAD:
30639 if (get_attr_update (insn) == UPDATE_NO
30640 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30641 return 2;
30642 break;
30643 default:
30644 break;
30645 }
30646 }
30647 /* Fall through, no cost for output dependency. */
30648 /* FALLTHRU */
30649
30650 case REG_DEP_ANTI:
30651 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30652 cycles later. */
30653 return 0;
30654
30655 default:
30656 gcc_unreachable ();
30657 }
30658
30659 return cost;
30660 }
30661
30662 /* Debug version of rs6000_adjust_cost. */
30663
30664 static int
30665 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30666 int cost, unsigned int dw)
30667 {
30668 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30669
30670 if (ret != cost)
30671 {
30672 const char *dep;
30673
30674 switch (dep_type)
30675 {
30676 	default: dep = "unknown dependency"; break;
30677 	case REG_DEP_TRUE: dep = "data dependency"; break;
30678 	case REG_DEP_OUTPUT: dep = "output dependency"; break;
30679 	case REG_DEP_ANTI: dep = "anti dependency"; break;
30680 }
30681
30682 fprintf (stderr,
30683 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30684 "%s, insn:\n", ret, cost, dep);
30685
30686 debug_rtx (insn);
30687 }
30688
30689 return ret;
30690 }
30691
30692 /* Return true if INSN is microcoded on the processor being tuned for;
30693    return false otherwise.  */
30694
30695 static bool
30696 is_microcoded_insn (rtx_insn *insn)
30697 {
30698 if (!insn || !NONDEBUG_INSN_P (insn)
30699 || GET_CODE (PATTERN (insn)) == USE
30700 || GET_CODE (PATTERN (insn)) == CLOBBER)
30701 return false;
30702
30703 if (rs6000_tune == PROCESSOR_CELL)
30704 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30705
30706 if (rs6000_sched_groups
30707 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30708 {
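      /* Load with update and sign-extension, indexed load/store with
	 update, and mfcr are expanded via microcode on these processors,
	 so insn_must_be_first_in_group and insn_must_be_last_in_group
	 give each of them a dispatch group of its own.  */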
30709 enum attr_type type = get_attr_type (insn);
30710 if ((type == TYPE_LOAD
30711 && get_attr_update (insn) == UPDATE_YES
30712 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30713 || ((type == TYPE_LOAD || type == TYPE_STORE)
30714 && get_attr_update (insn) == UPDATE_YES
30715 && get_attr_indexed (insn) == INDEXED_YES)
30716 || type == TYPE_MFCR)
30717 return true;
30718 }
30719
30720 return false;
30721 }
30722
30723 /* Return true if INSN is cracked into two internal operations by the
30724    processor (and therefore occupies two issue slots).  */
30725
30726 static bool
30727 is_cracked_insn (rtx_insn *insn)
30728 {
30729 if (!insn || !NONDEBUG_INSN_P (insn)
30730 || GET_CODE (PATTERN (insn)) == USE
30731 || GET_CODE (PATTERN (insn)) == CLOBBER)
30732 return false;
30733
30734 if (rs6000_sched_groups
30735 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30736 {
30737 enum attr_type type = get_attr_type (insn);
30738 if ((type == TYPE_LOAD
30739 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30740 && get_attr_update (insn) == UPDATE_NO)
30741 || (type == TYPE_LOAD
30742 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30743 && get_attr_update (insn) == UPDATE_YES
30744 && get_attr_indexed (insn) == INDEXED_NO)
30745 || (type == TYPE_STORE
30746 && get_attr_update (insn) == UPDATE_YES
30747 && get_attr_indexed (insn) == INDEXED_NO)
30748 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30749 && get_attr_update (insn) == UPDATE_YES)
30750 || (type == TYPE_CR_LOGICAL
30751 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30752 || (type == TYPE_EXTS
30753 && get_attr_dot (insn) == DOT_YES)
30754 || (type == TYPE_SHIFT
30755 && get_attr_dot (insn) == DOT_YES
30756 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30757 || (type == TYPE_MUL
30758 && get_attr_dot (insn) == DOT_YES)
30759 || type == TYPE_DIV
30760 || (type == TYPE_INSERT
30761 && get_attr_size (insn) == SIZE_32))
30762 return true;
30763 }
30764
30765 return false;
30766 }
30767
30768 /* Return true if INSN can be issued only from the branch
30769    slot.  */
30770
30771 static bool
30772 is_branch_slot_insn (rtx_insn *insn)
30773 {
30774 if (!insn || !NONDEBUG_INSN_P (insn)
30775 || GET_CODE (PATTERN (insn)) == USE
30776 || GET_CODE (PATTERN (insn)) == CLOBBER)
30777 return false;
30778
30779 if (rs6000_sched_groups)
30780 {
30781 enum attr_type type = get_attr_type (insn);
30782 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30783 return true;
30784 return false;
30785 }
30786
30787 return false;
30788 }
30789
30790 /* Return true if OUT_INSN sets a value that is used in the address
30791    generation computation of IN_INSN.  */
30792 static bool
30793 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30794 {
30795 rtx out_set, in_set;
30796
30797   /* For performance reasons, only handle the simple case where
30798      both insns are a single_set.  */
30799 out_set = single_set (out_insn);
30800 if (out_set)
30801 {
30802 in_set = single_set (in_insn);
30803 if (in_set)
30804 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30805 }
30806
30807 return false;
30808 }
30809
30810 /* Try to determine base/offset/size parts of the given MEM.
30811    Return true if successful, false if any of the values couldn't
30812    be determined.
30813
30814 This function only looks for REG or REG+CONST address forms.
30815 REG+REG address form will return false. */
30816
30817 static bool
30818 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30819 HOST_WIDE_INT *size)
30820 {
30821 rtx addr_rtx;
30822   if (MEM_SIZE_KNOWN_P (mem))
30823 *size = MEM_SIZE (mem);
30824 else
30825 return false;
30826
30827   addr_rtx = XEXP (mem, 0);
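  /* For an update-form (PRE_MODIFY) address, the second operand holds
     the computed address expression; analyze that rather than the bare
     base register.  */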
30828 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30829 addr_rtx = XEXP (addr_rtx, 1);
30830
30831 *offset = 0;
30832 while (GET_CODE (addr_rtx) == PLUS
30833 && CONST_INT_P (XEXP (addr_rtx, 1)))
30834 {
30835 *offset += INTVAL (XEXP (addr_rtx, 1));
30836 addr_rtx = XEXP (addr_rtx, 0);
30837 }
30838 if (!REG_P (addr_rtx))
30839 return false;
30840
30841 *base = addr_rtx;
30842 return true;
30843 }
30844
30845 /* Return true if the target storage location of MEM1 is adjacent
30846    to (immediately before or immediately after) the target storage
30847    location of MEM2.  */
30848
30849 static bool
30850 adjacent_mem_locations (rtx mem1, rtx mem2)
30851 {
30852 rtx reg1, reg2;
30853 HOST_WIDE_INT off1, size1, off2, size2;
30854
30855 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30856 && get_memref_parts (mem2, &reg2, &off2, &size2))
30857 return ((REGNO (reg1) == REGNO (reg2))
30858 && ((off1 + size1 == off2)
30859 || (off2 + size2 == off1)));
30860
30861 return false;
30862 }
30863
30864 /* This function returns true if it can be determined that the two MEM
30865 locations overlap by at least 1 byte based on base reg/offset/size. */
30866
30867 static bool
30868 mem_locations_overlap (rtx mem1, rtx mem2)
30869 {
30870 rtx reg1, reg2;
30871 HOST_WIDE_INT off1, size1, off2, size2;
30872
30873 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30874 && get_memref_parts (mem2, &reg2, &off2, &size2))
30875 return ((REGNO (reg1) == REGNO (reg2))
30876 && (((off1 <= off2) && (off1 + size1 > off2))
30877 || ((off2 <= off1) && (off2 + size2 > off1))));
30878
30879 return false;
30880 }
30881
30882 /* Implement TARGET_SCHED_ADJUST_PRIORITY.  Update the integer
30883    scheduling priority INSN_PRIORITY (INSN): increase the priority
30884    to execute INSN earlier, reduce the priority to execute INSN
30885    later.  */
30887
30888 static int
30889 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30890 {
30891 rtx load_mem, str_mem;
30892 /* On machines (like the 750) which have asymmetric integer units,
30893 where one integer unit can do multiply and divides and the other
30894 can't, reduce the priority of multiply/divide so it is scheduled
30895 before other integer operations. */
30896
30897 #if 0
30898 if (! INSN_P (insn))
30899 return priority;
30900
30901 if (GET_CODE (PATTERN (insn)) == USE)
30902 return priority;
30903
30904 switch (rs6000_tune) {
30905 case PROCESSOR_PPC750:
30906 switch (get_attr_type (insn))
30907 {
30908 default:
30909 break;
30910
30911 case TYPE_MUL:
30912 case TYPE_DIV:
30913 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30914 priority, priority);
30915 if (priority >= 0 && priority < 0x01000000)
30916 priority >>= 3;
30917 break;
30918 }
30919 }
30920 #endif
30921
30922 if (insn_must_be_first_in_group (insn)
30923 && reload_completed
30924 && current_sched_info->sched_max_insns_priority
30925 && rs6000_sched_restricted_insns_priority)
30926 {
30927
30928 /* Prioritize insns that can be dispatched only in the first
30929 dispatch slot. */
30930 if (rs6000_sched_restricted_insns_priority == 1)
30931 /* Attach highest priority to insn. This means that in
30932 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30933 precede 'priority' (critical path) considerations. */
30934 return current_sched_info->sched_max_insns_priority;
30935 else if (rs6000_sched_restricted_insns_priority == 2)
30936 /* Increase priority of insn by a minimal amount. This means that in
30937 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30938 considerations precede dispatch-slot restriction considerations. */
30939 return (priority + 1);
30940 }
30941
30942 if (rs6000_tune == PROCESSOR_POWER6
30943 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30944 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30945 /* Attach highest priority to insn if the scheduler has just issued two
30946 stores and this instruction is a load, or two loads and this instruction
30947 is a store. Power6 wants loads and stores scheduled alternately
30948 when possible */
30949 return current_sched_info->sched_max_insns_priority;
30950
30951 return priority;
30952 }
30953
30954 /* Return true if the instruction is nonpipelined on the Cell. */
30955 static bool
30956 is_nonpipeline_insn (rtx_insn *insn)
30957 {
30958 enum attr_type type;
30959 if (!insn || !NONDEBUG_INSN_P (insn)
30960 || GET_CODE (PATTERN (insn)) == USE
30961 || GET_CODE (PATTERN (insn)) == CLOBBER)
30962 return false;
30963
30964 type = get_attr_type (insn);
30965 if (type == TYPE_MUL
30966 || type == TYPE_DIV
30967 || type == TYPE_SDIV
30968 || type == TYPE_DDIV
30969 || type == TYPE_SSQRT
30970 || type == TYPE_DSQRT
30971 || type == TYPE_MFCR
30972 || type == TYPE_MFCRF
30973 || type == TYPE_MFJMPR)
30974 {
30975 return true;
30976 }
30977 return false;
30978 }
30979
30980
30981 /* Return how many instructions the machine can issue per cycle. */
30982
30983 static int
30984 rs6000_issue_rate (void)
30985 {
30986 /* Unless scheduling for register pressure, use issue rate of 1 for
30987 first scheduling pass to decrease degradation. */
30988 if (!reload_completed && !flag_sched_pressure)
30989 return 1;
30990
30991 switch (rs6000_tune) {
30992 case PROCESSOR_RS64A:
30993 case PROCESSOR_PPC601: /* ? */
30994 case PROCESSOR_PPC7450:
30995 return 3;
30996 case PROCESSOR_PPC440:
30997 case PROCESSOR_PPC603:
30998 case PROCESSOR_PPC750:
30999 case PROCESSOR_PPC7400:
31000 case PROCESSOR_PPC8540:
31001 case PROCESSOR_PPC8548:
31002 case PROCESSOR_CELL:
31003 case PROCESSOR_PPCE300C2:
31004 case PROCESSOR_PPCE300C3:
31005 case PROCESSOR_PPCE500MC:
31006 case PROCESSOR_PPCE500MC64:
31007 case PROCESSOR_PPCE5500:
31008 case PROCESSOR_PPCE6500:
31009 case PROCESSOR_TITAN:
31010 return 2;
31011 case PROCESSOR_PPC476:
31012 case PROCESSOR_PPC604:
31013 case PROCESSOR_PPC604e:
31014 case PROCESSOR_PPC620:
31015 case PROCESSOR_PPC630:
31016 return 4;
31017 case PROCESSOR_POWER4:
31018 case PROCESSOR_POWER5:
31019 case PROCESSOR_POWER6:
31020 case PROCESSOR_POWER7:
31021 return 5;
31022 case PROCESSOR_POWER8:
31023 return 7;
31024 case PROCESSOR_POWER9:
31025 return 6;
31026 default:
31027 return 1;
31028 }
31029 }
31030
31031 /* Return how many instructions to look ahead for better insn
31032 scheduling. */
31033
31034 static int
31035 rs6000_use_sched_lookahead (void)
31036 {
31037 switch (rs6000_tune)
31038 {
31039 case PROCESSOR_PPC8540:
31040 case PROCESSOR_PPC8548:
31041 return 4;
31042
31043 case PROCESSOR_CELL:
31044 return (reload_completed ? 8 : 0);
31045
31046 default:
31047 return 0;
31048 }
31049 }
31050
31051 /* We are choosing an insn from the ready queue.  Return zero if INSN can be
31052 chosen. */
31053 static int
31054 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31055 {
31056 if (ready_index == 0)
31057 return 0;
31058
31059 if (rs6000_tune != PROCESSOR_CELL)
31060 return 0;
31061
31062 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31063
31064 if (!reload_completed
31065 || is_nonpipeline_insn (insn)
31066 || is_microcoded_insn (insn))
31067 return 1;
31068
31069 return 0;
31070 }
31071
31072 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31073 and return true. */
31074
31075 static bool
31076 find_mem_ref (rtx pat, rtx *mem_ref)
31077 {
31078 const char * fmt;
31079 int i, j;
31080
31081 /* stack_tie does not produce any real memory traffic. */
31082 if (tie_operand (pat, VOIDmode))
31083 return false;
31084
31085 if (GET_CODE (pat) == MEM)
31086 {
31087 *mem_ref = pat;
31088 return true;
31089 }
31090
31091 /* Recursively process the pattern. */
31092 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31093
31094 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31095 {
31096 if (fmt[i] == 'e')
31097 {
31098 if (find_mem_ref (XEXP (pat, i), mem_ref))
31099 return true;
31100 }
31101 else if (fmt[i] == 'E')
31102 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31103 {
31104 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31105 return true;
31106 }
31107 }
31108
31109 return false;
31110 }
31111
31112 /* Determine if PAT is a PATTERN of a load insn. */
31113
31114 static bool
31115 is_load_insn1 (rtx pat, rtx *load_mem)
31116 {
31117   if (!pat)
31118 return false;
31119
31120 if (GET_CODE (pat) == SET)
31121 return find_mem_ref (SET_SRC (pat), load_mem);
31122
31123 if (GET_CODE (pat) == PARALLEL)
31124 {
31125 int i;
31126
31127 for (i = 0; i < XVECLEN (pat, 0); i++)
31128 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31129 return true;
31130 }
31131
31132 return false;
31133 }
31134
31135 /* Determine if INSN loads from memory. */
31136
31137 static bool
31138 is_load_insn (rtx insn, rtx *load_mem)
31139 {
31140 if (!insn || !INSN_P (insn))
31141 return false;
31142
31143 if (CALL_P (insn))
31144 return false;
31145
31146 return is_load_insn1 (PATTERN (insn), load_mem);
31147 }
31148
31149 /* Determine if PAT is a PATTERN of a store insn. */
31150
31151 static bool
31152 is_store_insn1 (rtx pat, rtx *str_mem)
31153 {
31154   if (!pat)
31155 return false;
31156
31157 if (GET_CODE (pat) == SET)
31158 return find_mem_ref (SET_DEST (pat), str_mem);
31159
31160 if (GET_CODE (pat) == PARALLEL)
31161 {
31162 int i;
31163
31164 for (i = 0; i < XVECLEN (pat, 0); i++)
31165 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31166 return true;
31167 }
31168
31169 return false;
31170 }
31171
31172 /* Determine if INSN stores to memory. */
31173
31174 static bool
31175 is_store_insn (rtx insn, rtx *str_mem)
31176 {
31177 if (!insn || !INSN_P (insn))
31178 return false;
31179
31180 return is_store_insn1 (PATTERN (insn), str_mem);
31181 }
31182
31183 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31184
31185 static bool
31186 is_power9_pairable_vec_type (enum attr_type type)
31187 {
31188 switch (type)
31189 {
31190 case TYPE_VECSIMPLE:
31191 case TYPE_VECCOMPLEX:
31192 case TYPE_VECDIV:
31193 case TYPE_VECCMP:
31194 case TYPE_VECPERM:
31195 case TYPE_VECFLOAT:
31196 case TYPE_VECFDIV:
31197 case TYPE_VECDOUBLE:
31198 return true;
31199 default:
31200 break;
31201 }
31202 return false;
31203 }
31204
31205 /* Returns whether the dependence between INSN and NEXT is considered
31206 costly by the given target. */
31207
31208 static bool
31209 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31210 {
31211 rtx insn;
31212 rtx next;
31213 rtx load_mem, str_mem;
31214
31215   /* If the flag is not enabled, no dependence is considered costly;
31216 allow all dependent insns in the same group.
31217 This is the most aggressive option. */
31218 if (rs6000_sched_costly_dep == no_dep_costly)
31219 return false;
31220
31221   /* If the flag is set to 1, a dependence is always considered costly;
31222 do not allow dependent instructions in the same group.
31223 This is the most conservative option. */
31224 if (rs6000_sched_costly_dep == all_deps_costly)
31225 return true;
31226
31227 insn = DEP_PRO (dep);
31228 next = DEP_CON (dep);
31229
31230 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31231 && is_load_insn (next, &load_mem)
31232 && is_store_insn (insn, &str_mem))
31233 /* Prevent load after store in the same group. */
31234 return true;
31235
31236 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31237 && is_load_insn (next, &load_mem)
31238 && is_store_insn (insn, &str_mem)
31239 && DEP_TYPE (dep) == REG_DEP_TRUE
31240 && mem_locations_overlap(str_mem, load_mem))
31241 /* Prevent load after store in the same group if it is a true
31242 dependence. */
31243 return true;
31244
31245 /* The flag is set to X; dependences with latency >= X are considered costly,
31246 and will not be scheduled in the same group. */
31247 if (rs6000_sched_costly_dep <= max_dep_latency
31248 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31249 return true;
31250
31251 return false;
31252 }
31253
31254 /* Return the next insn after INSN that is found before TAIL is reached,
31255    skipping any "non-active" insns, i.e. insns that will not actually occupy
31256 an issue slot. Return NULL_RTX if such an insn is not found. */
31257
31258 static rtx_insn *
31259 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31260 {
31261 if (insn == NULL_RTX || insn == tail)
31262 return NULL;
31263
31264 while (1)
31265 {
31266 insn = NEXT_INSN (insn);
31267 if (insn == NULL_RTX || insn == tail)
31268 return NULL;
31269
31270 if (CALL_P (insn)
31271 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31272 || (NONJUMP_INSN_P (insn)
31273 && GET_CODE (PATTERN (insn)) != USE
31274 && GET_CODE (PATTERN (insn)) != CLOBBER
31275 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31276 break;
31277 }
31278 return insn;
31279 }
31280
31281 /* Do Power9 specific sched_reorder2 reordering of ready list. */
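/* Note that the ready list is ordered with the next insn to issue at
   the highest index, so moving an insn to position LASTPOS below makes
   it the scheduler's next choice.  */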
31282
31283 static int
31284 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31285 {
31286 int pos;
31287 int i;
31288 rtx_insn *tmp;
31289 enum attr_type type, type2;
31290
31291 type = get_attr_type (last_scheduled_insn);
31292
31293 /* Try to issue fixed point divides back-to-back in pairs so they will be
31294 routed to separate execution units and execute in parallel. */
31295 if (type == TYPE_DIV && divide_cnt == 0)
31296 {
31297 /* First divide has been scheduled. */
31298 divide_cnt = 1;
31299
31300 /* Scan the ready list looking for another divide, if found move it
31301 to the end of the list so it is chosen next. */
31302 pos = lastpos;
31303 while (pos >= 0)
31304 {
31305 if (recog_memoized (ready[pos]) >= 0
31306 && get_attr_type (ready[pos]) == TYPE_DIV)
31307 {
31308 tmp = ready[pos];
31309 for (i = pos; i < lastpos; i++)
31310 ready[i] = ready[i + 1];
31311 ready[lastpos] = tmp;
31312 break;
31313 }
31314 pos--;
31315 }
31316 }
31317 else
31318 {
31319 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31320 divide_cnt = 0;
31321
31322 /* The best dispatch throughput for vector and vector load insns can be
31323 achieved by interleaving a vector and vector load such that they'll
31324 dispatch to the same superslice. If this pairing cannot be achieved
31325 then it is best to pair vector insns together and vector load insns
31326 together.
31327
31328 To aid in this pairing, vec_pairing maintains the current state with
31329 the following values:
31330
31331 0 : Initial state, no vecload/vector pairing has been started.
31332
31333 1 : A vecload or vector insn has been issued and a candidate for
31334 pairing has been found and moved to the end of the ready
31335 list. */
31336 if (type == TYPE_VECLOAD)
31337 {
31338 /* Issued a vecload. */
31339 if (vec_pairing == 0)
31340 {
31341 int vecload_pos = -1;
31342 /* We issued a single vecload, look for a vector insn to pair it
31343 with. If one isn't found, try to pair another vecload. */
31344 pos = lastpos;
31345 while (pos >= 0)
31346 {
31347 if (recog_memoized (ready[pos]) >= 0)
31348 {
31349 type2 = get_attr_type (ready[pos]);
31350 if (is_power9_pairable_vec_type (type2))
31351 {
31352 /* Found a vector insn to pair with, move it to the
31353 end of the ready list so it is scheduled next. */
31354 tmp = ready[pos];
31355 for (i = pos; i < lastpos; i++)
31356 ready[i] = ready[i + 1];
31357 ready[lastpos] = tmp;
31358 vec_pairing = 1;
31359 return cached_can_issue_more;
31360 }
31361 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31362 /* Remember position of first vecload seen. */
31363 vecload_pos = pos;
31364 }
31365 pos--;
31366 }
31367 if (vecload_pos >= 0)
31368 {
31369 /* Didn't find a vector to pair with but did find a vecload,
31370 move it to the end of the ready list. */
31371 tmp = ready[vecload_pos];
31372 for (i = vecload_pos; i < lastpos; i++)
31373 ready[i] = ready[i + 1];
31374 ready[lastpos] = tmp;
31375 vec_pairing = 1;
31376 return cached_can_issue_more;
31377 }
31378 }
31379 }
31380 else if (is_power9_pairable_vec_type (type))
31381 {
31382 /* Issued a vector operation. */
31383 if (vec_pairing == 0)
31384 {
31385 int vec_pos = -1;
31386 /* We issued a single vector insn, look for a vecload to pair it
31387 with. If one isn't found, try to pair another vector. */
31388 pos = lastpos;
31389 while (pos >= 0)
31390 {
31391 if (recog_memoized (ready[pos]) >= 0)
31392 {
31393 type2 = get_attr_type (ready[pos]);
31394 if (type2 == TYPE_VECLOAD)
31395 {
31396 /* Found a vecload insn to pair with, move it to the
31397 end of the ready list so it is scheduled next. */
31398 tmp = ready[pos];
31399 for (i = pos; i < lastpos; i++)
31400 ready[i] = ready[i + 1];
31401 ready[lastpos] = tmp;
31402 vec_pairing = 1;
31403 return cached_can_issue_more;
31404 }
31405 else if (is_power9_pairable_vec_type (type2)
31406 && vec_pos == -1)
31407 /* Remember position of first vector insn seen. */
31408 vec_pos = pos;
31409 }
31410 pos--;
31411 }
31412 if (vec_pos >= 0)
31413 {
31414 /* Didn't find a vecload to pair with but did find a vector
31415 insn, move it to the end of the ready list. */
31416 tmp = ready[vec_pos];
31417 for (i = vec_pos; i < lastpos; i++)
31418 ready[i] = ready[i + 1];
31419 ready[lastpos] = tmp;
31420 vec_pairing = 1;
31421 return cached_can_issue_more;
31422 }
31423 }
31424 }
31425
31426 /* We've either finished a vec/vecload pair, couldn't find an insn to
31427 	 continue the current pair, or the last insn had nothing to do
31428 	 with pairing.  In any case, reset the state.  */
31429 vec_pairing = 0;
31430 }
31431
31432 return cached_can_issue_more;
31433 }
31434
31435 /* We are about to begin issuing insns for this clock cycle. */
31436
31437 static int
31438 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31439 rtx_insn **ready ATTRIBUTE_UNUSED,
31440 int *pn_ready ATTRIBUTE_UNUSED,
31441 int clock_var ATTRIBUTE_UNUSED)
31442 {
31443 int n_ready = *pn_ready;
31444
31445 if (sched_verbose)
31446 fprintf (dump, "// rs6000_sched_reorder :\n");
31447
31448   /* Reorder the ready list if the next insn to issue (the last ready
31449      insn) is a nonpipelined insn.  */
31450 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31451 {
31452 if (is_nonpipeline_insn (ready[n_ready - 1])
31453 && (recog_memoized (ready[n_ready - 2]) > 0))
31454 /* Simply swap first two insns. */
31455 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31456 }
31457
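  /* A new cycle is starting, so reset the Power6 load/store pendulum
     maintained by rs6000_sched_reorder2.  */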
31458 if (rs6000_tune == PROCESSOR_POWER6)
31459 load_store_pendulum = 0;
31460
31461 return rs6000_issue_rate ();
31462 }
31463
31464 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31465
31466 static int
31467 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31468 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31469 {
31470 if (sched_verbose)
31471 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31472
31473 /* For Power6, we need to handle some special cases to try and keep the
31474 store queue from overflowing and triggering expensive flushes.
31475
31476 This code monitors how load and store instructions are being issued
31477 and skews the ready list one way or the other to increase the likelihood
31478 that a desired instruction is issued at the proper time.
31479
31480 A couple of things are done. First, we maintain a "load_store_pendulum"
31481 to track the current state of load/store issue.
31482
31483 - If the pendulum is at zero, then no loads or stores have been
31484 issued in the current cycle so we do nothing.
31485
31486 - If the pendulum is 1, then a single load has been issued in this
31487 cycle and we attempt to locate another load in the ready list to
31488 issue with it.
31489
31490 - If the pendulum is -2, then two stores have already been
31491 issued in this cycle, so we increase the priority of the first load
31492      in the ready list to increase its likelihood of being chosen first
31493 in the next cycle.
31494
31495 - If the pendulum is -1, then a single store has been issued in this
31496 cycle and we attempt to locate another store in the ready list to
31497 issue with it, preferring a store to an adjacent memory location to
31498 facilitate store pairing in the store queue.
31499
31500 - If the pendulum is 2, then two loads have already been
31501 issued in this cycle, so we increase the priority of the first store
31502      in the ready list to increase its likelihood of being chosen first
31503 in the next cycle.
31504
31505 - If the pendulum < -2 or > 2, then do nothing.
31506
31507      Note: This code covers the most common scenarios.  There exist
31508      non-load/store instructions which make use of the LSU and which
31509 would need to be accounted for to strictly model the behavior
31510 of the machine. Those instructions are currently unaccounted
31511 for to help minimize compile time overhead of this code.
31512 */
31513 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31514 {
31515 int pos;
31516 int i;
31517 rtx_insn *tmp;
31518 rtx load_mem, str_mem;
31519
31520 if (is_store_insn (last_scheduled_insn, &str_mem))
31521 /* Issuing a store, swing the load_store_pendulum to the left */
31522 load_store_pendulum--;
31523 else if (is_load_insn (last_scheduled_insn, &load_mem))
31524 /* Issuing a load, swing the load_store_pendulum to the right */
31525 load_store_pendulum++;
31526 else
31527 return cached_can_issue_more;
31528
31529 /* If the pendulum is balanced, or there is only one instruction on
31530 the ready list, then all is well, so return. */
31531 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31532 return cached_can_issue_more;
31533
31534 if (load_store_pendulum == 1)
31535 {
31536 /* A load has been issued in this cycle. Scan the ready list
31537 for another load to issue with it */
31538 pos = *pn_ready-1;
31539
31540 while (pos >= 0)
31541 {
31542 if (is_load_insn (ready[pos], &load_mem))
31543 {
31544 /* Found a load. Move it to the head of the ready list,
31545 		 and adjust its priority so that it is more likely to
31546 		 stay there.  */
31547 tmp = ready[pos];
31548 for (i=pos; i<*pn_ready-1; i++)
31549 ready[i] = ready[i + 1];
31550 ready[*pn_ready-1] = tmp;
31551
31552 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31553 INSN_PRIORITY (tmp)++;
31554 break;
31555 }
31556 pos--;
31557 }
31558 }
31559 else if (load_store_pendulum == -2)
31560 {
31561 /* Two stores have been issued in this cycle. Increase the
31562 priority of the first load in the ready list to favor it for
31563 issuing in the next cycle. */
31564 pos = *pn_ready-1;
31565
31566 while (pos >= 0)
31567 {
31568 if (is_load_insn (ready[pos], &load_mem)
31569 && !sel_sched_p ()
31570 && INSN_PRIORITY_KNOWN (ready[pos]))
31571 {
31572 INSN_PRIORITY (ready[pos])++;
31573
31574 /* Adjust the pendulum to account for the fact that a load
31575 was found and increased in priority. This is to prevent
31576 increasing the priority of multiple loads */
31577 load_store_pendulum--;
31578
31579 break;
31580 }
31581 pos--;
31582 }
31583 }
31584 else if (load_store_pendulum == -1)
31585 {
31586 /* A store has been issued in this cycle. Scan the ready list for
31587 another store to issue with it, preferring a store to an adjacent
31588 memory location */
31589 int first_store_pos = -1;
31590
31591 pos = *pn_ready-1;
31592
31593 while (pos >= 0)
31594 {
31595 if (is_store_insn (ready[pos], &str_mem))
31596 {
31597 rtx str_mem2;
31598 /* Maintain the index of the first store found on the
31599 list */
31600 if (first_store_pos == -1)
31601 first_store_pos = pos;
31602
31603 if (is_store_insn (last_scheduled_insn, &str_mem2)
31604 && adjacent_mem_locations (str_mem, str_mem2))
31605 {
31606 /* Found an adjacent store. Move it to the head of the
31607 		     ready list, and adjust its priority so that it is
31608 		     more likely to stay there.  */
31609 tmp = ready[pos];
31610 for (i=pos; i<*pn_ready-1; i++)
31611 ready[i] = ready[i + 1];
31612 ready[*pn_ready-1] = tmp;
31613
31614 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31615 INSN_PRIORITY (tmp)++;
31616
31617 first_store_pos = -1;
31618
31619 break;
31620 		}
31621 }
31622 pos--;
31623 }
31624
31625 if (first_store_pos >= 0)
31626 {
31627 /* An adjacent store wasn't found, but a non-adjacent store was,
31628 so move the non-adjacent store to the front of the ready
31629 list, and adjust its priority so that it is more likely to
31630 stay there. */
31631 tmp = ready[first_store_pos];
31632 for (i=first_store_pos; i<*pn_ready-1; i++)
31633 ready[i] = ready[i + 1];
31634 ready[*pn_ready-1] = tmp;
31635 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31636 INSN_PRIORITY (tmp)++;
31637 }
31638 }
31639 else if (load_store_pendulum == 2)
31640 {
31641 /* Two loads have been issued in this cycle. Increase the priority
31642 of the first store in the ready list to favor it for issuing in
31643 the next cycle. */
31644 pos = *pn_ready-1;
31645
31646 while (pos >= 0)
31647 {
31648 if (is_store_insn (ready[pos], &str_mem)
31649 && !sel_sched_p ()
31650 && INSN_PRIORITY_KNOWN (ready[pos]))
31651 {
31652 INSN_PRIORITY (ready[pos])++;
31653
31654 /* Adjust the pendulum to account for the fact that a store
31655 was found and increased in priority. This is to prevent
31656 increasing the priority of multiple stores */
31657 load_store_pendulum++;
31658
31659 break;
31660 }
31661 pos--;
31662 }
31663 }
31664 }
31665
31666 /* Do Power9 dependent reordering if necessary. */
31667 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31668 && recog_memoized (last_scheduled_insn) >= 0)
31669 return power9_sched_reorder2 (ready, *pn_ready - 1);
31670
31671 return cached_can_issue_more;
31672 }
31673
31674 /* Return whether the presence of INSN causes a dispatch group termination
31675 of group WHICH_GROUP.
31676
31677 If WHICH_GROUP == current_group, this function will return true if INSN
31678    causes the termination of the current group (i.e., the dispatch group to
31679 which INSN belongs). This means that INSN will be the last insn in the
31680 group it belongs to.
31681
31682 If WHICH_GROUP == previous_group, this function will return true if INSN
31683    causes the termination of the previous group (i.e., the dispatch group that
31684    precedes the group to which INSN belongs).  This means that INSN will be
31685    the first insn in the group it belongs to.  */
31686
31687 static bool
31688 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31689 {
31690 bool first, last;
31691
31692 if (! insn)
31693 return false;
31694
31695 first = insn_must_be_first_in_group (insn);
31696 last = insn_must_be_last_in_group (insn);
31697
31698 if (first && last)
31699 return true;
31700
31701 if (which_group == current_group)
31702 return last;
31703 else if (which_group == previous_group)
31704 return first;
31705
31706 return false;
31707 }
31708
31709
31710 static bool
31711 insn_must_be_first_in_group (rtx_insn *insn)
31712 {
31713 enum attr_type type;
31714
31715 if (!insn
31716 || NOTE_P (insn)
31717 || DEBUG_INSN_P (insn)
31718 || GET_CODE (PATTERN (insn)) == USE
31719 || GET_CODE (PATTERN (insn)) == CLOBBER)
31720 return false;
31721
31722 switch (rs6000_tune)
31723 {
31724 case PROCESSOR_POWER5:
31725 if (is_cracked_insn (insn))
31726 return true;
31727 /* FALLTHRU */
31728 case PROCESSOR_POWER4:
31729 if (is_microcoded_insn (insn))
31730 return true;
31731
31732 if (!rs6000_sched_groups)
31733 return false;
31734
31735 type = get_attr_type (insn);
31736
31737 switch (type)
31738 {
31739 case TYPE_MFCR:
31740 case TYPE_MFCRF:
31741 case TYPE_MTCR:
31742 case TYPE_CR_LOGICAL:
31743 case TYPE_MTJMPR:
31744 case TYPE_MFJMPR:
31745 case TYPE_DIV:
31746 case TYPE_LOAD_L:
31747 case TYPE_STORE_C:
31748 case TYPE_ISYNC:
31749 case TYPE_SYNC:
31750 return true;
31751 default:
31752 break;
31753 }
31754 break;
31755 case PROCESSOR_POWER6:
31756 type = get_attr_type (insn);
31757
31758 switch (type)
31759 {
31760 case TYPE_EXTS:
31761 case TYPE_CNTLZ:
31762 case TYPE_TRAP:
31763 case TYPE_MUL:
31764 case TYPE_INSERT:
31765 case TYPE_FPCOMPARE:
31766 case TYPE_MFCR:
31767 case TYPE_MTCR:
31768 case TYPE_MFJMPR:
31769 case TYPE_MTJMPR:
31770 case TYPE_ISYNC:
31771 case TYPE_SYNC:
31772 case TYPE_LOAD_L:
31773 case TYPE_STORE_C:
31774 return true;
31775 case TYPE_SHIFT:
31776 if (get_attr_dot (insn) == DOT_NO
31777 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31778 return true;
31779 else
31780 break;
31781 case TYPE_DIV:
31782 if (get_attr_size (insn) == SIZE_32)
31783 return true;
31784 else
31785 break;
31786 case TYPE_LOAD:
31787 case TYPE_STORE:
31788 case TYPE_FPLOAD:
31789 case TYPE_FPSTORE:
31790 if (get_attr_update (insn) == UPDATE_YES)
31791 return true;
31792 else
31793 break;
31794 default:
31795 break;
31796 }
31797 break;
31798 case PROCESSOR_POWER7:
31799 type = get_attr_type (insn);
31800
31801 switch (type)
31802 {
31803 case TYPE_CR_LOGICAL:
31804 case TYPE_MFCR:
31805 case TYPE_MFCRF:
31806 case TYPE_MTCR:
31807 case TYPE_DIV:
31808 case TYPE_ISYNC:
31809 case TYPE_LOAD_L:
31810 case TYPE_STORE_C:
31811 case TYPE_MFJMPR:
31812 case TYPE_MTJMPR:
31813 return true;
31814 case TYPE_MUL:
31815 case TYPE_SHIFT:
31816 case TYPE_EXTS:
31817 if (get_attr_dot (insn) == DOT_YES)
31818 return true;
31819 else
31820 break;
31821 case TYPE_LOAD:
31822 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31823 || get_attr_update (insn) == UPDATE_YES)
31824 return true;
31825 else
31826 break;
31827 case TYPE_STORE:
31828 case TYPE_FPLOAD:
31829 case TYPE_FPSTORE:
31830 if (get_attr_update (insn) == UPDATE_YES)
31831 return true;
31832 else
31833 break;
31834 default:
31835 break;
31836 }
31837 break;
31838 case PROCESSOR_POWER8:
31839 type = get_attr_type (insn);
31840
31841 switch (type)
31842 {
31843 case TYPE_CR_LOGICAL:
31844 case TYPE_MFCR:
31845 case TYPE_MFCRF:
31846 case TYPE_MTCR:
31847 case TYPE_SYNC:
31848 case TYPE_ISYNC:
31849 case TYPE_LOAD_L:
31850 case TYPE_STORE_C:
31851 case TYPE_VECSTORE:
31852 case TYPE_MFJMPR:
31853 case TYPE_MTJMPR:
31854 return true;
31855 case TYPE_SHIFT:
31856 case TYPE_EXTS:
31857 case TYPE_MUL:
31858 if (get_attr_dot (insn) == DOT_YES)
31859 return true;
31860 else
31861 break;
31862 case TYPE_LOAD:
31863 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31864 || get_attr_update (insn) == UPDATE_YES)
31865 return true;
31866 else
31867 break;
31868 case TYPE_STORE:
31869 if (get_attr_update (insn) == UPDATE_YES
31870 && get_attr_indexed (insn) == INDEXED_YES)
31871 return true;
31872 else
31873 break;
31874 default:
31875 break;
31876 }
31877 break;
31878 default:
31879 break;
31880 }
31881
31882 return false;
31883 }
31884
31885 static bool
31886 insn_must_be_last_in_group (rtx_insn *insn)
31887 {
31888 enum attr_type type;
31889
31890 if (!insn
31891 || NOTE_P (insn)
31892 || DEBUG_INSN_P (insn)
31893 || GET_CODE (PATTERN (insn)) == USE
31894 || GET_CODE (PATTERN (insn)) == CLOBBER)
31895 return false;
31896
31897 switch (rs6000_tune) {
31898 case PROCESSOR_POWER4:
31899 case PROCESSOR_POWER5:
31900 if (is_microcoded_insn (insn))
31901 return true;
31902
31903 if (is_branch_slot_insn (insn))
31904 return true;
31905
31906 break;
31907 case PROCESSOR_POWER6:
31908 type = get_attr_type (insn);
31909
31910 switch (type)
31911 {
31912 case TYPE_EXTS:
31913 case TYPE_CNTLZ:
31914 case TYPE_TRAP:
31915 case TYPE_MUL:
31916 case TYPE_FPCOMPARE:
31917 case TYPE_MFCR:
31918 case TYPE_MTCR:
31919 case TYPE_MFJMPR:
31920 case TYPE_MTJMPR:
31921 case TYPE_ISYNC:
31922 case TYPE_SYNC:
31923 case TYPE_LOAD_L:
31924 case TYPE_STORE_C:
31925 return true;
31926 case TYPE_SHIFT:
31927 if (get_attr_dot (insn) == DOT_NO
31928 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31929 return true;
31930 else
31931 break;
31932 case TYPE_DIV:
31933 if (get_attr_size (insn) == SIZE_32)
31934 return true;
31935 else
31936 break;
31937 default:
31938 break;
31939 }
31940 break;
31941 case PROCESSOR_POWER7:
31942 type = get_attr_type (insn);
31943
31944 switch (type)
31945 {
31946 case TYPE_ISYNC:
31947 case TYPE_SYNC:
31948 case TYPE_LOAD_L:
31949 case TYPE_STORE_C:
31950 return true;
31951 case TYPE_LOAD:
31952 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31953 && get_attr_update (insn) == UPDATE_YES)
31954 return true;
31955 else
31956 break;
31957 case TYPE_STORE:
31958 if (get_attr_update (insn) == UPDATE_YES
31959 && get_attr_indexed (insn) == INDEXED_YES)
31960 return true;
31961 else
31962 break;
31963 default:
31964 break;
31965 }
31966 break;
31967 case PROCESSOR_POWER8:
31968 type = get_attr_type (insn);
31969
31970 switch (type)
31971 {
31972 case TYPE_MFCR:
31973 case TYPE_MTCR:
31974 case TYPE_ISYNC:
31975 case TYPE_SYNC:
31976 case TYPE_LOAD_L:
31977 case TYPE_STORE_C:
31978 return true;
31979 case TYPE_LOAD:
31980 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31981 && get_attr_update (insn) == UPDATE_YES)
31982 return true;
31983 else
31984 break;
31985 case TYPE_STORE:
31986 if (get_attr_update (insn) == UPDATE_YES
31987 && get_attr_indexed (insn) == INDEXED_YES)
31988 return true;
31989 else
31990 break;
31991 default:
31992 break;
31993 }
31994 break;
31995 default:
31996 break;
31997 }
31998
31999 return false;
32000 }
32001
32002 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
32003 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
32004
32005 static bool
32006 is_costly_group (rtx *group_insns, rtx next_insn)
32007 {
32008 int i;
32009 int issue_rate = rs6000_issue_rate ();
32010
32011 for (i = 0; i < issue_rate; i++)
32012 {
32013 sd_iterator_def sd_it;
32014 dep_t dep;
32015 rtx insn = group_insns[i];
32016
32017 if (!insn)
32018 continue;
32019
32020 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32021 {
32022 rtx next = DEP_CON (dep);
32023
32024 if (next == next_insn
32025 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32026 return true;
32027 }
32028 }
32029
32030 return false;
32031 }
32032
32033 /* Helper for the function redefine_groups.
32034 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32035 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32036 to keep it "far" (in a separate group) from GROUP_INSNS, following
32037 one of the following schemes, depending on the value of the flag
32038    -minsert-sched-nops = X:
32039 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32040 in order to force NEXT_INSN into a separate group.
32041 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32042 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32043 insertion (has a group just ended, how many vacant issue slots remain in the
32044 last group, and how many dispatch groups were encountered so far). */
32045
32046 static int
32047 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32048 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32049 int *group_count)
32050 {
32051 rtx nop;
32052 bool force;
32053 int issue_rate = rs6000_issue_rate ();
32054 bool end = *group_end;
32055 int i;
32056
32057 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32058 return can_issue_more;
32059
32060 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32061 return can_issue_more;
32062
32063 force = is_costly_group (group_insns, next_insn);
32064 if (!force)
32065 return can_issue_more;
32066
32067 if (sched_verbose > 6)
32068     fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
32069 	     *group_count, can_issue_more);
32070
32071 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32072 {
32073 if (*group_end)
32074 can_issue_more = 0;
32075
32076 /* Since only a branch can be issued in the last issue_slot, it is
32077 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32078 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32079 in this case the last nop will start a new group and the branch
32080 will be forced to the new group. */
32081 if (can_issue_more && !is_branch_slot_insn (next_insn))
32082 can_issue_more--;
32083
32084 /* Do we have a special group ending nop? */
32085 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
32086 || rs6000_tune == PROCESSOR_POWER8)
32087 {
32088 nop = gen_group_ending_nop ();
32089 emit_insn_before (nop, next_insn);
32090 can_issue_more = 0;
32091 }
32092 else
32093 while (can_issue_more > 0)
32094 {
32095 nop = gen_nop ();
32096 emit_insn_before (nop, next_insn);
32097 can_issue_more--;
32098 }
32099
32100 *group_end = true;
32101 return 0;
32102 }
32103
32104 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32105 {
32106 int n_nops = rs6000_sched_insert_nops;
32107
32108 /* Nops can't be issued from the branch slot, so the effective
32109 issue_rate for nops is 'issue_rate - 1'. */
32110 if (can_issue_more == 0)
32111 can_issue_more = issue_rate;
32112 can_issue_more--;
32113 if (can_issue_more == 0)
32114 {
32115 can_issue_more = issue_rate - 1;
32116 (*group_count)++;
32117 end = true;
32118 for (i = 0; i < issue_rate; i++)
32119 {
32120 group_insns[i] = 0;
32121 }
32122 }
32123
32124 while (n_nops > 0)
32125 {
32126 nop = gen_nop ();
32127 emit_insn_before (nop, next_insn);
32128 if (can_issue_more == issue_rate - 1) /* new group begins */
32129 end = false;
32130 can_issue_more--;
32131 if (can_issue_more == 0)
32132 {
32133 can_issue_more = issue_rate - 1;
32134 (*group_count)++;
32135 end = true;
32136 for (i = 0; i < issue_rate; i++)
32137 {
32138 group_insns[i] = 0;
32139 }
32140 }
32141 n_nops--;
32142 }
32143
32144 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32145 can_issue_more++;
32146
32147 /* Is next_insn going to start a new group? */
32148 *group_end
32149 = (end
32150 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32151 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32152 	 || (can_issue_more < issue_rate
32153 	     && insn_terminates_group_p (next_insn, previous_group)));
32154 if (*group_end && end)
32155 (*group_count)--;
32156
32157 if (sched_verbose > 6)
32158 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32159 *group_count, can_issue_more);
32160 return can_issue_more;
32161 }
32162
32163 return can_issue_more;
32164 }
32165
32166 /* This function tries to synch the dispatch groups that the compiler "sees"
32167 with the dispatch groups that the processor dispatcher is expected to
32168 form in practice. It tries to achieve this synchronization by forcing the
32169 estimated processor grouping on the compiler (as opposed to the function
32170    'pad_groups' which tries to force the scheduler's grouping on the processor).
32171
32172 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32173 examines the (estimated) dispatch groups that will be formed by the processor
32174 dispatcher. It marks these group boundaries to reflect the estimated
32175 processor grouping, overriding the grouping that the scheduler had marked.
32176 Depending on the value of the flag '-minsert-sched-nops' this function can
32177 force certain insns into separate groups or force a certain distance between
32178 them by inserting nops, for example, if there exists a "costly dependence"
32179 between the insns.
32180
32181 The function estimates the group boundaries that the processor will form as
32182 follows: It keeps track of how many vacant issue slots are available after
32183 each insn. A subsequent insn will start a new group if one of the following
32184 4 cases applies:
32185 - no more vacant issue slots remain in the current dispatch group.
32186 - only the last issue slot, which is the branch slot, is vacant, but the next
32187 insn is not a branch.
32188    - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32189 which means that a cracked insn (which occupies two issue slots) can't be
32190 issued in this group.
32191    - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32192 start a new group. */
32193
32194 static int
32195 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32196 rtx_insn *tail)
32197 {
32198 rtx_insn *insn, *next_insn;
32199 int issue_rate;
32200 int can_issue_more;
32201 int slot, i;
32202 bool group_end;
32203 int group_count = 0;
32204 rtx *group_insns;
32205
32206 /* Initialize. */
32207 issue_rate = rs6000_issue_rate ();
32208 group_insns = XALLOCAVEC (rtx, issue_rate);
32209 for (i = 0; i < issue_rate; i++)
32210 {
32211 group_insns[i] = 0;
32212 }
32213 can_issue_more = issue_rate;
32214 slot = 0;
32215 insn = get_next_active_insn (prev_head_insn, tail);
32216 group_end = false;
32217
32218 while (insn != NULL_RTX)
32219 {
32220 slot = (issue_rate - can_issue_more);
32221 group_insns[slot] = insn;
32222 can_issue_more =
32223 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32224 if (insn_terminates_group_p (insn, current_group))
32225 can_issue_more = 0;
32226
32227 next_insn = get_next_active_insn (insn, tail);
32228 if (next_insn == NULL_RTX)
32229 return group_count + 1;
32230
32231 /* Is next_insn going to start a new group? */
32232 group_end
32233 = (can_issue_more == 0
32234 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32235 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32236 	   || (can_issue_more < issue_rate
32237 	       && insn_terminates_group_p (next_insn, previous_group)));
32238
32239 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32240 next_insn, &group_end, can_issue_more,
32241 &group_count);
32242
32243 if (group_end)
32244 {
32245 group_count++;
32246 can_issue_more = 0;
32247 for (i = 0; i < issue_rate; i++)
32248 {
32249 group_insns[i] = 0;
32250 }
32251 }
32252
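      /* The scheduler marks the first insn of each dispatch group with
	 TImode; update that marking here so it reflects the group
	 boundaries estimated above.  */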
32253 if (GET_MODE (next_insn) == TImode && can_issue_more)
32254 PUT_MODE (next_insn, VOIDmode);
32255 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32256 PUT_MODE (next_insn, TImode);
32257
32258 insn = next_insn;
32259 if (can_issue_more == 0)
32260 can_issue_more = issue_rate;
32261 } /* while */
32262
32263 return group_count;
32264 }
32265
32266 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32267 dispatch group boundaries that the scheduler had marked. Pad with nops
32268 any dispatch groups which have vacant issue slots, in order to force the
32269 scheduler's grouping on the processor dispatcher. The function
32270 returns the number of dispatch groups found. */
32271
32272 static int
32273 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32274 rtx_insn *tail)
32275 {
32276 rtx_insn *insn, *next_insn;
32277 rtx nop;
32278 int issue_rate;
32279 int can_issue_more;
32280 int group_end;
32281 int group_count = 0;
32282
32283 /* Initialize issue_rate. */
32284 issue_rate = rs6000_issue_rate ();
32285 can_issue_more = issue_rate;
32286
32287 insn = get_next_active_insn (prev_head_insn, tail);
32288 next_insn = get_next_active_insn (insn, tail);
32289
32290 while (insn != NULL_RTX)
32291 {
32292 can_issue_more =
32293 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32294
32295 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32296
32297 if (next_insn == NULL_RTX)
32298 break;
32299
32300 if (group_end)
32301 {
32302 /* If the scheduler had marked group termination at this location
32303 (between insn and next_insn), and neither insn nor next_insn will
32304 force group termination, pad the group with nops to force group
32305 termination. */
32306 if (can_issue_more
32307 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32308 && !insn_terminates_group_p (insn, current_group)
32309 && !insn_terminates_group_p (next_insn, previous_group))
32310 {
32311 if (!is_branch_slot_insn (next_insn))
32312 can_issue_more--;
32313
32314 while (can_issue_more)
32315 {
32316 nop = gen_nop ();
32317 emit_insn_before (nop, next_insn);
32318 can_issue_more--;
32319 }
32320 }
32321
32322 can_issue_more = issue_rate;
32323 group_count++;
32324 }
32325
32326 insn = next_insn;
32327 next_insn = get_next_active_insn (insn, tail);
32328 }
32329
32330 return group_count;
32331 }
32332
32333 /* We're beginning a new block. Initialize data structures as necessary. */
32334
32335 static void
32336 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32337 int sched_verbose ATTRIBUTE_UNUSED,
32338 int max_ready ATTRIBUTE_UNUSED)
32339 {
32340 last_scheduled_insn = NULL;
32341 load_store_pendulum = 0;
32342 divide_cnt = 0;
32343 vec_pairing = 0;
32344 }
32345
32346 /* The following function is called at the end of scheduling BB.
32347    After reload, it inserts nops to enforce insn group bundling.  */
32348
32349 static void
32350 rs6000_sched_finish (FILE *dump, int sched_verbose)
32351 {
32352 int n_groups;
32353
32354 if (sched_verbose)
32355 fprintf (dump, "=== Finishing schedule.\n");
32356
32357 if (reload_completed && rs6000_sched_groups)
32358 {
32359 /* Do not run sched_finish hook when selective scheduling enabled. */
32360 if (sel_sched_p ())
32361 return;
32362
32363 if (rs6000_sched_insert_nops == sched_finish_none)
32364 return;
32365
32366 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32367 n_groups = pad_groups (dump, sched_verbose,
32368 current_sched_info->prev_head,
32369 current_sched_info->next_tail);
32370 else
32371 n_groups = redefine_groups (dump, sched_verbose,
32372 current_sched_info->prev_head,
32373 current_sched_info->next_tail);
32374
32375 if (sched_verbose >= 6)
32376 {
32377 fprintf (dump, "ngroups = %d\n", n_groups);
32378 print_rtl (dump, current_sched_info->prev_head);
32379 fprintf (dump, "Done finish_sched\n");
32380 }
32381 }
32382 }
32383
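/* Scheduling state saved and restored across blocks, used mainly by the
   selective scheduler: the remaining issue count for the cycle, the last
   insn issued, and the Power6 load/store pendulum and Power9 divide and
   vector-pairing counters used by the reorder hooks above.  */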
32384 struct rs6000_sched_context
32385 {
32386 short cached_can_issue_more;
32387 rtx_insn *last_scheduled_insn;
32388 int load_store_pendulum;
32389 int divide_cnt;
32390 int vec_pairing;
32391 };
32392
32393 typedef struct rs6000_sched_context rs6000_sched_context_def;
32394 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32395
32396 /* Allocate storage for a new scheduling context.  */
32397 static void *
32398 rs6000_alloc_sched_context (void)
32399 {
32400 return xmalloc (sizeof (rs6000_sched_context_def));
32401 }
32402
32403 /* If CLEAN_P is true, initialize _SC with clean data;
32404    otherwise initialize it from the global context.  */
32405 static void
32406 rs6000_init_sched_context (void *_sc, bool clean_p)
32407 {
32408 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32409
32410 if (clean_p)
32411 {
32412 sc->cached_can_issue_more = 0;
32413 sc->last_scheduled_insn = NULL;
32414 sc->load_store_pendulum = 0;
32415 sc->divide_cnt = 0;
32416 sc->vec_pairing = 0;
32417 }
32418 else
32419 {
32420 sc->cached_can_issue_more = cached_can_issue_more;
32421 sc->last_scheduled_insn = last_scheduled_insn;
32422 sc->load_store_pendulum = load_store_pendulum;
32423 sc->divide_cnt = divide_cnt;
32424 sc->vec_pairing = vec_pairing;
32425 }
32426 }
32427
32428 /* Sets the global scheduling context to the one pointed to by _SC. */
32429 static void
32430 rs6000_set_sched_context (void *_sc)
32431 {
32432 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32433
32434 gcc_assert (sc != NULL);
32435
32436 cached_can_issue_more = sc->cached_can_issue_more;
32437 last_scheduled_insn = sc->last_scheduled_insn;
32438 load_store_pendulum = sc->load_store_pendulum;
32439 divide_cnt = sc->divide_cnt;
32440 vec_pairing = sc->vec_pairing;
32441 }
32442
32443 /* Free _SC. */
32444 static void
32445 rs6000_free_sched_context (void *_sc)
32446 {
32447 gcc_assert (_sc != NULL);
32448
32449 free (_sc);
32450 }
32451
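/* Implement TARGET_SCHED_CAN_SPECULATE_INSN.  Return true if INSN may be
   scheduled speculatively; divide and square-root insns are excluded.  */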
32452 static bool
32453 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32454 {
32455 switch (get_attr_type (insn))
32456 {
32457 case TYPE_DIV:
32458 case TYPE_SDIV:
32459 case TYPE_DDIV:
32460 case TYPE_VECDIV:
32461 case TYPE_SSQRT:
32462 case TYPE_DSQRT:
32463 return false;
32464
32465 default:
32466 return true;
32467 }
32468 }
32469 \f
32470 /* Length in units of the trampoline for entering a nested function. */
32471
32472 int
32473 rs6000_trampoline_size (void)
32474 {
32475 int ret = 0;
32476
32477 switch (DEFAULT_ABI)
32478 {
32479 default:
32480 gcc_unreachable ();
32481
32482 case ABI_AIX:
32483 ret = (TARGET_32BIT) ? 12 : 24;
32484 break;
32485
32486 case ABI_ELFv2:
32487 gcc_assert (!TARGET_32BIT);
32488 ret = 32;
32489 break;
32490
32491 case ABI_DARWIN:
32492 case ABI_V4:
32493 ret = (TARGET_32BIT) ? 40 : 48;
32494 break;
32495 }
32496
32497 return ret;
32498 }
32499
32500 /* Emit RTL insns to initialize the variable parts of a trampoline.
32501 FNADDR is an RTX for the address of the function's pure code.
32502 CXT is an RTX for the static chain value for the function. */
32503
32504 static void
32505 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32506 {
32507 int regsize = (TARGET_32BIT) ? 4 : 8;
32508 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32509 rtx ctx_reg = force_reg (Pmode, cxt);
32510 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32511
32512 switch (DEFAULT_ABI)
32513 {
32514 default:
32515 gcc_unreachable ();
32516
32517     /* Under AIX, just build the 3-word function descriptor.  */
32518 case ABI_AIX:
32519 {
32520 rtx fnmem, fn_reg, toc_reg;
32521
32522 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32523 error ("you cannot take the address of a nested function if you use "
32524 "the %qs option", "-mno-pointers-to-nested-functions");
32525
32526 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32527 fn_reg = gen_reg_rtx (Pmode);
32528 toc_reg = gen_reg_rtx (Pmode);
32529
32530 /* Macro to shorten the code expansions below. */
32531 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32532
32533 m_tramp = replace_equiv_address (m_tramp, addr);
32534
32535 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32536 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32537 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32538 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32539 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32540
32541 # undef MEM_PLUS
32542 }
32543 break;
32544
32545 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32546 case ABI_ELFv2:
32547 case ABI_DARWIN:
32548 case ABI_V4:
32549 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32550 LCT_NORMAL, VOIDmode,
32551 addr, Pmode,
32552 GEN_INT (rs6000_trampoline_size ()), SImode,
32553 fnaddr, Pmode,
32554 ctx_reg, Pmode);
32555 break;
32556 }
32557 }
32558
32559 \f
32560 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32561 identifier as an argument, so the front end shouldn't look it up. */
32562
32563 static bool
32564 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32565 {
32566 return is_attribute_p ("altivec", attr_id);
32567 }
32568
32569 /* Handle the "altivec" attribute. The attribute may have
32570 arguments as follows:
32571
32572 __attribute__((altivec(vector__)))
32573 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32574 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32575
32576 and may appear more than once (e.g., 'vector bool char') in a
32577 given declaration. */
32578
32579 static tree
32580 rs6000_handle_altivec_attribute (tree *node,
32581 tree name ATTRIBUTE_UNUSED,
32582 tree args,
32583 int flags ATTRIBUTE_UNUSED,
32584 bool *no_add_attrs)
32585 {
32586 tree type = *node, result = NULL_TREE;
32587 machine_mode mode;
32588 int unsigned_p;
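  /* The attribute argument is one of the identifiers vector__, pixel__
     or bool__ (see the comment above); its first character is enough to
     tell them apart.  */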
32589 char altivec_type
32590 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32591 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32592 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32593 : '?');
32594
32595 while (POINTER_TYPE_P (type)
32596 || TREE_CODE (type) == FUNCTION_TYPE
32597 || TREE_CODE (type) == METHOD_TYPE
32598 || TREE_CODE (type) == ARRAY_TYPE)
32599 type = TREE_TYPE (type);
32600
32601 mode = TYPE_MODE (type);
32602
32603 /* Check for invalid AltiVec type qualifiers. */
32604 if (type == long_double_type_node)
32605 error ("use of %<long double%> in AltiVec types is invalid");
32606 else if (type == boolean_type_node)
32607 error ("use of boolean types in AltiVec types is invalid");
32608 else if (TREE_CODE (type) == COMPLEX_TYPE)
32609 error ("use of %<complex%> in AltiVec types is invalid");
32610 else if (DECIMAL_FLOAT_MODE_P (mode))
32611 error ("use of decimal floating point types in AltiVec types is invalid");
32612 else if (!TARGET_VSX)
32613 {
32614 if (type == long_unsigned_type_node || type == long_integer_type_node)
32615 {
32616 if (TARGET_64BIT)
32617 error ("use of %<long%> in AltiVec types is invalid for "
32618 "64-bit code without %qs", "-mvsx");
32619 else if (rs6000_warn_altivec_long)
32620 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32621 "use %<int%>");
32622 }
32623 else if (type == long_long_unsigned_type_node
32624 || type == long_long_integer_type_node)
32625 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32626 "-mvsx");
32627 else if (type == double_type_node)
32628 error ("use of %<double%> in AltiVec types is invalid without %qs",
32629 "-mvsx");
32630 }
32631
32632 switch (altivec_type)
32633 {
32634 case 'v':
32635 unsigned_p = TYPE_UNSIGNED (type);
32636 switch (mode)
32637 {
32638 case E_TImode:
32639 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32640 break;
32641 case E_DImode:
32642 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32643 break;
32644 case E_SImode:
32645 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32646 break;
32647 case E_HImode:
32648 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32649 break;
32650 case E_QImode:
32651 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32652 break;
32653 case E_SFmode: result = V4SF_type_node; break;
32654 case E_DFmode: result = V2DF_type_node; break;
32655 /* If the user says 'vector int bool', we may be handed the 'bool'
32656 attribute _before_ the 'vector' attribute, and so select the
32657 proper type in the 'b' case below. */
32658 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32659 case E_V2DImode: case E_V2DFmode:
32660 	    result = type; break;
32661 default: break;
32662 }
32663 break;
32664 case 'b':
32665 switch (mode)
32666 {
32667 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32668 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32669 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32670 	case E_QImode: case E_V16QImode: result = bool_V16QI_type_node; break;
32671 default: break;
32672 }
32673 break;
32674 case 'p':
32675 switch (mode)
32676 {
32677 	case E_V8HImode: result = pixel_V8HI_type_node; break;
32678 default: break;
32679 }
32680 default: break;
32681 }
32682
32683 /* Propagate qualifiers attached to the element type
32684 onto the vector type. */
32685 if (result && result != type && TYPE_QUALS (type))
32686 result = build_qualified_type (result, TYPE_QUALS (type));
32687
32688 *no_add_attrs = true; /* No need to hang on to the attribute. */
32689
32690 if (result)
32691 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32692
32693 return NULL_TREE;
32694 }
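
/* As an illustrative (not normative) example of the mapping above: a
   declaration in the style used by <altivec.h>,

	__attribute__ ((altivec (vector__))) unsigned int v;

   reaches this handler with ALTIVEC_TYPE 'v' and MODE SImode, so V is
   given type unsigned_V4SI_type_node; stacking the bool__ form onto
   'unsigned int' instead selects bool_V4SI_type_node via the 'b' case.  */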
32695
32696 /* AltiVec defines five built-in scalar types that serve as vector
32697 elements; we must teach the compiler how to mangle them. The 128-bit
32698 floating point mangling is target-specific as well. */
32699
32700 static const char *
32701 rs6000_mangle_type (const_tree type)
32702 {
32703 type = TYPE_MAIN_VARIANT (type);
32704
32705 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32706 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32707 return NULL;
32708
32709 if (type == bool_char_type_node) return "U6__boolc";
32710 if (type == bool_short_type_node) return "U6__bools";
32711 if (type == pixel_type_node) return "u7__pixel";
32712 if (type == bool_int_type_node) return "U6__booli";
32713 if (type == bool_long_long_type_node) return "U6__boolx";
32714
32715 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32716 return "g";
32717 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32718 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32719
32720 /* For all other types, use the default mangling. */
32721 return NULL;
32722 }
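
/* A worked example, assuming the Itanium C++ ABI rules for vendor
   extended builtin types: a hypothetical declaration

	void f (__pixel);

   mangles as _Z1fu7__pixel, the "u7__pixel" fragment coming from the
   pixel_type_node case above; a bool_int_type_node parameter would
   likewise contribute the U6__booli fragment.  */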
32723
32724 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32725 struct attribute_spec.handler. */
32726
32727 static tree
32728 rs6000_handle_longcall_attribute (tree *node, tree name,
32729 tree args ATTRIBUTE_UNUSED,
32730 int flags ATTRIBUTE_UNUSED,
32731 bool *no_add_attrs)
32732 {
32733 if (TREE_CODE (*node) != FUNCTION_TYPE
32734 && TREE_CODE (*node) != FIELD_DECL
32735 && TREE_CODE (*node) != TYPE_DECL)
32736 {
32737 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32738 name);
32739 *no_add_attrs = true;
32740 }
32741
32742 return NULL_TREE;
32743 }
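
/* Typical usage of the attribute validated above (illustrative):

	void far_func (void) __attribute__ ((longcall));

   Calls to far_func are then made through a pointer in a register, so
   they can reach targets beyond the +/-32MB displacement of a direct
   'bl' instruction.  */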
32744
32745 /* Set longcall attributes on all functions declared when
32746 rs6000_default_long_calls is true. */
32747 static void
32748 rs6000_set_default_type_attributes (tree type)
32749 {
32750 if (rs6000_default_long_calls
32751 && (TREE_CODE (type) == FUNCTION_TYPE
32752 || TREE_CODE (type) == METHOD_TYPE))
32753 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32754 NULL_TREE,
32755 TYPE_ATTRIBUTES (type));
32756
32757 #if TARGET_MACHO
32758 darwin_set_default_type_attributes (type);
32759 #endif
32760 }
32761
32762 /* Return a reference suitable for calling a function with the
32763 longcall attribute. */
32764
32765 static rtx
32766 rs6000_longcall_ref (rtx call_ref, rtx arg)
32767 {
32768   /* System V adds '.' to the internal name, so skip any leading periods.  */
32769 const char *call_name = XSTR (call_ref, 0);
32770 if (*call_name == '.')
32771 {
32772 while (*call_name == '.')
32773 call_name++;
32774
32775 tree node = get_identifier (call_name);
32776 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32777 }
32778
32779 if (HAVE_AS_PLTSEQ
32780 && TARGET_TLS_MARKERS
32781 && (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4))
32782 {
32783 rtx base = const0_rtx;
32784 int regno;
32785 if (DEFAULT_ABI == ABI_ELFv2)
32786 {
32787 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32788 regno = 12;
32789 }
32790 else
32791 {
32792 if (flag_pic)
32793 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32794 regno = 11;
32795 }
32796 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32797 may be used by a function global entry point. For SysV4, r11
32798 is used by __glink_PLTresolve lazy resolver entry. */
32799 rtx reg = gen_rtx_REG (Pmode, regno);
32800 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32801 UNSPEC_PLT16_HA);
32802 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32803 UNSPEC_PLT16_LO);
32804 emit_insn (gen_rtx_SET (reg, hi));
32805 emit_insn (gen_rtx_SET (reg, lo));
32806 return reg;
32807 }
32808
32809 return force_reg (Pmode, call_ref);
32810 }
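
/* A sketch of the inline PLT sequence created above for ELFv2, assuming
   the usual @plt assembler syntax for the PLT16 relocations (SysV4 uses
   r11, and for PIC the GOT pointer as BASE, instead):

	addis 12,2,func@plt@ha		# UNSPEC_PLT16_HA
	addi 12,12,func@plt@l		# UNSPEC_PLT16_LO  */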
32811 \f
32812 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32813 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32814 #endif
32815
32816 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32817 struct attribute_spec.handler. */
32818 static tree
32819 rs6000_handle_struct_attribute (tree *node, tree name,
32820 tree args ATTRIBUTE_UNUSED,
32821 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32822 {
32823 tree *type = NULL;
32824 if (DECL_P (*node))
32825 {
32826 if (TREE_CODE (*node) == TYPE_DECL)
32827 type = &TREE_TYPE (*node);
32828 }
32829 else
32830 type = node;
32831
32832 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32833 || TREE_CODE (*type) == UNION_TYPE)))
32834 {
32835 warning (OPT_Wattributes, "%qE attribute ignored", name);
32836 *no_add_attrs = true;
32837 }
32838
32839 else if ((is_attribute_p ("ms_struct", name)
32840 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32841 || ((is_attribute_p ("gcc_struct", name)
32842 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32843 {
32844 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32845 name);
32846 *no_add_attrs = true;
32847 }
32848
32849 return NULL_TREE;
32850 }
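
/* Illustrative usage of the attributes validated above:

	struct __attribute__ ((ms_struct)) S { char c; int f : 7; };

   requests Microsoft-compatible record and bitfield layout for S;
   gcc_struct conversely requests the native layout where ms_struct is
   the default.  */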
32851
32852 static bool
32853 rs6000_ms_bitfield_layout_p (const_tree record_type)
32854 {
32855   return ((TARGET_USE_MS_BITFIELD_LAYOUT
32856 	   && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32857 	  || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
32858 }
32859 \f
32860 #ifdef USING_ELFOS_H
32861
32862 /* A get_unnamed_section callback, used for switching to toc_section. */
32863
32864 static void
32865 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32866 {
32867 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32868 && TARGET_MINIMAL_TOC)
32869 {
32870 if (!toc_initialized)
32871 {
32872 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32873 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32874 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32875 fprintf (asm_out_file, "\t.tc ");
32876 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32877 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32878 fprintf (asm_out_file, "\n");
32879
32880 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32881 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32882 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32883 fprintf (asm_out_file, " = .+32768\n");
32884 toc_initialized = 1;
32885 }
32886 else
32887 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32888 }
32889 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32890 {
32891 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32892 if (!toc_initialized)
32893 {
32894 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32895 toc_initialized = 1;
32896 }
32897 }
32898 else
32899 {
32900 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32901 if (!toc_initialized)
32902 {
32903 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32904 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32905 fprintf (asm_out_file, " = .+32768\n");
32906 toc_initialized = 1;
32907 }
32908 }
32909 }
32910
32911 /* Implement TARGET_ASM_INIT_SECTIONS. */
32912
32913 static void
32914 rs6000_elf_asm_init_sections (void)
32915 {
32916 toc_section
32917 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32918
32919 sdata2_section
32920 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32921 SDATA2_SECTION_ASM_OP);
32922 }
32923
32924 /* Implement TARGET_SELECT_RTX_SECTION. */
32925
32926 static section *
32927 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32928 unsigned HOST_WIDE_INT align)
32929 {
32930 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32931 return toc_section;
32932 else
32933 return default_elf_select_rtx_section (mode, x, align);
32934 }
32935 \f
32936 /* For a SYMBOL_REF, set generic flags and then perform some
32937 target-specific processing.
32938
32939 When the AIX ABI is requested on a non-AIX system, replace the
32940 function name with the real name (with a leading .) rather than the
32941 function descriptor name. This saves a lot of overriding code to
32942 read the prefixes. */
32943
32944 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32945 static void
32946 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32947 {
32948 default_encode_section_info (decl, rtl, first);
32949
32950 if (first
32951 && TREE_CODE (decl) == FUNCTION_DECL
32952 && !TARGET_AIX
32953 && DEFAULT_ABI == ABI_AIX)
32954 {
32955 rtx sym_ref = XEXP (rtl, 0);
32956 size_t len = strlen (XSTR (sym_ref, 0));
32957 char *str = XALLOCAVEC (char, len + 2);
32958 str[0] = '.';
32959 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32960 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32961 }
32962 }
32963
32964 static inline bool
32965 compare_section_name (const char *section, const char *templ)
32966 {
32967 int len;
32968
32969 len = strlen (templ);
32970 return (strncmp (section, templ, len) == 0
32971 && (section[len] == 0 || section[len] == '.'));
32972 }
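
/* For example, compare_section_name (".sdata.foo", ".sdata") and
   compare_section_name (".sdata", ".sdata") are true, while
   compare_section_name (".sdata2", ".sdata") is false, because the
   character following the template must be '\0' or '.'.  */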
32973
32974 bool
32975 rs6000_elf_in_small_data_p (const_tree decl)
32976 {
32977 if (rs6000_sdata == SDATA_NONE)
32978 return false;
32979
32980 /* We want to merge strings, so we never consider them small data. */
32981 if (TREE_CODE (decl) == STRING_CST)
32982 return false;
32983
32984 /* Functions are never in the small data area. */
32985 if (TREE_CODE (decl) == FUNCTION_DECL)
32986 return false;
32987
32988 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32989 {
32990 const char *section = DECL_SECTION_NAME (decl);
32991 if (compare_section_name (section, ".sdata")
32992 || compare_section_name (section, ".sdata2")
32993 || compare_section_name (section, ".gnu.linkonce.s")
32994 || compare_section_name (section, ".sbss")
32995 || compare_section_name (section, ".sbss2")
32996 || compare_section_name (section, ".gnu.linkonce.sb")
32997 || strcmp (section, ".PPC.EMB.sdata0") == 0
32998 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32999 return true;
33000 }
33001 else
33002 {
33003 /* If we are told not to put readonly data in sdata, then don't. */
33004 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
33005 && !rs6000_readonly_in_sdata)
33006 return false;
33007
33008 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33009
33010 if (size > 0
33011 && size <= g_switch_value
33012 /* If it's not public, and we're not going to reference it there,
33013 there's no need to put it in the small data section. */
33014 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33015 return true;
33016 }
33017
33018 return false;
33019 }
33020
33021 #endif /* USING_ELFOS_H */
33022 \f
33023 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33024
33025 static bool
33026 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33027 {
33028 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33029 }
33030
33031 /* Do not place thread-local symbols refs in the object blocks. */
33032
33033 static bool
33034 rs6000_use_blocks_for_decl_p (const_tree decl)
33035 {
33036 return !DECL_THREAD_LOCAL_P (decl);
33037 }
33038 \f
33039 /* Return a REG that occurs in ADDR with coefficient 1.
33040 ADDR can be effectively incremented by incrementing REG.
33041
33042 r0 is special and we must not select it as an address
33043 register by this routine since our caller will try to
33044 increment the returned register via an "la" instruction. */
33045
33046 rtx
33047 find_addr_reg (rtx addr)
33048 {
33049 while (GET_CODE (addr) == PLUS)
33050 {
33051 if (GET_CODE (XEXP (addr, 0)) == REG
33052 && REGNO (XEXP (addr, 0)) != 0)
33053 addr = XEXP (addr, 0);
33054 else if (GET_CODE (XEXP (addr, 1)) == REG
33055 && REGNO (XEXP (addr, 1)) != 0)
33056 addr = XEXP (addr, 1);
33057 else if (CONSTANT_P (XEXP (addr, 0)))
33058 addr = XEXP (addr, 1);
33059 else if (CONSTANT_P (XEXP (addr, 1)))
33060 addr = XEXP (addr, 0);
33061 else
33062 gcc_unreachable ();
33063 }
33064 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
33065 return addr;
33066 }
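
/* For example, given ADDR of the form (plus (reg 9) (const_int 8)),
   find_addr_reg returns (reg 9); nested PLUS trees are walked until a
   bare register other than r0 remains.  */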
33067
33068 void
33069 rs6000_fatal_bad_address (rtx op)
33070 {
33071 fatal_insn ("bad address", op);
33072 }
33073
33074 #if TARGET_MACHO
33075
33076 typedef struct branch_island_d {
33077 tree function_name;
33078 tree label_name;
33079 int line_number;
33080 } branch_island;
33081
33082
33083 static vec<branch_island, va_gc> *branch_islands;
33084
33085 /* Remember to generate a branch island for far calls to the given
33086 function. */
33087
33088 static void
33089 add_compiler_branch_island (tree label_name, tree function_name,
33090 int line_number)
33091 {
33092 branch_island bi = {function_name, label_name, line_number};
33093 vec_safe_push (branch_islands, bi);
33094 }
33095
33096 /* Generate far-jump branch islands for everything recorded in
33097 branch_islands. Invoked immediately after the last instruction of
33098 the epilogue has been emitted; the branch islands must be appended
33099 to, and contiguous with, the function body. Mach-O stubs are
33100 generated in machopic_output_stub(). */
33101
33102 static void
33103 macho_branch_islands (void)
33104 {
33105 char tmp_buf[512];
33106
33107 while (!vec_safe_is_empty (branch_islands))
33108 {
33109 branch_island *bi = &branch_islands->last ();
33110 const char *label = IDENTIFIER_POINTER (bi->label_name);
33111 const char *name = IDENTIFIER_POINTER (bi->function_name);
33112 char name_buf[512];
33113 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33114 if (name[0] == '*' || name[0] == '&')
33115 strcpy (name_buf, name+1);
33116 else
33117 {
33118 name_buf[0] = '_';
33119 strcpy (name_buf+1, name);
33120 }
33121 strcpy (tmp_buf, "\n");
33122 strcat (tmp_buf, label);
33123 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33124 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33125 dbxout_stabd (N_SLINE, bi->line_number);
33126 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33127 if (flag_pic)
33128 {
33129 if (TARGET_LINK_STACK)
33130 {
33131 char name[32];
33132 get_ppc476_thunk_name (name);
33133 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33134 strcat (tmp_buf, name);
33135 strcat (tmp_buf, "\n");
33136 strcat (tmp_buf, label);
33137 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33138 }
33139 else
33140 {
33141 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33142 strcat (tmp_buf, label);
33143 strcat (tmp_buf, "_pic\n");
33144 strcat (tmp_buf, label);
33145 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33146 }
33147
33148 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33149 strcat (tmp_buf, name_buf);
33150 strcat (tmp_buf, " - ");
33151 strcat (tmp_buf, label);
33152 strcat (tmp_buf, "_pic)\n");
33153
33154 strcat (tmp_buf, "\tmtlr r0\n");
33155
33156 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33157 strcat (tmp_buf, name_buf);
33158 strcat (tmp_buf, " - ");
33159 strcat (tmp_buf, label);
33160 strcat (tmp_buf, "_pic)\n");
33161
33162 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33163 }
33164 else
33165 {
33166 strcat (tmp_buf, ":\nlis r12,hi16(");
33167 strcat (tmp_buf, name_buf);
33168 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33169 strcat (tmp_buf, name_buf);
33170 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33171 }
33172 output_asm_insn (tmp_buf, 0);
33173 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33174 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33175 dbxout_stabd (N_SLINE, bi->line_number);
33176 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33177 branch_islands->pop ();
33178 }
33179 }
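
/* For reference, a non-PIC branch island emitted above for a function
   foo with island label L42 comes out as:

	L42:
	lis r12,hi16(_foo)
	ori r12,r12,lo16(_foo)
	mtctr r12
	bctr  */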
33180
33181 /* NO_PREVIOUS_DEF checks in the linked list of branch islands whether
33182    the given function name has already been recorded.  */
33183
33184 static int
33185 no_previous_def (tree function_name)
33186 {
33187 branch_island *bi;
33188 unsigned ix;
33189
33190 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33191 if (function_name == bi->function_name)
33192 return 0;
33193 return 1;
33194 }
33195
33196 /* GET_PREV_LABEL gets the label name from the previous definition of
33197 the function. */
33198
33199 static tree
33200 get_prev_label (tree function_name)
33201 {
33202 branch_island *bi;
33203 unsigned ix;
33204
33205 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33206 if (function_name == bi->function_name)
33207 return bi->label_name;
33208 return NULL_TREE;
33209 }
33210
33211 /* INSN is either a function call or a millicode call.
33212 
33213    OPERANDS[DEST_OPERAND_NUMBER] is the routine we are calling;
33214    OPERANDS[COOKIE_OPERAND_NUMBER] holds the call cookie flags.  */
33215
33216 char *
33217 macho_call_template (rtx_insn *insn, rtx *operands, int dest_operand_number,
33218 int cookie_operand_number)
33219 {
33220 static char buf[256];
33221 if (darwin_emit_branch_islands
33222 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
33223 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
33224 {
33225 tree labelname;
33226 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
33227
33228 if (no_previous_def (funname))
33229 {
33230 rtx label_rtx = gen_label_rtx ();
33231 char *label_buf, temp_buf[256];
33232 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
33233 CODE_LABEL_NUMBER (label_rtx));
33234 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
33235 labelname = get_identifier (label_buf);
33236 add_compiler_branch_island (labelname, funname, insn_line (insn));
33237 }
33238 else
33239 labelname = get_prev_label (funname);
33240
33241 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33242 instruction will reach 'foo', otherwise link as 'bl L42'".
33243 "L42" should be a 'branch island', that will do a far jump to
33244 'foo'. Branch islands are generated in
33245 macho_branch_islands(). */
33246 sprintf (buf, "jbsr %%z%d,%.246s",
33247 dest_operand_number, IDENTIFIER_POINTER (labelname));
33248 }
33249 else
33250 sprintf (buf, "bl %%z%d", dest_operand_number);
33251 return buf;
33252 }
33253
33254 /* Generate PIC and indirect symbol stubs. */
33255
33256 void
33257 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33258 {
33259 unsigned int length;
33260 char *symbol_name, *lazy_ptr_name;
33261 char *local_label_0;
33262 static int label = 0;
33263
33264 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33265 symb = (*targetm.strip_name_encoding) (symb);
33266
33267
33268 length = strlen (symb);
33269 symbol_name = XALLOCAVEC (char, length + 32);
33270 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33271
33272 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33273 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33274
33275 if (flag_pic == 2)
33276 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33277 else
33278 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33279
33280 if (flag_pic == 2)
33281 {
33282 fprintf (file, "\t.align 5\n");
33283
33284 fprintf (file, "%s:\n", stub);
33285 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33286
33287 label++;
33288 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33289 sprintf (local_label_0, "\"L%011d$spb\"", label);
33290
33291 fprintf (file, "\tmflr r0\n");
33292 if (TARGET_LINK_STACK)
33293 {
33294 char name[32];
33295 get_ppc476_thunk_name (name);
33296 fprintf (file, "\tbl %s\n", name);
33297 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33298 }
33299 else
33300 {
33301 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33302 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33303 }
33304 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33305 lazy_ptr_name, local_label_0);
33306 fprintf (file, "\tmtlr r0\n");
33307 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33308 (TARGET_64BIT ? "ldu" : "lwzu"),
33309 lazy_ptr_name, local_label_0);
33310 fprintf (file, "\tmtctr r12\n");
33311 fprintf (file, "\tbctr\n");
33312 }
33313 else
33314 {
33315 fprintf (file, "\t.align 4\n");
33316
33317 fprintf (file, "%s:\n", stub);
33318 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33319
33320 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33321 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33322 (TARGET_64BIT ? "ldu" : "lwzu"),
33323 lazy_ptr_name);
33324 fprintf (file, "\tmtctr r12\n");
33325 fprintf (file, "\tbctr\n");
33326 }
33327
33328 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33329 fprintf (file, "%s:\n", lazy_ptr_name);
33330 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33331 fprintf (file, "%sdyld_stub_binding_helper\n",
33332 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33333 }
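
/* A sketch of the non-PIC (flag_pic == 0) stub emitted above, using
   hypothetical label names of the shape produced by the Darwin
   GEN_*_NAME_FOR_SYMBOL macros:

	L_foo$stub:
		.indirect_symbol _foo
		lis r11,ha16(L_foo$lazy_ptr)
		lwzu r12,lo16(L_foo$lazy_ptr)(r11)
		mtctr r12
		bctr  */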
33334
33335 /* Legitimize PIC addresses. If the address is already
33336 position-independent, we return ORIG. Newly generated
33337    position-independent addresses go into a reg.  This is REG if
33338    nonzero, otherwise we allocate register(s) as necessary.  */
33339
33340 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
33341
33342 rtx
33343 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33344 rtx reg)
33345 {
33346 rtx base, offset;
33347
33348 if (reg == NULL && !reload_completed)
33349 reg = gen_reg_rtx (Pmode);
33350
33351 if (GET_CODE (orig) == CONST)
33352 {
33353 rtx reg_temp;
33354
33355 if (GET_CODE (XEXP (orig, 0)) == PLUS
33356 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33357 return orig;
33358
33359 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33360
33361 /* Use a different reg for the intermediate value, as
33362 it will be marked UNCHANGING. */
33363 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33364 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33365 Pmode, reg_temp);
33366 offset =
33367 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33368 Pmode, reg);
33369
33370 if (GET_CODE (offset) == CONST_INT)
33371 {
33372 if (SMALL_INT (offset))
33373 return plus_constant (Pmode, base, INTVAL (offset));
33374 else if (!reload_completed)
33375 offset = force_reg (Pmode, offset);
33376 else
33377 {
33378 rtx mem = force_const_mem (Pmode, orig);
33379 return machopic_legitimize_pic_address (mem, Pmode, reg);
33380 }
33381 }
33382 return gen_rtx_PLUS (Pmode, base, offset);
33383 }
33384
33385 /* Fall back on generic machopic code. */
33386 return machopic_legitimize_pic_address (orig, mode, reg);
33387 }
33388
33389 /* Output a .machine directive for the Darwin assembler, and call
33390 the generic start_file routine. */
33391
33392 static void
33393 rs6000_darwin_file_start (void)
33394 {
33395 static const struct
33396 {
33397 const char *arg;
33398 const char *name;
33399 HOST_WIDE_INT if_set;
33400 } mapping[] = {
33401 { "ppc64", "ppc64", MASK_64BIT },
33402 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33403 { "power4", "ppc970", 0 },
33404 { "G5", "ppc970", 0 },
33405 { "7450", "ppc7450", 0 },
33406 { "7400", "ppc7400", MASK_ALTIVEC },
33407 { "G4", "ppc7400", 0 },
33408 { "750", "ppc750", 0 },
33409 { "740", "ppc750", 0 },
33410 { "G3", "ppc750", 0 },
33411 { "604e", "ppc604e", 0 },
33412 { "604", "ppc604", 0 },
33413 { "603e", "ppc603", 0 },
33414 { "603", "ppc603", 0 },
33415 { "601", "ppc601", 0 },
33416 { NULL, "ppc", 0 } };
33417 const char *cpu_id = "";
33418 size_t i;
33419
33420 rs6000_file_start ();
33421 darwin_file_start ();
33422
33423 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33424
33425 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33426 cpu_id = rs6000_default_cpu;
33427
33428 if (global_options_set.x_rs6000_cpu_index)
33429 cpu_id = processor_target_table[rs6000_cpu_index].name;
33430
33431 /* Look through the mapping array. Pick the first name that either
33432 matches the argument, has a bit set in IF_SET that is also set
33433 in the target flags, or has a NULL name. */
33434
33435 i = 0;
33436 while (mapping[i].arg != NULL
33437 && strcmp (mapping[i].arg, cpu_id) != 0
33438 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33439 i++;
33440
33441 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33442 }
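
/* For instance, -mcpu=G4 matches the "G4" entry in the table above and
   emits ".machine ppc7400"; with no -mcpu argument and none of the
   listed ISA flags set, the terminating NULL entry yields
   ".machine ppc".  */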
33443
33444 #endif /* TARGET_MACHO */
33445
33446 #if TARGET_ELF
33447 static int
33448 rs6000_elf_reloc_rw_mask (void)
33449 {
33450 if (flag_pic)
33451 return 3;
33452 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33453 return 2;
33454 else
33455 return 0;
33456 }
33457
33458 /* Record an element in the table of global constructors. SYMBOL is
33459 a SYMBOL_REF of the function to be called; PRIORITY is a number
33460 between 0 and MAX_INIT_PRIORITY.
33461
33462 This differs from default_named_section_asm_out_constructor in
33463 that we have special handling for -mrelocatable. */
33464
33465 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33466 static void
33467 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33468 {
33469 const char *section = ".ctors";
33470 char buf[18];
33471
33472 if (priority != DEFAULT_INIT_PRIORITY)
33473 {
33474 sprintf (buf, ".ctors.%.5u",
33475 /* Invert the numbering so the linker puts us in the proper
33476 order; constructors are run from right to left, and the
33477 linker sorts in increasing order. */
33478 MAX_INIT_PRIORITY - priority);
33479 section = buf;
33480 }
33481
33482 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33483 assemble_align (POINTER_SIZE);
33484
33485 if (DEFAULT_ABI == ABI_V4
33486 && (TARGET_RELOCATABLE || flag_pic > 1))
33487 {
33488 fputs ("\t.long (", asm_out_file);
33489 output_addr_const (asm_out_file, symbol);
33490 fputs (")@fixup\n", asm_out_file);
33491 }
33492 else
33493 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33494 }
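
/* For example, with MAX_INIT_PRIORITY of 65535, a constructor with
   priority 100 is placed in section ".ctors.65435", implementing the
   inversion described in the comment above.  */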
33495
33496 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33497 static void
33498 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33499 {
33500 const char *section = ".dtors";
33501 char buf[18];
33502
33503 if (priority != DEFAULT_INIT_PRIORITY)
33504 {
33505 sprintf (buf, ".dtors.%.5u",
33506 /* Invert the numbering so the linker puts us in the proper
33507 order; constructors are run from right to left, and the
33508 linker sorts in increasing order. */
33509 MAX_INIT_PRIORITY - priority);
33510 section = buf;
33511 }
33512
33513 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33514 assemble_align (POINTER_SIZE);
33515
33516 if (DEFAULT_ABI == ABI_V4
33517 && (TARGET_RELOCATABLE || flag_pic > 1))
33518 {
33519 fputs ("\t.long (", asm_out_file);
33520 output_addr_const (asm_out_file, symbol);
33521 fputs (")@fixup\n", asm_out_file);
33522 }
33523 else
33524 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33525 }
33526
33527 void
33528 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33529 {
33530 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33531 {
33532 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33533 ASM_OUTPUT_LABEL (file, name);
33534 fputs (DOUBLE_INT_ASM_OP, file);
33535 rs6000_output_function_entry (file, name);
33536 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33537 if (DOT_SYMBOLS)
33538 {
33539 fputs ("\t.size\t", file);
33540 assemble_name (file, name);
33541 fputs (",24\n\t.type\t.", file);
33542 assemble_name (file, name);
33543 fputs (",@function\n", file);
33544 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33545 {
33546 fputs ("\t.globl\t.", file);
33547 assemble_name (file, name);
33548 putc ('\n', file);
33549 }
33550 }
33551 else
33552 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33553 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33554 rs6000_output_function_entry (file, name);
33555 fputs (":\n", file);
33556 return;
33557 }
33558
33559 int uses_toc;
33560 if (DEFAULT_ABI == ABI_V4
33561 && (TARGET_RELOCATABLE || flag_pic > 1)
33562 && !TARGET_SECURE_PLT
33563 && (!constant_pool_empty_p () || crtl->profile)
33564 && (uses_toc = uses_TOC ()))
33565 {
33566 char buf[256];
33567
33568 if (uses_toc == 2)
33569 switch_to_other_text_partition ();
33570 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33571
33572 fprintf (file, "\t.long ");
33573 assemble_name (file, toc_label_name);
33574 need_toc_init = 1;
33575 putc ('-', file);
33576 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33577 assemble_name (file, buf);
33578 putc ('\n', file);
33579 if (uses_toc == 2)
33580 switch_to_other_text_partition ();
33581 }
33582
33583 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33584 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33585
33586 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33587 {
33588 char buf[256];
33589
33590 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33591
33592 fprintf (file, "\t.quad .TOC.-");
33593 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33594 assemble_name (file, buf);
33595 putc ('\n', file);
33596 }
33597
33598 if (DEFAULT_ABI == ABI_AIX)
33599 {
33600 const char *desc_name, *orig_name;
33601
33602 orig_name = (*targetm.strip_name_encoding) (name);
33603 desc_name = orig_name;
33604 while (*desc_name == '.')
33605 desc_name++;
33606
33607 if (TREE_PUBLIC (decl))
33608 fprintf (file, "\t.globl %s\n", desc_name);
33609
33610 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33611 fprintf (file, "%s:\n", desc_name);
33612 fprintf (file, "\t.long %s\n", orig_name);
33613 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33614 fputs ("\t.long 0\n", file);
33615 fprintf (file, "\t.previous\n");
33616 }
33617 ASM_OUTPUT_LABEL (file, name);
33618 }
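
/* In sketch form, the 64-bit ELFv1 path above emits, for a public
   function foo with dot-symbols enabled:

	.section ".opd","aw"
	.align 3
	foo:
	.quad .foo,.TOC.@tocbase,0
	.previous
	.size foo,24
	.type .foo,@function
	.globl .foo
	.foo:  */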
33619
33620 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33621 static void
33622 rs6000_elf_file_end (void)
33623 {
33624 #ifdef HAVE_AS_GNU_ATTRIBUTE
33625 /* ??? The value emitted depends on options active at file end.
33626 Assume anyone using #pragma or attributes that might change
33627 options knows what they are doing. */
33628 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33629 && rs6000_passes_float)
33630 {
33631 int fp;
33632
33633 if (TARGET_HARD_FLOAT)
33634 fp = 1;
33635 else
33636 fp = 2;
33637 if (rs6000_passes_long_double)
33638 {
33639 if (!TARGET_LONG_DOUBLE_128)
33640 fp |= 2 * 4;
33641 else if (TARGET_IEEEQUAD)
33642 fp |= 3 * 4;
33643 else
33644 fp |= 1 * 4;
33645 }
33646 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33647 }
33648 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33649 {
33650 if (rs6000_passes_vector)
33651 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33652 (TARGET_ALTIVEC_ABI ? 2 : 1));
33653 if (rs6000_returns_struct)
33654 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33655 aix_struct_return ? 2 : 1);
33656 }
33657 #endif
33658 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33659 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33660 file_end_indicate_exec_stack ();
33661 #endif
33662
33663 if (flag_split_stack)
33664 file_end_indicate_split_stack ();
33665
33666 if (cpu_builtin_p)
33667 {
33668 /* We have expanded a CPU builtin, so we need to emit a reference to
33669 the special symbol that LIBC uses to declare it supports the
33670 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33671 switch_to_section (data_section);
33672 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33673 fprintf (asm_out_file, "\t%s %s\n",
33674 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33675 }
33676 }
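
/* The .gnu_attribute 4 value computed above decodes as: bits 0-1 are 1
   for hard float and 2 for soft float; bits 2-3 are 1 for 128-bit IBM
   long double, 2 for 64-bit long double, and 3 for IEEE 128-bit long
   double.  */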
33677 #endif
33678
33679 #if TARGET_XCOFF
33680
33681 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33682 #define HAVE_XCOFF_DWARF_EXTRAS 0
33683 #endif
33684
33685 static enum unwind_info_type
33686 rs6000_xcoff_debug_unwind_info (void)
33687 {
33688 return UI_NONE;
33689 }
33690
33691 static void
33692 rs6000_xcoff_asm_output_anchor (rtx symbol)
33693 {
33694 char buffer[100];
33695
33696 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33697 SYMBOL_REF_BLOCK_OFFSET (symbol));
33698 fprintf (asm_out_file, "%s", SET_ASM_OP);
33699 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33700 fprintf (asm_out_file, ",");
33701 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33702 fprintf (asm_out_file, "\n");
33703 }
33704
33705 static void
33706 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33707 {
33708 fputs (GLOBAL_ASM_OP, stream);
33709 RS6000_OUTPUT_BASENAME (stream, name);
33710 putc ('\n', stream);
33711 }
33712
33713 /* A get_unnamed_decl callback, used for read-only sections. PTR
33714 points to the section string variable. */
33715
33716 static void
33717 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33718 {
33719 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33720 *(const char *const *) directive,
33721 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33722 }
33723
33724 /* Likewise for read-write sections. */
33725
33726 static void
33727 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33728 {
33729 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33730 *(const char *const *) directive,
33731 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33732 }
33733
33734 static void
33735 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33736 {
33737 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33738 *(const char *const *) directive,
33739 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33740 }
33741
33742 /* A get_unnamed_section callback, used for switching to toc_section. */
33743
33744 static void
33745 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33746 {
33747 if (TARGET_MINIMAL_TOC)
33748 {
33749 /* toc_section is always selected at least once from
33750 rs6000_xcoff_file_start, so this is guaranteed to
33751 always be defined once and only once in each file. */
33752 if (!toc_initialized)
33753 {
33754 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33755 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33756 toc_initialized = 1;
33757 }
33758 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33759 (TARGET_32BIT ? "" : ",3"));
33760 }
33761 else
33762 fputs ("\t.toc\n", asm_out_file);
33763 }
33764
33765 /* Implement TARGET_ASM_INIT_SECTIONS. */
33766
33767 static void
33768 rs6000_xcoff_asm_init_sections (void)
33769 {
33770 read_only_data_section
33771 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33772 &xcoff_read_only_section_name);
33773
33774 private_data_section
33775 = get_unnamed_section (SECTION_WRITE,
33776 rs6000_xcoff_output_readwrite_section_asm_op,
33777 &xcoff_private_data_section_name);
33778
33779 tls_data_section
33780 = get_unnamed_section (SECTION_TLS,
33781 rs6000_xcoff_output_tls_section_asm_op,
33782 &xcoff_tls_data_section_name);
33783
33784 tls_private_data_section
33785 = get_unnamed_section (SECTION_TLS,
33786 rs6000_xcoff_output_tls_section_asm_op,
33787 &xcoff_private_data_section_name);
33788
33789 read_only_private_data_section
33790 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33791 &xcoff_private_data_section_name);
33792
33793 toc_section
33794 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33795
33796 readonly_data_section = read_only_data_section;
33797 }
33798
33799 static int
33800 rs6000_xcoff_reloc_rw_mask (void)
33801 {
33802 return 3;
33803 }
33804
33805 static void
33806 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33807 tree decl ATTRIBUTE_UNUSED)
33808 {
33809 int smclass;
33810 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33811
33812 if (flags & SECTION_EXCLUDE)
33813 smclass = 4;
33814 else if (flags & SECTION_DEBUG)
33815 {
33816 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33817 return;
33818 }
33819 else if (flags & SECTION_CODE)
33820 smclass = 0;
33821 else if (flags & SECTION_TLS)
33822 smclass = 3;
33823 else if (flags & SECTION_WRITE)
33824 smclass = 2;
33825 else
33826 smclass = 1;
33827
33828 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33829 (flags & SECTION_CODE) ? "." : "",
33830 name, suffix[smclass], flags & SECTION_ENTSIZE);
33831 }
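
/* For example, a code section "foo" is emitted above as
   ".csect .foo[PR],2" and a TLS section as ".csect foo[TL],2", where
   the trailing number is the log2 alignment recorded in the
   SECTION_ENTSIZE bits by rs6000_xcoff_section_type_flags below.  */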
33832
33833 #define IN_NAMED_SECTION(DECL) \
33834 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33835 && DECL_SECTION_NAME (DECL) != NULL)
33836
33837 static section *
33838 rs6000_xcoff_select_section (tree decl, int reloc,
33839 unsigned HOST_WIDE_INT align)
33840 {
33841   /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33842      a named section.  */
33843 if (align > BIGGEST_ALIGNMENT)
33844 {
33845 resolve_unique_section (decl, reloc, true);
33846 if (IN_NAMED_SECTION (decl))
33847 return get_named_section (decl, NULL, reloc);
33848 }
33849
33850 if (decl_readonly_section (decl, reloc))
33851 {
33852 if (TREE_PUBLIC (decl))
33853 return read_only_data_section;
33854 else
33855 return read_only_private_data_section;
33856 }
33857 else
33858 {
33859 #if HAVE_AS_TLS
33860 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33861 {
33862 if (TREE_PUBLIC (decl))
33863 return tls_data_section;
33864 else if (bss_initializer_p (decl))
33865 {
33866 /* Convert to COMMON to emit in BSS. */
33867 DECL_COMMON (decl) = 1;
33868 return tls_comm_section;
33869 }
33870 else
33871 return tls_private_data_section;
33872 }
33873 else
33874 #endif
33875 if (TREE_PUBLIC (decl))
33876 return data_section;
33877 else
33878 return private_data_section;
33879 }
33880 }
33881
33882 static void
33883 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33884 {
33885 const char *name;
33886
33887 /* Use select_section for private data and uninitialized data with
33888 alignment <= BIGGEST_ALIGNMENT. */
33889 if (!TREE_PUBLIC (decl)
33890 || DECL_COMMON (decl)
33891 || (DECL_INITIAL (decl) == NULL_TREE
33892 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33893 || DECL_INITIAL (decl) == error_mark_node
33894 || (flag_zero_initialized_in_bss
33895 && initializer_zerop (DECL_INITIAL (decl))))
33896 return;
33897
33898 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33899 name = (*targetm.strip_name_encoding) (name);
33900 set_decl_section_name (decl, name);
33901 }
33902
33903 /* Select section for constant in constant pool.
33904
33905 On RS/6000, all constants are in the private read-only data area.
33906 However, if this is being placed in the TOC it must be output as a
33907 toc entry. */
33908
33909 static section *
33910 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33911 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33912 {
33913 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33914 return toc_section;
33915 else
33916 return read_only_private_data_section;
33917 }
33918
33919 /* Remove any trailing [DS] or the like from the symbol name. */
33920
33921 static const char *
33922 rs6000_xcoff_strip_name_encoding (const char *name)
33923 {
33924 size_t len;
33925 if (*name == '*')
33926 name++;
33927 len = strlen (name);
33928 if (name[len - 1] == ']')
33929 return ggc_alloc_string (name, len - 4);
33930 else
33931 return name;
33932 }
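
/* For example, both "foo[DS]" and "*foo[DS]" strip to "foo".  This
   relies on every XCOFF mapping-class suffix ("[DS]", "[RW]", "[TL]",
   ...) being exactly four characters long.  */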
33933
33934 /* Section attributes. AIX is always PIC. */
33935
33936 static unsigned int
33937 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33938 {
33939 unsigned int align;
33940 unsigned int flags = default_section_type_flags (decl, name, reloc);
33941
33942 /* Align to at least UNIT size. */
33943 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33944 align = MIN_UNITS_PER_WORD;
33945 else
33946 /* Increase alignment of large objects if not already stricter. */
33947 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33948 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33949 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33950
33951 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33952 }
33953
33954 /* Output at beginning of assembler file.
33955
33956 Initialize the section names for the RS/6000 at this point.
33957
33958 Specify filename, including full path, to assembler.
33959
33960 We want to go into the TOC section so at least one .toc will be emitted.
33961 Also, in order to output proper .bs/.es pairs, we need at least one static
33962 [RW] section emitted.
33963
33964 Finally, declare mcount when profiling to make the assembler happy. */
33965
33966 static void
33967 rs6000_xcoff_file_start (void)
33968 {
33969 rs6000_gen_section_name (&xcoff_bss_section_name,
33970 main_input_filename, ".bss_");
33971 rs6000_gen_section_name (&xcoff_private_data_section_name,
33972 main_input_filename, ".rw_");
33973 rs6000_gen_section_name (&xcoff_read_only_section_name,
33974 main_input_filename, ".ro_");
33975 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33976 main_input_filename, ".tls_");
33977 rs6000_gen_section_name (&xcoff_tbss_section_name,
33978 main_input_filename, ".tbss_[UL]");
33979
33980 fputs ("\t.file\t", asm_out_file);
33981 output_quoted_string (asm_out_file, main_input_filename);
33982 fputc ('\n', asm_out_file);
33983 if (write_symbols != NO_DEBUG)
33984 switch_to_section (private_data_section);
33985 switch_to_section (toc_section);
33986 switch_to_section (text_section);
33987 if (profile_flag)
33988 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33989 rs6000_file_start ();
33990 }
33991
33992 /* Output at end of assembler file.
33993 On the RS/6000, referencing data should automatically pull in text. */
33994
33995 static void
33996 rs6000_xcoff_file_end (void)
33997 {
33998 switch_to_section (text_section);
33999 fputs ("_section_.text:\n", asm_out_file);
34000 switch_to_section (data_section);
34001 fputs (TARGET_32BIT
34002 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
34003 asm_out_file);
34004 }
34005
34006 struct declare_alias_data
34007 {
34008 FILE *file;
34009 bool function_descriptor;
34010 };
34011
34012 /* Declare alias N.  A helper for symtab_node::call_for_symbol_and_aliases.  */
34013
34014 static bool
34015 rs6000_declare_alias (struct symtab_node *n, void *d)
34016 {
34017 struct declare_alias_data *data = (struct declare_alias_data *)d;
34018   /* The main symbol is output specially, because the varasm machinery does
34019      part of the job for us; we do not need to emit .globl/.lglobl here.  */
34020 if (!n->alias || n->weakref)
34021 return false;
34022
34023 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
34024 return false;
34025
34026 /* Prevent assemble_alias from trying to use .set pseudo operation
34027 that does not behave as expected by the middle-end. */
34028 TREE_ASM_WRITTEN (n->decl) = true;
34029
34030 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
34031 char *buffer = (char *) alloca (strlen (name) + 2);
34032 char *p;
34033 int dollar_inside = 0;
34034
34035 strcpy (buffer, name);
34036 p = strchr (buffer, '$');
34037 while (p) {
34038 *p = '_';
34039 dollar_inside++;
34040 p = strchr (p + 1, '$');
34041 }
34042 if (TREE_PUBLIC (n->decl))
34043 {
34044 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
34045 {
34046 if (dollar_inside) {
34047 if (data->function_descriptor)
34048 	    fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34049 	  fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34050 }
34051 if (data->function_descriptor)
34052 {
34053 fputs ("\t.globl .", data->file);
34054 RS6000_OUTPUT_BASENAME (data->file, buffer);
34055 putc ('\n', data->file);
34056 }
34057 fputs ("\t.globl ", data->file);
34058 RS6000_OUTPUT_BASENAME (data->file, buffer);
34059 putc ('\n', data->file);
34060 }
34061 #ifdef ASM_WEAKEN_DECL
34062 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34063 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34064 #endif
34065 }
34066 else
34067 {
34068 if (dollar_inside)
34069 {
34070 if (data->function_descriptor)
34071 	    fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34072 	  fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34073 }
34074 if (data->function_descriptor)
34075 {
34076 fputs ("\t.lglobl .", data->file);
34077 RS6000_OUTPUT_BASENAME (data->file, buffer);
34078 putc ('\n', data->file);
34079 }
34080 fputs ("\t.lglobl ", data->file);
34081 RS6000_OUTPUT_BASENAME (data->file, buffer);
34082 putc ('\n', data->file);
34083 }
34084 if (data->function_descriptor)
34085 fputs (".", data->file);
34086 RS6000_OUTPUT_BASENAME (data->file, buffer);
34087 fputs (":\n", data->file);
34088 return false;
34089 }
34090
34091
34092 #ifdef HAVE_GAS_HIDDEN
34093 /* Helper function to calculate visibility of a DECL
34094 and return the value as a const string. */
34095
34096 static const char *
34097 rs6000_xcoff_visibility (tree decl)
34098 {
34099 static const char * const visibility_types[] = {
34100 "", ",protected", ",hidden", ",internal"
34101 };
34102
34103 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34104 return visibility_types[vis];
34105 }
34106 #endif
34107
34108
34109 /* This macro produces the initial definition of a function name.
34110 On the RS/6000, we need to place an extra '.' in the function name and
34111 output the function descriptor.
34112 Dollar signs are converted to underscores.
34113
34114 The csect for the function will have already been created when
34115 text_section was selected. We do have to go back to that csect, however.
34116
34117    The third and fourth parameters to the .function pseudo-op (2 and 0 in
34118    the output below) are placeholders which no longer have any use.
34119
34120 Because AIX assembler's .set command has unexpected semantics, we output
34121 all aliases as alternative labels in front of the definition. */
34122
34123 void
34124 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34125 {
34126 char *buffer = (char *) alloca (strlen (name) + 1);
34127 char *p;
34128 int dollar_inside = 0;
34129 struct declare_alias_data data = {file, false};
34130
34131 strcpy (buffer, name);
34132 p = strchr (buffer, '$');
34133 while (p) {
34134 *p = '_';
34135 dollar_inside++;
34136 p = strchr (p + 1, '$');
34137 }
34138 if (TREE_PUBLIC (decl))
34139 {
34140 if (!RS6000_WEAK || !DECL_WEAK (decl))
34141 {
34142 if (dollar_inside) {
34143 	  fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
34144 	  fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
34145 }
34146 fputs ("\t.globl .", file);
34147 RS6000_OUTPUT_BASENAME (file, buffer);
34148 #ifdef HAVE_GAS_HIDDEN
34149 fputs (rs6000_xcoff_visibility (decl), file);
34150 #endif
34151 putc ('\n', file);
34152 }
34153 }
34154 else
34155 {
34156 if (dollar_inside) {
34157 	fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
34158 	fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
34159 }
34160 fputs ("\t.lglobl .", file);
34161 RS6000_OUTPUT_BASENAME (file, buffer);
34162 putc ('\n', file);
34163 }
34164 fputs ("\t.csect ", file);
34165 RS6000_OUTPUT_BASENAME (file, buffer);
34166 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34167 RS6000_OUTPUT_BASENAME (file, buffer);
34168 fputs (":\n", file);
34169 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34170 &data, true);
34171 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34172 RS6000_OUTPUT_BASENAME (file, buffer);
34173 fputs (", TOC[tc0], 0\n", file);
34174 in_section = NULL;
34175 switch_to_section (function_section (decl));
34176 putc ('.', file);
34177 RS6000_OUTPUT_BASENAME (file, buffer);
34178 fputs (":\n", file);
34179 data.function_descriptor = true;
34180 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34181 &data, true);
34182 if (!DECL_IGNORED_P (decl))
34183 {
34184 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34185 xcoffout_declare_function (file, decl, buffer);
34186 else if (write_symbols == DWARF2_DEBUG)
34187 {
34188 name = (*targetm.strip_name_encoding) (name);
34189 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34190 }
34191 }
34192 return;
34193 }
34194
34195
34196 /* Output assembly language to globalize a symbol from a DECL,
34197 possibly with visibility. */
34198
34199 void
34200 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34201 {
34202 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34203 fputs (GLOBAL_ASM_OP, stream);
34204 RS6000_OUTPUT_BASENAME (stream, name);
34205 #ifdef HAVE_GAS_HIDDEN
34206 fputs (rs6000_xcoff_visibility (decl), stream);
34207 #endif
34208 putc ('\n', stream);
34209 }
34210
34211 /* Output assembly language to define a symbol as COMMON from a DECL,
34212 possibly with visibility. */
34213
34214 void
34215 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34216 tree decl ATTRIBUTE_UNUSED,
34217 const char *name,
34218 unsigned HOST_WIDE_INT size,
34219 unsigned HOST_WIDE_INT align)
34220 {
34221 unsigned HOST_WIDE_INT align2 = 2;
34222
34223 if (align > 32)
34224 align2 = floor_log2 (align / BITS_PER_UNIT);
34225 else if (size > 4)
34226 align2 = 3;
34227
34228 fputs (COMMON_ASM_OP, stream);
34229 RS6000_OUTPUT_BASENAME (stream, name);
34230
34231 fprintf (stream,
34232 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34233 size, align2);
34234
34235 #ifdef HAVE_GAS_HIDDEN
34236 if (decl != NULL)
34237 fputs (rs6000_xcoff_visibility (decl), stream);
34238 #endif
34239 putc ('\n', stream);
34240 }
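
/* As an example of the encoding above, a 16-byte object with 128-bit
   alignment is emitted as ".comm name,16,4" (log2 of its 16-byte
   alignment), while a 4-byte object with default alignment gets
   ",4,2".  */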
34241
34242 /* This macro produces the initial definition of an object (variable) name.
34243 Because AIX assembler's .set command has unexpected semantics, we output
34244 all aliases as alternative labels in front of the definition. */
34245
34246 void
34247 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34248 {
34249 struct declare_alias_data data = {file, false};
34250 RS6000_OUTPUT_BASENAME (file, name);
34251 fputs (":\n", file);
34252 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34253 &data, true);
34254 }
34255
34256 /* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'. */
34257
34258 void
34259 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34260 {
34261 fputs (integer_asm_op (size, FALSE), file);
34262 assemble_name (file, label);
34263 fputs ("-$", file);
34264 }
34265
34266 /* Output a symbol offset relative to the dbase for the current object.
34267 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34268 signed offsets.
34269
34270 __gcc_unwind_dbase is embedded in all executables/libraries through
34271 libgcc/config/rs6000/crtdbase.S. */
34272
34273 void
34274 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34275 {
34276 fputs (integer_asm_op (size, FALSE), file);
34277 assemble_name (file, label);
34278 fputs("-__gcc_unwind_dbase", file);
34279 }
34280
34281 #ifdef HAVE_AS_TLS
34282 static void
34283 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34284 {
34285 rtx symbol;
34286 int flags;
34287 const char *symname;
34288
34289 default_encode_section_info (decl, rtl, first);
34290
34291 /* Careful not to prod global register variables. */
34292 if (!MEM_P (rtl))
34293 return;
34294 symbol = XEXP (rtl, 0);
34295 if (GET_CODE (symbol) != SYMBOL_REF)
34296 return;
34297
34298 flags = SYMBOL_REF_FLAGS (symbol);
34299
34300 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34301 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34302
34303 SYMBOL_REF_FLAGS (symbol) = flags;
34304
34305 /* Append mapping class to extern decls. */
34306 symname = XSTR (symbol, 0);
34307 if (decl /* sync condition with assemble_external () */
34308 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34309 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34310 || TREE_CODE (decl) == FUNCTION_DECL)
34311 && symname[strlen (symname) - 1] != ']')
34312 {
34313 char *newname = (char *) alloca (strlen (symname) + 5);
34314 strcpy (newname, symname);
34315 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34316 ? "[DS]" : "[UA]"));
34317 XSTR (symbol, 0) = ggc_strdup (newname);
34318 }
34319 }
34320 #endif /* HAVE_AS_TLS */
34321 #endif /* TARGET_XCOFF */
34322
34323 void
34324 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34325 const char *name, const char *val)
34326 {
34327 fputs ("\t.weak\t", stream);
34328 RS6000_OUTPUT_BASENAME (stream, name);
34329 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34330 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34331 {
34332 if (TARGET_XCOFF)
34333 fputs ("[DS]", stream);
34334 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34335 if (TARGET_XCOFF)
34336 fputs (rs6000_xcoff_visibility (decl), stream);
34337 #endif
34338 fputs ("\n\t.weak\t.", stream);
34339 RS6000_OUTPUT_BASENAME (stream, name);
34340 }
34341 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34342 if (TARGET_XCOFF)
34343 fputs (rs6000_xcoff_visibility (decl), stream);
34344 #endif
34345 fputc ('\n', stream);
34346 if (val)
34347 {
34348 #ifdef ASM_OUTPUT_DEF
34349 ASM_OUTPUT_DEF (stream, name, val);
34350 #endif
34351 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34352 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34353 {
34354 fputs ("\t.set\t.", stream);
34355 RS6000_OUTPUT_BASENAME (stream, name);
34356 fputs (",.", stream);
34357 RS6000_OUTPUT_BASENAME (stream, val);
34358 fputc ('\n', stream);
34359 }
34360 }
34361 }
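
/* In sketch form, for an AIX function foo aliased to bar with
   dot-symbols in use, the above emits:

	.weak foo[DS]
	.weak .foo
	.set foo,bar		(via ASM_OUTPUT_DEF)
	.set .foo,.bar  */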
34362
34363
34364 /* Return true if INSN should not be copied. */
34365
34366 static bool
34367 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34368 {
34369 return recog_memoized (insn) >= 0
34370 && get_attr_cannot_copy (insn);
34371 }
34372
34373 /* Compute a (partial) cost for rtx X. Return true if the complete
34374 cost has been computed, and false if subexpressions should be
34375 scanned. In either case, *TOTAL contains the cost result. */
34376
34377 static bool
34378 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34379 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34380 {
34381 int code = GET_CODE (x);
34382
34383 switch (code)
34384 {
34385 /* On the RS/6000, if it is valid in the insn, it is free. */
34386 case CONST_INT:
34387 if (((outer_code == SET
34388 || outer_code == PLUS
34389 || outer_code == MINUS)
34390 && (satisfies_constraint_I (x)
34391 || satisfies_constraint_L (x)))
34392 || (outer_code == AND
34393 && (satisfies_constraint_K (x)
34394 || (mode == SImode
34395 ? satisfies_constraint_L (x)
34396 : satisfies_constraint_J (x))))
34397 || ((outer_code == IOR || outer_code == XOR)
34398 && (satisfies_constraint_K (x)
34399 || (mode == SImode
34400 ? satisfies_constraint_L (x)
34401 : satisfies_constraint_J (x))))
34402 || outer_code == ASHIFT
34403 || outer_code == ASHIFTRT
34404 || outer_code == LSHIFTRT
34405 || outer_code == ROTATE
34406 || outer_code == ROTATERT
34407 || outer_code == ZERO_EXTRACT
34408 || (outer_code == MULT
34409 && satisfies_constraint_I (x))
34410 || ((outer_code == DIV || outer_code == UDIV
34411 || outer_code == MOD || outer_code == UMOD)
34412 && exact_log2 (INTVAL (x)) >= 0)
34413 || (outer_code == COMPARE
34414 && (satisfies_constraint_I (x)
34415 || satisfies_constraint_K (x)))
34416 || ((outer_code == EQ || outer_code == NE)
34417 && (satisfies_constraint_I (x)
34418 || satisfies_constraint_K (x)
34419 || (mode == SImode
34420 ? satisfies_constraint_L (x)
34421 : satisfies_constraint_J (x))))
34422 || (outer_code == GTU
34423 && satisfies_constraint_I (x))
34424 || (outer_code == LTU
34425 && satisfies_constraint_P (x)))
34426 {
34427 *total = 0;
34428 return true;
34429 }
34430 else if ((outer_code == PLUS
34431 && reg_or_add_cint_operand (x, VOIDmode))
34432 || (outer_code == MINUS
34433 && reg_or_sub_cint_operand (x, VOIDmode))
34434 || ((outer_code == SET
34435 || outer_code == IOR
34436 || outer_code == XOR)
34437 && (INTVAL (x)
34438 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34439 {
34440 *total = COSTS_N_INSNS (1);
34441 return true;
34442 }
34443 /* FALLTHRU */
34444
34445 case CONST_DOUBLE:
34446 case CONST_WIDE_INT:
34447 case CONST:
34448 case HIGH:
34449 case SYMBOL_REF:
34450 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34451 return true;
34452
34453 case MEM:
34454 /* When optimizing for size, MEM should be slightly more expensive
34455 than generating the address, e.g., (plus (reg) (const)).
34456 L1 cache latency is about two instructions. */
34457 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34458 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34459 *total += COSTS_N_INSNS (100);
34460 return true;
34461
34462 case LABEL_REF:
34463 *total = 0;
34464 return true;
34465
34466 case PLUS:
34467 case MINUS:
34468 if (FLOAT_MODE_P (mode))
34469 *total = rs6000_cost->fp;
34470 else
34471 *total = COSTS_N_INSNS (1);
34472 return false;
34473
34474 case MULT:
34475 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34476 && satisfies_constraint_I (XEXP (x, 1)))
34477 {
34478 if (INTVAL (XEXP (x, 1)) >= -256
34479 && INTVAL (XEXP (x, 1)) <= 255)
34480 *total = rs6000_cost->mulsi_const9;
34481 else
34482 *total = rs6000_cost->mulsi_const;
34483 }
34484 else if (mode == SFmode)
34485 *total = rs6000_cost->fp;
34486 else if (FLOAT_MODE_P (mode))
34487 *total = rs6000_cost->dmul;
34488 else if (mode == DImode)
34489 *total = rs6000_cost->muldi;
34490 else
34491 *total = rs6000_cost->mulsi;
34492 return false;
34493
34494 case FMA:
34495 if (mode == SFmode)
34496 *total = rs6000_cost->fp;
34497 else
34498 *total = rs6000_cost->dmul;
34499 break;
34500
34501 case DIV:
34502 case MOD:
34503 if (FLOAT_MODE_P (mode))
34504 {
34505 *total = mode == DFmode ? rs6000_cost->ddiv
34506 : rs6000_cost->sdiv;
34507 return false;
34508 }
34509 /* FALLTHRU */
34510
34511 case UDIV:
34512 case UMOD:
34513 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34514 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34515 {
34516 if (code == DIV || code == MOD)
34517 /* Shift, addze */
34518 *total = COSTS_N_INSNS (2);
34519 else
34520 /* Shift */
34521 *total = COSTS_N_INSNS (1);
34522 }
34523 else
34524 {
34525 if (GET_MODE (XEXP (x, 1)) == DImode)
34526 *total = rs6000_cost->divdi;
34527 else
34528 *total = rs6000_cost->divsi;
34529 }
34530 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34531 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34532 *total += COSTS_N_INSNS (2);
34533 return false;
34534
34535 case CTZ:
34536 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34537 return false;
34538
34539 case FFS:
34540 *total = COSTS_N_INSNS (4);
34541 return false;
34542
34543 case POPCOUNT:
34544 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34545 return false;
34546
34547 case PARITY:
34548 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34549 return false;
34550
34551 case NOT:
34552 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34553 *total = 0;
34554 else
34555 *total = COSTS_N_INSNS (1);
34556 return false;
34557
34558 case AND:
34559 if (CONST_INT_P (XEXP (x, 1)))
34560 {
34561 rtx left = XEXP (x, 0);
34562 rtx_code left_code = GET_CODE (left);
34563
34564 /* rotate-and-mask: 1 insn. */
34565 if ((left_code == ROTATE
34566 || left_code == ASHIFT
34567 || left_code == LSHIFTRT)
34568 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34569 {
34570 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34571 if (!CONST_INT_P (XEXP (left, 1)))
34572 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34573 *total += COSTS_N_INSNS (1);
34574 return true;
34575 }
34576
34577 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34578 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34579 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34580 || (val & 0xffff) == val
34581 || (val & 0xffff0000) == val
34582 || ((val & 0xffff) == 0 && mode == SImode))
34583 {
34584 *total = rtx_cost (left, mode, AND, 0, speed);
34585 *total += COSTS_N_INSNS (1);
34586 return true;
34587 }
34588
34589 /* 2 insns. */
34590 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34591 {
34592 *total = rtx_cost (left, mode, AND, 0, speed);
34593 *total += COSTS_N_INSNS (2);
34594 return true;
34595 }
34596 }
34597
34598 *total = COSTS_N_INSNS (1);
34599 return false;
34600
34601 case IOR:
34602 /* FIXME */
34603 *total = COSTS_N_INSNS (1);
34604 return true;
34605
34606 case CLZ:
34607 case XOR:
34608 case ZERO_EXTRACT:
34609 *total = COSTS_N_INSNS (1);
34610 return false;
34611
34612 case ASHIFT:
34613 /* EXTSWSLI combines a sign extension and a shift in a single
34614 instruction, so don't count the sign extend and the shift separately. */
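/* A sketch of the RTL shape matched here:
     (ashift:DI (sign_extend:DI (reg:SI)) (const_int N))
   which the extswsli instruction implements in one operation.  */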
34615 if (TARGET_EXTSWSLI && mode == DImode
34616 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34617 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34618 {
34619 *total = 0;
34620 return false;
34621 }
34622 /* fall through */
34623
34624 case ASHIFTRT:
34625 case LSHIFTRT:
34626 case ROTATE:
34627 case ROTATERT:
34628 /* Handle mul_highpart. */
34629 if (outer_code == TRUNCATE
34630 && GET_CODE (XEXP (x, 0)) == MULT)
34631 {
34632 if (mode == DImode)
34633 *total = rs6000_cost->muldi;
34634 else
34635 *total = rs6000_cost->mulsi;
34636 return true;
34637 }
34638 else if (outer_code == AND)
34639 *total = 0;
34640 else
34641 *total = COSTS_N_INSNS (1);
34642 return false;
34643
34644 case SIGN_EXTEND:
34645 case ZERO_EXTEND:
34646 if (GET_CODE (XEXP (x, 0)) == MEM)
34647 *total = 0;
34648 else
34649 *total = COSTS_N_INSNS (1);
34650 return false;
34651
34652 case COMPARE:
34653 case NEG:
34654 case ABS:
34655 if (!FLOAT_MODE_P (mode))
34656 {
34657 *total = COSTS_N_INSNS (1);
34658 return false;
34659 }
34660 /* FALLTHRU */
34661
34662 case FLOAT:
34663 case UNSIGNED_FLOAT:
34664 case FIX:
34665 case UNSIGNED_FIX:
34666 case FLOAT_TRUNCATE:
34667 *total = rs6000_cost->fp;
34668 return false;
34669
34670 case FLOAT_EXTEND:
34671 if (mode == DFmode)
34672 *total = rs6000_cost->sfdf_convert;
34673 else
34674 *total = rs6000_cost->fp;
34675 return false;
34676
34677 case UNSPEC:
34678 switch (XINT (x, 1))
34679 {
34680 case UNSPEC_FRSP:
34681 *total = rs6000_cost->fp;
34682 return true;
34683
34684 default:
34685 break;
34686 }
34687 break;
34688
34689 case CALL:
34690 case IF_THEN_ELSE:
34691 if (!speed)
34692 {
34693 *total = COSTS_N_INSNS (1);
34694 return true;
34695 }
34696 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34697 {
34698 *total = rs6000_cost->fp;
34699 return false;
34700 }
34701 break;
34702
34703 case NE:
34704 case EQ:
34705 case GTU:
34706 case LTU:
34707 /* Carry bit requires mode == Pmode.
34708 NEG or PLUS already counted so only add one. */
34709 if (mode == Pmode
34710 && (outer_code == NEG || outer_code == PLUS))
34711 {
34712 *total = COSTS_N_INSNS (1);
34713 return true;
34714 }
34715 /* FALLTHRU */
34716
34717 case GT:
34718 case LT:
34719 case UNORDERED:
34720 if (outer_code == SET)
34721 {
34722 if (XEXP (x, 1) == const0_rtx)
34723 {
34724 *total = COSTS_N_INSNS (2);
34725 return true;
34726 }
34727 else
34728 {
34729 *total = COSTS_N_INSNS (3);
34730 return false;
34731 }
34732 }
34733 /* CC COMPARE. */
34734 if (outer_code == COMPARE)
34735 {
34736 *total = 0;
34737 return true;
34738 }
34739 break;
34740
34741 default:
34742 break;
34743 }
34744
34745 return false;
34746 }
34747
34748 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34749
34750 static bool
34751 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34752 int opno, int *total, bool speed)
34753 {
34754 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34755
34756 fprintf (stderr,
34757 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34758 "opno = %d, total = %d, speed = %s, x:\n",
34759 ret ? "complete" : "scan inner",
34760 GET_MODE_NAME (mode),
34761 GET_RTX_NAME (outer_code),
34762 opno,
34763 *total,
34764 speed ? "true" : "false");
34765
34766 debug_rtx (x);
34767
34768 return ret;
34769 }
34770
34771 static int
34772 rs6000_insn_cost (rtx_insn *insn, bool speed)
34773 {
34774 if (recog_memoized (insn) < 0)
34775 return 0;
34776
34777 if (!speed)
34778 return get_attr_length (insn);
34779
34780 int cost = get_attr_cost (insn);
34781 if (cost > 0)
34782 return cost;
34783
34784 int n = get_attr_length (insn) / 4;
34785 enum attr_type type = get_attr_type (insn);
34786
34787 switch (type)
34788 {
34789 case TYPE_LOAD:
34790 case TYPE_FPLOAD:
34791 case TYPE_VECLOAD:
34792 cost = COSTS_N_INSNS (n + 1);
34793 break;
34794
34795 case TYPE_MUL:
34796 switch (get_attr_size (insn))
34797 {
34798 case SIZE_8:
34799 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34800 break;
34801 case SIZE_16:
34802 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34803 break;
34804 case SIZE_32:
34805 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34806 break;
34807 case SIZE_64:
34808 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34809 break;
34810 default:
34811 gcc_unreachable ();
34812 }
34813 break;
34814 case TYPE_DIV:
34815 switch (get_attr_size (insn))
34816 {
34817 case SIZE_32:
34818 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34819 break;
34820 case SIZE_64:
34821 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34822 break;
34823 default:
34824 gcc_unreachable ();
34825 }
34826 break;
34827
34828 case TYPE_FP:
34829 cost = n * rs6000_cost->fp;
34830 break;
34831 case TYPE_DMUL:
34832 cost = n * rs6000_cost->dmul;
34833 break;
34834 case TYPE_SDIV:
34835 cost = n * rs6000_cost->sdiv;
34836 break;
34837 case TYPE_DDIV:
34838 cost = n * rs6000_cost->ddiv;
34839 break;
34840
34841 case TYPE_SYNC:
34842 case TYPE_LOAD_L:
34843 case TYPE_MFCR:
34844 case TYPE_MFCRF:
34845 cost = COSTS_N_INSNS (n + 2);
34846 break;
34847
34848 default:
34849 cost = COSTS_N_INSNS (n);
34850 }
34851
34852 return cost;
34853 }
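/* Illustration of rs6000_insn_cost above (added for exposition): an
   8-byte TYPE_LOAD insn has n = 2 and is costed COSTS_N_INSNS (3),
   folding the extra L1 load latency into the length-based cost.  */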
34854
34855 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34856
34857 static int
34858 rs6000_debug_address_cost (rtx x, machine_mode mode,
34859 addr_space_t as, bool speed)
34860 {
34861 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34862
34863 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34864 ret, speed ? "true" : "false");
34865 debug_rtx (x);
34866
34867 return ret;
34868 }
34869
34870
34871 /* A C expression returning the cost of moving data from a register of class
34872 CLASS1 to one of CLASS2. */
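/* For example, with the logic below a TImode move between GPRs on a
   64-bit target spans two registers, so it costs
   2 * hard_regno_nregs (0, TImode) == 4.  (Worked example added for
   exposition.)  */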
34873
34874 static int
34875 rs6000_register_move_cost (machine_mode mode,
34876 reg_class_t from, reg_class_t to)
34877 {
34878 int ret;
34879
34880 if (TARGET_DEBUG_COST)
34881 dbg_cost_ctrl++;
34882
34883 /* Moves from/to GENERAL_REGS. */
34884 if (reg_classes_intersect_p (to, GENERAL_REGS)
34885 || reg_classes_intersect_p (from, GENERAL_REGS))
34886 {
34887 reg_class_t rclass = from;
34888
34889 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34890 rclass = to;
34891
34892 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34893 ret = (rs6000_memory_move_cost (mode, rclass, false)
34894 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34895
34896 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34897 shift. */
34898 else if (rclass == CR_REGS)
34899 ret = 4;
34900
34901 /* For those processors that have slow LR/CTR moves, make them more
34902 expensive than memory in order to bias spills to memory. */
34903 else if ((rs6000_tune == PROCESSOR_POWER6
34904 || rs6000_tune == PROCESSOR_POWER7
34905 || rs6000_tune == PROCESSOR_POWER8
34906 || rs6000_tune == PROCESSOR_POWER9)
34907 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34908 ret = 6 * hard_regno_nregs (0, mode);
34909
34910 else
34911 /* A move will cost one instruction per GPR moved. */
34912 ret = 2 * hard_regno_nregs (0, mode);
34913 }
34914
34915 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34916 else if (VECTOR_MEM_VSX_P (mode)
34917 && reg_classes_intersect_p (to, VSX_REGS)
34918 && reg_classes_intersect_p (from, VSX_REGS))
34919 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34920
34921 /* Moving between two similar registers is just one instruction. */
34922 else if (reg_classes_intersect_p (to, from))
34923 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34924
34925 /* Everything else has to go through GENERAL_REGS. */
34926 else
34927 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34928 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34929
34930 if (TARGET_DEBUG_COST)
34931 {
34932 if (dbg_cost_ctrl == 1)
34933 fprintf (stderr,
34934 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34935 ret, GET_MODE_NAME (mode), reg_class_names[from],
34936 reg_class_names[to]);
34937 dbg_cost_ctrl--;
34938 }
34939
34940 return ret;
34941 }
34942
34943 /* A C expression returning the cost of moving data of MODE from a register to
34944 or from memory. */
34945
34946 static int
34947 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34948 bool in ATTRIBUTE_UNUSED)
34949 {
34950 int ret;
34951
34952 if (TARGET_DEBUG_COST)
34953 dbg_cost_ctrl++;
34954
34955 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34956 ret = 4 * hard_regno_nregs (0, mode);
34957 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34958 || reg_classes_intersect_p (rclass, VSX_REGS)))
34959 ret = 4 * hard_regno_nregs (32, mode);
34960 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34961 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34962 else
34963 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34964
34965 if (TARGET_DEBUG_COST)
34966 {
34967 if (dbg_cost_ctrl == 1)
34968 fprintf (stderr,
34969 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34970 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34971 dbg_cost_ctrl--;
34972 }
34973
34974 return ret;
34975 }
34976
34977 /* Returns a code for a target-specific builtin that implements
34978 reciprocal of the function, or NULL_TREE if not available. */
34979
34980 static tree
34981 rs6000_builtin_reciprocal (tree fndecl)
34982 {
34983 switch (DECL_FUNCTION_CODE (fndecl))
34984 {
34985 case VSX_BUILTIN_XVSQRTDP:
34986 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34987 return NULL_TREE;
34988
34989 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34990
34991 case VSX_BUILTIN_XVSQRTSP:
34992 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34993 return NULL_TREE;
34994
34995 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34996
34997 default:
34998 return NULL_TREE;
34999 }
35000 }
35001
35002 /* Load up a constant. If the mode is a vector mode, splat the value across
35003 all of the vector elements. */
35004
35005 static rtx
35006 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
35007 {
35008 rtx reg;
35009
35010 if (mode == SFmode || mode == DFmode)
35011 {
35012 rtx d = const_double_from_real_value (dconst, mode);
35013 reg = force_reg (mode, d);
35014 }
35015 else if (mode == V4SFmode)
35016 {
35017 rtx d = const_double_from_real_value (dconst, SFmode);
35018 rtvec v = gen_rtvec (4, d, d, d, d);
35019 reg = gen_reg_rtx (mode);
35020 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35021 }
35022 else if (mode == V2DFmode)
35023 {
35024 rtx d = const_double_from_real_value (dconst, DFmode);
35025 rtvec v = gen_rtvec (2, d, d);
35026 reg = gen_reg_rtx (mode);
35027 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35028 }
35029 else
35030 gcc_unreachable ();
35031
35032 return reg;
35033 }
35034
35035 /* Generate an FMA instruction. */
35036
35037 static void
35038 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
35039 {
35040 machine_mode mode = GET_MODE (target);
35041 rtx dst;
35042
35043 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
35044 gcc_assert (dst != NULL);
35045
35046 if (dst != target)
35047 emit_move_insn (target, dst);
35048 }
35049
35050 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35051
35052 static void
35053 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35054 {
35055 machine_mode mode = GET_MODE (dst);
35056 rtx r;
35057
35058 /* This is a tad more complicated, since the fnma_optab is for
35059 a different expression: fma(-m1, m2, a), which is the same
35060 thing except in the case of signed zeros.
35061
35062 Fortunately we know that if FMA is supported, then FNMSUB is
35063 also supported in the ISA. Just expand it directly. */
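/* Concretely: -fma (m1, m2, -a) = -(m1*m2 - a) = a - m1*m2, and
   fnma's fma (-m1, m2, a) computes the same value; the two differ
   only in the sign of a zero result when m1*m2 == a exactly.  */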
35064
35065 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35066
35067 r = gen_rtx_NEG (mode, a);
35068 r = gen_rtx_FMA (mode, m1, m2, r);
35069 r = gen_rtx_NEG (mode, r);
35070 emit_insn (gen_rtx_SET (dst, r));
35071 }
35072
35073 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35074 add a reg_note saying that this was a division. Support both scalar and
35075 vector divide. Assumes no trapping math and finite arguments. */
35076
35077 void
35078 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35079 {
35080 machine_mode mode = GET_MODE (dst);
35081 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35082 int i;
35083
35084 /* Low precision estimates guarantee 5 bits of accuracy. High
35085 precision estimates guarantee 14 bits of accuracy. SFmode
35086 requires 23 bits of accuracy. DFmode requires 52 bits of
35087 accuracy. Each pass at least doubles the accuracy, leading
35088 to the following. */
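/* Worked arithmetic (for exposition): from 5 bits, three passes give
   5 -> 10 -> 20 -> 40 bits (>= 23 for SFmode) and four give 80
   (>= 52 for DFmode); from 14 bits, one pass gives 28 and two give
   56.  This matches the pass counts computed below.  */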
35089 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35090 if (mode == DFmode || mode == V2DFmode)
35091 passes++;
35092
35093 enum insn_code code = optab_handler (smul_optab, mode);
35094 insn_gen_fn gen_mul = GEN_FCN (code);
35095
35096 gcc_assert (code != CODE_FOR_nothing);
35097
35098 one = rs6000_load_constant_and_splat (mode, dconst1);
35099
35100 /* x0 = 1./d estimate */
35101 x0 = gen_reg_rtx (mode);
35102 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35103 UNSPEC_FRES)));
35104
35105 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35106 if (passes > 1)
35107 {
35108 /* e0 = 1. - d * x0 */
35109 e0 = gen_reg_rtx (mode);
35110 rs6000_emit_nmsub (e0, d, x0, one);
35111 
35112 /* x1 = x0 + e0 * x0 */
35113 x1 = gen_reg_rtx (mode);
35114 rs6000_emit_madd (x1, e0, x0, x0);
35115 
35116 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35117 ++i, xprev = xnext, eprev = enext)
35118 {
35119 /* enext = eprev * eprev */
35120 enext = gen_reg_rtx (mode);
35121 emit_insn (gen_mul (enext, eprev, eprev));
35122 
35123 /* xnext = xprev + enext * xprev */
35124 xnext = gen_reg_rtx (mode);
35125 rs6000_emit_madd (xnext, enext, xprev, xprev);
35126 }
35127 }
35128 else
35129 xprev = x0;
35130
35131 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
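/* Expanding the steps below: u = n*x, v = n - d*u, and
   dst = v*x + u = n*x + (n - d*n*x)*x = n*x*(2 - d*x), as claimed.  */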
35132
35133 /* u = n * xprev */
35134 u = gen_reg_rtx (mode);
35135 emit_insn (gen_mul (u, n, xprev));
35136
35137 /* v = n - (d * u) */
35138 v = gen_reg_rtx (mode);
35139 rs6000_emit_nmsub (v, d, u, n);
35140
35141 /* dst = (v * xprev) + u */
35142 rs6000_emit_madd (dst, v, xprev, u);
35143
35144 if (note_p)
35145 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
35146 }
35147
35148 /* Goldschmidt's Algorithm for single/double-precision floating point
35149 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
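/* In outline (a sketch of the recurrence used below): from an
   estimate e ~= 1/sqrt(src), set g = e*src ~= sqrt(src) and
   h = e/2 ~= 1/(2*sqrt(src)).  Each pass computes t = 1/2 - g*h and
   refines g' = g + g*t, h' = h + h*t; for rsqrt the result is 2*h.  */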
35150
35151 void
35152 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35153 {
35154 machine_mode mode = GET_MODE (src);
35155 rtx e = gen_reg_rtx (mode);
35156 rtx g = gen_reg_rtx (mode);
35157 rtx h = gen_reg_rtx (mode);
35158
35159 /* Low precision estimates guarantee 5 bits of accuracy. High
35160 precision estimates guarantee 14 bits of accuracy. SFmode
35161 requires 23 bits of accuracy. DFmode requires 52 bits of
35162 accuracy. Each pass at least doubles the accuracy, leading
35163 to the following. */
35164 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35165 if (mode == DFmode || mode == V2DFmode)
35166 passes++;
35167
35168 int i;
35169 rtx mhalf;
35170 enum insn_code code = optab_handler (smul_optab, mode);
35171 insn_gen_fn gen_mul = GEN_FCN (code);
35172
35173 gcc_assert (code != CODE_FOR_nothing);
35174
35175 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35176
35177 /* e = rsqrt estimate */
35178 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35179 UNSPEC_RSQRT)));
35180
35181 /* If src == 0.0, zero the infinite rsqrt estimate to prevent a NaN for sqrt(0.0). */
35182 if (!recip)
35183 {
35184 rtx zero = force_reg (mode, CONST0_RTX (mode));
35185
35186 if (mode == SFmode)
35187 {
35188 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35189 e, zero, mode, 0);
35190 if (target != e)
35191 emit_move_insn (e, target);
35192 }
35193 else
35194 {
35195 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35196 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35197 }
35198 }
35199
35200 /* g = sqrt estimate. */
35201 emit_insn (gen_mul (g, e, src));
35202 /* h = 1/(2*sqrt) estimate. */
35203 emit_insn (gen_mul (h, e, mhalf));
35204
35205 if (recip)
35206 {
35207 if (passes == 1)
35208 {
35209 rtx t = gen_reg_rtx (mode);
35210 rs6000_emit_nmsub (t, g, h, mhalf);
35211 /* Apply correction directly to 1/rsqrt estimate. */
35212 rs6000_emit_madd (dst, e, t, e);
35213 }
35214 else
35215 {
35216 for (i = 0; i < passes; i++)
35217 {
35218 rtx t1 = gen_reg_rtx (mode);
35219 rtx g1 = gen_reg_rtx (mode);
35220 rtx h1 = gen_reg_rtx (mode);
35221
35222 rs6000_emit_nmsub (t1, g, h, mhalf);
35223 rs6000_emit_madd (g1, g, t1, g);
35224 rs6000_emit_madd (h1, h, t1, h);
35225
35226 g = g1;
35227 h = h1;
35228 }
35229 /* Multiply by 2 for 1/rsqrt. */
35230 emit_insn (gen_add3_insn (dst, h, h));
35231 }
35232 }
35233 else
35234 {
35235 rtx t = gen_reg_rtx (mode);
35236 rs6000_emit_nmsub (t, g, h, mhalf);
35237 rs6000_emit_madd (dst, g, t, g);
35238 }
35239
35240 return;
35241 }
35242
35243 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35244 (Power7) targets. DST is the target, and SRC is the argument operand. */
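/* Worked example of the popcntb fallback below (for exposition):
   with src = 0xffffffff, popcntb yields 0x08080808; multiplying by
   0x01010101 sums the byte counts into the most significant byte
   (0x20), and the shift right by 24 extracts 32.  */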
35245
35246 void
35247 rs6000_emit_popcount (rtx dst, rtx src)
35248 {
35249 machine_mode mode = GET_MODE (dst);
35250 rtx tmp1, tmp2;
35251
35252 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35253 if (TARGET_POPCNTD)
35254 {
35255 if (mode == SImode)
35256 emit_insn (gen_popcntdsi2 (dst, src));
35257 else
35258 emit_insn (gen_popcntddi2 (dst, src));
35259 return;
35260 }
35261
35262 tmp1 = gen_reg_rtx (mode);
35263
35264 if (mode == SImode)
35265 {
35266 emit_insn (gen_popcntbsi2 (tmp1, src));
35267 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35268 NULL_RTX, 0);
35269 tmp2 = force_reg (SImode, tmp2);
35270 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35271 }
35272 else
35273 {
35274 emit_insn (gen_popcntbdi2 (tmp1, src));
35275 tmp2 = expand_mult (DImode, tmp1,
35276 GEN_INT ((HOST_WIDE_INT)
35277 0x01010101 << 32 | 0x01010101),
35278 NULL_RTX, 0);
35279 tmp2 = force_reg (DImode, tmp2);
35280 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35281 }
35282 }
35283
35284
35285 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35286 target, and SRC is the argument operand. */
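/* The shift-and-xor fallback below works because the parity of SRC
   equals the sum of its per-byte popcounts mod 2, and xor-folding
   the popcntb result preserves the low bit of that sum.  */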
35287
35288 void
35289 rs6000_emit_parity (rtx dst, rtx src)
35290 {
35291 machine_mode mode = GET_MODE (dst);
35292 rtx tmp;
35293
35294 tmp = gen_reg_rtx (mode);
35295
35296 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35297 if (TARGET_CMPB)
35298 {
35299 if (mode == SImode)
35300 {
35301 emit_insn (gen_popcntbsi2 (tmp, src));
35302 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35303 }
35304 else
35305 {
35306 emit_insn (gen_popcntbdi2 (tmp, src));
35307 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35308 }
35309 return;
35310 }
35311
35312 if (mode == SImode)
35313 {
35314 /* Is mult+shift >= shift+xor+shift+xor? */
35315 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35316 {
35317 rtx tmp1, tmp2, tmp3, tmp4;
35318
35319 tmp1 = gen_reg_rtx (SImode);
35320 emit_insn (gen_popcntbsi2 (tmp1, src));
35321
35322 tmp2 = gen_reg_rtx (SImode);
35323 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35324 tmp3 = gen_reg_rtx (SImode);
35325 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35326
35327 tmp4 = gen_reg_rtx (SImode);
35328 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35329 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35330 }
35331 else
35332 rs6000_emit_popcount (tmp, src);
35333 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35334 }
35335 else
35336 {
35337 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35338 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35339 {
35340 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35341
35342 tmp1 = gen_reg_rtx (DImode);
35343 emit_insn (gen_popcntbdi2 (tmp1, src));
35344
35345 tmp2 = gen_reg_rtx (DImode);
35346 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35347 tmp3 = gen_reg_rtx (DImode);
35348 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35349
35350 tmp4 = gen_reg_rtx (DImode);
35351 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35352 tmp5 = gen_reg_rtx (DImode);
35353 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35354
35355 tmp6 = gen_reg_rtx (DImode);
35356 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35357 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35358 }
35359 else
35360 rs6000_emit_popcount (tmp, src);
35361 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35362 }
35363 }
35364
35365 /* Expand an Altivec constant permutation for little endian mode.
35366 OP0 and OP1 are the input vectors and TARGET is the output vector.
35367 SEL specifies the constant permutation vector.
35368
35369 There are two issues: First, the two input operands must be
35370 swapped so that together they form a double-wide array in LE
35371 order. Second, the vperm instruction has surprising behavior
35372 in LE mode: it interprets the elements of the source vectors
35373 in BE mode ("left to right") and interprets the elements of
35374 the destination vector in LE mode ("right to left"). To
35375 correct for this, we must subtract each element of the permute
35376 control vector from 31.
35377
35378 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35379 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35380 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35381 serve as the permute control vector. Then, in BE mode,
35382
35383 vperm 9,10,11,12
35384
35385 places the desired result in vr9. However, in LE mode the
35386 vector contents will be
35387
35388 vr10 = 00000003 00000002 00000001 00000000
35389 vr11 = 00000007 00000006 00000005 00000004
35390
35391 The result of the vperm using the same permute control vector is
35392
35393 vr9 = 05000000 07000000 01000000 03000000
35394
35395 That is, the leftmost 4 bytes of vr10 are interpreted as the
35396 source for the rightmost 4 bytes of vr9, and so on.
35397
35398 If we change the permute control vector to
35399
35400 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35401
35402 and issue
35403
35404 vperm 9,11,10,12
35405
35406 we get the desired
35407
35408 vr9 = 00000006 00000004 00000002 00000000. */
35409
35410 static void
35411 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35412 const vec_perm_indices &sel)
35413 {
35414 unsigned int i;
35415 rtx perm[16];
35416 rtx constv, unspec;
35417
35418 /* Unpack and adjust the constant selector. */
35419 for (i = 0; i < 16; ++i)
35420 {
35421 unsigned int elt = 31 - (sel[i] & 31);
35422 perm[i] = GEN_INT (elt);
35423 }
35424
35425 /* Expand to a permute, swapping the inputs and using the
35426 adjusted selector. */
35427 if (!REG_P (op0))
35428 op0 = force_reg (V16QImode, op0);
35429 if (!REG_P (op1))
35430 op1 = force_reg (V16QImode, op1);
35431
35432 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35433 constv = force_reg (V16QImode, constv);
35434 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35435 UNSPEC_VPERM);
35436 if (!REG_P (target))
35437 {
35438 rtx tmp = gen_reg_rtx (V16QImode);
35439 emit_move_insn (tmp, unspec);
35440 unspec = tmp;
35441 }
35442
35443 emit_move_insn (target, unspec);
35444 }
35445
35446 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35447 permute control vector. But here it's not a constant, so we must
35448 generate a vector NAND or NOR to do the adjustment. */
35449
35450 void
35451 altivec_expand_vec_perm_le (rtx operands[4])
35452 {
35453 rtx notx, iorx, unspec;
35454 rtx target = operands[0];
35455 rtx op0 = operands[1];
35456 rtx op1 = operands[2];
35457 rtx sel = operands[3];
35458 rtx tmp = target;
35459 rtx norreg = gen_reg_rtx (V16QImode);
35460 machine_mode mode = GET_MODE (target);
35461
35462 /* Get everything in regs so the pattern matches. */
35463 if (!REG_P (op0))
35464 op0 = force_reg (mode, op0);
35465 if (!REG_P (op1))
35466 op1 = force_reg (mode, op1);
35467 if (!REG_P (sel))
35468 sel = force_reg (V16QImode, sel);
35469 if (!REG_P (target))
35470 tmp = gen_reg_rtx (mode);
35471
35472 if (TARGET_P9_VECTOR)
35473 {
35474 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35475 UNSPEC_VPERMR);
35476 }
35477 else
35478 {
35479 /* Invert the selector with a VNAND if available, else a VNOR.
35480 The VNAND is preferred for future fusion opportunities. */
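/* A bitwise NOT suffices because only the low five bits of each
   selector byte are significant: (~e) & 31 == 31 - (e & 31), the
   same adjustment applied to constant selectors above.  */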
35481 notx = gen_rtx_NOT (V16QImode, sel);
35482 iorx = (TARGET_P8_VECTOR
35483 ? gen_rtx_IOR (V16QImode, notx, notx)
35484 : gen_rtx_AND (V16QImode, notx, notx));
35485 emit_insn (gen_rtx_SET (norreg, iorx));
35486
35487 /* Permute with operands reversed and adjusted selector. */
35488 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35489 UNSPEC_VPERM);
35490 }
35491
35492 /* Copy into target, possibly by way of a register. */
35493 if (!REG_P (target))
35494 {
35495 emit_move_insn (tmp, unspec);
35496 unspec = tmp;
35497 }
35498
35499 emit_move_insn (target, unspec);
35500 }
35501
35502 /* Expand an Altivec constant permutation. Return true if we match
35503 an efficient implementation; false to fall back to VPERM.
35504
35505 OP0 and OP1 are the input vectors and TARGET is the output vector.
35506 SEL specifies the constant permutation vector. */
35507
35508 static bool
35509 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35510 const vec_perm_indices &sel)
35511 {
35512 struct altivec_perm_insn {
35513 HOST_WIDE_INT mask;
35514 enum insn_code impl;
35515 unsigned char perm[16];
35516 };
35517 static const struct altivec_perm_insn patterns[] = {
35518 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35519 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35520 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35521 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35522 { OPTION_MASK_ALTIVEC,
35523 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35524 : CODE_FOR_altivec_vmrglb_direct),
35525 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35526 { OPTION_MASK_ALTIVEC,
35527 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35528 : CODE_FOR_altivec_vmrglh_direct),
35529 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35530 { OPTION_MASK_ALTIVEC,
35531 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35532 : CODE_FOR_altivec_vmrglw_direct),
35533 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35534 { OPTION_MASK_ALTIVEC,
35535 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35536 : CODE_FOR_altivec_vmrghb_direct),
35537 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35538 { OPTION_MASK_ALTIVEC,
35539 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35540 : CODE_FOR_altivec_vmrghh_direct),
35541 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35542 { OPTION_MASK_ALTIVEC,
35543 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35544 : CODE_FOR_altivec_vmrghw_direct),
35545 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35546 { OPTION_MASK_P8_VECTOR,
35547 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35548 : CODE_FOR_p8_vmrgow_v4sf_direct),
35549 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35550 { OPTION_MASK_P8_VECTOR,
35551 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35552 : CODE_FOR_p8_vmrgew_v4sf_direct),
35553 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35554 };
35555
35556 unsigned int i, j, elt, which;
35557 unsigned char perm[16];
35558 rtx x;
35559 bool one_vec;
35560
35561 /* Unpack the constant selector. */
35562 for (i = which = 0; i < 16; ++i)
35563 {
35564 elt = sel[i] & 31;
35565 which |= (elt < 16 ? 1 : 2);
35566 perm[i] = elt;
35567 }
35568
35569 /* Simplify the constant selector based on operands. */
35570 switch (which)
35571 {
35572 default:
35573 gcc_unreachable ();
35574
35575 case 3:
35576 one_vec = false;
35577 if (!rtx_equal_p (op0, op1))
35578 break;
35579 /* FALLTHRU */
35580
35581 case 2:
35582 for (i = 0; i < 16; ++i)
35583 perm[i] &= 15;
35584 op0 = op1;
35585 one_vec = true;
35586 break;
35587
35588 case 1:
35589 op1 = op0;
35590 one_vec = true;
35591 break;
35592 }
35593
35594 /* Look for splat patterns. */
35595 if (one_vec)
35596 {
35597 elt = perm[0];
35598
35599 for (i = 0; i < 16; ++i)
35600 if (perm[i] != elt)
35601 break;
35602 if (i == 16)
35603 {
35604 if (!BYTES_BIG_ENDIAN)
35605 elt = 15 - elt;
35606 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35607 return true;
35608 }
35609
35610 if (elt % 2 == 0)
35611 {
35612 for (i = 0; i < 16; i += 2)
35613 if (perm[i] != elt || perm[i + 1] != elt + 1)
35614 break;
35615 if (i == 16)
35616 {
35617 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35618 x = gen_reg_rtx (V8HImode);
35619 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35620 GEN_INT (field)));
35621 emit_move_insn (target, gen_lowpart (V16QImode, x));
35622 return true;
35623 }
35624 }
35625
35626 if (elt % 4 == 0)
35627 {
35628 for (i = 0; i < 16; i += 4)
35629 if (perm[i] != elt
35630 || perm[i + 1] != elt + 1
35631 || perm[i + 2] != elt + 2
35632 || perm[i + 3] != elt + 3)
35633 break;
35634 if (i == 16)
35635 {
35636 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35637 x = gen_reg_rtx (V4SImode);
35638 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35639 GEN_INT (field)));
35640 emit_move_insn (target, gen_lowpart (V16QImode, x));
35641 return true;
35642 }
35643 }
35644 }
35645
35646 /* Look for merge and pack patterns. */
35647 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35648 {
35649 bool swapped;
35650
35651 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35652 continue;
35653
35654 elt = patterns[j].perm[0];
35655 if (perm[0] == elt)
35656 swapped = false;
35657 else if (perm[0] == elt + 16)
35658 swapped = true;
35659 else
35660 continue;
35661 for (i = 1; i < 16; ++i)
35662 {
35663 elt = patterns[j].perm[i];
35664 if (swapped)
35665 elt = (elt >= 16 ? elt - 16 : elt + 16);
35666 else if (one_vec && elt >= 16)
35667 elt -= 16;
35668 if (perm[i] != elt)
35669 break;
35670 }
35671 if (i == 16)
35672 {
35673 enum insn_code icode = patterns[j].impl;
35674 machine_mode omode = insn_data[icode].operand[0].mode;
35675 machine_mode imode = insn_data[icode].operand[1].mode;
35676
35677 /* For little-endian, don't use vpkuwum and vpkuhum if the
35678 underlying vector type is not V4SI and V8HI, respectively.
35679 For example, using vpkuwum with a V8HI picks up the even
35680 halfwords (BE numbering) when the even halfwords (LE
35681 numbering) are what we need. */
35682 if (!BYTES_BIG_ENDIAN
35683 && icode == CODE_FOR_altivec_vpkuwum_direct
35684 && ((GET_CODE (op0) == REG
35685 && GET_MODE (op0) != V4SImode)
35686 || (GET_CODE (op0) == SUBREG
35687 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35688 continue;
35689 if (!BYTES_BIG_ENDIAN
35690 && icode == CODE_FOR_altivec_vpkuhum_direct
35691 && ((GET_CODE (op0) == REG
35692 && GET_MODE (op0) != V8HImode)
35693 || (GET_CODE (op0) == SUBREG
35694 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35695 continue;
35696
35697 /* For little-endian, the two input operands must be swapped
35698 (or swapped back) to ensure proper right-to-left numbering
35699 from 0 to 2N-1. */
35700 if (swapped ^ !BYTES_BIG_ENDIAN)
35701 std::swap (op0, op1);
35702 if (imode != V16QImode)
35703 {
35704 op0 = gen_lowpart (imode, op0);
35705 op1 = gen_lowpart (imode, op1);
35706 }
35707 if (omode == V16QImode)
35708 x = target;
35709 else
35710 x = gen_reg_rtx (omode);
35711 emit_insn (GEN_FCN (icode) (x, op0, op1));
35712 if (omode != V16QImode)
35713 emit_move_insn (target, gen_lowpart (V16QImode, x));
35714 return true;
35715 }
35716 }
35717
35718 if (!BYTES_BIG_ENDIAN)
35719 {
35720 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35721 return true;
35722 }
35723
35724 return false;
35725 }
35726
35727 /* Expand a VSX Permute Doubleword constant permutation.
35728 Return true if we match an efficient implementation. */
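/* PERM0 and PERM1 index the four doublewords of the concatenation
   {OP0, OP1}: values 0-1 select from OP0 and 2-3 from OP1.  For
   example, perm0 = 0 and perm1 = 3 selects element 0 of OP0 and
   element 1 of OP1 (numbering per the VEC_CONCAT below).  */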
35729
35730 static bool
35731 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35732 unsigned char perm0, unsigned char perm1)
35733 {
35734 rtx x;
35735
35736 /* If both selectors come from the same operand, fold to single op. */
35737 if ((perm0 & 2) == (perm1 & 2))
35738 {
35739 if (perm0 & 2)
35740 op0 = op1;
35741 else
35742 op1 = op0;
35743 }
35744 /* If both operands are equal, fold to simpler permutation. */
35745 if (rtx_equal_p (op0, op1))
35746 {
35747 perm0 = perm0 & 1;
35748 perm1 = (perm1 & 1) + 2;
35749 }
35750 /* If the first selector comes from the second operand, swap. */
35751 else if (perm0 & 2)
35752 {
35753 if (perm1 & 2)
35754 return false;
35755 perm0 -= 2;
35756 perm1 += 2;
35757 std::swap (op0, op1);
35758 }
35759 /* If the second selector does not come from the second operand, fail. */
35760 else if ((perm1 & 2) == 0)
35761 return false;
35762
35763 /* Success! */
35764 if (target != NULL)
35765 {
35766 machine_mode vmode, dmode;
35767 rtvec v;
35768
35769 vmode = GET_MODE (target);
35770 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35771 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35772 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35773 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35774 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35775 emit_insn (gen_rtx_SET (target, x));
35776 }
35777 return true;
35778 }
35779
35780 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35781
35782 static bool
35783 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35784 rtx op1, const vec_perm_indices &sel)
35785 {
35786 bool testing_p = !target;
35787
35788 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35789 if (TARGET_ALTIVEC && testing_p)
35790 return true;
35791
35792 /* Check for ps_merge* or xxpermdi insns. */
35793 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35794 {
35795 if (testing_p)
35796 {
35797 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35798 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35799 }
35800 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35801 return true;
35802 }
35803
35804 if (TARGET_ALTIVEC)
35805 {
35806 /* Force the target-independent code to lower to V16QImode. */
35807 if (vmode != V16QImode)
35808 return false;
35809 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35810 return true;
35811 }
35812
35813 return false;
35814 }
35815
35816 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35817 OP0 and OP1 are the input vectors and TARGET is the output vector.
35818 PERM specifies the constant permutation vector. */
35819
35820 static void
35821 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35822 machine_mode vmode, const vec_perm_builder &perm)
35823 {
35824 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35825 if (x != target)
35826 emit_move_insn (target, x);
35827 }
35828
35829 /* Expand an extract even operation. */
35830
35831 void
35832 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35833 {
35834 machine_mode vmode = GET_MODE (target);
35835 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35836 vec_perm_builder perm (nelt, nelt, 1);
35837
35838 for (i = 0; i < nelt; i++)
35839 perm.quick_push (i * 2);
35840
35841 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35842 }
35843
35844 /* Expand a vector interleave operation. */
35845
35846 void
35847 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35848 {
35849 machine_mode vmode = GET_MODE (target);
35850 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35851 vec_perm_builder perm (nelt, nelt, 1);
35852
35853 high = (highp ? 0 : nelt / 2);
35854 for (i = 0; i < nelt / 2; i++)
35855 {
35856 perm.quick_push (i + high);
35857 perm.quick_push (i + nelt + high);
35858 }
35859
35860 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35861 }
35862
35863 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT. */
35864 void
35865 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35866 {
35867 HOST_WIDE_INT hwi_scale (scale);
35868 REAL_VALUE_TYPE r_pow;
35869 rtvec v = rtvec_alloc (2);
35870 rtx elt;
35871 rtx scale_vec = gen_reg_rtx (V2DFmode);
35872 (void) real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35873 elt = const_double_from_real_value (r_pow, DFmode);
35874 RTVEC_ELT (v, 0) = elt;
35875 RTVEC_ELT (v, 1) = elt;
35876 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35877 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35878 }
35879
35880 /* Return an RTX representing where to find the function value of a
35881 function returning MODE. */
35882 static rtx
35883 rs6000_complex_function_value (machine_mode mode)
35884 {
35885 unsigned int regno;
35886 rtx r1, r2;
35887 machine_mode inner = GET_MODE_INNER (mode);
35888 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35889
35890 if (TARGET_FLOAT128_TYPE
35891 && (mode == KCmode
35892 || (mode == TCmode && TARGET_IEEEQUAD)))
35893 regno = ALTIVEC_ARG_RETURN;
35894
35895 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35896 regno = FP_ARG_RETURN;
35897
35898 else
35899 {
35900 regno = GP_ARG_RETURN;
35901
35902 /* 32-bit is OK since it'll go in r3/r4. */
35903 if (TARGET_32BIT && inner_bytes >= 4)
35904 return gen_rtx_REG (mode, regno);
35905 }
35906
35907 if (inner_bytes >= 8)
35908 return gen_rtx_REG (mode, regno);
35909
35910 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35911 const0_rtx);
35912 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35913 GEN_INT (inner_bytes));
35914 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35915 }
35916
35917 /* Return an rtx describing a return value of MODE as a PARALLEL
35918 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35919 stride REG_STRIDE. */
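/* For example (see rs6000_libcall_value below), a DImode value on
   32-bit -mpowerpc64 is described as two SImode pieces in
   GP_ARG_RETURN and the following register, at byte offsets 0 and 4.  */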
35920
35921 static rtx
35922 rs6000_parallel_return (machine_mode mode,
35923 int n_elts, machine_mode elt_mode,
35924 unsigned int regno, unsigned int reg_stride)
35925 {
35926 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35927
35928 int i;
35929 for (i = 0; i < n_elts; i++)
35930 {
35931 rtx r = gen_rtx_REG (elt_mode, regno);
35932 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35933 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35934 regno += reg_stride;
35935 }
35936
35937 return par;
35938 }
35939
35940 /* Target hook for TARGET_FUNCTION_VALUE.
35941
35942 An integer value is in r3 and a floating-point value is in fp1,
35943 unless -msoft-float. */
35944
35945 static rtx
35946 rs6000_function_value (const_tree valtype,
35947 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35948 bool outgoing ATTRIBUTE_UNUSED)
35949 {
35950 machine_mode mode;
35951 unsigned int regno;
35952 machine_mode elt_mode;
35953 int n_elts;
35954
35955 /* Special handling for structs in darwin64. */
35956 if (TARGET_MACHO
35957 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35958 {
35959 CUMULATIVE_ARGS valcum;
35960 rtx valret;
35961
35962 valcum.words = 0;
35963 valcum.fregno = FP_ARG_MIN_REG;
35964 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35965 /* Do a trial code generation as if this were going to be passed as
35966 an argument; if any part goes in memory, we return NULL. */
35967 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35968 if (valret)
35969 return valret;
35970 /* Otherwise fall through to standard ABI rules. */
35971 }
35972
35973 mode = TYPE_MODE (valtype);
35974
35975 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35976 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35977 {
35978 int first_reg, n_regs;
35979
35980 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35981 {
35982 /* _Decimal128 must use even/odd register pairs. */
35983 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35984 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35985 }
35986 else
35987 {
35988 first_reg = ALTIVEC_ARG_RETURN;
35989 n_regs = 1;
35990 }
35991
35992 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35993 }
35994
35995 /* Some return value types need to be split in -mpowerpc64, 32-bit ABI. */
35996 if (TARGET_32BIT && TARGET_POWERPC64)
35997 switch (mode)
35998 {
35999 default:
36000 break;
36001 case E_DImode:
36002 case E_SCmode:
36003 case E_DCmode:
36004 case E_TCmode:
36005 int count = GET_MODE_SIZE (mode) / 4;
36006 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
36007 }
36008
36009 if ((INTEGRAL_TYPE_P (valtype)
36010 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
36011 || POINTER_TYPE_P (valtype))
36012 mode = TARGET_32BIT ? SImode : DImode;
36013
36014 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36015 /* _Decimal128 must use an even/odd register pair. */
36016 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36017 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
36018 && !FLOAT128_VECTOR_P (mode))
36019 regno = FP_ARG_RETURN;
36020 else if (TREE_CODE (valtype) == COMPLEX_TYPE
36021 && targetm.calls.split_complex_arg)
36022 return rs6000_complex_function_value (mode);
36023 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36024 return register is used in both cases, and we won't see V2DImode/V2DFmode
36025 for pure altivec, combine the two cases. */
36026 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
36027 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
36028 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
36029 regno = ALTIVEC_ARG_RETURN;
36030 else
36031 regno = GP_ARG_RETURN;
36032
36033 return gen_rtx_REG (mode, regno);
36034 }
36035
36036 /* Define how to find the value returned by a library function
36037 assuming the value has mode MODE. */
36038 rtx
36039 rs6000_libcall_value (machine_mode mode)
36040 {
36041 unsigned int regno;
36042
36043 /* A long long return value needs to be split in -mpowerpc64, 32-bit ABI. */
36044 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
36045 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
36046
36047 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36048 /* _Decimal128 must use an even/odd register pair. */
36049 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36050 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
36051 regno = FP_ARG_RETURN;
36052 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36053 return register is used in both cases, and we won't see V2DImode/V2DFmode
36054 for pure altivec, combine the two cases. */
36055 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36056 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36057 regno = ALTIVEC_ARG_RETURN;
36058 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36059 return rs6000_complex_function_value (mode);
36060 else
36061 regno = GP_ARG_RETURN;
36062
36063 return gen_rtx_REG (mode, regno);
36064 }
36065
36066 /* Compute register pressure classes. We implement the target hook to avoid
36067 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
36068 lead to incorrect estimates of the number of available registers and
36069 therefore to increased register pressure and spilling. */
36070 static int
36071 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36072 {
36073 int n;
36074
36075 n = 0;
36076 pressure_classes[n++] = GENERAL_REGS;
36077 if (TARGET_VSX)
36078 pressure_classes[n++] = VSX_REGS;
36079 else
36080 {
36081 if (TARGET_ALTIVEC)
36082 pressure_classes[n++] = ALTIVEC_REGS;
36083 if (TARGET_HARD_FLOAT)
36084 pressure_classes[n++] = FLOAT_REGS;
36085 }
36086 pressure_classes[n++] = CR_REGS;
36087 pressure_classes[n++] = SPECIAL_REGS;
36088
36089 return n;
36090 }
36091
36092 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36093 Frame pointer elimination is automatically handled.
36094
36095 For the RS/6000, if frame pointer elimination is being done, we would like
36096 to convert ap into fp, not sp.
36097
36098 We need r30 if -mminimal-toc was specified, and there are constant pool
36099 references. */
36100
36101 static bool
36102 rs6000_can_eliminate (const int from, const int to)
36103 {
36104 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36105 ? ! frame_pointer_needed
36106 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36107 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36108 || constant_pool_empty_p ()
36109 : true);
36110 }
36111
36112 /* Define the offset between two registers, FROM to be eliminated and its
36113 replacement TO, at the start of a routine. */
36114 HOST_WIDE_INT
36115 rs6000_initial_elimination_offset (int from, int to)
36116 {
36117 rs6000_stack_t *info = rs6000_stack_info ();
36118 HOST_WIDE_INT offset;
36119
36120 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36121 offset = info->push_p ? 0 : -info->total_size;
36122 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36123 {
36124 offset = info->push_p ? 0 : -info->total_size;
36125 if (FRAME_GROWS_DOWNWARD)
36126 offset += info->fixed_size + info->vars_size + info->parm_size;
36127 }
36128 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36129 offset = FRAME_GROWS_DOWNWARD
36130 ? info->fixed_size + info->vars_size + info->parm_size
36131 : 0;
36132 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36133 offset = info->total_size;
36134 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36135 offset = info->push_p ? info->total_size : 0;
36136 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36137 offset = 0;
36138 else
36139 gcc_unreachable ();
36140
36141 return offset;
36142 }
36143
36144 /* Fill in sizes of registers used by unwinder. */
36145
36146 static void
36147 rs6000_init_dwarf_reg_sizes_extra (tree address)
36148 {
36149 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36150 {
36151 int i;
36152 machine_mode mode = TYPE_MODE (char_type_node);
36153 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36154 rtx mem = gen_rtx_MEM (BLKmode, addr);
36155 rtx value = gen_int_mode (16, mode);
36156
36157 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36158 The unwinder still needs to know the size of Altivec registers. */
36159
36160 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36161 {
36162 int column = DWARF_REG_TO_UNWIND_COLUMN
36163 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36164 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36165
36166 emit_move_insn (adjust_address (mem, mode, offset), value);
36167 }
36168 }
36169 }
36170
36171 /* Map internal gcc register numbers to debug format register numbers.
36172 FORMAT specifies the type of debug register number to use:
36173 0 -- debug information, except for frame-related sections
36174 1 -- DWARF .debug_frame section
36175 2 -- DWARF .eh_frame section */
36176
36177 unsigned int
36178 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36179 {
36180 /* Except for the above, we use the internal number for non-DWARF
36181 debug information, and also for .eh_frame. */
36182 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36183 return regno;
36184
36185 /* On some platforms, we use the standard DWARF register
36186 numbering for .debug_info and .debug_frame. */
36187 #ifdef RS6000_USE_DWARF_NUMBERING
36188 if (regno <= 63)
36189 return regno;
36190 if (regno == LR_REGNO)
36191 return 108;
36192 if (regno == CTR_REGNO)
36193 return 109;
36194 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36195 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36196 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36197 to the DWARF reg for CR. */
36198 if (format == 1 && regno == CR2_REGNO)
36199 return 64;
36200 if (CR_REGNO_P (regno))
36201 return regno - CR0_REGNO + 86;
36202 if (regno == CA_REGNO)
36203 return 101; /* XER */
36204 if (ALTIVEC_REGNO_P (regno))
36205 return regno - FIRST_ALTIVEC_REGNO + 1124;
36206 if (regno == VRSAVE_REGNO)
36207 return 356;
36208 if (regno == VSCR_REGNO)
36209 return 67;
36210 #endif
36211 return regno;
36212 }
36213
36214 /* Target hook for eh_return_filter_mode. */
36215 static scalar_int_mode
36216 rs6000_eh_return_filter_mode (void)
36217 {
36218 return TARGET_32BIT ? SImode : word_mode;
36219 }
36220
36221 /* Target hook for translate_mode_attribute. */
36222 static machine_mode
36223 rs6000_translate_mode_attribute (machine_mode mode)
36224 {
36225 if ((FLOAT128_IEEE_P (mode)
36226 && ieee128_float_type_node == long_double_type_node)
36227 || (FLOAT128_IBM_P (mode)
36228 && ibm128_float_type_node == long_double_type_node))
36229 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36230 return mode;
36231 }
36232
36233 /* Target hook for scalar_mode_supported_p. */
36234 static bool
36235 rs6000_scalar_mode_supported_p (scalar_mode mode)
36236 {
36237 /* -m32 does not support TImode. This is the default, from
36238 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36239 same ABI as for -m32. But default_scalar_mode_supported_p allows
36240 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36241 for -mpowerpc64. */
36242 if (TARGET_32BIT && mode == TImode)
36243 return false;
36244
36245 if (DECIMAL_FLOAT_MODE_P (mode))
36246 return default_decimal_float_supported_p ();
36247 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36248 return true;
36249 else
36250 return default_scalar_mode_supported_p (mode);
36251 }
36252
36253 /* Target hook for vector_mode_supported_p. */
36254 static bool
36255 rs6000_vector_mode_supported_p (machine_mode mode)
36256 {
36257 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36258 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36259 double-double. */
36260 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36261 return true;
36262
36263 else
36264 return false;
36265 }
36266
36267 /* Target hook for floatn_mode. */
36268 static opt_scalar_float_mode
36269 rs6000_floatn_mode (int n, bool extended)
36270 {
36271 if (extended)
36272 {
36273 switch (n)
36274 {
36275 case 32:
36276 return DFmode;
36277
36278 case 64:
36279 if (TARGET_FLOAT128_TYPE)
36280 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36281 else
36282 return opt_scalar_float_mode ();
36283
36284 case 128:
36285 return opt_scalar_float_mode ();
36286
36287 default:
36288 /* Those are the only valid _FloatNx types. */
36289 gcc_unreachable ();
36290 }
36291 }
36292 else
36293 {
36294 switch (n)
36295 {
36296 case 32:
36297 return SFmode;
36298
36299 case 64:
36300 return DFmode;
36301
36302 case 128:
36303 if (TARGET_FLOAT128_TYPE)
36304 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36305 else
36306 return opt_scalar_float_mode ();
36307
36308 default:
36309 return opt_scalar_float_mode ();
36310 }
36311 }
36312
36313 }
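
/* In concrete terms (a sketch, assuming -mfloat128 support is enabled):
   _Float32 uses SFmode, _Float64 and _Float32x use DFmode, _Float64x
   and _Float128 use the IEEE 128-bit mode (KFmode, or TFmode when long
   double is IEEE 128-bit), and _Float128x is rejected:

     _Float128 x = 1.0F128;     -- KFmode or TFmode
     _Float64x y = 2.0F64x;     -- same 128-bit IEEE mode  */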
36314
36315 /* Target hook for c_mode_for_suffix. */
36316 static machine_mode
36317 rs6000_c_mode_for_suffix (char suffix)
36318 {
36319 if (TARGET_FLOAT128_TYPE)
36320 {
36321 if (suffix == 'q' || suffix == 'Q')
36322 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36323
36324 /* At the moment, we are not defining a suffix for IBM extended double.
36325 If/when the default for -mabi=ieeelongdouble is changed, and we want
36326 to support __ibm128 constants in legacy library code, we may need to
36327 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36328 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36329 __float80 constants. */
36330 }
36331
36332 return VOIDmode;
36333 }
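
/* So with -mfloat128 a 'q' or 'Q' suffix yields an IEEE 128-bit
   constant, e.g.:

     __float128 pi = 3.14159265358979323846264338327950288q;  */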
36334
36335 /* Target hook for invalid_arg_for_unprototyped_fn. */
36336 static const char *
36337 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36338 {
36339 return (!rs6000_darwin64_abi
36340 && typelist == 0
36341 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36342 && (funcdecl == NULL_TREE
36343 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36344 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36345 ? N_("AltiVec argument passed to unprototyped function")
36346 : NULL;
36347 }
36348
36349 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36350 setup by using __stack_chk_fail_local hidden function instead of
36351 calling __stack_chk_fail directly. Otherwise it is better to call
36352 __stack_chk_fail directly. */
36353
36354 static tree ATTRIBUTE_UNUSED
36355 rs6000_stack_protect_fail (void)
36356 {
36357 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36358 ? default_hidden_stack_protect_fail ()
36359 : default_external_stack_protect_fail ();
36360 }
36361
36362 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36363
36364 #if TARGET_ELF
36365 static unsigned HOST_WIDE_INT
36366 rs6000_asan_shadow_offset (void)
36367 {
36368 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36369 }
36370 #endif
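
/* The sanitizer computes shadow addresses as (addr >> 3) + offset, so a
   minimal sketch of the mapping this hook implies would be:

     unsigned HOST_WIDE_INT
     shadow_of (unsigned HOST_WIDE_INT addr)
     {
       return (addr >> 3) + rs6000_asan_shadow_offset ();
     }

   giving an offset of 1 << 41 for 64-bit and 1 << 29 for 32-bit ELF.  */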
36371 \f
36372 /* Mask options that we want to support inside of attribute((target)) and
36373 #pragma GCC target operations. Note, we do not include things like
36374 64/32-bit, endianness, hard/soft floating point, etc. that would have
36375 different calling sequences. */
36376
36377 struct rs6000_opt_mask {
36378 const char *name; /* option name */
36379 HOST_WIDE_INT mask; /* mask to set */
36380 bool invert; /* invert sense of mask */
36381 bool valid_target; /* option is a target option */
36382 };
36383
36384 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36385 {
36386 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36387 { "cmpb", OPTION_MASK_CMPB, false, true },
36388 { "crypto", OPTION_MASK_CRYPTO, false, true },
36389 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36390 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36391 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36392 false, true },
36393 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36394 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36395 { "fprnd", OPTION_MASK_FPRND, false, true },
36396 { "hard-dfp", OPTION_MASK_DFP, false, true },
36397 { "htm", OPTION_MASK_HTM, false, true },
36398 { "isel", OPTION_MASK_ISEL, false, true },
36399 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36400 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36401 { "modulo", OPTION_MASK_MODULO, false, true },
36402 { "mulhw", OPTION_MASK_MULHW, false, true },
36403 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36404 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36405 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36406 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36407 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36408 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36409 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36410 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36411 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36412 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36413 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36414 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36415 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36416 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36417 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36418 { "string", 0, false, true },
36419 { "update", OPTION_MASK_NO_UPDATE, true , true },
36420 { "vsx", OPTION_MASK_VSX, false, true },
36421 #ifdef OPTION_MASK_64BIT
36422 #if TARGET_AIX_OS
36423 { "aix64", OPTION_MASK_64BIT, false, false },
36424 { "aix32", OPTION_MASK_64BIT, true, false },
36425 #else
36426 { "64", OPTION_MASK_64BIT, false, false },
36427 { "32", OPTION_MASK_64BIT, true, false },
36428 #endif
36429 #endif
36430 #ifdef OPTION_MASK_EABI
36431 { "eabi", OPTION_MASK_EABI, false, false },
36432 #endif
36433 #ifdef OPTION_MASK_LITTLE_ENDIAN
36434 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36435 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36436 #endif
36437 #ifdef OPTION_MASK_RELOCATABLE
36438 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36439 #endif
36440 #ifdef OPTION_MASK_STRICT_ALIGN
36441 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36442 #endif
36443 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36444 { "string", 0, false, false },
36445 };
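
/* The names above are spelled without the "-m" prefix in target
   attributes and pragmas, and may be negated with "no-".  Illustrative
   uses (hypothetical user code):

     #pragma GCC target ("vsx,no-crypto")
     __attribute__((target ("power9-vector,htm"))) void f (void);  */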
36446
36447 /* Builtin mask mapping for printing the flags. */
36448 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36449 {
36450 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36451 { "vsx", RS6000_BTM_VSX, false, false },
36452 { "fre", RS6000_BTM_FRE, false, false },
36453 { "fres", RS6000_BTM_FRES, false, false },
36454 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36455 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36456 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36457 { "cell", RS6000_BTM_CELL, false, false },
36458 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36459 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36460 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36461 { "crypto", RS6000_BTM_CRYPTO, false, false },
36462 { "htm", RS6000_BTM_HTM, false, false },
36463 { "hard-dfp", RS6000_BTM_DFP, false, false },
36464 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36465 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36466 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36467 { "float128", RS6000_BTM_FLOAT128, false, false },
36468 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36469 };
36470
36471 /* Option variables that we want to support inside attribute((target)) and
36472 #pragma GCC target operations. */
36473
36474 struct rs6000_opt_var {
36475 const char *name; /* option name */
36476 size_t global_offset; /* offset of the option in global_options. */
36477 size_t target_offset; /* offset of the option in target options. */
36478 };
36479
36480 static struct rs6000_opt_var const rs6000_opt_vars[] =
36481 {
36482 { "friz",
36483 offsetof (struct gcc_options, x_TARGET_FRIZ),
36484 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36485 { "avoid-indexed-addresses",
36486 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36487 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36488 { "longcall",
36489 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36490 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36491 { "optimize-swaps",
36492 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36493 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36494 { "allow-movmisalign",
36495 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36496 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36497 { "sched-groups",
36498 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36499 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36500 { "always-hint",
36501 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36502 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36503 { "align-branch-targets",
36504 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36505 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36506 { "tls-markers",
36507 offsetof (struct gcc_options, x_tls_markers),
36508 offsetof (struct cl_target_option, x_tls_markers), },
36509 { "sched-prolog",
36510 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36511 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36512 { "sched-epilog",
36513 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36514 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36515 { "speculate-indirect-jumps",
36516 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36517 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36518 };
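
/* These are boolean option variables rather than mask bits, but they
   are parsed the same way.  An illustrative per-function use:

     __attribute__((target ("no-speculate-indirect-jumps")))
     void g (void);  */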
36519
36520 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36521 parsing. Return true if there were no errors. */
36522
36523 static bool
36524 rs6000_inner_target_options (tree args, bool attr_p)
36525 {
36526 bool ret = true;
36527
36528 if (args == NULL_TREE)
36529 ;
36530
36531 else if (TREE_CODE (args) == STRING_CST)
36532 {
36533 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36534 char *q;
36535
36536 while ((q = strtok (p, ",")) != NULL)
36537 {
36538 bool error_p = false;
36539 bool not_valid_p = false;
36540 const char *cpu_opt = NULL;
36541
36542 p = NULL;
36543 if (strncmp (q, "cpu=", 4) == 0)
36544 {
36545 int cpu_index = rs6000_cpu_name_lookup (q+4);
36546 if (cpu_index >= 0)
36547 rs6000_cpu_index = cpu_index;
36548 else
36549 {
36550 error_p = true;
36551 cpu_opt = q+4;
36552 }
36553 }
36554 else if (strncmp (q, "tune=", 5) == 0)
36555 {
36556 int tune_index = rs6000_cpu_name_lookup (q+5);
36557 if (tune_index >= 0)
36558 rs6000_tune_index = tune_index;
36559 else
36560 {
36561 error_p = true;
36562 cpu_opt = q+5;
36563 }
36564 }
36565 else
36566 {
36567 size_t i;
36568 bool invert = false;
36569 char *r = q;
36570
36571 error_p = true;
36572 if (strncmp (r, "no-", 3) == 0)
36573 {
36574 invert = true;
36575 r += 3;
36576 }
36577
36578 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36579 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36580 {
36581 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36582
36583 if (!rs6000_opt_masks[i].valid_target)
36584 not_valid_p = true;
36585 else
36586 {
36587 error_p = false;
36588 rs6000_isa_flags_explicit |= mask;
36589
36590 /* VSX needs altivec, so -mvsx automagically sets
36591 altivec and disables -mavoid-indexed-addresses. */
36592 if (!invert)
36593 {
36594 if (mask == OPTION_MASK_VSX)
36595 {
36596 mask |= OPTION_MASK_ALTIVEC;
36597 TARGET_AVOID_XFORM = 0;
36598 }
36599 }
36600
36601 if (rs6000_opt_masks[i].invert)
36602 invert = !invert;
36603
36604 if (invert)
36605 rs6000_isa_flags &= ~mask;
36606 else
36607 rs6000_isa_flags |= mask;
36608 }
36609 break;
36610 }
36611
36612 if (error_p && !not_valid_p)
36613 {
36614 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36615 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36616 {
36617 size_t j = rs6000_opt_vars[i].global_offset;
36618 *((int *) ((char *)&global_options + j)) = !invert;
36619 error_p = false;
36620 not_valid_p = false;
36621 break;
36622 }
36623 }
36624 }
36625
36626 if (error_p)
36627 {
36628 const char *eprefix, *esuffix;
36629
36630 ret = false;
36631 if (attr_p)
36632 {
36633 eprefix = "__attribute__((__target__(";
36634 esuffix = ")))";
36635 }
36636 else
36637 {
36638 eprefix = "#pragma GCC target ";
36639 esuffix = "";
36640 }
36641
36642 if (cpu_opt)
36643 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36644 q, esuffix);
36645 else if (not_valid_p)
36646 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36647 else
36648 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36649 }
36650 }
36651 }
36652
36653 else if (TREE_CODE (args) == TREE_LIST)
36654 {
36655 do
36656 {
36657 tree value = TREE_VALUE (args);
36658 if (value)
36659 {
36660 bool ret2 = rs6000_inner_target_options (value, attr_p);
36661 if (!ret2)
36662 ret = false;
36663 }
36664 args = TREE_CHAIN (args);
36665 }
36666 while (args != NULL_TREE);
36667 }
36668
36669 else
36670 {
36671 error ("attribute %<target%> argument not a string");
36672 return false;
36673 }
36674
36675 return ret;
36676 }
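
/* For example, the string "cpu=power9,no-vsx" sets rs6000_cpu_index to
   the power9 entry, clears OPTION_MASK_VSX from rs6000_isa_flags, and
   records the bit in rs6000_isa_flags_explicit.  */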
36677
36678 /* Print out the target options as a list for -mdebug=target. */
36679
36680 static void
36681 rs6000_debug_target_options (tree args, const char *prefix)
36682 {
36683 if (args == NULL_TREE)
36684 fprintf (stderr, "%s<NULL>", prefix);
36685
36686 else if (TREE_CODE (args) == STRING_CST)
36687 {
36688 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36689 char *q;
36690
36691 while ((q = strtok (p, ",")) != NULL)
36692 {
36693 p = NULL;
36694 fprintf (stderr, "%s\"%s\"", prefix, q);
36695 prefix = ", ";
36696 }
36697 }
36698
36699 else if (TREE_CODE (args) == TREE_LIST)
36700 {
36701 do
36702 {
36703 tree value = TREE_VALUE (args);
36704 if (value)
36705 {
36706 rs6000_debug_target_options (value, prefix);
36707 prefix = ", ";
36708 }
36709 args = TREE_CHAIN (args);
36710 }
36711 while (args != NULL_TREE);
36712 }
36713
36714 else
36715 gcc_unreachable ();
36716
36717 return;
36718 }
36719
36720 \f
36721 /* Hook to validate attribute((target("..."))). */
36722
36723 static bool
36724 rs6000_valid_attribute_p (tree fndecl,
36725 tree ARG_UNUSED (name),
36726 tree args,
36727 int flags)
36728 {
36729 struct cl_target_option cur_target;
36730 bool ret;
36731 tree old_optimize;
36732 tree new_target, new_optimize;
36733 tree func_optimize;
36734
36735 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36736
36737 if (TARGET_DEBUG_TARGET)
36738 {
36739 tree tname = DECL_NAME (fndecl);
36740 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36741 if (tname)
36742 fprintf (stderr, "function: %.*s\n",
36743 (int) IDENTIFIER_LENGTH (tname),
36744 IDENTIFIER_POINTER (tname));
36745 else
36746 fprintf (stderr, "function: unknown\n");
36747
36748 fprintf (stderr, "args:");
36749 rs6000_debug_target_options (args, " ");
36750 fprintf (stderr, "\n");
36751
36752 if (flags)
36753 fprintf (stderr, "flags: 0x%x\n", flags);
36754
36755 fprintf (stderr, "--------------------\n");
36756 }
36757
36758 /* attribute((target("default"))) does nothing, beyond
36759 affecting multi-versioning. */
36760 if (TREE_VALUE (args)
36761 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36762 && TREE_CHAIN (args) == NULL_TREE
36763 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36764 return true;
36765
36766 old_optimize = build_optimization_node (&global_options);
36767 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36768
36769 /* If the function changed the optimization levels as well as setting target
36770 options, start with the optimizations specified. */
36771 if (func_optimize && func_optimize != old_optimize)
36772 cl_optimization_restore (&global_options,
36773 TREE_OPTIMIZATION (func_optimize));
36774
36775 /* The target attributes may also change some optimization flags, so update
36776 the optimization options if necessary. */
36777 cl_target_option_save (&cur_target, &global_options);
36778 rs6000_cpu_index = rs6000_tune_index = -1;
36779 ret = rs6000_inner_target_options (args, true);
36780
36781 /* Set up any additional state. */
36782 if (ret)
36783 {
36784 ret = rs6000_option_override_internal (false);
36785 new_target = build_target_option_node (&global_options);
36786 }
36787 else
36788 new_target = NULL;
36789
36790 new_optimize = build_optimization_node (&global_options);
36791
36792 if (!new_target)
36793 ret = false;
36794
36795 else if (fndecl)
36796 {
36797 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36798
36799 if (old_optimize != new_optimize)
36800 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36801 }
36802
36803 cl_target_option_restore (&global_options, &cur_target);
36804
36805 if (old_optimize != new_optimize)
36806 cl_optimization_restore (&global_options,
36807 TREE_OPTIMIZATION (old_optimize));
36808
36809 return ret;
36810 }
36811
36812 \f
36813 /* Hook to validate the current #pragma GCC target and set the state, and
36814 update the macros based on what was changed. If ARGS is NULL, then
36815 POP_TARGET is used to reset the options. */
36816
36817 bool
36818 rs6000_pragma_target_parse (tree args, tree pop_target)
36819 {
36820 tree prev_tree = build_target_option_node (&global_options);
36821 tree cur_tree;
36822 struct cl_target_option *prev_opt, *cur_opt;
36823 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36824 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36825
36826 if (TARGET_DEBUG_TARGET)
36827 {
36828 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36829 fprintf (stderr, "args:");
36830 rs6000_debug_target_options (args, " ");
36831 fprintf (stderr, "\n");
36832
36833 if (pop_target)
36834 {
36835 fprintf (stderr, "pop_target:\n");
36836 debug_tree (pop_target);
36837 }
36838 else
36839 fprintf (stderr, "pop_target: <NULL>\n");
36840
36841 fprintf (stderr, "--------------------\n");
36842 }
36843
36844 if (! args)
36845 {
36846 cur_tree = ((pop_target)
36847 ? pop_target
36848 : target_option_default_node);
36849 cl_target_option_restore (&global_options,
36850 TREE_TARGET_OPTION (cur_tree));
36851 }
36852 else
36853 {
36854 rs6000_cpu_index = rs6000_tune_index = -1;
36855 if (!rs6000_inner_target_options (args, false)
36856 || !rs6000_option_override_internal (false)
36857 || (cur_tree = build_target_option_node (&global_options))
36858 == NULL_TREE)
36859 {
36860 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36861 fprintf (stderr, "invalid pragma\n");
36862
36863 return false;
36864 }
36865 }
36866
36867 target_option_current_node = cur_tree;
36868 rs6000_activate_target_options (target_option_current_node);
36869
36870 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36871 change the macros that are defined. */
36872 if (rs6000_target_modify_macros_ptr)
36873 {
36874 prev_opt = TREE_TARGET_OPTION (prev_tree);
36875 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36876 prev_flags = prev_opt->x_rs6000_isa_flags;
36877
36878 cur_opt = TREE_TARGET_OPTION (cur_tree);
36879 cur_flags = cur_opt->x_rs6000_isa_flags;
36880 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36881
36882 diff_bumask = (prev_bumask ^ cur_bumask);
36883 diff_flags = (prev_flags ^ cur_flags);
36884
36885 if ((diff_flags != 0) || (diff_bumask != 0))
36886 {
36887 /* Delete old macros. */
36888 rs6000_target_modify_macros_ptr (false,
36889 prev_flags & diff_flags,
36890 prev_bumask & diff_bumask);
36891
36892 /* Define new macros. */
36893 rs6000_target_modify_macros_ptr (true,
36894 cur_flags & diff_flags,
36895 cur_bumask & diff_bumask);
36896 }
36897 }
36898
36899 return true;
36900 }
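
/* Typical use from C code (illustrative only):

     #pragma GCC push_options
     #pragma GCC target ("vsx")
     vector double vadd (vector double a, vector double b)
     {
       return a + b;
     }
     #pragma GCC pop_options

   The pop arrives here with ARGS == NULL and the saved node in
   POP_TARGET.  */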
36901
36902 \f
36903 /* Remember the last target of rs6000_set_current_function. */
36904 static GTY(()) tree rs6000_previous_fndecl;
36905
36906 /* Restore target's globals from NEW_TREE and invalidate the
36907 rs6000_previous_fndecl cache. */
36908
36909 void
36910 rs6000_activate_target_options (tree new_tree)
36911 {
36912 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36913 if (TREE_TARGET_GLOBALS (new_tree))
36914 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36915 else if (new_tree == target_option_default_node)
36916 restore_target_globals (&default_target_globals);
36917 else
36918 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36919 rs6000_previous_fndecl = NULL_TREE;
36920 }
36921
36922 /* Establish appropriate back-end context for processing the function
36923 FNDECL. The argument might be NULL to indicate processing at top
36924 level, outside of any function scope. */
36925 static void
36926 rs6000_set_current_function (tree fndecl)
36927 {
36928 if (TARGET_DEBUG_TARGET)
36929 {
36930 fprintf (stderr, "\n==================== rs6000_set_current_function");
36931
36932 if (fndecl)
36933 fprintf (stderr, ", fndecl %s (%p)",
36934 (DECL_NAME (fndecl)
36935 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36936 : "<unknown>"), (void *)fndecl);
36937
36938 if (rs6000_previous_fndecl)
36939 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36940
36941 fprintf (stderr, "\n");
36942 }
36943
36944 /* Only change the context if the function changes. This hook is called
36945 several times in the course of compiling a function, and we don't want to
36946 slow things down too much or call target_reinit when it isn't safe. */
36947 if (fndecl == rs6000_previous_fndecl)
36948 return;
36949
36950 tree old_tree;
36951 if (rs6000_previous_fndecl == NULL_TREE)
36952 old_tree = target_option_current_node;
36953 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36954 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36955 else
36956 old_tree = target_option_default_node;
36957
36958 tree new_tree;
36959 if (fndecl == NULL_TREE)
36960 {
36961 if (old_tree != target_option_current_node)
36962 new_tree = target_option_current_node;
36963 else
36964 new_tree = NULL_TREE;
36965 }
36966 else
36967 {
36968 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36969 if (new_tree == NULL_TREE)
36970 new_tree = target_option_default_node;
36971 }
36972
36973 if (TARGET_DEBUG_TARGET)
36974 {
36975 if (new_tree)
36976 {
36977 fprintf (stderr, "\nnew fndecl target specific options:\n");
36978 debug_tree (new_tree);
36979 }
36980
36981 if (old_tree)
36982 {
36983 fprintf (stderr, "\nold fndecl target specific options:\n");
36984 debug_tree (old_tree);
36985 }
36986
36987 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36988 fprintf (stderr, "--------------------\n");
36989 }
36990
36991 if (new_tree && old_tree != new_tree)
36992 rs6000_activate_target_options (new_tree);
36993
36994 if (fndecl)
36995 rs6000_previous_fndecl = fndecl;
36996 }
36997
36998 \f
36999 /* Save the current options */
37000
37001 static void
37002 rs6000_function_specific_save (struct cl_target_option *ptr,
37003 struct gcc_options *opts)
37004 {
37005 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
37006 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
37007 }
37008
37009 /* Restore the current options */
37010
37011 static void
37012 rs6000_function_specific_restore (struct gcc_options *opts,
37013 struct cl_target_option *ptr)
37015 {
37016 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
37017 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
37018 (void) rs6000_option_override_internal (false);
37019 }
37020
37021 /* Print the current options */
37022
37023 static void
37024 rs6000_function_specific_print (FILE *file, int indent,
37025 struct cl_target_option *ptr)
37026 {
37027 rs6000_print_isa_options (file, indent, "Isa options set",
37028 ptr->x_rs6000_isa_flags);
37029
37030 rs6000_print_isa_options (file, indent, "Isa options explicit",
37031 ptr->x_rs6000_isa_flags_explicit);
37032 }
37033
37034 /* Helper function to print the current isa or misc options on a line. */
37035
37036 static void
37037 rs6000_print_options_internal (FILE *file,
37038 int indent,
37039 const char *string,
37040 HOST_WIDE_INT flags,
37041 const char *prefix,
37042 const struct rs6000_opt_mask *opts,
37043 size_t num_elements)
37044 {
37045 size_t i;
37046 size_t start_column = 0;
37047 size_t cur_column;
37048 size_t max_column = 120;
37049 size_t prefix_len = strlen (prefix);
37050 size_t comma_len = 0;
37051 const char *comma = "";
37052
37053 if (indent)
37054 start_column += fprintf (file, "%*s", indent, "");
37055
37056 if (!flags)
37057 {
37058 fprintf (file, DEBUG_FMT_S, string, "<none>");
37059 return;
37060 }
37061
37062 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
37063
37064 /* Print the various mask options. */
37065 cur_column = start_column;
37066 for (i = 0; i < num_elements; i++)
37067 {
37068 bool invert = opts[i].invert;
37069 const char *name = opts[i].name;
37070 const char *no_str = "";
37071 HOST_WIDE_INT mask = opts[i].mask;
37072 size_t len = comma_len + prefix_len + strlen (name);
37073
37074 if (!invert)
37075 {
37076 if ((flags & mask) == 0)
37077 {
37078 no_str = "no-";
37079 len += sizeof ("no-") - 1;
37080 }
37081
37082 flags &= ~mask;
37083 }
37084
37085 else
37086 {
37087 if ((flags & mask) != 0)
37088 {
37089 no_str = "no-";
37090 len += sizeof ("no-") - 1;
37091 }
37092
37093 flags |= mask;
37094 }
37095
37096 cur_column += len;
37097 if (cur_column > max_column)
37098 {
37099 fprintf (file, ", \\\n%*s", (int)start_column, "");
37100 cur_column = start_column + len;
37101 comma = "";
37102 }
37103
37104 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37105 comma = ", ";
37106 comma_len = sizeof (", ") - 1;
37107 }
37108
37109 fputs ("\n", file);
37110 }
37111
37112 /* Helper function to print the current isa options on a line. */
37113
37114 static void
37115 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37116 HOST_WIDE_INT flags)
37117 {
37118 rs6000_print_options_internal (file, indent, string, flags, "-m",
37119 &rs6000_opt_masks[0],
37120 ARRAY_SIZE (rs6000_opt_masks));
37121 }
37122
37123 static void
37124 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37125 HOST_WIDE_INT flags)
37126 {
37127 rs6000_print_options_internal (file, indent, string, flags, "",
37128 &rs6000_builtin_mask_names[0],
37129 ARRAY_SIZE (rs6000_builtin_mask_names));
37130 }
37131
37132 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37133 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37134 -mupper-regs-df, etc.).
37135
37136 If the user used -mno-power8-vector, we need to turn off all of the implicit
37137 ISA 2.07 and 3.0 options that relate to the vector unit.
37138
37139 If the user used -mno-power9-vector, we need to turn off all of the implicit
37140 ISA 3.0 options that relate to the vector unit.
37141
37142 This function does not handle explicit options such as the user specifying
37143 -mdirect-move. These are handled in rs6000_option_override_internal, and
37144 the appropriate error is given if needed.
37145
37146 We return a mask of all of the implicit options that should not be enabled
37147 by default. */
37148
37149 static HOST_WIDE_INT
37150 rs6000_disable_incompatible_switches (void)
37151 {
37152 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37153 size_t i, j;
37154
37155 static const struct {
37156 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37157 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37158 const char *const name; /* name of the switch. */
37159 } flags[] = {
37160 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37161 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37162 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37163 };
37164
37165 for (i = 0; i < ARRAY_SIZE (flags); i++)
37166 {
37167 HOST_WIDE_INT no_flag = flags[i].no_flag;
37168
37169 if ((rs6000_isa_flags & no_flag) == 0
37170 && (rs6000_isa_flags_explicit & no_flag) != 0)
37171 {
37172 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37173 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37174 & rs6000_isa_flags
37175 & dep_flags);
37176
37177 if (set_flags)
37178 {
37179 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37180 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37181 {
37182 set_flags &= ~rs6000_opt_masks[j].mask;
37183 error ("%<-mno-%s%> turns off %<-m%s%>",
37184 flags[i].name,
37185 rs6000_opt_masks[j].name);
37186 }
37187
37188 gcc_assert (!set_flags);
37189 }
37190
37191 rs6000_isa_flags &= ~dep_flags;
37192 ignore_masks |= no_flag | dep_flags;
37193 }
37194 }
37195
37196 return ignore_masks;
37197 }
37198
37199 \f
37200 /* Helper function for printing the function name when debugging. */
37201
37202 static const char *
37203 get_decl_name (tree fn)
37204 {
37205 tree name;
37206
37207 if (!fn)
37208 return "<null>";
37209
37210 name = DECL_NAME (fn);
37211 if (!name)
37212 return "<no-name>";
37213
37214 return IDENTIFIER_POINTER (name);
37215 }
37216
37217 /* Return the clone id of the target we are compiling code for in a target
37218 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37219 the priority list for the target clones (ordered from lowest to
37220 highest). */
37221
37222 static int
37223 rs6000_clone_priority (tree fndecl)
37224 {
37225 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37226 HOST_WIDE_INT isa_masks;
37227 int ret = CLONE_DEFAULT;
37228 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37229 const char *attrs_str = NULL;
37230
37231 attrs = TREE_VALUE (TREE_VALUE (attrs));
37232 attrs_str = TREE_STRING_POINTER (attrs);
37233
37234 /* Return priority zero for default function. Return the ISA needed for the
37235 function if it is not the default. */
37236 if (strcmp (attrs_str, "default") != 0)
37237 {
37238 if (fn_opts == NULL_TREE)
37239 fn_opts = target_option_default_node;
37240
37241 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37242 isa_masks = rs6000_isa_flags;
37243 else
37244 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37245
37246 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37247 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37248 break;
37249 }
37250
37251 if (TARGET_DEBUG_TARGET)
37252 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37253 get_decl_name (fndecl), ret);
37254
37255 return ret;
37256 }
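
/* As an illustration, a declaration such as

     __attribute__((target_clones ("cpu=power9,cpu=power8,default")))
     long mod3 (long a) { return a % 3; }

   yields priority CLONE_DEFAULT for the "default" version and
   successively higher priorities for the power8 and power9 clones, so
   the dispatcher prefers the most capable version the hardware
   supports.  */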
37257
37258 /* This compares the priority of target features in function DECL1 and DECL2.
37259 It returns positive value if DECL1 is higher priority, negative value if
37260 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37261 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37262
37263 static int
37264 rs6000_compare_version_priority (tree decl1, tree decl2)
37265 {
37266 int priority1 = rs6000_clone_priority (decl1);
37267 int priority2 = rs6000_clone_priority (decl2);
37268 int ret = priority1 - priority2;
37269
37270 if (TARGET_DEBUG_TARGET)
37271 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37272 get_decl_name (decl1), get_decl_name (decl2), ret);
37273
37274 return ret;
37275 }
37276
37277 /* Make a dispatcher declaration for the multi-versioned function DECL.
37278 Calls to DECL function will be replaced with calls to the dispatcher
37279 by the front-end. Returns the decl of the dispatcher function. */
37280
37281 static tree
37282 rs6000_get_function_versions_dispatcher (void *decl)
37283 {
37284 tree fn = (tree) decl;
37285 struct cgraph_node *node = NULL;
37286 struct cgraph_node *default_node = NULL;
37287 struct cgraph_function_version_info *node_v = NULL;
37288 struct cgraph_function_version_info *first_v = NULL;
37289
37290 tree dispatch_decl = NULL;
37291
37292 struct cgraph_function_version_info *default_version_info = NULL;
37293 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37294
37295 if (TARGET_DEBUG_TARGET)
37296 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37297 get_decl_name (fn));
37298
37299 node = cgraph_node::get (fn);
37300 gcc_assert (node != NULL);
37301
37302 node_v = node->function_version ();
37303 gcc_assert (node_v != NULL);
37304
37305 if (node_v->dispatcher_resolver != NULL)
37306 return node_v->dispatcher_resolver;
37307
37308 /* Find the default version and make it the first node. */
37309 first_v = node_v;
37310 /* Go to the beginning of the chain. */
37311 while (first_v->prev != NULL)
37312 first_v = first_v->prev;
37313
37314 default_version_info = first_v;
37315 while (default_version_info != NULL)
37316 {
37317 const tree decl2 = default_version_info->this_node->decl;
37318 if (is_function_default_version (decl2))
37319 break;
37320 default_version_info = default_version_info->next;
37321 }
37322
37323 /* If there is no default node, just return NULL. */
37324 if (default_version_info == NULL)
37325 return NULL;
37326
37327 /* Make default info the first node. */
37328 if (first_v != default_version_info)
37329 {
37330 default_version_info->prev->next = default_version_info->next;
37331 if (default_version_info->next)
37332 default_version_info->next->prev = default_version_info->prev;
37333 first_v->prev = default_version_info;
37334 default_version_info->next = first_v;
37335 default_version_info->prev = NULL;
37336 }
37337
37338 default_node = default_version_info->this_node;
37339
37340 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37341 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37342 "target_clones attribute needs GLIBC (2.23 and newer) that "
37343 "exports hardware capability bits");
37344 #else
37345
37346 if (targetm.has_ifunc_p ())
37347 {
37348 struct cgraph_function_version_info *it_v = NULL;
37349 struct cgraph_node *dispatcher_node = NULL;
37350 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37351
37352 /* Right now, the dispatching is done via ifunc. */
37353 dispatch_decl = make_dispatcher_decl (default_node->decl);
37354
37355 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37356 gcc_assert (dispatcher_node != NULL);
37357 dispatcher_node->dispatcher_function = 1;
37358 dispatcher_version_info
37359 = dispatcher_node->insert_new_function_version ();
37360 dispatcher_version_info->next = default_version_info;
37361 dispatcher_node->definition = 1;
37362
37363 /* Set the dispatcher for all the versions. */
37364 it_v = default_version_info;
37365 while (it_v != NULL)
37366 {
37367 it_v->dispatcher_resolver = dispatch_decl;
37368 it_v = it_v->next;
37369 }
37370 }
37371 else
37372 {
37373 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37374 "multiversioning needs ifunc which is not supported "
37375 "on this target");
37376 }
37377 #endif
37378
37379 return dispatch_decl;
37380 }
37381
37382 /* Make the resolver function decl to dispatch the versions of a multi-
37383 versioned function, DEFAULT_DECL. Create an empty basic block in the
37384 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37385 function. */
37386
37387 static tree
37388 make_resolver_func (const tree default_decl,
37389 const tree dispatch_decl,
37390 basic_block *empty_bb)
37391 {
37392 /* Make the resolver function static. The resolver function returns
37393 void *. */
37394 tree decl_name = clone_function_name (default_decl, "resolver");
37395 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37396 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37397 tree decl = build_fn_decl (resolver_name, type);
37398 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37399
37400 DECL_NAME (decl) = decl_name;
37401 TREE_USED (decl) = 1;
37402 DECL_ARTIFICIAL (decl) = 1;
37403 DECL_IGNORED_P (decl) = 0;
37404 TREE_PUBLIC (decl) = 0;
37405 DECL_UNINLINABLE (decl) = 1;
37406
37407 /* Resolver is not external, body is generated. */
37408 DECL_EXTERNAL (decl) = 0;
37409 DECL_EXTERNAL (dispatch_decl) = 0;
37410
37411 DECL_CONTEXT (decl) = NULL_TREE;
37412 DECL_INITIAL (decl) = make_node (BLOCK);
37413 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37414
37415 /* Build result decl and add to function_decl. */
37416 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37417 DECL_ARTIFICIAL (t) = 1;
37418 DECL_IGNORED_P (t) = 1;
37419 DECL_RESULT (decl) = t;
37420
37421 gimplify_function_tree (decl);
37422 push_cfun (DECL_STRUCT_FUNCTION (decl));
37423 *empty_bb = init_lowered_empty_function (decl, false,
37424 profile_count::uninitialized ());
37425
37426 cgraph_node::add_new_function (decl, true);
37427 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37428
37429 pop_cfun ();
37430
37431 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37432 DECL_ATTRIBUTES (dispatch_decl)
37433 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37434
37435 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37436
37437 return decl;
37438 }
37439
37440 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37441 return a pointer to VERSION_DECL if we are running on a machine that
37442 supports the index CLONE_ISA hardware architecture bits. This function will
37443 be called during version dispatch to decide which function version to
37444 execute. It returns the basic block at the end, to which more conditions
37445 can be added. */
37446
37447 static basic_block
37448 add_condition_to_bb (tree function_decl, tree version_decl,
37449 int clone_isa, basic_block new_bb)
37450 {
37451 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37452
37453 gcc_assert (new_bb != NULL);
37454 gimple_seq gseq = bb_seq (new_bb);
37455
37456
37457 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37458 build_fold_addr_expr (version_decl));
37459 tree result_var = create_tmp_var (ptr_type_node);
37460 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37461 gimple *return_stmt = gimple_build_return (result_var);
37462
37463 if (clone_isa == CLONE_DEFAULT)
37464 {
37465 gimple_seq_add_stmt (&gseq, convert_stmt);
37466 gimple_seq_add_stmt (&gseq, return_stmt);
37467 set_bb_seq (new_bb, gseq);
37468 gimple_set_bb (convert_stmt, new_bb);
37469 gimple_set_bb (return_stmt, new_bb);
37470 pop_cfun ();
37471 return new_bb;
37472 }
37473
37474 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37475 tree cond_var = create_tmp_var (bool_int_type_node);
37476 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37477 const char *arg_str = rs6000_clone_map[clone_isa].name;
37478 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37479 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37480 gimple_call_set_lhs (call_cond_stmt, cond_var);
37481
37482 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37483 gimple_set_bb (call_cond_stmt, new_bb);
37484 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37485
37486 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37487 NULL_TREE, NULL_TREE);
37488 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37489 gimple_set_bb (if_else_stmt, new_bb);
37490 gimple_seq_add_stmt (&gseq, if_else_stmt);
37491
37492 gimple_seq_add_stmt (&gseq, convert_stmt);
37493 gimple_seq_add_stmt (&gseq, return_stmt);
37494 set_bb_seq (new_bb, gseq);
37495
37496 basic_block bb1 = new_bb;
37497 edge e12 = split_block (bb1, if_else_stmt);
37498 basic_block bb2 = e12->dest;
37499 e12->flags &= ~EDGE_FALLTHRU;
37500 e12->flags |= EDGE_TRUE_VALUE;
37501
37502 edge e23 = split_block (bb2, return_stmt);
37503 gimple_set_bb (convert_stmt, bb2);
37504 gimple_set_bb (return_stmt, bb2);
37505
37506 basic_block bb3 = e23->dest;
37507 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37508
37509 remove_edge (e23);
37510 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37511
37512 pop_cfun ();
37513 return bb3;
37514 }
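
/* The resolver body built from these blocks is roughly equivalent to
   (pseudo-GIMPLE, clone names hypothetical):

     if (__builtin_cpu_supports ("arch_3_00"))
       return (void *) f_power9;
     if (__builtin_cpu_supports ("arch_2_07"))
       return (void *) f_power8;
     return (void *) f_default;  */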
37515
37516 /* This function generates the dispatch function for multi-versioned functions.
37517 DISPATCH_DECL is the function which will contain the dispatch logic.
37518 FNDECLS are the function choices for dispatch, and is a tree chain.
37519 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37520 code is generated. */
37521
37522 static int
37523 dispatch_function_versions (tree dispatch_decl,
37524 void *fndecls_p,
37525 basic_block *empty_bb)
37526 {
37527 int ix;
37528 tree ele;
37529 vec<tree> *fndecls;
37530 tree clones[CLONE_MAX];
37531
37532 if (TARGET_DEBUG_TARGET)
37533 fputs ("dispatch_function_versions, top\n", stderr);
37534
37535 gcc_assert (dispatch_decl != NULL
37536 && fndecls_p != NULL
37537 && empty_bb != NULL);
37538
37539 /* fndecls_p is actually a vector. */
37540 fndecls = static_cast<vec<tree> *> (fndecls_p);
37541
37542 /* At least one more version other than the default. */
37543 gcc_assert (fndecls->length () >= 2);
37544
37545 /* The first version in the vector is the default decl. */
37546 memset ((void *) clones, '\0', sizeof (clones));
37547 clones[CLONE_DEFAULT] = (*fndecls)[0];
37548
37549 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37550 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
36551 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37552 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37553 to insert the code here to do the call. */
37554
37555 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37556 {
37557 int priority = rs6000_clone_priority (ele);
37558 if (!clones[priority])
37559 clones[priority] = ele;
37560 }
37561
37562 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37563 if (clones[ix])
37564 {
37565 if (TARGET_DEBUG_TARGET)
37566 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37567 ix, get_decl_name (clones[ix]));
37568
37569 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37570 *empty_bb);
37571 }
37572
37573 return 0;
37574 }
37575
37576 /* Generate the dispatching code body to dispatch multi-versioned function
37577 DECL. The target hook is called to process the "target" attributes and
37578 provide the code to dispatch the right function at run-time. NODE points
37579 to the dispatcher decl whose body will be created. */
37580
37581 static tree
37582 rs6000_generate_version_dispatcher_body (void *node_p)
37583 {
37584 tree resolver;
37585 basic_block empty_bb;
37586 struct cgraph_node *node = (cgraph_node *) node_p;
37587 struct cgraph_function_version_info *ninfo = node->function_version ();
37588
37589 if (ninfo->dispatcher_resolver)
37590 return ninfo->dispatcher_resolver;
37591
37592 /* node is going to be an alias, so remove the finalized bit. */
37593 node->definition = false;
37594
37595 /* The first version in the chain corresponds to the default version. */
37596 ninfo->dispatcher_resolver = resolver
37597 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37598
37599 if (TARGET_DEBUG_TARGET)
37600 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37601 get_decl_name (resolver));
37602
37603 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37604 auto_vec<tree, 2> fn_ver_vec;
37605
37606 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37607 vinfo;
37608 vinfo = vinfo->next)
37609 {
37610 struct cgraph_node *version = vinfo->this_node;
37611 /* Check for virtual functions here again, as by this time it should
37612 have been determined if this function needs a vtable index or
37613 not. This happens for methods in derived classes that override
37614 virtual methods in base classes but are not explicitly marked as
37615 virtual. */
37616 if (DECL_VINDEX (version->decl))
37617 sorry ("Virtual function multiversioning not supported");
37618
37619 fn_ver_vec.safe_push (version->decl);
37620 }
37621
37622 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37623 cgraph_edge::rebuild_edges ();
37624 pop_cfun ();
37625 return resolver;
37626 }
37627
37628 \f
37629 /* Hook to determine if one function can safely inline another. */
37630
37631 static bool
37632 rs6000_can_inline_p (tree caller, tree callee)
37633 {
37634 bool ret = false;
37635 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37636 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37637
37638 /* If callee has no option attributes, then it is ok to inline. */
37639 if (!callee_tree)
37640 ret = true;
37641
37642 /* If caller has no option attributes, but callee does then it is not ok to
37643 inline. */
37644 else if (!caller_tree)
37645 ret = false;
37646
37647 else
37648 {
37649 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37650 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37651
37652 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37653 can inline an altivec function but a non-vsx function can't inline a
37654 vsx function. */
37655 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37656 == callee_opts->x_rs6000_isa_flags)
37657 ret = true;
37658 }
37659
37660 if (TARGET_DEBUG_TARGET)
37661 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37662 get_decl_name (caller), get_decl_name (callee),
37663 (ret ? "can" : "cannot"));
37664
37665 return ret;
37666 }
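
/* For instance, a caller compiled with -mvsx (which implies -maltivec)
   may inline a callee marked __attribute__((target ("altivec"))),
   because the callee's ISA flags are a subset of the caller's; the
   reverse direction is rejected.  */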
37667 \f
37668 /* Allocate a stack temp and fixup the address so it meets the particular
37669 memory requirements (either offsettable or REG+REG addressing). */
37670
37671 rtx
37672 rs6000_allocate_stack_temp (machine_mode mode,
37673 bool offsettable_p,
37674 bool reg_reg_p)
37675 {
37676 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37677 rtx addr = XEXP (stack, 0);
37678 int strict_p = reload_completed;
37679
37680 if (!legitimate_indirect_address_p (addr, strict_p))
37681 {
37682 if (offsettable_p
37683 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37684 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37685
37686 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37687 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37688 }
37689
37690 return stack;
37691 }
37692
37693 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37694 convert to such a form to deal with memory reference instructions
37695 like STFIWX and LDBRX that only take reg+reg addressing. */
37696
37697 rtx
37698 rs6000_force_indexed_or_indirect_mem (rtx x)
37699 {
37700 machine_mode mode = GET_MODE (x);
37701
37702 gcc_assert (MEM_P (x));
37703 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37704 {
37705 rtx addr = XEXP (x, 0);
37706 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37707 {
37708 rtx reg = XEXP (addr, 0);
37709 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37710 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37711 gcc_assert (REG_P (reg));
37712 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37713 addr = reg;
37714 }
37715 else if (GET_CODE (addr) == PRE_MODIFY)
37716 {
37717 rtx reg = XEXP (addr, 0);
37718 rtx expr = XEXP (addr, 1);
37719 gcc_assert (REG_P (reg));
37720 gcc_assert (GET_CODE (expr) == PLUS);
37721 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37722 addr = reg;
37723 }
37724
37725 x = replace_equiv_address (x, force_reg (Pmode, addr));
37726 }
37727
37728 return x;
37729 }
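
/* For example, a DImode PRE_INC address (mem:DI (pre_inc (reg R))) is
   rewritten by first emitting the equivalent of "addi R,R,8" and then
   using the plain (mem:DI (reg R)), a form LDBRX and STFIWX accept.  */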
37730
37731 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37732
37733 On the RS/6000, all integer constants are acceptable, most won't be valid
37734 for particular insns, though. Only easy FP constants are acceptable. */
37735
37736 static bool
37737 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37738 {
37739 if (TARGET_ELF && tls_referenced_p (x))
37740 return false;
37741
37742 if (CONST_DOUBLE_P (x))
37743 return easy_fp_constant (x, mode);
37744
37745 if (GET_CODE (x) == CONST_VECTOR)
37746 return easy_vector_constant (x, mode);
37747
37748 return true;
37749 }
37750
37751 \f
37752 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37753
37754 static bool
37755 chain_already_loaded (rtx_insn *last)
37756 {
37757 for (; last != NULL; last = PREV_INSN (last))
37758 {
37759 if (NONJUMP_INSN_P (last))
37760 {
37761 rtx patt = PATTERN (last);
37762
37763 if (GET_CODE (patt) == SET)
37764 {
37765 rtx lhs = XEXP (patt, 0);
37766
37767 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37768 return true;
37769 }
37770 }
37771 }
37772 return false;
37773 }
37774
37775 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37776
37777 void
37778 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37779 {
37780 rtx func = func_desc;
37781 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37782 rtx toc_load = NULL_RTX;
37783 rtx toc_restore = NULL_RTX;
37784 rtx func_addr;
37785 rtx abi_reg = NULL_RTX;
37786 rtx call[4];
37787 int n_call;
37788 rtx insn;
37789
37790 if (global_tlsarg)
37791 tlsarg = global_tlsarg;
37792
37793 /* Handle longcall attributes. */
37794 if ((INTVAL (cookie) & CALL_LONG) != 0
37795 && GET_CODE (func_desc) == SYMBOL_REF)
37796 func = rs6000_longcall_ref (func_desc, tlsarg);
37797
37798 /* Handle indirect calls. */
37799 if (GET_CODE (func) != SYMBOL_REF
37800 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37801 {
37802 /* Save the TOC into its reserved slot before the call,
37803 and prepare to restore it after the call. */
37804 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37805 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37806 gen_rtvec (1, stack_toc_offset),
37807 UNSPEC_TOCSLOT);
37808 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37809
37810 /* Can we optimize saving the TOC in the prologue or
37811 do we need to do it at every call? */
37812 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37813 cfun->machine->save_toc_in_prologue = true;
37814 else
37815 {
37816 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37817 rtx stack_toc_mem = gen_frame_mem (Pmode,
37818 gen_rtx_PLUS (Pmode, stack_ptr,
37819 stack_toc_offset));
37820 MEM_VOLATILE_P (stack_toc_mem) = 1;
37821 if (HAVE_AS_PLTSEQ
37822 && TARGET_TLS_MARKERS
37823 && DEFAULT_ABI == ABI_ELFv2
37824 && GET_CODE (func_desc) == SYMBOL_REF)
37825 {
37826 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37827 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37828 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37829 }
37830 else
37831 emit_move_insn (stack_toc_mem, toc_reg);
37832 }
37833
37834 if (DEFAULT_ABI == ABI_ELFv2)
37835 {
37836 /* A function pointer in the ELFv2 ABI is just a plain address, but
37837 the ABI requires it to be loaded into r12 before the call. */
37838 func_addr = gen_rtx_REG (Pmode, 12);
37839 if (!rtx_equal_p (func_addr, func))
37840 emit_move_insn (func_addr, func);
37841 abi_reg = func_addr;
37842 /* Indirect calls via CTR are strongly preferred over indirect
37843 calls via LR, so move the address there. Needed to mark
37844 this insn for linker plt sequence editing too. */
37845 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37846 if (HAVE_AS_PLTSEQ
37847 && TARGET_TLS_MARKERS
37848 && GET_CODE (func_desc) == SYMBOL_REF)
37849 {
37850 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
37851 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37852 emit_insn (gen_rtx_SET (func_addr, mark_func));
37853 v = gen_rtvec (2, func_addr, func_desc);
37854 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37855 }
37856 else
37857 emit_move_insn (func_addr, abi_reg);
37858 }
37859 else
37860 {
37861 /* A function pointer under AIX is a pointer to a data area whose
37862 first word contains the actual address of the function, whose
37863 second word contains a pointer to its TOC, and whose third word
37864 contains a value to place in the static chain register (r11).
37865 Note that if we load the static chain, our "trampoline" need
37866 not have any executable code. */
37867
37868 /* Load up address of the actual function. */
37869 func = force_reg (Pmode, func);
37870 func_addr = gen_reg_rtx (Pmode);
37871 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
37872
37873 /* Indirect calls via CTR are strongly preferred over indirect
37874 calls via LR, so move the address there. */
37875 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
37876 emit_move_insn (ctr_reg, func_addr);
37877 func_addr = ctr_reg;
37878
37879 /* Prepare to load the TOC of the called function. Note that the
37880 TOC load must happen immediately before the actual call so
37881 that unwinding the TOC registers works correctly. See the
37882 comment in frob_update_context. */
37883 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37884 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37885 gen_rtx_PLUS (Pmode, func,
37886 func_toc_offset));
37887 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37888
37889 /* If we have a static chain, load it up. But, if the call was
37890 originally direct, the 3rd word has not been written since no
37891 trampoline has been built, so we ought not to load it, lest we
37892 override a static chain value. */
37893 if (!(GET_CODE (func_desc) == SYMBOL_REF
37894 && SYMBOL_REF_FUNCTION_P (func_desc))
37895 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37896 && !chain_already_loaded (get_current_sequence ()->next->last))
37897 {
37898 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37899 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37900 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37901 gen_rtx_PLUS (Pmode, func,
37902 func_sc_offset));
37903 emit_move_insn (sc_reg, func_sc_mem);
37904 abi_reg = sc_reg;
37905 }
37906 }
37907 }
37908 else
37909 {
37910 /* Direct calls use the TOC: for local calls, the callee will
37911 assume the TOC register is set; for non-local calls, the
37912 PLT stub needs the TOC register. */
37913 abi_reg = toc_reg;
37914 func_addr = func;
37915 }
37916
37917 /* Create the call. */
37918 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37919 if (value != NULL_RTX)
37920 call[0] = gen_rtx_SET (value, call[0]);
37921 n_call = 1;
37922
37923 if (toc_load)
37924 call[n_call++] = toc_load;
37925 if (toc_restore)
37926 call[n_call++] = toc_restore;
37927
37928 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37929
37930 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37931 insn = emit_call_insn (insn);
37932
37933 /* Mention all registers defined by the ABI to hold information
37934 as uses in CALL_INSN_FUNCTION_USAGE. */
37935 if (abi_reg)
37936 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37937 }
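
/* For the indirect case the emitted insn is a PARALLEL along the lines
   of (a sketch; order as built above, modes abbreviated):

     (parallel [(call (mem:SI (reg CTR)) ...)
                (use (mem ...))               -- AIX TOC load
                (set (reg TOC) (unspec ...))  -- TOC restore
                (clobber (reg LR))])  */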
37938
37939 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37940
37941 void
37942 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37943 {
37944 rtx call[2];
37945 rtx insn;
37946
37947 gcc_assert (INTVAL (cookie) == 0);
37948
37949 if (global_tlsarg)
37950 tlsarg = global_tlsarg;
37951
37952 /* Create the call. */
37953 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
37954 if (value != NULL_RTX)
37955 call[0] = gen_rtx_SET (value, call[0]);
37956
37957 call[1] = simple_return_rtx;
37958
37959 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37960 insn = emit_call_insn (insn);
37961
37962 /* Note use of the TOC register. */
37963 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37964 }
37965
37966 /* Expand code to perform a call under the SYSV4 ABI. */
37967
37968 void
37969 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37970 {
37971 rtx func = func_desc;
37972 rtx func_addr;
37973 rtx call[3];
37974 rtx insn;
37975 rtx abi_reg = NULL_RTX;
37976
37977 if (global_tlsarg)
37978 tlsarg = global_tlsarg;
37979
37980 /* Handle longcall attributes. */
37981 if ((INTVAL (cookie) & CALL_LONG) != 0
37982 && GET_CODE (func_desc) == SYMBOL_REF)
37983 {
37984 func = rs6000_longcall_ref (func_desc, tlsarg);
37985 /* If the longcall was implemented using PLT16 relocs, then r11
37986 needs to be valid at the call for lazy linking. */
37987 if (HAVE_AS_PLTSEQ
37988 && TARGET_TLS_MARKERS)
37989 abi_reg = func;
37990 }
37991
37992 /* Handle indirect calls. */
37993 if (GET_CODE (func) != SYMBOL_REF)
37994 {
37995 func = force_reg (Pmode, func);
37996
37997 /* Indirect calls via CTR are strongly preferred over indirect
37998 calls via LR, so move the address there. Needed to mark
37999 this insn for linker plt sequence editing too. */
38000 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38001 if (HAVE_AS_PLTSEQ
38002 && TARGET_TLS_MARKERS
38003 && GET_CODE (func_desc) == SYMBOL_REF)
38004 {
38005 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38006 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38007 emit_insn (gen_rtx_SET (func_addr, mark_func));
38008 v = gen_rtvec (2, func_addr, func_desc);
38009 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38010 }
38011 else
38012 emit_move_insn (func_addr, func);
38013 }
38014 else
38015 func_addr = func;
38016
38017 /* Create the call. */
38018 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38019 if (value != NULL_RTX)
38020 call[0] = gen_rtx_SET (value, call[0]);
38021
38022 unsigned int mask = CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS;
38023 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (INTVAL (cookie) & mask));
38024 call[2] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
38025
38026 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38027 insn = emit_call_insn (insn);
38028 if (abi_reg)
38029 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38030 }
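
/* Illustrative shape of the linker-editable PLT sequence built above for an
   indirect call when HAVE_AS_PLTSEQ (schematic; modes and some operands are
   omitted, and Rfunc stands for whatever register holds the address):

     (set (reg CTR) (unspec [(reg Rfunc) (symbol_ref F) tlsarg] UNSPEC_PLTSEQ))
     (call (mem:SI (unspec [(reg CTR) (symbol_ref F)] UNSPEC_PLTSEQ)) ...)

   Carrying the original SYMBOL_REF inside the unspecs is what lets the
   linker later rewrite the PLT call sequence.  */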
38031
38032 /* Expand code to perform a sibling call under the SysV4 ABI. */
38033
38034 void
38035 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38036 {
38037 rtx func = func_desc;
38038 rtx func_addr;
38039 rtx call[3];
38040 rtx insn;
38041 rtx abi_reg = NULL_RTX;
38042
38043 if (global_tlsarg)
38044 tlsarg = global_tlsarg;
38045
38046 /* Handle longcall attributes. */
38047 if ((INTVAL (cookie) & CALL_LONG) != 0
38048 && GET_CODE (func_desc) == SYMBOL_REF)
38049 {
38050 func = rs6000_longcall_ref (func_desc, tlsarg);
38051 /* If the longcall was implemented using PLT16 relocs, then r11
38052 needs to be valid at the call for lazy linking. */
38053 if (HAVE_AS_PLTSEQ
38054 && TARGET_TLS_MARKERS)
38055 abi_reg = func;
38056 }
38057
38058 /* Handle indirect calls. */
38059 if (GET_CODE (func) != SYMBOL_REF)
38060 {
38061 func = force_reg (Pmode, func);
38062
38063 /* Indirect sibcalls must go via CTR. Needed to mark
38064 this insn for linker plt sequence editing too. */
38065 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38066 if (HAVE_AS_PLTSEQ
38067 && TARGET_TLS_MARKERS
38068 && GET_CODE (func_desc) == SYMBOL_REF)
38069 {
38070 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38071 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38072 emit_insn (gen_rtx_SET (func_addr, mark_func));
38073 v = gen_rtvec (2, func_addr, func_desc);
38074 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38075 }
38076 else
38077 emit_move_insn (func_addr, func);
38078 }
38079 else
38080 func_addr = func;
38081
38082 /* Create the call. */
38083 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38084 if (value != NULL_RTX)
38085 call[0] = gen_rtx_SET (value, call[0]);
38086
38087 unsigned int mask = CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS;
38088 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (INTVAL (cookie) & mask));
38089 call[2] = simple_return_rtx;
38090
38091 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38092 insn = emit_call_insn (insn);
38093 if (abi_reg)
38094 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38095 }
38096
38097 #if TARGET_MACHO
38098
38099 /* Expand code to perform a call under the Darwin ABI.
38100 Modulo handling of mlongcall, this is much the same as sysv.
38101 If/when the longcall optimisation is removed, we could drop this
38102 code and use the sysv case (taking care to avoid the tls stuff).
38103
38104 We can use this for sibcalls too, if needed. */
38105
38106 void
38107 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
38108 rtx cookie, bool sibcall)
38109 {
38110 rtx func = func_desc;
38111 rtx func_addr;
38112 rtx call[3];
38113 rtx insn;
38114 int cookie_val = INTVAL (cookie);
38115 bool make_island = false;
38116
38117 /* Handle longcall attributes; there are two cases for Darwin:
38118 1) Newer linkers are capable of synthesising any branch islands needed.
38119 2) We need a helper branch island synthesised by the compiler.
38120 The second case has mostly been retired and we don't use it for m64.
38121 In fact, it is only an optimisation; we could just indirect as sysv
38122 does, but we keep it for backwards compatibility for now.
38123 If we're going to use this, then we need to keep the CALL_LONG bit set,
38124 so that we can pick up the special insn form later. */
38125 if ((cookie_val & CALL_LONG) != 0
38126 && GET_CODE (func_desc) == SYMBOL_REF)
38127 {
38128 if (darwin_emit_branch_islands && TARGET_32BIT)
38129 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
38130 else
38131 {
38132 /* The linker is capable of doing this, but the user explicitly
38133 asked for -mlongcall, so we'll do the 'normal' version. */
38134 func = rs6000_longcall_ref (func_desc, NULL_RTX);
38135 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
38136 }
38137 }
38138
38139 /* Handle indirect calls. */
38140 if (GET_CODE (func) != SYMBOL_REF)
38141 {
38142 func = force_reg (Pmode, func);
38143
38144 /* Indirect calls via CTR are strongly preferred over indirect
38145 calls via LR, and are required for indirect sibcalls, so move
38146 the address there. */
38147 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38148 emit_move_insn (func_addr, func);
38149 }
38150 else
38151 func_addr = func;
38152
38153 /* Create the call. */
38154 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38155 if (value != NULL_RTX)
38156 call[0] = gen_rtx_SET (value, call[0]);
38157
38158 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
38159
38160 if (sibcall)
38161 call[2] = simple_return_rtx;
38162 else
38163 call[2] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
38164
38165 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38166 insn = emit_call_insn (insn);
38167 /* Now that we have the debug info in the insn, we can set up the branch
38168 island if we're using one. */
38169 if (make_island)
38170 {
38171 tree funname = get_identifier (XSTR (func_desc, 0));
38172
38173 if (no_previous_def (funname))
38174 {
38175 rtx label_rtx = gen_label_rtx ();
38176 char *label_buf, temp_buf[256];
38177 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
38178 CODE_LABEL_NUMBER (label_rtx));
38179 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
38180 tree labelname = get_identifier (label_buf);
38181 add_compiler_branch_island (labelname, funname,
38182 insn_line ((const rtx_insn*)insn));
38183 }
38184 }
38185 }
38186 #endif
38187
38188 void
38189 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38190 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38191 {
38192 #if TARGET_MACHO
38193 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
38194 #else
38195 gcc_unreachable ();
38196 #endif
38197 }
38198
38199
38200 void
38201 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38202 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38203 {
38204 #if TARGET_MACHO
38205 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
38206 #else
38207 gcc_unreachable ();
38208 #endif
38209 }
38210
38211
38212 /* Return whether we need to always update the saved TOC pointer when we update
38213 the stack pointer. */
38214
38215 static bool
38216 rs6000_save_toc_in_prologue_p (void)
38217 {
38218 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38219 }
38220
38221 #ifdef HAVE_GAS_HIDDEN
38222 # define USE_HIDDEN_LINKONCE 1
38223 #else
38224 # define USE_HIDDEN_LINKONCE 0
38225 #endif
38226
38227 /* Fills in the label name that should be used for a 476 link stack thunk. */
38228
38229 void
38230 get_ppc476_thunk_name (char name[32])
38231 {
38232 gcc_assert (TARGET_LINK_STACK);
38233
38234 if (USE_HIDDEN_LINKONCE)
38235 sprintf (name, "__ppc476.get_thunk");
38236 else
38237 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38238 }
38239
38240 /* This function emits the simple thunk routine that is used to preserve
38241 the link stack on the 476 cpu. */
38242
38243 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38244 static void
38245 rs6000_code_end (void)
38246 {
38247 char name[32];
38248 tree decl;
38249
38250 if (!TARGET_LINK_STACK)
38251 return;
38252
38253 get_ppc476_thunk_name (name);
38254
38255 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38256 build_function_type_list (void_type_node, NULL_TREE));
38257 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38258 NULL_TREE, void_type_node);
38259 TREE_PUBLIC (decl) = 1;
38260 TREE_STATIC (decl) = 1;
38261
38262 #if RS6000_WEAK
38263 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38264 {
38265 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38266 targetm.asm_out.unique_section (decl, 0);
38267 switch_to_section (get_named_section (decl, NULL, 0));
38268 DECL_WEAK (decl) = 1;
38269 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38270 targetm.asm_out.globalize_label (asm_out_file, name);
38271 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38272 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38273 }
38274 else
38275 #endif
38276 {
38277 switch_to_section (text_section);
38278 ASM_OUTPUT_LABEL (asm_out_file, name);
38279 }
38280
38281 DECL_INITIAL (decl) = make_node (BLOCK);
38282 current_function_decl = decl;
38283 allocate_struct_function (decl, false);
38284 init_function_start (decl);
38285 first_function_block_is_cold = false;
38286 /* Make sure unwind info is emitted for the thunk if needed. */
38287 final_start_function (emit_barrier (), asm_out_file, 1);
38288
38289 fputs ("\tblr\n", asm_out_file);
38290
38291 final_end_function ();
38292 init_insn_lengths ();
38293 free_after_compilation (cfun);
38294 set_cfun (NULL);
38295 current_function_decl = NULL;
38296 }
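
/* A sketch of the assembly this produces in the hidden/linkonce case, with
   directive spellings that vary by target assembler:

     .section	.text.__ppc476.get_thunk,"axG",@progbits,...,comdat
     .weak	__ppc476.get_thunk
     .hidden	__ppc476.get_thunk
   __ppc476.get_thunk:
     blr
*/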
38297
38298 /* Add r30 to hard reg set if the prologue sets it up and it is not
38299 pic_offset_table_rtx. */
38300
38301 static void
38302 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38303 {
38304 if (!TARGET_SINGLE_PIC_BASE
38305 && TARGET_TOC
38306 && TARGET_MINIMAL_TOC
38307 && !constant_pool_empty_p ())
38308 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38309 if (cfun->machine->split_stack_argp_used)
38310 add_to_hard_reg_set (&set->set, Pmode, 12);
38311
38312 /* Make sure the hard reg set doesn't include r2, which was possibly added
38313 via PIC_OFFSET_TABLE_REGNUM. */
38314 if (TARGET_TOC)
38315 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38316 }
38317
38318 \f
38319 /* Helper function for rs6000_split_logical to emit a logical instruction after
38320 splitting the operation into single GPR registers.
38321
38322 DEST is the destination register.
38323 OP1 and OP2 are the input source registers.
38324 CODE is the base operation (AND, IOR, XOR, NOT).
38325 MODE is the machine mode.
38326 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38327 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38328 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38329
38330 static void
38331 rs6000_split_logical_inner (rtx dest,
38332 rtx op1,
38333 rtx op2,
38334 enum rtx_code code,
38335 machine_mode mode,
38336 bool complement_final_p,
38337 bool complement_op1_p,
38338 bool complement_op2_p)
38339 {
38340 rtx bool_rtx;
38341
38342 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38343 if (op2 && GET_CODE (op2) == CONST_INT
38344 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38345 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38346 {
38347 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38348 HOST_WIDE_INT value = INTVAL (op2) & mask;
38349
38350 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38351 if (code == AND)
38352 {
38353 if (value == 0)
38354 {
38355 emit_insn (gen_rtx_SET (dest, const0_rtx));
38356 return;
38357 }
38358
38359 else if (value == mask)
38360 {
38361 if (!rtx_equal_p (dest, op1))
38362 emit_insn (gen_rtx_SET (dest, op1));
38363 return;
38364 }
38365 }
38366
38367 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38368 into separate ORI/ORIS or XORI/XORIS instructions. */
38369 else if (code == IOR || code == XOR)
38370 {
38371 if (value == 0)
38372 {
38373 if (!rtx_equal_p (dest, op1))
38374 emit_insn (gen_rtx_SET (dest, op1));
38375 return;
38376 }
38377 }
38378 }
38379
38380 if (code == AND && mode == SImode
38381 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38382 {
38383 emit_insn (gen_andsi3 (dest, op1, op2));
38384 return;
38385 }
38386
38387 if (complement_op1_p)
38388 op1 = gen_rtx_NOT (mode, op1);
38389
38390 if (complement_op2_p)
38391 op2 = gen_rtx_NOT (mode, op2);
38392
38393 /* For canonical RTL, if only one arm is inverted it is the first. */
38394 if (!complement_op1_p && complement_op2_p)
38395 std::swap (op1, op2);
38396
38397 bool_rtx = ((code == NOT)
38398 ? gen_rtx_NOT (mode, op1)
38399 : gen_rtx_fmt_ee (code, mode, op1, op2));
38400
38401 if (complement_final_p)
38402 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38403
38404 emit_insn (gen_rtx_SET (dest, bool_rtx));
38405 }
38406
38407 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38408 operations are split immediately during RTL generation to allow for more
38409 optimizations of the AND/IOR/XOR.
38410
38411 OPERANDS is an array containing the destination and two input operands.
38412 CODE is the base operation (AND, IOR, XOR, NOT).
38413 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38414 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38415 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38419
38420 static void
38421 rs6000_split_logical_di (rtx operands[3],
38422 enum rtx_code code,
38423 bool complement_final_p,
38424 bool complement_op1_p,
38425 bool complement_op2_p)
38426 {
38427 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38428 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38429 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38430 enum hi_lo { hi = 0, lo = 1 };
38431 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38432 size_t i;
38433
38434 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38435 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38436 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38437 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38438
38439 if (code == NOT)
38440 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38441 else
38442 {
38443 if (GET_CODE (operands[2]) != CONST_INT)
38444 {
38445 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38446 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38447 }
38448 else
38449 {
38450 HOST_WIDE_INT value = INTVAL (operands[2]);
38451 HOST_WIDE_INT value_hi_lo[2];
38452
38453 gcc_assert (!complement_final_p);
38454 gcc_assert (!complement_op1_p);
38455 gcc_assert (!complement_op2_p);
38456
38457 value_hi_lo[hi] = value >> 32;
38458 value_hi_lo[lo] = value & lower_32bits;
38459
38460 for (i = 0; i < 2; i++)
38461 {
38462 HOST_WIDE_INT sub_value = value_hi_lo[i];
38463
38464 if (sub_value & sign_bit)
38465 sub_value |= upper_32bits;
38466
38467 op2_hi_lo[i] = GEN_INT (sub_value);
38468
38469 /* If this is an AND instruction, check to see if we need to load
38470 the value in a register. */
38471 if (code == AND && sub_value != -1 && sub_value != 0
38472 && !and_operand (op2_hi_lo[i], SImode))
38473 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38474 }
38475 }
38476 }
38477
38478 for (i = 0; i < 2; i++)
38479 {
38480 /* Split large IOR/XOR operations. */
38481 if ((code == IOR || code == XOR)
38482 && GET_CODE (op2_hi_lo[i]) == CONST_INT
38483 && !complement_final_p
38484 && !complement_op1_p
38485 && !complement_op2_p
38486 && !logical_const_operand (op2_hi_lo[i], SImode))
38487 {
38488 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38489 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38490 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38491 rtx tmp = gen_reg_rtx (SImode);
38492
38493 /* Make sure the constant is sign extended. */
38494 if ((hi_16bits & sign_bit) != 0)
38495 hi_16bits |= upper_32bits;
38496
38497 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38498 code, SImode, false, false, false);
38499
38500 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38501 code, SImode, false, false, false);
38502 }
38503 else
38504 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38505 code, SImode, complement_final_p,
38506 complement_op1_p, complement_op2_p);
38507 }
38508
38509 return;
38510 }
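
/* Worked example of the IOR/XOR splitting above, with illustrative values:
   for code == XOR and a word constant of 0x12345678 (not a
   logical_const_operand, since both halves are nonzero), the split is

     tmp  = op1 ^ 0x12340000		; xoris tmp,op1,0x1234
     dest = tmp ^ 0x00005678		; xori  dest,tmp,0x5678
*/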
38511
38512 /* Split the insns that make up boolean operations operating on multiple GPR
38513 registers. The boolean MD patterns ensure that the inputs either are
38514 exactly the same as the output registers, or there is no overlap.
38515
38516 OPERANDS is an array containing the destination and two input operands.
38517 CODE is the base operation (AND, IOR, XOR, NOT).
38518 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38519 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38520 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38521
38522 void
38523 rs6000_split_logical (rtx operands[3],
38524 enum rtx_code code,
38525 bool complement_final_p,
38526 bool complement_op1_p,
38527 bool complement_op2_p)
38528 {
38529 machine_mode mode = GET_MODE (operands[0]);
38530 machine_mode sub_mode;
38531 rtx op0, op1, op2;
38532 int sub_size, regno0, regno1, nregs, i;
38533
38534 /* If this is DImode, use the specialized version that can run before
38535 register allocation. */
38536 if (mode == DImode && !TARGET_POWERPC64)
38537 {
38538 rs6000_split_logical_di (operands, code, complement_final_p,
38539 complement_op1_p, complement_op2_p);
38540 return;
38541 }
38542
38543 op0 = operands[0];
38544 op1 = operands[1];
38545 op2 = (code == NOT) ? NULL_RTX : operands[2];
38546 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38547 sub_size = GET_MODE_SIZE (sub_mode);
38548 regno0 = REGNO (op0);
38549 regno1 = REGNO (op1);
38550
38551 gcc_assert (reload_completed);
38552 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38553 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38554
38555 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38556 gcc_assert (nregs > 1);
38557
38558 if (op2 && REG_P (op2))
38559 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38560
38561 for (i = 0; i < nregs; i++)
38562 {
38563 int offset = i * sub_size;
38564 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38565 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38566 rtx sub_op2 = ((code == NOT)
38567 ? NULL_RTX
38568 : simplify_subreg (sub_mode, op2, mode, offset));
38569
38570 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38571 complement_final_p, complement_op1_p,
38572 complement_op2_p);
38573 }
38574
38575 return;
38576 }
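
/* For example (assuming a 128-bit integer mode held in a GPR pair on a
   64-bit target), the loop above splits one IOR into two DImode IORs, one
   per subword, each emitted by rs6000_split_logical_inner.  */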
38577
38578 \f
38579 /* Return true if the peephole2 pass can combine an addis instruction and
38580 a load with an offset into a single fused load on a power8. */
38582
38583 bool
38584 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38585 rtx addis_value, /* addis value. */
38586 rtx target, /* target register that is loaded. */
38587 rtx mem) /* bottom part of the memory addr. */
38588 {
38589 rtx addr;
38590 rtx base_reg;
38591
38592 /* Validate arguments. */
38593 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38594 return false;
38595
38596 if (!base_reg_operand (target, GET_MODE (target)))
38597 return false;
38598
38599 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38600 return false;
38601
38602 /* Allow sign/zero extension. */
38603 if (GET_CODE (mem) == ZERO_EXTEND
38604 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38605 mem = XEXP (mem, 0);
38606
38607 if (!MEM_P (mem))
38608 return false;
38609
38610 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38611 return false;
38612
38613 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38614 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38615 return false;
38616
38617 /* Validate that the register used to load the high value is either the
38618 register being loaded, or we can safely replace its use.
38619
38620 This function is only called from the peephole2 pass and we assume that
38621 there are 2 instructions in the peephole (addis and load), so we check
38622 that the target register is not used in the memory address and that the
38623 register holding the addis result is dead after the peephole. */
38624 if (REGNO (addis_reg) != REGNO (target))
38625 {
38626 if (reg_mentioned_p (target, mem))
38627 return false;
38628
38629 if (!peep2_reg_dead_p (2, addis_reg))
38630 return false;
38631
38632 /* If the target register being loaded is the stack pointer, we must
38633 avoid loading any other value into it, even temporarily. */
38634 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38635 return false;
38636 }
38637
38638 base_reg = XEXP (addr, 0);
38639 return REGNO (addis_reg) == REGNO (base_reg);
38640 }
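
/* Example of a candidate peephole2 pair (pseudo-assembly, with hypothetical
   registers and symbol):

     addis 10,2,sym@toc@ha	; addis_reg = r10, addis_value = the high part
     lwz   9,sym@toc@l(10)	; target = r9, mem addressed via r10

   Because r10 != r9 here, the checks above require that r9 not appear in
   the memory address and that r10 be dead after the load.  */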
38641
38642 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38643 sequence. We adjust the addis register to use the target register. If the
38644 load sign extends, we adjust the code to do the zero extending load, and an
38645 explicit sign extension later since the fusion only covers zero extending
38646 loads.
38647
38648 The operands are:
38649 operands[0] register set with addis (to be replaced with target)
38650 operands[1] value set via addis
38651 operands[2] target register being loaded
38652 operands[3] D-form memory reference using operands[0]. */
38653
38654 void
38655 expand_fusion_gpr_load (rtx *operands)
38656 {
38657 rtx addis_value = operands[1];
38658 rtx target = operands[2];
38659 rtx orig_mem = operands[3];
38660 rtx new_addr, new_mem, orig_addr, offset;
38661 enum rtx_code plus_or_lo_sum;
38662 machine_mode target_mode = GET_MODE (target);
38663 machine_mode extend_mode = target_mode;
38664 machine_mode ptr_mode = Pmode;
38665 enum rtx_code extend = UNKNOWN;
38666
38667 if (GET_CODE (orig_mem) == ZERO_EXTEND
38668 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38669 {
38670 extend = GET_CODE (orig_mem);
38671 orig_mem = XEXP (orig_mem, 0);
38672 target_mode = GET_MODE (orig_mem);
38673 }
38674
38675 gcc_assert (MEM_P (orig_mem));
38676
38677 orig_addr = XEXP (orig_mem, 0);
38678 plus_or_lo_sum = GET_CODE (orig_addr);
38679 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38680
38681 offset = XEXP (orig_addr, 1);
38682 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38683 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38684
38685 if (extend != UNKNOWN)
38686 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38687
38688 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38689 UNSPEC_FUSION_GPR);
38690 emit_insn (gen_rtx_SET (target, new_mem));
38691
38692 if (extend == SIGN_EXTEND)
38693 {
38694 int sub_off = ((BYTES_BIG_ENDIAN)
38695 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38696 : 0);
38697 rtx sign_reg
38698 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38699
38700 emit_insn (gen_rtx_SET (target,
38701 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38702 }
38703
38704 return;
38705 }
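
/* Schematically, the insn generated above is

     (set (reg target) (unspec [(mem ...)] UNSPEC_FUSION_GPR))

   where the mem may be wrapped in a zero_extend, followed when the original
   load sign-extended by a separate (set target (sign_extend ...)) of the
   loaded subword.  */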
38706
38707 /* Emit the addis instruction that will be part of a fused instruction
38708 sequence. */
38709
38710 void
38711 emit_fusion_addis (rtx target, rtx addis_value)
38712 {
38713 rtx fuse_ops[10];
38714 const char *addis_str = NULL;
38715
38716 /* Emit the addis instruction. */
38717 fuse_ops[0] = target;
38718 if (satisfies_constraint_L (addis_value))
38719 {
38720 fuse_ops[1] = addis_value;
38721 addis_str = "lis %0,%v1";
38722 }
38723
38724 else if (GET_CODE (addis_value) == PLUS)
38725 {
38726 rtx op0 = XEXP (addis_value, 0);
38727 rtx op1 = XEXP (addis_value, 1);
38728
38729 if (REG_P (op0) && CONST_INT_P (op1)
38730 && satisfies_constraint_L (op1))
38731 {
38732 fuse_ops[1] = op0;
38733 fuse_ops[2] = op1;
38734 addis_str = "addis %0,%1,%v2";
38735 }
38736 }
38737
38738 else if (GET_CODE (addis_value) == HIGH)
38739 {
38740 rtx value = XEXP (addis_value, 0);
38741 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38742 {
38743 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38744 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38745 if (TARGET_ELF)
38746 addis_str = "addis %0,%2,%1@toc@ha";
38747
38748 else if (TARGET_XCOFF)
38749 addis_str = "addis %0,%1@u(%2)";
38750
38751 else
38752 gcc_unreachable ();
38753 }
38754
38755 else if (GET_CODE (value) == PLUS)
38756 {
38757 rtx op0 = XEXP (value, 0);
38758 rtx op1 = XEXP (value, 1);
38759
38760 if (GET_CODE (op0) == UNSPEC
38761 && XINT (op0, 1) == UNSPEC_TOCREL
38762 && CONST_INT_P (op1))
38763 {
38764 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38765 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38766 fuse_ops[3] = op1;
38767 if (TARGET_ELF)
38768 addis_str = "addis %0,%2,%1+%3@toc@ha";
38769
38770 else if (TARGET_XCOFF)
38771 addis_str = "addis %0,%1+%3@u(%2)";
38772
38773 else
38774 gcc_unreachable ();
38775 }
38776 }
38777
38778 else if (satisfies_constraint_L (value))
38779 {
38780 fuse_ops[1] = value;
38781 addis_str = "lis %0,%v1";
38782 }
38783
38784 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38785 {
38786 fuse_ops[1] = value;
38787 addis_str = "lis %0,%1@ha";
38788 }
38789 }
38790
38791 if (!addis_str)
38792 fatal_insn ("Could not generate addis value for fusion", addis_value);
38793
38794 output_asm_insn (addis_str, fuse_ops);
38795 }
38796
38797 /* Emit a D-form load or store instruction that is the second instruction
38798 of a fusion sequence. */
38799
38800 static void
38801 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38802 {
38803 rtx fuse_ops[10];
38804 char insn_template[80];
38805
38806 fuse_ops[0] = load_reg;
38807 fuse_ops[1] = addis_reg;
38808
38809 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38810 {
38811 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38812 fuse_ops[2] = offset;
38813 output_asm_insn (insn_template, fuse_ops);
38814 }
38815
38816 else if (GET_CODE (offset) == UNSPEC
38817 && XINT (offset, 1) == UNSPEC_TOCREL)
38818 {
38819 if (TARGET_ELF)
38820 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38821
38822 else if (TARGET_XCOFF)
38823 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38824
38825 else
38826 gcc_unreachable ();
38827
38828 fuse_ops[2] = XVECEXP (offset, 0, 0);
38829 output_asm_insn (insn_template, fuse_ops);
38830 }
38831
38832 else if (GET_CODE (offset) == PLUS
38833 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38834 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38835 && CONST_INT_P (XEXP (offset, 1)))
38836 {
38837 rtx tocrel_unspec = XEXP (offset, 0);
38838 if (TARGET_ELF)
38839 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38840
38841 else if (TARGET_XCOFF)
38842 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38843
38844 else
38845 gcc_unreachable ();
38846
38847 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38848 fuse_ops[3] = XEXP (offset, 1);
38849 output_asm_insn (insn_template, fuse_ops);
38850 }
38851
38852 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38853 {
38854 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38855
38856 fuse_ops[2] = offset;
38857 output_asm_insn (insn_template, fuse_ops);
38858 }
38859
38860 else
38861 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38862
38863 return;
38864 }
38865
38866 /* Given an address, convert it into the addis and load offset parts. Addresses
38867 created during the peephole2 process look like:
38868 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38869 (unspec [(...)] UNSPEC_TOCREL)) */
38870
38871 static void
38872 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38873 {
38874 rtx hi, lo;
38875
38876 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38877 {
38878 hi = XEXP (addr, 0);
38879 lo = XEXP (addr, 1);
38880 }
38881 else
38882 gcc_unreachable ();
38883
38884 *p_hi = hi;
38885 *p_lo = lo;
38886 }
38887
38888 /* Return a string to fuse an addis instruction with a GPR load into the same
38889 register that the addis instruction set. The address that is used
38890 is the logical address that was formed during peephole2:
38891 (lo_sum (high) (low-part))
38892
38893 The code is complicated, so we call output_asm_insn directly, and just
38894 return "". */
38895
38896 const char *
38897 emit_fusion_gpr_load (rtx target, rtx mem)
38898 {
38899 rtx addis_value;
38900 rtx addr;
38901 rtx load_offset;
38902 const char *load_str = NULL;
38903 machine_mode mode;
38904
38905 if (GET_CODE (mem) == ZERO_EXTEND)
38906 mem = XEXP (mem, 0);
38907
38908 gcc_assert (REG_P (target) && MEM_P (mem));
38909
38910 addr = XEXP (mem, 0);
38911 fusion_split_address (addr, &addis_value, &load_offset);
38912
38913 /* Now emit the load instruction to the same register. */
38914 mode = GET_MODE (mem);
38915 switch (mode)
38916 {
38917 case E_QImode:
38918 load_str = "lbz";
38919 break;
38920
38921 case E_HImode:
38922 load_str = "lhz";
38923 break;
38924
38925 case E_SImode:
38926 case E_SFmode:
38927 load_str = "lwz";
38928 break;
38929
38930 case E_DImode:
38931 case E_DFmode:
38932 gcc_assert (TARGET_POWERPC64);
38933 load_str = "ld";
38934 break;
38935
38936 default:
38937 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38938 }
38939
38940 /* Emit the addis instruction. */
38941 emit_fusion_addis (target, addis_value);
38942
38943 /* Emit the D-form load instruction. */
38944 emit_fusion_load (target, target, load_offset, load_str);
38945
38946 return "";
38947 }
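
/* Putting emit_fusion_addis and emit_fusion_load together, a TOC-relative
   SImode load on ELF comes out as, e.g. (symbol name hypothetical):

     addis 9,2,sym@toc@ha
     lwz   9,sym@toc@l(9)

   i.e. both instructions target the same register, which is the form the
   power8 fusion hardware expects.  */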
38948 \f
38949
38950 #ifdef RS6000_GLIBC_ATOMIC_FENV
38951 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38952 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38953 #endif
38954
38955 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38956
38957 static void
38958 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38959 {
38960 if (!TARGET_HARD_FLOAT)
38961 {
38962 #ifdef RS6000_GLIBC_ATOMIC_FENV
38963 if (atomic_hold_decl == NULL_TREE)
38964 {
38965 atomic_hold_decl
38966 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38967 get_identifier ("__atomic_feholdexcept"),
38968 build_function_type_list (void_type_node,
38969 double_ptr_type_node,
38970 NULL_TREE));
38971 TREE_PUBLIC (atomic_hold_decl) = 1;
38972 DECL_EXTERNAL (atomic_hold_decl) = 1;
38973 }
38974
38975 if (atomic_clear_decl == NULL_TREE)
38976 {
38977 atomic_clear_decl
38978 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38979 get_identifier ("__atomic_feclearexcept"),
38980 build_function_type_list (void_type_node,
38981 NULL_TREE));
38982 TREE_PUBLIC (atomic_clear_decl) = 1;
38983 DECL_EXTERNAL (atomic_clear_decl) = 1;
38984 }
38985
38986 tree const_double = build_qualified_type (double_type_node,
38987 TYPE_QUAL_CONST);
38988 tree const_double_ptr = build_pointer_type (const_double);
38989 if (atomic_update_decl == NULL_TREE)
38990 {
38991 atomic_update_decl
38992 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38993 get_identifier ("__atomic_feupdateenv"),
38994 build_function_type_list (void_type_node,
38995 const_double_ptr,
38996 NULL_TREE));
38997 TREE_PUBLIC (atomic_update_decl) = 1;
38998 DECL_EXTERNAL (atomic_update_decl) = 1;
38999 }
39000
39001 tree fenv_var = create_tmp_var_raw (double_type_node);
39002 TREE_ADDRESSABLE (fenv_var) = 1;
39003 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
39004
39005 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
39006 *clear = build_call_expr (atomic_clear_decl, 0);
39007 *update = build_call_expr (atomic_update_decl, 1,
39008 fold_convert (const_double_ptr, fenv_addr));
39009 #endif
39010 return;
39011 }
39012
39013 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
39014 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
39015 tree call_mffs = build_call_expr (mffs, 0);
39016
39017 /* Generates the equivalent of feholdexcept (&fenv_var)
39018
39019 *fenv_var = __builtin_mffs ();
39020 double fenv_hold;
39021 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
39022 __builtin_mtfsf (0xff, fenv_hold); */
39023
39024 /* Mask to clear everything except for the rounding modes and non-IEEE
39025 arithmetic flag. */
39026 const unsigned HOST_WIDE_INT hold_exception_mask =
39027 HOST_WIDE_INT_C (0xffffffff00000007);
39028
39029 tree fenv_var = create_tmp_var_raw (double_type_node);
39030
39031 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
39032
39033 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
39034 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39035 build_int_cst (uint64_type_node,
39036 hold_exception_mask));
39037
39038 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39039 fenv_llu_and);
39040
39041 tree hold_mtfsf = build_call_expr (mtfsf, 2,
39042 build_int_cst (unsigned_type_node, 0xff),
39043 fenv_hold_mtfsf);
39044
39045 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
39046
39047 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
39048
39049 double fenv_clear = __builtin_mffs ();
39050 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
39051 __builtin_mtfsf (0xff, fenv_clear); */
39052
39053 /* Mask to clear everything in the lower 32 bits of the image (i.e. the
39054 whole FPSCR, including the rounding modes). */
39055 const unsigned HOST_WIDE_INT clear_exception_mask =
39056 HOST_WIDE_INT_C (0xffffffff00000000);
39057
39058 tree fenv_clear = create_tmp_var_raw (double_type_node);
39059
39060 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39061
39062 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39063 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39064 fenv_clean_llu,
39065 build_int_cst (uint64_type_node,
39066 clear_exception_mask));
39067
39068 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39069 fenv_clear_llu_and);
39070
39071 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39072 build_int_cst (unsigned_type_node, 0xff),
39073 fenv_clear_mtfsf);
39074
39075 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39076
39077 /* Generates the equivalent of feupdateenv (&fenv_var)
39078
39079 double old_fenv = __builtin_mffs ();
39080 double fenv_update;
39081 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39082 (*(uint64_t*)fenv_var & 0x1ff80fff);
39083 __builtin_mtfsf (0xff, fenv_update); */
39084
39085 const unsigned HOST_WIDE_INT update_exception_mask =
39086 HOST_WIDE_INT_C (0xffffffff1fffff00);
39087 const unsigned HOST_WIDE_INT new_exception_mask =
39088 HOST_WIDE_INT_C (0x1ff80fff);
39089
39090 tree old_fenv = create_tmp_var_raw (double_type_node);
39091 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39092
39093 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39094 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39095 build_int_cst (uint64_type_node,
39096 update_exception_mask));
39097
39098 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39099 build_int_cst (uint64_type_node,
39100 new_exception_mask));
39101
39102 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39103 old_llu_and, new_llu_and);
39104
39105 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39106 new_llu_mask);
39107
39108 tree update_mtfsf = build_call_expr (mtfsf, 2,
39109 build_int_cst (unsigned_type_node, 0xff),
39110 fenv_update_mtfsf);
39111
39112 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39113 }
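
/* Schematically (the exact expansion is owned by the caller), the three
   trees built above are used when expanding an atomic floating-point
   operation as

     hold;		// save the FP environment, enter non-stop mode
     loop {
       clear;		// discard exceptions raised by a failed attempt
       ... compute and try to install the new value ...
     }
     update;		// restore the environment, re-raise real exceptions
*/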
39114
39115 void
39116 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39117 {
39118 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39119
39120 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39121 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39122
39123 /* The layout of the destination of the vmrgew instruction is:
39124 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39125 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39126 vmrgew instruction will be correct. */
39127 if (BYTES_BIG_ENDIAN)
39128 {
39129 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39130 GEN_INT (0)));
39131 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39132 GEN_INT (3)));
39133 }
39134 else
39135 {
39136 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39137 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39138 }
39139
39140 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39141 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39142
39143 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39144 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39145
39146 if (BYTES_BIG_ENDIAN)
39147 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39148 else
39149 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39150 }
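
/* Net effect, assuming the usual vec_float2 element order: the four SFmode
   elements of DST are

     dst = { (float) src1[0], (float) src1[1],
             (float) src2[0], (float) src2[1] }

   and the xxpermdi/vmrgew sequence above exists only to produce that order
   on both endiannesses.  */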
39151
39152 void
39153 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39154 {
39155 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39156
39157 rtx_tmp0 = gen_reg_rtx (V2DImode);
39158 rtx_tmp1 = gen_reg_rtx (V2DImode);
39159
39160 /* The layout of the destination of the vmrgew instruction is:
39161 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39162 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39163 vmrgew instruction will be correct. */
39164 if (BYTES_BIG_ENDIAN)
39165 {
39166 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39167 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39168 }
39169 else
39170 {
39171 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39172 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39173 }
39174
39175 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39176 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39177
39178 if (signed_convert)
39179 {
39180 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39181 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39182 }
39183 else
39184 {
39185 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39186 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39187 }
39188
39189 if (BYTES_BIG_ENDIAN)
39190 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39191 else
39192 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39193 }
39194
39195 void
39196 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39197 rtx src2)
39198 {
39199 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39200
39201 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39202 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39203
39204 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39205 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39206
39207 rtx_tmp2 = gen_reg_rtx (V4SImode);
39208 rtx_tmp3 = gen_reg_rtx (V4SImode);
39209
39210 if (signed_convert)
39211 {
39212 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39213 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39214 }
39215 else
39216 {
39217 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39218 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39219 }
39220
39221 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39222 }
39223
39224 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39225
39226 static bool
39227 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39228 optimization_type opt_type)
39229 {
39230 switch (op)
39231 {
39232 case rsqrt_optab:
39233 return (opt_type == OPTIMIZE_FOR_SPEED
39234 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39235
39236 default:
39237 return true;
39238 }
39239 }
39240
39241 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39242
39243 static HOST_WIDE_INT
39244 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39245 {
39246 if (TREE_CODE (exp) == STRING_CST
39247 && (STRICT_ALIGNMENT || !optimize_size))
39248 return MAX (align, BITS_PER_WORD);
39249 return align;
39250 }
39251
39252 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39253
39254 static HOST_WIDE_INT
39255 rs6000_starting_frame_offset (void)
39256 {
39257 if (FRAME_GROWS_DOWNWARD)
39258 return 0;
39259 return RS6000_STARTING_FRAME_OFFSET;
39260 }
39261 \f
39262
39263 /* Create an alias for a mangled name where we have changed the mangling (in
39264 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39265 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
39266
39267 #if TARGET_ELF && RS6000_WEAK
39268 static void
39269 rs6000_globalize_decl_name (FILE * stream, tree decl)
39270 {
39271 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39272
39273 targetm.asm_out.globalize_label (stream, name);
39274
39275 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39276 {
39277 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39278 const char *old_name;
39279
39280 ieee128_mangling_gcc_8_1 = true;
39281 lang_hooks.set_decl_assembler_name (decl);
39282 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39283 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39284 ieee128_mangling_gcc_8_1 = false;
39285
39286 if (strcmp (name, old_name) != 0)
39287 {
39288 fprintf (stream, "\t.weak %s\n", old_name);
39289 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39290 }
39291 }
39292 }
39293 #endif
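
/* For a function such as foo (__float128), the alias emitted above would
   look like (mangled names here are hypothetical):

     .weak _Z3fooU10__float128
     .set  _Z3fooU10__float128,_Z3foou9__ieee128

   so that objects compiled with the GCC 8.1 mangling still link against the
   new symbol.  */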
39294
39295 \f
39296 /* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
39297 function names from <foo>l to <foo>f128 if the default long double type is
39298 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39299 include file switches the names on systems that support long double as IEEE
39300 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39301 In the future, glibc will export names like __ieee128_sinf128 and we can
39302 switch to using those instead of using sinf128, which pollutes the user's
39303 namespace.
39304
39305 This will switch the names for Fortran math functions as well (which doesn't
39306 use math.h). However, Fortran needs other changes to the compiler and
39307 library before you can switch the real*16 type at compile time.
39308
39309 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39310 only do this if the default is that long double is IBM extended double, and
39311 the user asked for IEEE 128-bit. */
39312
39313 static tree
39314 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39315 {
39316 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
39317 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39318 {
39319 size_t len = IDENTIFIER_LENGTH (id);
39320 const char *name = IDENTIFIER_POINTER (id);
39321
39322 if (name[len - 1] == 'l')
39323 {
39324 bool uses_ieee128_p = false;
39325 tree type = TREE_TYPE (decl);
39326 machine_mode ret_mode = TYPE_MODE (type);
39327
39328 /* See if the function returns an IEEE 128-bit floating point type or
39329 complex type. */
39330 if (ret_mode == TFmode || ret_mode == TCmode)
39331 uses_ieee128_p = true;
39332 else
39333 {
39334 function_args_iterator args_iter;
39335 tree arg;
39336
39337 /* See if the function passes an IEEE 128-bit floating point type
39338 or complex type. */
39339 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39340 {
39341 machine_mode arg_mode = TYPE_MODE (arg);
39342 if (arg_mode == TFmode || arg_mode == TCmode)
39343 {
39344 uses_ieee128_p = true;
39345 break;
39346 }
39347 }
39348 }
39349
39350 /* If we passed or returned an IEEE 128-bit floating point type,
39351 change the name. */
39352 if (uses_ieee128_p)
39353 {
39354 char *name2 = (char *) alloca (len + 4);
39355 memcpy (name2, name, len - 1);
39356 strcpy (name2 + len - 1, "f128");
39357 id = get_identifier (name2);
39358 }
39359 }
39360 }
39361
39362 return id;
39363 }
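
/* For example, when the target defaults to IBM extended double but the user
   asked for IEEE 128-bit long double, the built-in sinl is renamed here to
   sinf128, matching the *f128 names the C library provides.  */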
39364
39365 \f
39366 struct gcc_target targetm = TARGET_INITIALIZER;
39367
39368 #include "gt-rs6000.h"