rs6000: Delete many HAVE_AS_* (PR87149)
gcc/config/rs6000/rs6000.c
/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, which we call so we
   can get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;	/* option name */
  unsigned int mask;	/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
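
/* Illustrative sketch, not part of the original source: how an argument of
   -mrecip=<name> maps onto the table above.  The real parsing loop lives in
   rs6000_option_override_internal; this hypothetical helper only shows the
   lookup, e.g. "divf" yields RECIP_SF_DIV | RECIP_V4SF_DIV.  */

static unsigned int ATTRIBUTE_UNUSED
recip_option_to_mask_example (const char *name)
{
  for (size_t i = 0; i < ARRAY_SIZE (recip_options); i++)
    if (strcmp (recip_options[i].string, name) == 0)
      return recip_options[i].mask;
  return RECIP_NONE;		/* Unknown -mrecip= argument.  */
}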

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
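
/* Example of the user-level builtin backed by the table above (illustrative;
   do_power9_work is a hypothetical user function):

     if (__builtin_cpu_is ("power9"))
       do_power9_work ();

   The string must be one of the .cpu names listed above; it is compared
   against the AT_PLATFORM value that a new-enough glibc caches in the TCB.  */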

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
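
/* Example of the matching feature-test builtin (illustrative;
   compute_with_vsx and compute_scalar are hypothetical):

     if (__builtin_cpu_supports ("vsx"))
       compute_with_vsx ();
     else
       compute_scalar ();

   The name selects one row above; rows with id 0 test AT_HWCAP bits and rows
   with id 1 test AT_HWCAP2 bits.  */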

/* On PowerPC, we have a limited number of target clones that we care about,
   which means we can use an array to hold the options, rather than having
   more elaborate data structures to identify each possible variation.  Order
   the clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },					/* Default options.  */
  { OPTION_MASK_CMPB, "arch_2_05" },		/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD, "arch_2_06" },		/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR, "arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR, "arch_3_00" },	/* ISA 3.00 (power9).  */
};
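
/* Example of the source-level feature this map supports (illustrative; mod
   is a hypothetical user function):

     __attribute__ ((target_clones ("cpu=power9", "default")))
     long mod (long a, long b) { return a % b; }

   compiles one clone per listed option plus an ifunc resolver that picks a
   clone at load time via __builtin_cpu_is/__builtin_cpu_supports.  */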


/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* Quad offset is limited.  */

/* Masks of valid addressing modes, indexed by register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
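
/* Illustrative note, not from the original source: the DQ-form loads/stores
   of ISA 3.0 (lxv/stxv) encode their displacement in 16-byte units, so
   "lxv vs0,32(r3)" is encodable while "lxv vs0,20(r3)" is not (the offset is
   not a multiple of 16).  That is why RELOAD_REG_QUAD_OFFSET is tracked
   separately from plain RELOAD_REG_OFFSET addressing.  */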

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  return store_data_bypass_p (out_insn, in_insn);
}
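
/* Usage note (hedged): this predicate serves as the guard of define_bypass
   entries in this port's power*.md scheduling descriptions, which describe
   bypasses where a producer feeds the stored value, not the address, of a
   following store.  */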

\f
/* Processor costs (relative to an add).  */

const struct processor_costs *rs6000_cost;
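
/* Note (illustrative): COSTS_N_INSNS (N) expands to N * 4 (see rtl.h), i.e.
   the cost of N instructions measured against a single add at cost 4.  The
   power7 entry below, for example, rates a double-precision divide at
   COSTS_N_INSNS (16), sixteen times the cost of an add.  */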

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),		/* mulsi_const */
  COSTS_N_INSNS (6/2),		/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),		/* divsi */
  COSTS_N_INSNS (70/2),		/* divdi */
  COSTS_N_INSNS (10/2),		/* fp */
  COSTS_N_INSNS (10/2),		/* dmul */
  COSTS_N_INSNS (74/2),		/* sdiv */
  COSTS_N_INSNS (74/2),		/* ddiv */
  128,				/* cache line size */
  32,				/* l1 cache */
  512,				/* l2 cache */
  6,				/* streams */
  0,				/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X
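
/* Illustration of the X-macro scheme above, using a hypothetical entry: a
   line in rs6000-builtin.def such as

     RS6000_BUILTIN_2 (MY_BUILTIN, "__builtin_my_op", MY_MASK, MY_ATTR,
		       CODE_FOR_my_op)

   expands under the definitions above to the initializer

     { "__builtin_my_op", CODE_FOR_my_op, MY_MASK, MY_ATTR },

   so rs6000_builtin_info gets one row per builtin in the .def file.  */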

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",	 1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",	 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,	 0, 0, false, false, false, false, NULL, NULL }
};
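
/* Example uses of the machine attributes above (illustrative; far_func and
   pack_like_msvc are hypothetical):

     void far_func (void) __attribute__ ((longcall));
     struct pack_like_msvc { char c; int i; } __attribute__ ((ms_struct));

   "longcall" forces the call through a register so the callee may lie beyond
   the reach of a direct bl branch; "ms_struct"/"gcc_struct" select the
   record layout convention.  */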
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
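
/* Illustrative expansions, not in the original source:

     ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO)     == 0x80000000   (%v0)
     ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 2) == 0x20000000   (%v2)

   so a VRSAVE mask reads left to right from %v0 down to %v31.  */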
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1674
1675 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1676 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1677 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1678 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1679 rs6000_builtin_support_vector_misalignment
1680 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1681 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1682 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1683 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1684 rs6000_builtin_vectorization_cost
1685 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1686 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1687 rs6000_preferred_simd_mode
1688 #undef TARGET_VECTORIZE_INIT_COST
1689 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1690 #undef TARGET_VECTORIZE_ADD_STMT_COST
1691 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1692 #undef TARGET_VECTORIZE_FINISH_COST
1693 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1694 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1695 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1696
1697 #undef TARGET_INIT_BUILTINS
1698 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1699 #undef TARGET_BUILTIN_DECL
1700 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1701
1702 #undef TARGET_FOLD_BUILTIN
1703 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1704 #undef TARGET_GIMPLE_FOLD_BUILTIN
1705 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1706
1707 #undef TARGET_EXPAND_BUILTIN
1708 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1709
1710 #undef TARGET_MANGLE_TYPE
1711 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1712
1713 #undef TARGET_INIT_LIBFUNCS
1714 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1715
1716 #if TARGET_MACHO
1717 #undef TARGET_BINDS_LOCAL_P
1718 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1719 #endif
1720
1721 #undef TARGET_MS_BITFIELD_LAYOUT_P
1722 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1723
1724 #undef TARGET_ASM_OUTPUT_MI_THUNK
1725 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1726
1727 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1728 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1729
1730 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1731 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1732
1733 #undef TARGET_REGISTER_MOVE_COST
1734 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1735 #undef TARGET_MEMORY_MOVE_COST
1736 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1737 #undef TARGET_CANNOT_COPY_INSN_P
1738 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1739 #undef TARGET_RTX_COSTS
1740 #define TARGET_RTX_COSTS rs6000_rtx_costs
1741 #undef TARGET_ADDRESS_COST
1742 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1743 #undef TARGET_INSN_COST
1744 #define TARGET_INSN_COST rs6000_insn_cost
1745
1746 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1747 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1748
1749 #undef TARGET_PROMOTE_FUNCTION_MODE
1750 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1751
1752 #undef TARGET_RETURN_IN_MEMORY
1753 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1754
1755 #undef TARGET_RETURN_IN_MSB
1756 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1757
1758 #undef TARGET_SETUP_INCOMING_VARARGS
1759 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1760
1761 /* Always strict argument naming on rs6000. */
1762 #undef TARGET_STRICT_ARGUMENT_NAMING
1763 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1764 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1765 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1766 #undef TARGET_SPLIT_COMPLEX_ARG
1767 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1768 #undef TARGET_MUST_PASS_IN_STACK
1769 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1770 #undef TARGET_PASS_BY_REFERENCE
1771 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1772 #undef TARGET_ARG_PARTIAL_BYTES
1773 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1774 #undef TARGET_FUNCTION_ARG_ADVANCE
1775 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1776 #undef TARGET_FUNCTION_ARG
1777 #define TARGET_FUNCTION_ARG rs6000_function_arg
1778 #undef TARGET_FUNCTION_ARG_PADDING
1779 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1780 #undef TARGET_FUNCTION_ARG_BOUNDARY
1781 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1782
1783 #undef TARGET_BUILD_BUILTIN_VA_LIST
1784 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1785
1786 #undef TARGET_EXPAND_BUILTIN_VA_START
1787 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1788
1789 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1790 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1791
1792 #undef TARGET_EH_RETURN_FILTER_MODE
1793 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1794
1795 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1796 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1797
1798 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1799 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1800
1801 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1802 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1803
1804 #undef TARGET_FLOATN_MODE
1805 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1806
1807 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1808 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1809
1810 #undef TARGET_MD_ASM_ADJUST
1811 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1812
1813 #undef TARGET_OPTION_OVERRIDE
1814 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1815
1816 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1817 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1818 rs6000_builtin_vectorized_function
1819
1820 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1821 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1822 rs6000_builtin_md_vectorized_function
1823
1824 #undef TARGET_STACK_PROTECT_GUARD
1825 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1826
1827 #if !TARGET_MACHO
1828 #undef TARGET_STACK_PROTECT_FAIL
1829 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1830 #endif
1831
1832 #ifdef HAVE_AS_TLS
1833 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1834 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1835 #endif
1836
1837 /* Use a 32-bit anchor range. This leads to sequences like:
1838
1839 addis tmp,anchor,high
1840 add dest,tmp,low
1841
1842 where tmp itself acts as an anchor, and can be shared between
1843 accesses to the same 64k page. */
1844 #undef TARGET_MIN_ANCHOR_OFFSET
1845 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1846 #undef TARGET_MAX_ANCHOR_OFFSET
1847 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
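/* A standalone sketch (not compiler code) of how a 32-bit offset is split
   for the addis/add sequence above: the add sign-extends the low half, so
   the high half must absorb the carry.  All arithmetic is modulo 2^32,
   exactly as on the machine.  */
#include <stdint.h>

static void
demo_split_offset (uint32_t offset, uint32_t *high_part, int16_t *low_part)
{
  *low_part = (int16_t) (offset & 0xffff);
  /* high_part comes out a multiple of 0x10000; addis supplies
     high_part >> 16 as its immediate.  */
  *high_part = offset - (uint32_t) (int32_t) *low_part;
}
/* demo_split_offset (0x12348765, ...) gives high 0x12350000, low -0x789b,
   and 0x12350000 + (uint32_t) (int32_t) -0x789b == 0x12348765.  */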
1848 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1849 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1850 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1851 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1852
1853 #undef TARGET_BUILTIN_RECIPROCAL
1854 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1855
1856 #undef TARGET_SECONDARY_RELOAD
1857 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1858 #undef TARGET_SECONDARY_MEMORY_NEEDED
1859 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1860 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1861 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1862
1863 #undef TARGET_LEGITIMATE_ADDRESS_P
1864 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1865
1866 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1867 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1868
1869 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1870 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1871
1872 #undef TARGET_CAN_ELIMINATE
1873 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1874
1875 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1876 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1877
1878 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1879 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1880
1881 #undef TARGET_TRAMPOLINE_INIT
1882 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1883
1884 #undef TARGET_FUNCTION_VALUE
1885 #define TARGET_FUNCTION_VALUE rs6000_function_value
1886
1887 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1888 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1889
1890 #undef TARGET_OPTION_SAVE
1891 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1892
1893 #undef TARGET_OPTION_RESTORE
1894 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1895
1896 #undef TARGET_OPTION_PRINT
1897 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1898
1899 #undef TARGET_CAN_INLINE_P
1900 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1901
1902 #undef TARGET_SET_CURRENT_FUNCTION
1903 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1904
1905 #undef TARGET_LEGITIMATE_CONSTANT_P
1906 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1907
1908 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1909 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1910
1911 #undef TARGET_CAN_USE_DOLOOP_P
1912 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1913
1914 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1915 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1916
1917 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1918 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1919 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1920 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1921 #undef TARGET_UNWIND_WORD_MODE
1922 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1923
1924 #undef TARGET_OFFLOAD_OPTIONS
1925 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1926
1927 #undef TARGET_C_MODE_FOR_SUFFIX
1928 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1929
1930 #undef TARGET_INVALID_BINARY_OP
1931 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1932
1933 #undef TARGET_OPTAB_SUPPORTED_P
1934 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1935
1936 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1937 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1938
1939 #undef TARGET_COMPARE_VERSION_PRIORITY
1940 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1941
1942 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1943 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1944 rs6000_generate_version_dispatcher_body
1945
1946 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1947 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1948 rs6000_get_function_versions_dispatcher
1949
1950 #undef TARGET_OPTION_FUNCTION_VERSIONS
1951 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1952
1953 #undef TARGET_HARD_REGNO_NREGS
1954 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1955 #undef TARGET_HARD_REGNO_MODE_OK
1956 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1957
1958 #undef TARGET_MODES_TIEABLE_P
1959 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1960
1961 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1962 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1963 rs6000_hard_regno_call_part_clobbered
1964
1965 #undef TARGET_SLOW_UNALIGNED_ACCESS
1966 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1967
1968 #undef TARGET_CAN_CHANGE_MODE_CLASS
1969 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1970
1971 #undef TARGET_CONSTANT_ALIGNMENT
1972 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1973
1974 #undef TARGET_STARTING_FRAME_OFFSET
1975 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1976
1977 #if TARGET_ELF && RS6000_WEAK
1978 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1979 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1980 #endif
1981 \f
1982
1983 /* Processor table. */
1984 struct rs6000_ptt
1985 {
1986 const char *const name; /* Canonical processor name. */
1987 const enum processor_type processor; /* Processor type enum value. */
1988 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1989 };
1990
1991 static struct rs6000_ptt const processor_target_table[] =
1992 {
1993 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1994 #include "rs6000-cpus.def"
1995 #undef RS6000_CPU
1996 };
1997
1998 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1999 name is invalid. */
2000
2001 static int
2002 rs6000_cpu_name_lookup (const char *name)
2003 {
2004 size_t i;
2005
2006 if (name != NULL)
2007 {
2008 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2009 if (! strcmp (name, processor_target_table[i].name))
2010 return (int)i;
2011 }
2012
2013 return -1;
2014 }
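/* Usage sketch (standalone, with a toy table standing in for the real one
   generated from rs6000-cpus.def):  */
#include <string.h>

static const char *const demo_cpu_names[] = { "power7", "power8", "power9" };

static int
demo_cpu_lookup (const char *name)
{
  if (name != NULL)
    for (size_t i = 0; i < sizeof (demo_cpu_names) / sizeof (demo_cpu_names[0]); i++)
      if (! strcmp (name, demo_cpu_names[i]))
        return (int) i;
  return -1;
}
/* demo_cpu_lookup ("power9") == 2; demo_cpu_lookup ("bogus") == -1.  */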
2015
2016 \f
2017 /* Return number of consecutive hard regs needed starting at reg REGNO
2018 to hold something of mode MODE.
2019 This is ordinarily the length in words of a value of mode MODE
2020 but can be less for certain modes in special long registers.
2021
2022 POWER and PowerPC GPRs hold 32 bits worth;
2023 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2024
2025 static int
2026 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2027 {
2028 unsigned HOST_WIDE_INT reg_size;
2029
2030 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2031 128-bit floating point that can go in vector registers, which has VSX
2032 memory addressing. */
2033 if (FP_REGNO_P (regno))
2034 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2035 ? UNITS_PER_VSX_WORD
2036 : UNITS_PER_FP_WORD);
2037
2038 else if (ALTIVEC_REGNO_P (regno))
2039 reg_size = UNITS_PER_ALTIVEC_WORD;
2040
2041 else
2042 reg_size = UNITS_PER_WORD;
2043
2044 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2045 }
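/* The return value is a ceiling division.  For instance, a 16-byte vector
   needs (16 + 8 - 1) / 8 = 2 GPRs or FPRs but only one 16-byte Altivec/VSX
   register.  A standalone sketch with a few checks:  */
#include <assert.h>

static int
demo_nregs (unsigned int mode_size, unsigned int reg_size)
{
  return (mode_size + reg_size - 1) / reg_size;
}

static void
demo_nregs_check (void)
{
  assert (demo_nregs (16, 8) == 2);   /* V2DF split across 64-bit regs */
  assert (demo_nregs (16, 16) == 1);  /* V2DF in one VSX register */
  assert (demo_nregs (4, 8) == 1);    /* SImode in a 64-bit GPR */
}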
2046
2047 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2048 MODE. */
2049 static int
2050 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2051 {
2052 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2053
2054 if (COMPLEX_MODE_P (mode))
2055 mode = GET_MODE_INNER (mode);
2056
2057 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2058 register pairs, and we use PTImode where we need to deal with quad
2059 word memory operations. Don't allow quad words in the argument or frame
2060 pointer registers, just registers 0..31. */
2061 if (mode == PTImode)
2062 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2063 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2064 && ((regno & 1) == 0));
2065
2066 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2067 implementations. Don't allow an item to be split between a FP register
2068 and an Altivec register. Allow TImode in all VSX registers if the user
2069 asked for it. */
2070 if (TARGET_VSX && VSX_REGNO_P (regno)
2071 && (VECTOR_MEM_VSX_P (mode)
2072 || FLOAT128_VECTOR_P (mode)
2073 || reg_addr[mode].scalar_in_vmx_p
2074 || mode == TImode
2075 || (TARGET_VADDUQM && mode == V1TImode)))
2076 {
2077 if (FP_REGNO_P (regno))
2078 return FP_REGNO_P (last_regno);
2079
2080 if (ALTIVEC_REGNO_P (regno))
2081 {
2082 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2083 return 0;
2084
2085 return ALTIVEC_REGNO_P (last_regno);
2086 }
2087 }
2088
2089 /* The GPRs can hold any mode, but values bigger than one register
2090 cannot go past R31. */
2091 if (INT_REGNO_P (regno))
2092 return INT_REGNO_P (last_regno);
2093
2094 /* The float registers (except for VSX vector modes) can only hold floating
2095 modes and DImode. */
2096 if (FP_REGNO_P (regno))
2097 {
2098 if (FLOAT128_VECTOR_P (mode))
2099 return false;
2100
2101 if (SCALAR_FLOAT_MODE_P (mode)
2102 && (mode != TDmode || (regno % 2) == 0)
2103 && FP_REGNO_P (last_regno))
2104 return 1;
2105
2106 if (GET_MODE_CLASS (mode) == MODE_INT)
2107 {
2108 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD
2109 return 1;
2110
2111 if (TARGET_P8_VECTOR && (mode == SImode))
2112 return 1;
2113
2114 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2115 return 1;
2116 }
2117
2118 return 0;
2119 }
2120
2121 /* The CR register can only hold CC modes. */
2122 if (CR_REGNO_P (regno))
2123 return GET_MODE_CLASS (mode) == MODE_CC;
2124
2125 if (CA_REGNO_P (regno))
2126 return mode == Pmode || mode == SImode;
2127
2128 /* AltiVec modes can go only in AltiVec registers. */
2129 if (ALTIVEC_REGNO_P (regno))
2130 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2131 || mode == V1TImode);
2132
2133 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2134 registers, and the value must fit within the register set. */
2135
2136 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2137 }
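/* A sketch of the PTImode placement rule tested above: a quad-word value
   must start on an even GPR and the whole pair must stay within r0..r31.  */
static int
demo_quad_gprs_ok (int regno, int nregs)
{
  int last = regno + nregs - 1;
  return regno >= 0 && last <= 31 && (regno & 1) == 0;
}
/* demo_quad_gprs_ok (10, 2) == 1; demo_quad_gprs_ok (11, 2) == 0 (odd);
   demo_quad_gprs_ok (30, 4) == 0 (would run past r31).  */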
2138
2139 /* Implement TARGET_HARD_REGNO_NREGS. */
2140
2141 static unsigned int
2142 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2143 {
2144 return rs6000_hard_regno_nregs[mode][regno];
2145 }
2146
2147 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2148
2149 static bool
2150 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2151 {
2152 return rs6000_hard_regno_mode_ok_p[mode][regno];
2153 }
2154
2155 /* Implement TARGET_MODES_TIEABLE_P.
2156
2157 PTImode cannot tie with other modes because PTImode is restricted to even
2158 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2159 57744).
2160
2161 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2162 128-bit floating point on VSX systems ties with other vectors. */
2163
2164 static bool
2165 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2166 {
2167 if (mode1 == PTImode)
2168 return mode2 == PTImode;
2169 if (mode2 == PTImode)
2170 return false;
2171
2172 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2173 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2174 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2175 return false;
2176
2177 if (SCALAR_FLOAT_MODE_P (mode1))
2178 return SCALAR_FLOAT_MODE_P (mode2);
2179 if (SCALAR_FLOAT_MODE_P (mode2))
2180 return false;
2181
2182 if (GET_MODE_CLASS (mode1) == MODE_CC)
2183 return GET_MODE_CLASS (mode2) == MODE_CC;
2184 if (GET_MODE_CLASS (mode2) == MODE_CC)
2185 return false;
2186
2187 return true;
2188 }
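/* The chain of early-outs above amounts to classifying each mode into a
   bucket, using the same priority order (PTI, vector, scalar float, CC,
   other), and tying exactly when the buckets match.  A sketch:  */
enum demo_tie_bucket { DEMO_TIE_PTI, DEMO_TIE_VECTOR, DEMO_TIE_FLOAT,
                       DEMO_TIE_CC, DEMO_TIE_OTHER };

static int
demo_buckets_tie_p (enum demo_tie_bucket b1, enum demo_tie_bucket b2)
{
  return b1 == b2;
}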
2189
2190 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2191
2192 static bool
2193 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2194 {
2195 if (TARGET_32BIT
2196 && TARGET_POWERPC64
2197 && GET_MODE_SIZE (mode) > 4
2198 && INT_REGNO_P (regno))
2199 return true;
2200
2201 if (TARGET_VSX
2202 && FP_REGNO_P (regno)
2203 && GET_MODE_SIZE (mode) > 8
2204 && !FLOAT128_2REG_P (mode))
2205 return true;
2206
2207 return false;
2208 }
2209
2210 /* Print interesting facts about registers. */
2211 static void
2212 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2213 {
2214 int r, m;
2215
2216 for (r = first_regno; r <= last_regno; ++r)
2217 {
2218 const char *comma = "";
2219 int len;
2220
2221 if (first_regno == last_regno)
2222 fprintf (stderr, "%s:\t", reg_name);
2223 else
2224 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2225
2226 len = 8;
2227 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2228 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2229 {
2230 if (len > 70)
2231 {
2232 fprintf (stderr, ",\n\t");
2233 len = 8;
2234 comma = "";
2235 }
2236
2237 if (rs6000_hard_regno_nregs[m][r] > 1)
2238 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2239 rs6000_hard_regno_nregs[m][r]);
2240 else
2241 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2242
2243 comma = ", ";
2244 }
2245
2246 if (call_used_regs[r])
2247 {
2248 if (len > 70)
2249 {
2250 fprintf (stderr, ",\n\t");
2251 len = 8;
2252 comma = "";
2253 }
2254
2255 len += fprintf (stderr, "%s%s", comma, "call-used");
2256 comma = ", ";
2257 }
2258
2259 if (fixed_regs[r])
2260 {
2261 if (len > 70)
2262 {
2263 fprintf (stderr, ",\n\t");
2264 len = 8;
2265 comma = "";
2266 }
2267
2268 len += fprintf (stderr, "%s%s", comma, "fixed");
2269 comma = ", ";
2270 }
2271
2272 if (len > 70)
2273 {
2274 fprintf (stderr, ",\n\t");
2275 comma = "";
2276 }
2277
2278 len += fprintf (stderr, "%sreg-class = %s", comma,
2279 reg_class_names[(int)rs6000_regno_regclass[r]]);
2280 comma = ", ";
2281
2282 if (len > 70)
2283 {
2284 fprintf (stderr, ",\n\t");
2285 comma = "";
2286 }
2287
2288 fprintf (stderr, "%sregno = %d\n", comma, r);
2289 }
2290 }
2291
2292 static const char *
2293 rs6000_debug_vector_unit (enum rs6000_vector v)
2294 {
2295 const char *ret;
2296
2297 switch (v)
2298 {
2299 case VECTOR_NONE: ret = "none"; break;
2300 case VECTOR_ALTIVEC: ret = "altivec"; break;
2301 case VECTOR_VSX: ret = "vsx"; break;
2302 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2303 default: ret = "unknown"; break;
2304 }
2305
2306 return ret;
2307 }
2308
2309 /* Inner function printing just the address mask for a particular reload
2310 register class. */
2311 DEBUG_FUNCTION char *
2312 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2313 {
2314 static char ret[8];
2315 char *p = ret;
2316
2317 if ((mask & RELOAD_REG_VALID) != 0)
2318 *p++ = 'v';
2319 else if (keep_spaces)
2320 *p++ = ' ';
2321
2322 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2323 *p++ = 'm';
2324 else if (keep_spaces)
2325 *p++ = ' ';
2326
2327 if ((mask & RELOAD_REG_INDEXED) != 0)
2328 *p++ = 'i';
2329 else if (keep_spaces)
2330 *p++ = ' ';
2331
2332 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2333 *p++ = 'O';
2334 else if ((mask & RELOAD_REG_OFFSET) != 0)
2335 *p++ = 'o';
2336 else if (keep_spaces)
2337 *p++ = ' ';
2338
2339 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2340 *p++ = '+';
2341 else if (keep_spaces)
2342 *p++ = ' ';
2343
2344 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2345 *p++ = '+';
2346 else if (keep_spaces)
2347 *p++ = ' ';
2348
2349 if ((mask & RELOAD_REG_AND_M16) != 0)
2350 *p++ = '&';
2351 else if (keep_spaces)
2352 *p++ = ' ';
2353
2354 *p = '\0';
2355
2356 return ret;
2357 }
2358
2359 /* Print the address masks in a human readable fashion. */
2360 DEBUG_FUNCTION void
2361 rs6000_debug_print_mode (ssize_t m)
2362 {
2363 ssize_t rc;
2364 int spaces = 0;
2365
2366 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2367 for (rc = 0; rc < N_RELOAD_REG; rc++)
2368 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2369 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2370
2371 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2372 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2373 {
2374 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2375 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2376 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2377 spaces = 0;
2378 }
2379 else
2380 spaces += sizeof (" Reload=sl") - 1;
2381
2382 if (reg_addr[m].scalar_in_vmx_p)
2383 {
2384 fprintf (stderr, "%*s Upper=y", spaces, "");
2385 spaces = 0;
2386 }
2387 else
2388 spaces += sizeof (" Upper=y") - 1;
2389
2390 if (rs6000_vector_unit[m] != VECTOR_NONE
2391 || rs6000_vector_mem[m] != VECTOR_NONE)
2392 {
2393 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2394 spaces, "",
2395 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2396 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2397 }
2398
2399 fputs ("\n", stderr);
2400 }
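/* The `spaces' bookkeeping above is a common idiom for column-aligned
   optional fields: each skipped field adds its width to `spaces', and the
   next field that does print flushes the accumulated padding via "%*s".
   A standalone sketch of the same idiom:  */
#include <stdio.h>

static void
demo_optional_columns (int have_a, int have_b)
{
  int spaces = 0;

  if (have_a)
    {
      printf ("%*s A=1", spaces, "");
      spaces = 0;
    }
  else
    spaces += sizeof (" A=1") - 1;

  if (have_b)
    printf ("%*s B=1", spaces, "");
  printf ("\n");
}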
2401
2402 #define DEBUG_FMT_ID "%-32s= "
2403 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2404 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2405 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
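/* String-literal concatenation makes DEBUG_FMT_D expand to "%-32s= %d\n",
   left-justifying the key in a 32-column field so the values line up.  A
   one-line demo of the same shape:  */
#include <stdio.h>

static void
demo_debug_fmt (void)
{
  fprintf (stderr, "%-32s= " "%d\n", "tls_size", 32);
  /* prints: "tls_size                        = 32" */
}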
2406
2407 /* Print various interesting information with -mdebug=reg. */
2408 static void
2409 rs6000_debug_reg_global (void)
2410 {
2411 static const char *const tf[2] = { "false", "true" };
2412 const char *nl = (const char *)0;
2413 int m;
2414 size_t m1, m2, v;
2415 char costly_num[20];
2416 char nop_num[20];
2417 char flags_buffer[40];
2418 const char *costly_str;
2419 const char *nop_str;
2420 const char *trace_str;
2421 const char *abi_str;
2422 const char *cmodel_str;
2423 struct cl_target_option cl_opts;
2424
2425 /* Modes we want tieable information on. */
2426 static const machine_mode print_tieable_modes[] = {
2427 QImode,
2428 HImode,
2429 SImode,
2430 DImode,
2431 TImode,
2432 PTImode,
2433 SFmode,
2434 DFmode,
2435 TFmode,
2436 IFmode,
2437 KFmode,
2438 SDmode,
2439 DDmode,
2440 TDmode,
2441 V16QImode,
2442 V8HImode,
2443 V4SImode,
2444 V2DImode,
2445 V1TImode,
2446 V32QImode,
2447 V16HImode,
2448 V8SImode,
2449 V4DImode,
2450 V2TImode,
2451 V4SFmode,
2452 V2DFmode,
2453 V8SFmode,
2454 V4DFmode,
2455 CCmode,
2456 CCUNSmode,
2457 CCEQmode,
2458 };
2459
2460 /* Virtual regs we are interested in. */
2461 static const struct {
2462 int regno; /* register number. */
2463 const char *name; /* register name. */
2464 } virtual_regs[] = {
2465 { STACK_POINTER_REGNUM, "stack pointer:" },
2466 { TOC_REGNUM, "toc: " },
2467 { STATIC_CHAIN_REGNUM, "static chain: " },
2468 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2469 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2470 { ARG_POINTER_REGNUM, "arg pointer: " },
2471 { FRAME_POINTER_REGNUM, "frame pointer:" },
2472 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2473 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2474 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2475 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2476 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2477 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2478 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2479 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2480 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2481 };
2482
2483 fputs ("\nHard register information:\n", stderr);
2484 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2485 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2486 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2487 LAST_ALTIVEC_REGNO,
2488 "vs");
2489 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2490 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2491 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2492 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2493 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2494 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2495
2496 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2497 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2498 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2499
2500 fprintf (stderr,
2501 "\n"
2502 "d reg_class = %s\n"
2503 "f reg_class = %s\n"
2504 "v reg_class = %s\n"
2505 "wa reg_class = %s\n"
2506 "wb reg_class = %s\n"
2507 "wd reg_class = %s\n"
2508 "we reg_class = %s\n"
2509 "wf reg_class = %s\n"
2510 "wg reg_class = %s\n"
2511 "wh reg_class = %s\n"
2512 "wi reg_class = %s\n"
2513 "wj reg_class = %s\n"
2514 "wk reg_class = %s\n"
2515 "wl reg_class = %s\n"
2516 "wm reg_class = %s\n"
2517 "wo reg_class = %s\n"
2518 "wp reg_class = %s\n"
2519 "wq reg_class = %s\n"
2520 "wr reg_class = %s\n"
2521 "ws reg_class = %s\n"
2522 "wt reg_class = %s\n"
2523 "wu reg_class = %s\n"
2524 "wv reg_class = %s\n"
2525 "ww reg_class = %s\n"
2526 "wx reg_class = %s\n"
2527 "wy reg_class = %s\n"
2528 "wz reg_class = %s\n"
2529 "wA reg_class = %s\n"
2530 "wH reg_class = %s\n"
2531 "wI reg_class = %s\n"
2532 "wJ reg_class = %s\n"
2533 "wK reg_class = %s\n"
2534 "\n",
2535 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2536 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2537 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2538 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2539 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2540 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2541 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2544 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2567
2568 nl = "\n";
2569 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2570 rs6000_debug_print_mode (m);
2571
2572 fputs ("\n", stderr);
2573
2574 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2575 {
2576 machine_mode mode1 = print_tieable_modes[m1];
2577 bool first_time = true;
2578
2579 nl = (const char *)0;
2580 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2581 {
2582 machine_mode mode2 = print_tieable_modes[m2];
2583 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2584 {
2585 if (first_time)
2586 {
2587 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2588 nl = "\n";
2589 first_time = false;
2590 }
2591
2592 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2593 }
2594 }
2595
2596 if (!first_time)
2597 fputs ("\n", stderr);
2598 }
2599
2600 if (nl)
2601 fputs (nl, stderr);
2602
2603 if (rs6000_recip_control)
2604 {
2605 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2606
2607 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2608 if (rs6000_recip_bits[m])
2609 {
2610 fprintf (stderr,
2611 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2612 GET_MODE_NAME (m),
2613 (RS6000_RECIP_AUTO_RE_P (m)
2614 ? "auto"
2615 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2616 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2617 ? "auto"
2618 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2619 }
2620
2621 fputs ("\n", stderr);
2622 }
2623
2624 if (rs6000_cpu_index >= 0)
2625 {
2626 const char *name = processor_target_table[rs6000_cpu_index].name;
2627 HOST_WIDE_INT flags
2628 = processor_target_table[rs6000_cpu_index].target_enable;
2629
2630 sprintf (flags_buffer, "-mcpu=%s flags", name);
2631 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2632 }
2633 else
2634 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2635
2636 if (rs6000_tune_index >= 0)
2637 {
2638 const char *name = processor_target_table[rs6000_tune_index].name;
2639 HOST_WIDE_INT flags
2640 = processor_target_table[rs6000_tune_index].target_enable;
2641
2642 sprintf (flags_buffer, "-mtune=%s flags", name);
2643 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2644 }
2645 else
2646 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2647
2648 cl_target_option_save (&cl_opts, &global_options);
2649 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2650 rs6000_isa_flags);
2651
2652 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2653 rs6000_isa_flags_explicit);
2654
2655 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2656 rs6000_builtin_mask);
2657
2658 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2659
2660 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2661 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2662
2663 switch (rs6000_sched_costly_dep)
2664 {
2665 case max_dep_latency:
2666 costly_str = "max_dep_latency";
2667 break;
2668
2669 case no_dep_costly:
2670 costly_str = "no_dep_costly";
2671 break;
2672
2673 case all_deps_costly:
2674 costly_str = "all_deps_costly";
2675 break;
2676
2677 case true_store_to_load_dep_costly:
2678 costly_str = "true_store_to_load_dep_costly";
2679 break;
2680
2681 case store_to_load_dep_costly:
2682 costly_str = "store_to_load_dep_costly";
2683 break;
2684
2685 default:
2686 costly_str = costly_num;
2687 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2688 break;
2689 }
2690
2691 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2692
2693 switch (rs6000_sched_insert_nops)
2694 {
2695 case sched_finish_regroup_exact:
2696 nop_str = "sched_finish_regroup_exact";
2697 break;
2698
2699 case sched_finish_pad_groups:
2700 nop_str = "sched_finish_pad_groups";
2701 break;
2702
2703 case sched_finish_none:
2704 nop_str = "sched_finish_none";
2705 break;
2706
2707 default:
2708 nop_str = nop_num;
2709 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2710 break;
2711 }
2712
2713 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2714
2715 switch (rs6000_sdata)
2716 {
2717 default:
2718 case SDATA_NONE:
2719 break;
2720
2721 case SDATA_DATA:
2722 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2723 break;
2724
2725 case SDATA_SYSV:
2726 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2727 break;
2728
2729 case SDATA_EABI:
2730 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2731 break;
2732
2733 }
2734
2735 switch (rs6000_traceback)
2736 {
2737 case traceback_default: trace_str = "default"; break;
2738 case traceback_none: trace_str = "none"; break;
2739 case traceback_part: trace_str = "part"; break;
2740 case traceback_full: trace_str = "full"; break;
2741 default: trace_str = "unknown"; break;
2742 }
2743
2744 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2745
2746 switch (rs6000_current_cmodel)
2747 {
2748 case CMODEL_SMALL: cmodel_str = "small"; break;
2749 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2750 case CMODEL_LARGE: cmodel_str = "large"; break;
2751 default: cmodel_str = "unknown"; break;
2752 }
2753
2754 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2755
2756 switch (rs6000_current_abi)
2757 {
2758 case ABI_NONE: abi_str = "none"; break;
2759 case ABI_AIX: abi_str = "aix"; break;
2760 case ABI_ELFv2: abi_str = "ELFv2"; break;
2761 case ABI_V4: abi_str = "V4"; break;
2762 case ABI_DARWIN: abi_str = "darwin"; break;
2763 default: abi_str = "unknown"; break;
2764 }
2765
2766 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2767
2768 if (rs6000_altivec_abi)
2769 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2770
2771 if (rs6000_darwin64_abi)
2772 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2773
2774 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2775 (TARGET_SOFT_FLOAT ? "true" : "false"));
2776
2777 if (TARGET_LINK_STACK)
2778 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2779
2780 if (TARGET_P8_FUSION)
2781 {
2782 char options[80];
2783
2784 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2785 if (TARGET_P8_FUSION_SIGN)
2786 strcat (options, ", sign");
2787
2788 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2789 }
2790
2791 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2792 TARGET_SECURE_PLT ? "secure" : "bss");
2793 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2794 aix_struct_return ? "aix" : "sysv");
2795 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2796 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2797 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2798 tf[!!rs6000_align_branch_targets]);
2799 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2800 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2801 rs6000_long_double_type_size);
2802 if (rs6000_long_double_type_size > 64)
2803 {
2804 fprintf (stderr, DEBUG_FMT_S, "long double type",
2805 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2806 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2807 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2808 }
2809 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2810 (int)rs6000_sched_restricted_insns_priority);
2811 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2812 (int)END_BUILTINS);
2813 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2814 (int)RS6000_BUILTIN_COUNT);
2815
2816 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2817 (int)TARGET_FLOAT128_ENABLE_TYPE);
2818
2819 if (TARGET_VSX)
2820 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2821 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2822
2823 if (TARGET_DIRECT_MOVE_128)
2824 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2825 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2826 }
2827
2828 \f
2829 /* Update the addr mask bits in reg_addr to help secondary reload and the
2830 legitimate address support figure out the appropriate addressing to
2831 use. */
2832
2833 static void
2834 rs6000_setup_reg_addr_masks (void)
2835 {
2836 ssize_t rc, reg, m, nregs;
2837 addr_mask_type any_addr_mask, addr_mask;
2838
2839 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2840 {
2841 machine_mode m2 = (machine_mode) m;
2842 bool complex_p = false;
2843 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2844 size_t msize;
2845
2846 if (COMPLEX_MODE_P (m2))
2847 {
2848 complex_p = true;
2849 m2 = GET_MODE_INNER (m2);
2850 }
2851
2852 msize = GET_MODE_SIZE (m2);
2853
2854 /* SDmode is special in that we want to access it only via REG+REG
2855 addressing on power7 and above, since we want to use the LFIWZX and
2856 STFIWZX instructions to load it. */
2857 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2858
2859 any_addr_mask = 0;
2860 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2861 {
2862 addr_mask = 0;
2863 reg = reload_reg_map[rc].reg;
2864
2865 /* Can mode values go in the GPR/FPR/Altivec registers? */
2866 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2867 {
2868 bool small_int_vsx_p = (small_int_p
2869 && (rc == RELOAD_REG_FPR
2870 || rc == RELOAD_REG_VMX));
2871
2872 nregs = rs6000_hard_regno_nregs[m][reg];
2873 addr_mask |= RELOAD_REG_VALID;
2874
2875 /* Indicate if the mode takes more than 1 physical register. If
2876 it takes a single register, indicate it can do REG+REG
2877 addressing. Small integers in VSX registers can only do
2878 REG+REG addressing. */
2879 if (small_int_vsx_p)
2880 addr_mask |= RELOAD_REG_INDEXED;
2881 else if (nregs > 1 || m == BLKmode || complex_p)
2882 addr_mask |= RELOAD_REG_MULTIPLE;
2883 else
2884 addr_mask |= RELOAD_REG_INDEXED;
2885
2886 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2887 addressing. If we allow scalars into Altivec registers,
2888 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2889
2890 For VSX systems, we don't allow update addressing for
2891 DFmode/SFmode if those registers can go in both the
2892 traditional floating point registers and Altivec registers.
2893 The load/store instructions for the Altivec registers do not
2894 have update forms. If we allowed update addressing, it seems
2895 to break IV-OPT code using floating point if the index type is
2896 int instead of long (PR target/81550 and target/84042). */
2897
2898 if (TARGET_UPDATE
2899 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2900 && msize <= 8
2901 && !VECTOR_MODE_P (m2)
2902 && !FLOAT128_VECTOR_P (m2)
2903 && !complex_p
2904 && (m != E_DFmode || !TARGET_VSX)
2905 && (m != E_SFmode || !TARGET_P8_VECTOR)
2906 && !small_int_vsx_p)
2907 {
2908 addr_mask |= RELOAD_REG_PRE_INCDEC;
2909
2910 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2911 we don't allow PRE_MODIFY for some multi-register
2912 operations. */
2913 switch (m)
2914 {
2915 default:
2916 addr_mask |= RELOAD_REG_PRE_MODIFY;
2917 break;
2918
2919 case E_DImode:
2920 if (TARGET_POWERPC64)
2921 addr_mask |= RELOAD_REG_PRE_MODIFY;
2922 break;
2923
2924 case E_DFmode:
2925 case E_DDmode:
2926 if (TARGET_HARD_FLOAT)
2927 addr_mask |= RELOAD_REG_PRE_MODIFY;
2928 break;
2929 }
2930 }
2931 }
2932
2933 /* GPR and FPR registers can do REG+OFFSET addressing, except
2934 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2935 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2936 if ((addr_mask != 0) && !indexed_only_p
2937 && msize <= 8
2938 && (rc == RELOAD_REG_GPR
2939 || ((msize == 8 || m2 == SFmode)
2940 && (rc == RELOAD_REG_FPR
2941 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2942 addr_mask |= RELOAD_REG_OFFSET;
2943
2944 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2945 instructions are enabled. The offset for 128-bit VSX registers is
2946 only 12-bits. While GPRs can handle the full offset range, VSX
2947 registers can only handle the restricted range. */
2948 else if ((addr_mask != 0) && !indexed_only_p
2949 && msize == 16 && TARGET_P9_VECTOR
2950 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2951 || (m2 == TImode && TARGET_VSX)))
2952 {
2953 addr_mask |= RELOAD_REG_OFFSET;
2954 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2955 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2956 }
2957
2958 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2959 addressing on 128-bit types. */
2960 if (rc == RELOAD_REG_VMX && msize == 16
2961 && (addr_mask & RELOAD_REG_VALID) != 0)
2962 addr_mask |= RELOAD_REG_AND_M16;
2963
2964 reg_addr[m].addr_mask[rc] = addr_mask;
2965 any_addr_mask |= addr_mask;
2966 }
2967
2968 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2969 }
2970 }
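/* A condensed sketch of the structure above: each (mode, register class)
   pair accumulates a bitmask of independent capabilities, and the
   RELOAD_REG_ANY entry is the union over all classes.  The flag values
   here are illustrative, not the real RELOAD_REG_* ones.  */
#define DEMO_VALID    0x01
#define DEMO_INDEXED  0x02
#define DEMO_MULTIPLE 0x04
#define DEMO_OFFSET   0x08

static unsigned int
demo_addr_mask (int mode_ok, int nregs, int offsettable)
{
  unsigned int mask = 0;

  if (!mode_ok)
    return 0;                   /* mode can't live in this class at all */

  mask |= DEMO_VALID;
  mask |= (nregs > 1) ? DEMO_MULTIPLE : DEMO_INDEXED;
  if (offsettable && (mask & DEMO_INDEXED) != 0)
    mask |= DEMO_OFFSET;
  return mask;
}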
2971
2972 \f
2973 /* Initialize the various global tables that are based on register size. */
2974 static void
2975 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2976 {
2977 ssize_t r, m, c;
2978 int align64;
2979 int align32;
2980
2981 /* Precalculate REGNO_REG_CLASS. */
2982 rs6000_regno_regclass[0] = GENERAL_REGS;
2983 for (r = 1; r < 32; ++r)
2984 rs6000_regno_regclass[r] = BASE_REGS;
2985
2986 for (r = 32; r < 64; ++r)
2987 rs6000_regno_regclass[r] = FLOAT_REGS;
2988
2989 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2990 rs6000_regno_regclass[r] = NO_REGS;
2991
2992 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2993 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2994
2995 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2996 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2997 rs6000_regno_regclass[r] = CR_REGS;
2998
2999 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3000 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3001 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3002 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3003 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3004 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3005 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3006 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3007 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3008 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3009
3010 /* Map each register class to a simpler reload register class. We don't
3011 need all of the register classes that are combinations of different
3012 classes, just the simple ones that have constraint letters. */
3013 for (c = 0; c < N_REG_CLASSES; c++)
3014 reg_class_to_reg_type[c] = NO_REG_TYPE;
3015
3016 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3017 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3018 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3019 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3020 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3021 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3022 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3023 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3024 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3025 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3026
3027 if (TARGET_VSX)
3028 {
3029 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3030 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3031 }
3032 else
3033 {
3034 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3035 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3036 }
3037
3038 /* Precalculate the valid memory formats as well as the vector information;
3039 this must be set up before the rs6000_hard_regno_nregs_internal calls
3040 below. */
3041 gcc_assert ((int)VECTOR_NONE == 0);
3042 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3043 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
3044
3045 gcc_assert ((int)CODE_FOR_nothing == 0);
3046 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3047
3048 gcc_assert ((int)NO_REGS == 0);
3049 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3050
3051 /* The VSX hardware allows native alignment for vectors, but control whether
3052 the compiler believes it can use that or must keep 128-bit alignment. */
3053 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3054 {
3055 align64 = 64;
3056 align32 = 32;
3057 }
3058 else
3059 {
3060 align64 = 128;
3061 align32 = 128;
3062 }
3063
3064 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3065 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3066 if (TARGET_FLOAT128_TYPE)
3067 {
3068 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3069 rs6000_vector_align[KFmode] = 128;
3070
3071 if (FLOAT128_IEEE_P (TFmode))
3072 {
3073 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3074 rs6000_vector_align[TFmode] = 128;
3075 }
3076 }
3077
3078 /* V2DF mode, VSX only. */
3079 if (TARGET_VSX)
3080 {
3081 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3082 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3083 rs6000_vector_align[V2DFmode] = align64;
3084 }
3085
3086 /* V4SF mode, either VSX or Altivec. */
3087 if (TARGET_VSX)
3088 {
3089 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3090 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3091 rs6000_vector_align[V4SFmode] = align32;
3092 }
3093 else if (TARGET_ALTIVEC)
3094 {
3095 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3096 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3097 rs6000_vector_align[V4SFmode] = align32;
3098 }
3099
3100 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3101 and stores. */
3102 if (TARGET_ALTIVEC)
3103 {
3104 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3105 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3106 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3107 rs6000_vector_align[V4SImode] = align32;
3108 rs6000_vector_align[V8HImode] = align32;
3109 rs6000_vector_align[V16QImode] = align32;
3110
3111 if (TARGET_VSX)
3112 {
3113 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3114 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3115 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3116 }
3117 else
3118 {
3119 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3120 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3121 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3122 }
3123 }
3124
3125 /* V2DImode: full support depends on the ISA 2.07 vector mode. Allow it
3126 under VSX for insert/splat/extract. Altivec lacks 64-bit integer support. */
3127 if (TARGET_VSX)
3128 {
3129 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3130 rs6000_vector_unit[V2DImode]
3131 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3132 rs6000_vector_align[V2DImode] = align64;
3133
3134 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3135 rs6000_vector_unit[V1TImode]
3136 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3137 rs6000_vector_align[V1TImode] = 128;
3138 }
3139
3140 /* DFmode, see if we want to use the VSX unit. Memory is handled
3141 differently, so don't set rs6000_vector_mem. */
3142 if (TARGET_VSX)
3143 {
3144 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3145 rs6000_vector_align[DFmode] = 64;
3146 }
3147
3148 /* SFmode, see if we want to use the VSX unit. */
3149 if (TARGET_P8_VECTOR)
3150 {
3151 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3152 rs6000_vector_align[SFmode] = 32;
3153 }
3154
3155 /* Allow TImode in VSX register and set the VSX memory macros. */
3156 if (TARGET_VSX)
3157 {
3158 rs6000_vector_mem[TImode] = VECTOR_VSX;
3159 rs6000_vector_align[TImode] = align64;
3160 }
3161
3162 /* Register class constraints for the constraints that depend on compile
3163 switches. When the VSX code was added, different constraints were added
3164 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3165 of the VSX registers are used. The register classes for scalar floating
3166 point types is set, based on whether we allow that type into the upper
3167 (Altivec) registers. GCC has register classes to target the Altivec
3168 registers for load/store operations, to select using a VSX memory
3169 operation instead of the traditional floating point operation. The
3170 constraints are:
3171
3172 d - Register class to use with traditional DFmode instructions.
3173 f - Register class to use with traditional SFmode instructions.
3174 v - Altivec register.
3175 wa - Any VSX register.
3176 wc - Reserved to represent individual CR bits (used in LLVM).
3177 wd - Preferred register class for V2DFmode.
3178 wf - Preferred register class for V4SFmode.
3179 wg - Float register for power6x move insns.
3180 wh - FP register for direct move instructions.
3181 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3182 wj - FP or VSX register to hold 64-bit integers for direct moves.
3183 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3184 wl - Float register if we can do 32-bit signed int loads.
3185 wm - VSX register for ISA 2.07 direct move operations.
3186 wn - always NO_REGS.
3187 wr - GPR if 64-bit mode is permitted.
3188 ws - Register class to do ISA 2.06 DF operations.
3189 wt - VSX register for TImode in VSX registers.
3190 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3191 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3192 ww - Register class to do SF conversions in with VSX operations.
3193 wx - Float register if we can do 32-bit int stores.
3194 wy - Register class to do ISA 2.07 SF operations.
3195 wz - Float register if we can do 32-bit unsigned int loads.
3196 wH - Altivec register if SImode is allowed in VSX registers.
3197 wI - VSX register if SImode is allowed in VSX registers.
3198 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3199 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3200
3201 if (TARGET_HARD_FLOAT)
3202 {
3203 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3204 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3205 }
3206
3207 if (TARGET_VSX)
3208 {
3209 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3210 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3211 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3212 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3213 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3214 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3215 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3216 }
3217
3218 /* Add conditional constraints based on various options, to allow us to
3219 collapse multiple insn patterns. */
3220 if (TARGET_ALTIVEC)
3221 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3222
3223 if (TARGET_MFPGPR) /* DFmode */
3224 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3225
3226 if (TARGET_LFIWAX)
3227 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3228
3229 if (TARGET_DIRECT_MOVE)
3230 {
3231 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3232 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3233 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3234 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3235 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3236 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3237 }
3238
3239 if (TARGET_POWERPC64)
3240 {
3241 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3242 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3243 }
3244
3245 if (TARGET_P8_VECTOR) /* SFmode */
3246 {
3247 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3248 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3249 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3250 }
3251 else if (TARGET_VSX)
3252 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3253
3254 if (TARGET_STFIWX)
3255 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3256
3257 if (TARGET_LFIWZX)
3258 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3259
3260 if (TARGET_FLOAT128_TYPE)
3261 {
3262 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3263 if (FLOAT128_IEEE_P (TFmode))
3264 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3265 }
3266
3267 if (TARGET_P9_VECTOR)
3268 {
3269 /* Support for new D-form instructions. */
3270 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3271
3272 /* Support for ISA 3.0 (power9) vectors. */
3273 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3274 }
3275
3276 /* Support for new direct moves (ISA 3.0 + 64bit). */
3277 if (TARGET_DIRECT_MOVE_128)
3278 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3279
3280 /* Support small integers in VSX registers. */
3281 if (TARGET_P8_VECTOR)
3282 {
3283 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3284 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3285 if (TARGET_P9_VECTOR)
3286 {
3287 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3288 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3289 }
3290 }
3291
3292 /* Set up the reload helper and direct move functions. */
3293 if (TARGET_VSX || TARGET_ALTIVEC)
3294 {
3295 if (TARGET_64BIT)
3296 {
3297 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3298 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3299 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3300 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3301 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3302 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3303 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3304 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3305 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3306 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3307 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3308 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3309 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3310 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3311 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3312 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3313 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3314 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3315 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3316 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3317
3318 if (FLOAT128_VECTOR_P (KFmode))
3319 {
3320 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3321 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3322 }
3323
3324 if (FLOAT128_VECTOR_P (TFmode))
3325 {
3326 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3327 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3328 }
3329
3330 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3331 available. */
3332 if (TARGET_NO_SDMODE_STACK)
3333 {
3334 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3335 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3336 }
3337
3338 if (TARGET_VSX)
3339 {
3340 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3341 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3342 }
3343
3344 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3345 {
3346 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3347 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3348 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3349 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3350 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3351 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3352 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3353 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3354 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3355
3356 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3357 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3358 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3359 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3360 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3361 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3362 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3363 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3364 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3365
3366 if (FLOAT128_VECTOR_P (KFmode))
3367 {
3368 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3369 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3370 }
3371
3372 if (FLOAT128_VECTOR_P (TFmode))
3373 {
3374 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3375 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3376 }
3377 }
3378 }
3379 else
3380 {
3381 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3382 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3383 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3384 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3385 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3386 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3387 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3388 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3389 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3390 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3391 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3392 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3393 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3394 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3395 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3396 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3397 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3398 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3399 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3400 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3401
3402 if (FLOAT128_VECTOR_P (KFmode))
3403 {
3404 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3405 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3406 }
3407
3408 if (FLOAT128_IEEE_P (TFmode))
3409 {
3410 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3411 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3412 }
3413
3414 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3415 available. */
3416 if (TARGET_NO_SDMODE_STACK)
3417 {
3418 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3419 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3420 }
3421
3422 if (TARGET_VSX)
3423 {
3424 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3425 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3426 }
3427
3428 if (TARGET_DIRECT_MOVE)
3429 {
3430 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3431 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3432 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3433 }
3434 }
3435
3436 reg_addr[DFmode].scalar_in_vmx_p = true;
3437 reg_addr[DImode].scalar_in_vmx_p = true;
3438
3439 if (TARGET_P8_VECTOR)
3440 {
3441 reg_addr[SFmode].scalar_in_vmx_p = true;
3442 reg_addr[SImode].scalar_in_vmx_p = true;
3443
3444 if (TARGET_P9_VECTOR)
3445 {
3446 reg_addr[HImode].scalar_in_vmx_p = true;
3447 reg_addr[QImode].scalar_in_vmx_p = true;
3448 }
3449 }
3450 }
3451
3452 /* Precalculate HARD_REGNO_NREGS. */
3453 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3454 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3455 rs6000_hard_regno_nregs[m][r]
3456 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3457
3458 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3459 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3460 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3461 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3462 rs6000_hard_regno_mode_ok_p[m][r] = true;
3463
3464 /* Precalculate CLASS_MAX_NREGS sizes. */
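/* As a worked example of the formula below: a 16-byte mode occupies
   (16 + 8 - 1) / 8 = 2 registers in an 8-byte-register class such as
   FLOAT_REGS, but (16 + 16 - 1) / 16 = 1 register in a 16-byte VSX
   class, modulo the FLOAT128_2REG_P exception handled below.  */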
3465 for (c = 0; c < LIM_REG_CLASSES; ++c)
3466 {
3467 int reg_size;
3468
3469 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3470 reg_size = UNITS_PER_VSX_WORD;
3471
3472 else if (c == ALTIVEC_REGS)
3473 reg_size = UNITS_PER_ALTIVEC_WORD;
3474
3475 else if (c == FLOAT_REGS)
3476 reg_size = UNITS_PER_FP_WORD;
3477
3478 else
3479 reg_size = UNITS_PER_WORD;
3480
3481 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3482 {
3483 machine_mode m2 = (machine_mode)m;
3484 int reg_size2 = reg_size;
3485
3486 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3487 in VSX. */
3488 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3489 reg_size2 = UNITS_PER_FP_WORD;
3490
3491 rs6000_class_max_nregs[m][c]
3492 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3493 }
3494 }
3495
3496 /* Calculate the modes for which to automatically generate code using the
3497 reciprocal divide and square root instructions. In the future, possibly
3498 automatically generate the instructions even if the user did not specify
3499 -mrecip. The older machines' double precision reciprocal sqrt estimate is
3500 not accurate enough. */
3501 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3502 if (TARGET_FRES)
3503 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3504 if (TARGET_FRE)
3505 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3506 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3507 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3508 if (VECTOR_UNIT_VSX_P (V2DFmode))
3509 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3510
3511 if (TARGET_FRSQRTES)
3512 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3513 if (TARGET_FRSQRTE)
3514 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3515 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3516 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3517 if (VECTOR_UNIT_VSX_P (V2DFmode))
3518 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3519
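/* -mrecip (or -mrecip=<opts>) was given: sanity-check the fast-math
   prerequisites, then set the AUTO bits for each mode whose estimate
   instruction exists and whose RECIP_* control bit is set.  */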
3520 if (rs6000_recip_control)
3521 {
3522 if (!flag_finite_math_only)
3523 warning (0, "%qs requires %qs or %qs", "-mrecip",
3524 "-ffinite-math-only", "-ffast-math");
3525 if (flag_trapping_math)
3526 warning (0, "%qs requires %qs or %qs", "-mrecip",
3527 "-fno-trapping-math", "-ffast-math");
3528 if (!flag_reciprocal_math)
3529 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3530 "-ffast-math");
3531 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3532 {
3533 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3534 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3535 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3536
3537 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3538 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3539 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3540
3541 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3542 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3543 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3544
3545 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3546 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3547 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3548
3549 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3550 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3551 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3552
3553 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3554 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3555 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3556
3557 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3558 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3559 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3560
3561 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3562 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3563 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3564 }
3565 }
3566
3567 /* Update the addr mask bits in reg_addr to help the secondary reload and
3568 legitimate address support figure out the appropriate addressing to
3569 use. */
3570 rs6000_setup_reg_addr_masks ();
3571
3572 if (global_init_p || TARGET_DEBUG_TARGET)
3573 {
3574 if (TARGET_DEBUG_REG)
3575 rs6000_debug_reg_global ();
3576
3577 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3578 fprintf (stderr,
3579 "SImode variable mult cost = %d\n"
3580 "SImode constant mult cost = %d\n"
3581 "SImode short constant mult cost = %d\n"
3582 "DImode multipliciation cost = %d\n"
3583 "SImode division cost = %d\n"
3584 "DImode division cost = %d\n"
3585 "Simple fp operation cost = %d\n"
3586 "DFmode multiplication cost = %d\n"
3587 "SFmode division cost = %d\n"
3588 "DFmode division cost = %d\n"
3589 "cache line size = %d\n"
3590 "l1 cache size = %d\n"
3591 "l2 cache size = %d\n"
3592 "simultaneous prefetches = %d\n"
3593 "\n",
3594 rs6000_cost->mulsi,
3595 rs6000_cost->mulsi_const,
3596 rs6000_cost->mulsi_const9,
3597 rs6000_cost->muldi,
3598 rs6000_cost->divsi,
3599 rs6000_cost->divdi,
3600 rs6000_cost->fp,
3601 rs6000_cost->dmul,
3602 rs6000_cost->sdiv,
3603 rs6000_cost->ddiv,
3604 rs6000_cost->cache_line_size,
3605 rs6000_cost->l1_cache_size,
3606 rs6000_cost->l2_cache_size,
3607 rs6000_cost->simultaneous_prefetches);
3608 }
3609 }
3610
3611 #if TARGET_MACHO
3612 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3613
3614 static void
3615 darwin_rs6000_override_options (void)
3616 {
3617 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3618 off. */
3619 rs6000_altivec_abi = 1;
3620 TARGET_ALTIVEC_VRSAVE = 1;
3621 rs6000_current_abi = ABI_DARWIN;
3622
3623 if (DEFAULT_ABI == ABI_DARWIN
3624 && TARGET_64BIT)
3625 darwin_one_byte_bool = 1;
3626
3627 if (TARGET_64BIT && ! TARGET_POWERPC64)
3628 {
3629 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3630 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3631 }
3632 if (flag_mkernel)
3633 {
3634 rs6000_default_long_calls = 1;
3635 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3636 }
3637
3638 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3639 Altivec. */
3640 if (!flag_mkernel && !flag_apple_kext
3641 && TARGET_64BIT
3642 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3643 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3644
3645 /* Unless the user (not the configurer) has explicitly overridden
3646 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to the
3647 G4 unless targeting the kernel. */
3648 if (!flag_mkernel
3649 && !flag_apple_kext
3650 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3651 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3652 && ! global_options_set.x_rs6000_cpu_index)
3653 {
3654 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3655 }
3656 }
3657 #endif
3658
3659 /* If not otherwise specified by a target, make 'long double' equivalent to
3660 'double'. */
3661
3662 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3663 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3664 #endif
3665
3666 /* Return the builtin mask of the various options that could affect which
3667 builtins are enabled. In the past we used target_flags, but we've run out
3668 of bits, and some options are no longer in target_flags. */
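/* Each builtin's mask bits must all be present in the result for that
   builtin to be usable, so this value has to be recomputed whenever
   rs6000_isa_flags changes (e.g. from a target pragma or attribute).  */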
3669
3670 HOST_WIDE_INT
3671 rs6000_builtin_mask_calculate (void)
3672 {
3673 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3674 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3675 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3676 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3677 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3678 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3679 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3680 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3681 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3682 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3683 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3684 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3685 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3686 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3687 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3688 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3689 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3690 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3691 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3692 | ((TARGET_LONG_DOUBLE_128
3693 && TARGET_HARD_FLOAT
3694 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3695 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3696 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3697 }
3698
3699 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3700 to clobber the XER[CA] bit because clobbering that bit without telling
3701 the compiler worked just fine with versions of GCC before GCC 5, and
3702 breaking a lot of older code in ways that are hard to track down is
3703 not such a great idea. */
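/* For example, inline asm that uses "addic" or "subfic" changes XER[CA]
   without declaring it; the unconditional clobber below keeps such
   pre-GCC 5 era code correct.  */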
3704
3705 static rtx_insn *
3706 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3707 vec<const char *> &/*constraints*/,
3708 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3709 {
3710 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3711 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3712 return NULL;
3713 }
3714
3715 /* Override command line options.
3716
3717 Combine build-specific configuration information with options
3718 specified on the command line to set various state variables which
3719 influence code generation, optimization, and expansion of built-in
3720 functions. Assure that command-line configuration preferences are
3721 compatible with each other and with the build configuration; issue
3722 warnings while adjusting configuration or error messages while
3723 rejecting configuration.
3724
3725 Upon entry to this function:
3726
3727 This function is called once at the beginning of
3728 compilation, and then again at the start and end of compiling
3729 each section of code that has a different configuration, as
3730 indicated, for example, by adding the
3731
3732 __attribute__((__target__("cpu=power9")))
3733
3734 qualifier to a function definition or, for example, by bracketing
3735 code between
3736
3737 #pragma GCC target("altivec")
3738
3739 and
3740
3741 #pragma GCC reset_options
3742
3743 directives. Parameter global_init_p is true for the initial
3744 invocation, which initializes global variables, and false for all
3745 subsequent invocations.
3746
3747
3748 Various global state information is assumed to be valid. This
3749 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3750 default CPU specified at build configure time, TARGET_DEFAULT,
3751 representing the default set of option flags for the default
3752 target, and global_options_set.x_rs6000_isa_flags, representing
3753 which options were requested on the command line.
3754
3755 Upon return from this function:
3756
3757 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3758 was set by name on the command line. Additionally, if certain
3759 attributes are automatically enabled or disabled by this function
3760 in order to assure compatibility between options and
3761 configuration, the flags associated with those attributes are
3762 also set. By setting these "explicit bits", we avoid the risk
3763 that other code might accidentally overwrite these particular
3764 attributes with "default values".
3765
3766 The various bits of rs6000_isa_flags are set to indicate the
3767 target options that have been selected for the most current
3768 compilation efforts. This has the effect of also turning on the
3769 associated TARGET_XXX values since these are macros which are
3770 generally defined to test the corresponding bit of the
3771 rs6000_isa_flags variable.
3772
3773 The variable rs6000_builtin_mask is set to represent the target
3774 options for the most current compilation efforts, consistent with
3775 the current contents of rs6000_isa_flags. This variable controls
3776 expansion of built-in functions.
3777
3778 Various other global variables and fields of global structures
3779 (over 50 in all) are initialized to reflect the desired options
3780 for the most current compilation efforts. */
3781
3782 static bool
3783 rs6000_option_override_internal (bool global_init_p)
3784 {
3785 bool ret = true;
3786
3787 HOST_WIDE_INT set_masks;
3788 HOST_WIDE_INT ignore_masks;
3789 int cpu_index = -1;
3790 int tune_index;
3791 struct cl_target_option *main_target_opt
3792 = ((global_init_p || target_option_default_node == NULL)
3793 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3794
3795 /* Print defaults. */
3796 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3797 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3798
3799 /* Remember the explicit arguments. */
3800 if (global_init_p)
3801 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3802
3803 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3804 library functions, so warn about it. The flag may be useful for
3805 performance studies from time to time though, so don't disable it
3806 entirely. */
3807 if (global_options_set.x_rs6000_alignment_flags
3808 && rs6000_alignment_flags == MASK_ALIGN_POWER
3809 && DEFAULT_ABI == ABI_DARWIN
3810 && TARGET_64BIT)
3811 warning (0, "%qs is not supported for 64-bit Darwin;"
3812 " it is incompatible with the installed C and C++ libraries",
3813 "-malign-power");
3814
3815 /* Numerous experiments show that IRA-based loop pressure
3816 calculation works better for RTL loop invariant motion on targets
3817 with enough (>= 32) registers. It is an expensive optimization,
3818 so it is enabled only when optimizing for peak performance. */
3819 if (optimize >= 3 && global_init_p
3820 && !global_options_set.x_flag_ira_loop_pressure)
3821 flag_ira_loop_pressure = 1;
3822
3823 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3824 for tracebacks to be complete, but not if an explicit
3825 -fasynchronous-unwind-tables option was already specified. */
3826 if (flag_sanitize & SANITIZE_USER_ADDRESS
3827 && !global_options_set.x_flag_asynchronous_unwind_tables)
3828 flag_asynchronous_unwind_tables = 1;
3829
3830 /* Set the pointer size. */
3831 if (TARGET_64BIT)
3832 {
3833 rs6000_pmode = DImode;
3834 rs6000_pointer_size = 64;
3835 }
3836 else
3837 {
3838 rs6000_pmode = SImode;
3839 rs6000_pointer_size = 32;
3840 }
3841
3842 /* Some OSs don't support saving the high part of 64-bit registers on context
3843 switch. Other OSs don't support saving Altivec registers. On those OSs,
3844 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3845 if the user wants either, the user must explicitly specify them and we
3846 won't interfere with the user's specification. */
3847
3848 set_masks = POWERPC_MASKS;
3849 #ifdef OS_MISSING_POWERPC64
3850 if (OS_MISSING_POWERPC64)
3851 set_masks &= ~OPTION_MASK_POWERPC64;
3852 #endif
3853 #ifdef OS_MISSING_ALTIVEC
3854 if (OS_MISSING_ALTIVEC)
3855 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3856 | OTHER_VSX_VECTOR_MASKS);
3857 #endif
3858
3859 /* Don't override by the processor default if given explicitly. */
3860 set_masks &= ~rs6000_isa_flags_explicit;
3861
3862 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3863 the cpu in a target attribute or pragma, but did not specify a tuning
3864 option, use the cpu for the tuning option rather than the option specified
3865 with -mtune on the command line. Process a '--with-cpu' configuration
3866 request as an implicit -mcpu. */
3867 if (rs6000_cpu_index >= 0)
3868 cpu_index = rs6000_cpu_index;
3869 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3870 cpu_index = main_target_opt->x_rs6000_cpu_index;
3871 else if (OPTION_TARGET_CPU_DEFAULT)
3872 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3873
3874 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3875 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3876 with those from the cpu, except for options that were explicitly set. If
3877 we don't have a cpu, do not override the target bits set in
3878 TARGET_DEFAULT. */
3879 if (cpu_index >= 0)
3880 {
3881 rs6000_cpu_index = cpu_index;
3882 rs6000_isa_flags &= ~set_masks;
3883 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3884 & set_masks);
3885 }
3886 else
3887 {
3888 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3889 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3890 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
3891 to using rs6000_isa_flags, we need to do the initialization here.
3892
3893 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3894 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3895 HOST_WIDE_INT flags;
3896 if (TARGET_DEFAULT)
3897 flags = TARGET_DEFAULT;
3898 else
3899 {
3900 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3901 const char *default_cpu = (!TARGET_POWERPC64
3902 ? "powerpc"
3903 : (BYTES_BIG_ENDIAN
3904 ? "powerpc64"
3905 : "powerpc64le"));
3906 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3907 flags = processor_target_table[default_cpu_index].target_enable;
3908 }
3909 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3910 }
3911
3912 if (rs6000_tune_index >= 0)
3913 tune_index = rs6000_tune_index;
3914 else if (cpu_index >= 0)
3915 rs6000_tune_index = tune_index = cpu_index;
3916 else
3917 {
3918 size_t i;
3919 enum processor_type tune_proc
3920 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3921
3922 tune_index = -1;
3923 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3924 if (processor_target_table[i].processor == tune_proc)
3925 {
3926 tune_index = i;
3927 break;
3928 }
3929 }
3930
3931 if (cpu_index >= 0)
3932 rs6000_cpu = processor_target_table[cpu_index].processor;
3933 else
3934 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3935
3936 gcc_assert (tune_index >= 0);
3937 rs6000_tune = processor_target_table[tune_index].processor;
3938
3939 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3940 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3941 || rs6000_cpu == PROCESSOR_PPCE5500)
3942 {
3943 if (TARGET_ALTIVEC)
3944 error ("AltiVec not supported in this target");
3945 }
3946
3947 /* If we are optimizing big endian systems for space, use the load/store
3948 multiple instructions. */
3949 if (BYTES_BIG_ENDIAN && optimize_size)
3950 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3951
3952 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3953 because the hardware doesn't support the instructions used in little
3954 endian mode and they cause an alignment trap. The 750 does not trap
3955 (except when the target address is unaligned). */
3956
3957 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3958 {
3959 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3960 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3961 warning (0, "%qs is not supported on little endian systems",
3962 "-mmultiple");
3963 }
3964
3965 /* If little-endian, default to -mstrict-align on older processors.
3966 Testing for htm matches power8 and later. */
3967 if (!BYTES_BIG_ENDIAN
3968 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3969 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3970
3971 if (!rs6000_fold_gimple)
3972 fprintf (stderr,
3973 "gimple folding of rs6000 builtins has been disabled.\n");
3974
3975 /* Add some warnings for VSX. */
3976 if (TARGET_VSX)
3977 {
3978 const char *msg = NULL;
3979 if (!TARGET_HARD_FLOAT)
3980 {
3981 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3982 msg = N_("-mvsx requires hardware floating point");
3983 else
3984 {
3985 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3986 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3987 }
3988 }
3989 else if (TARGET_AVOID_XFORM > 0)
3990 msg = N_("-mvsx needs indexed addressing");
3991 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3992 & OPTION_MASK_ALTIVEC))
3993 {
3994 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3995 msg = N_("-mvsx and -mno-altivec are incompatible");
3996 else
3997 msg = N_("-mno-altivec disables vsx");
3998 }
3999
4000 if (msg)
4001 {
4002 warning (0, msg);
4003 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4004 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4005 }
4006 }
4007
4008 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4009 the -mcpu setting to enable options that conflict. */
4010 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4011 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4012 | OPTION_MASK_ALTIVEC
4013 | OPTION_MASK_VSX)) != 0)
4014 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4015 | OPTION_MASK_DIRECT_MOVE)
4016 & ~rs6000_isa_flags_explicit);
4017
4018 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4019 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4020
4021 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4022 off all of the options that depend on those flags. */
4023 ignore_masks = rs6000_disable_incompatible_switches ();
4024
4025 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4026 unless the user explicitly used the -mno-<option> to disable the code. */
4027 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4028 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4029 else if (TARGET_P9_MINMAX)
4030 {
4031 if (cpu_index >= 0)
4032 {
4033 if (cpu_index == PROCESSOR_POWER9)
4034 {
4035 /* legacy behavior: allow -mcpu=power9 with certain
4036 capabilities explicitly disabled. */
4037 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4038 }
4039 else
4040 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4041 "for <xxx> less than power9", "-mcpu");
4042 }
4043 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4044 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4045 & rs6000_isa_flags_explicit))
4046 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4047 were explicitly cleared. */
4048 error ("%qs incompatible with explicitly disabled options",
4049 "-mpower9-minmax");
4050 else
4051 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4052 }
4053 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4054 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4055 else if (TARGET_VSX)
4056 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4057 else if (TARGET_POPCNTD)
4058 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4059 else if (TARGET_DFP)
4060 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4061 else if (TARGET_CMPB)
4062 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4063 else if (TARGET_FPRND)
4064 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4065 else if (TARGET_POPCNTB)
4066 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4067 else if (TARGET_ALTIVEC)
4068 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
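/* The ISA_*_MASKS_* sets tested above are cumulative (each newer set
   includes the older ones), so enabling any newer feature drags in the
   whole older option stack, minus any bits in ignore_masks.  */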
4069
4070 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4071 {
4072 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4073 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4074 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4075 }
4076
4077 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4078 {
4079 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4080 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4081 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4082 }
4083
4084 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4085 {
4086 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4087 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4088 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4089 }
4090
4091 if (TARGET_P8_VECTOR && !TARGET_VSX)
4092 {
4093 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4094 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4095 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4096 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4097 {
4098 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4099 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4100 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4101 }
4102 else
4103 {
4104 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4105 not explicit. */
4106 rs6000_isa_flags |= OPTION_MASK_VSX;
4107 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4108 }
4109 }
4110
4111 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4112 {
4113 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4114 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4115 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4116 }
4117
4118 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4119 silently turn off quad memory mode. */
4120 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4121 {
4122 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4123 warning (0, N_("-mquad-memory requires 64-bit mode"));
4124
4125 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4126 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4127
4128 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4129 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4130 }
4131
4132 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4133 the words are reversed, but atomic operations can still be done by
4134 swapping the words. */
4135 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4136 {
4137 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4138 warning (0, N_("-mquad-memory is not available in little endian "
4139 "mode"));
4140
4141 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4142 }
4143
4144 /* Assume that if the user asked for normal quad memory instructions, they
4145 want the atomic versions as well, unless they explicitly told us not to
4146 use quad word atomic instructions. */
4147 if (TARGET_QUAD_MEMORY
4148 && !TARGET_QUAD_MEMORY_ATOMIC
4149 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4150 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4151
4152 /* If we can shrink-wrap the TOC register save separately, then use
4153 -msave-toc-indirect unless explicitly disabled. */
4154 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4155 && flag_shrink_wrap_separate
4156 && optimize_function_for_speed_p (cfun))
4157 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4158
4159 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4160 generating power8 instructions. */
4161 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4162 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4163 & OPTION_MASK_P8_FUSION);
4164
4165 /* Setting additional fusion flags turns on base fusion. */
4166 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4167 {
4168 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4169 {
4170 if (TARGET_P8_FUSION_SIGN)
4171 error ("%qs requires %qs", "-mpower8-fusion-sign",
4172 "-mpower8-fusion");
4173
4174 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4175 }
4176 else
4177 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4178 }
4179
4180 /* Power9 fusion is a superset of power8 fusion. */
4181 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4182 {
4183 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4184 {
4185 /* We prefer to not mention undocumented options in
4186 error messages. However, if users have managed to select
4187 power9-fusion without selecting power8-fusion, they
4188 already know about undocumented flags. */
4189 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4190 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4191 }
4192 else
4193 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4194 }
4195
4196 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4197 generating power9 instructions. */
4198 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4199 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4200 & OPTION_MASK_P9_FUSION);
4201
4202 /* Power8 does not fuse sign extended loads with the addis. If we are
4203 optimizing at high levels for speed, convert a sign extended load into a
4204 zero extending load, and an explicit sign extension. */
4205 if (TARGET_P8_FUSION
4206 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4207 && optimize_function_for_speed_p (cfun)
4208 && optimize >= 3)
4209 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4210
4211 /* ISA 3.0 vector instructions include ISA 2.07. */
4212 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4213 {
4214 /* We prefer to not mention undocumented options in
4215 error messages. However, if users have managed to select
4216 power9-vector without selecting power8-vector, they
4217 already know about undocumented flags. */
4218 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4219 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4220 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4221 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4222 {
4223 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4224 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4225 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4226 }
4227 else
4228 {
4229 /* OPTION_MASK_P9_VECTOR is explicit and
4230 OPTION_MASK_P8_VECTOR is not explicit. */
4231 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4232 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4233 }
4234 }
4235
4236 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4237 support. If we only have ISA 2.06 support and the user did not specify
4238 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4239 but we don't enable the full vectorization support. */
4240 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4241 TARGET_ALLOW_MOVMISALIGN = 1;
4242
4243 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4244 {
4245 if (TARGET_ALLOW_MOVMISALIGN > 0
4246 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4247 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4248
4249 TARGET_ALLOW_MOVMISALIGN = 0;
4250 }
4251
4252 /* Determine when unaligned vector accesses are permitted, and when
4253 they are preferred over masked Altivec loads. Note that if
4254 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4255 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4256 not true. */
4257 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4258 {
4259 if (!TARGET_VSX)
4260 {
4261 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4262 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4263
4264 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4265 }
4266
4267 else if (!TARGET_ALLOW_MOVMISALIGN)
4268 {
4269 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4270 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4271 "-mallow-movmisalign");
4272
4273 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4274 }
4275 }
4276
4277 /* Use long double size to select the appropriate long double. We use
4278 TYPE_PRECISION to differentiate the 3 different long double types. We map
4279 128 into the precision used for TFmode. */
4280 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4281 ? 64
4282 : FLOAT_PRECISION_TFmode);
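/* The three TYPE_PRECISION values are 64 (plain double) and the two
   values FLOAT_PRECISION_TFmode can take, distinguishing IBM extended
   from IEEE 128-bit long double; a user-supplied 128 is remapped to
   the TFmode precision just below.  */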
4283
4284 /* Set long double size before the IEEE 128-bit tests. */
4285 if (!global_options_set.x_rs6000_long_double_type_size)
4286 {
4287 if (main_target_opt != NULL
4288 && (main_target_opt->x_rs6000_long_double_type_size
4289 != default_long_double_size))
4290 error ("target attribute or pragma changes long double size");
4291 else
4292 rs6000_long_double_type_size = default_long_double_size;
4293 }
4294 else if (rs6000_long_double_type_size == 128)
4295 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4296
4297 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4298 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4299 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4300 those systems will not pick up this default. Warn if the user changes the
4301 default unless -Wno-psabi. */
4302 if (!global_options_set.x_rs6000_ieeequad)
4303 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4304
4305 else if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4306 {
4307 static bool warned_change_long_double;
4308 if (!warned_change_long_double)
4309 {
4310 warned_change_long_double = true;
4311 if (TARGET_IEEEQUAD)
4312 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4313 else
4314 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4315 }
4316 }
4317
4318 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4319 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4320 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4321 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4322 the keyword and the type. */
4323 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4324
4325 /* IEEE 128-bit floating point requires VSX support. */
4326 if (TARGET_FLOAT128_KEYWORD)
4327 {
4328 if (!TARGET_VSX)
4329 {
4330 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4331 error ("%qs requires VSX support", "-mfloat128");
4332
4333 TARGET_FLOAT128_TYPE = 0;
4334 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4335 | OPTION_MASK_FLOAT128_HW);
4336 }
4337 else if (!TARGET_FLOAT128_TYPE)
4338 {
4339 TARGET_FLOAT128_TYPE = 1;
4340 warning (0, "The -mfloat128 option may not be fully supported");
4341 }
4342 }
4343
4344 /* Enable the __float128 keyword under Linux by default. */
4345 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4346 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4347 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4348
4349 /* If we are supporting the float128 type and have full ISA 3.0 support,
4350 enable -mfloat128-hardware by default. However, don't enable the
4351 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4352 because sometimes the compiler wants to put things in an integer
4353 container, and if we don't have __int128 support, it is impossible. */
4354 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4355 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4356 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4357 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4358
4359 if (TARGET_FLOAT128_HW
4360 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4361 {
4362 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4363 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4364
4365 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4366 }
4367
4368 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4369 {
4370 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4371 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4372
4373 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4374 }
4375
4376 /* Print the options after updating the defaults. */
4377 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4378 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4379
4380 /* E500mc does "better" if we inline more aggressively. Respect the
4381 user's opinion, though. */
4382 if (rs6000_block_move_inline_limit == 0
4383 && (rs6000_tune == PROCESSOR_PPCE500MC
4384 || rs6000_tune == PROCESSOR_PPCE500MC64
4385 || rs6000_tune == PROCESSOR_PPCE5500
4386 || rs6000_tune == PROCESSOR_PPCE6500))
4387 rs6000_block_move_inline_limit = 128;
4388
4389 /* store_one_arg depends on expand_block_move to handle at least the
4390 size of reg_parm_stack_space. */
4391 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4392 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
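/* The 64- and 32-byte floors correspond to the 8 parameter-passing
   GPRs (8 or 4 bytes each), i.e. the largest reg_parm_stack_space
   value store_one_arg can hand to expand_block_move.  */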
4393
4394 if (global_init_p)
4395 {
4396 /* If the appropriate debug option is enabled, replace the target hooks
4397 with debug versions that call the real version and then print
4398 debugging information. */
4399 if (TARGET_DEBUG_COST)
4400 {
4401 targetm.rtx_costs = rs6000_debug_rtx_costs;
4402 targetm.address_cost = rs6000_debug_address_cost;
4403 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4404 }
4405
4406 if (TARGET_DEBUG_ADDR)
4407 {
4408 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4409 targetm.legitimize_address = rs6000_debug_legitimize_address;
4410 rs6000_secondary_reload_class_ptr
4411 = rs6000_debug_secondary_reload_class;
4412 targetm.secondary_memory_needed
4413 = rs6000_debug_secondary_memory_needed;
4414 targetm.can_change_mode_class
4415 = rs6000_debug_can_change_mode_class;
4416 rs6000_preferred_reload_class_ptr
4417 = rs6000_debug_preferred_reload_class;
4418 rs6000_legitimize_reload_address_ptr
4419 = rs6000_debug_legitimize_reload_address;
4420 rs6000_mode_dependent_address_ptr
4421 = rs6000_debug_mode_dependent_address;
4422 }
4423
4424 if (rs6000_veclibabi_name)
4425 {
4426 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4427 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4428 else
4429 {
4430 error ("unknown vectorization library ABI type (%qs) for "
4431 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4432 ret = false;
4433 }
4434 }
4435 }
4436
4437 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4438 target attribute or pragma which automatically enables both options,
4439 unless the altivec ABI was set. This is set by default for 64-bit, but
4440 not for 32-bit. */
4441 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4442 {
4443 TARGET_FLOAT128_TYPE = 0;
4444 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4445 | OPTION_MASK_FLOAT128_KEYWORD)
4446 & ~rs6000_isa_flags_explicit);
4447 }
4448
4449 /* Enable Altivec ABI for AIX -maltivec. */
4450 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4451 {
4452 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4453 error ("target attribute or pragma changes AltiVec ABI");
4454 else
4455 rs6000_altivec_abi = 1;
4456 }
4457
4458 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4459 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4460 be explicitly overridden in either case. */
4461 if (TARGET_ELF)
4462 {
4463 if (!global_options_set.x_rs6000_altivec_abi
4464 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4465 {
4466 if (main_target_opt != NULL &&
4467 !main_target_opt->x_rs6000_altivec_abi)
4468 error ("target attribute or pragma changes AltiVec ABI");
4469 else
4470 rs6000_altivec_abi = 1;
4471 }
4472 }
4473
4474 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4475 So far, the only darwin64 targets are also Mach-O. */
4476 if (TARGET_MACHO
4477 && DEFAULT_ABI == ABI_DARWIN
4478 && TARGET_64BIT)
4479 {
4480 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4481 error ("target attribute or pragma changes darwin64 ABI");
4482 else
4483 {
4484 rs6000_darwin64_abi = 1;
4485 /* Default to natural alignment, for better performance. */
4486 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4487 }
4488 }
4489
4490 /* Place FP constants in the constant pool instead of the TOC
4491 if section anchors are enabled. */
4492 if (flag_section_anchors
4493 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4494 TARGET_NO_FP_IN_TOC = 1;
4495
4496 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4497 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4498
4499 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4500 SUBTARGET_OVERRIDE_OPTIONS;
4501 #endif
4502 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4503 SUBSUBTARGET_OVERRIDE_OPTIONS;
4504 #endif
4505 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4506 SUB3TARGET_OVERRIDE_OPTIONS;
4507 #endif
4508
4509 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4510 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4511
4512 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4513 && rs6000_tune != PROCESSOR_POWER5
4514 && rs6000_tune != PROCESSOR_POWER6
4515 && rs6000_tune != PROCESSOR_POWER7
4516 && rs6000_tune != PROCESSOR_POWER8
4517 && rs6000_tune != PROCESSOR_POWER9
4518 && rs6000_tune != PROCESSOR_PPCA2
4519 && rs6000_tune != PROCESSOR_CELL
4520 && rs6000_tune != PROCESSOR_PPC476);
4521 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4522 || rs6000_tune == PROCESSOR_POWER5
4523 || rs6000_tune == PROCESSOR_POWER7
4524 || rs6000_tune == PROCESSOR_POWER8);
4525 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4526 || rs6000_tune == PROCESSOR_POWER5
4527 || rs6000_tune == PROCESSOR_POWER6
4528 || rs6000_tune == PROCESSOR_POWER7
4529 || rs6000_tune == PROCESSOR_POWER8
4530 || rs6000_tune == PROCESSOR_POWER9
4531 || rs6000_tune == PROCESSOR_PPCE500MC
4532 || rs6000_tune == PROCESSOR_PPCE500MC64
4533 || rs6000_tune == PROCESSOR_PPCE5500
4534 || rs6000_tune == PROCESSOR_PPCE6500);
4535
4536 /* Allow debug switches to override the above settings. These are set to -1
4537 in rs6000.opt to indicate the user hasn't directly set the switch. */
4538 if (TARGET_ALWAYS_HINT >= 0)
4539 rs6000_always_hint = TARGET_ALWAYS_HINT;
4540
4541 if (TARGET_SCHED_GROUPS >= 0)
4542 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4543
4544 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4545 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4546
4547 rs6000_sched_restricted_insns_priority
4548 = (rs6000_sched_groups ? 1 : 0);
4549
4550 /* Handle -msched-costly-dep option. */
4551 rs6000_sched_costly_dep
4552 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4553
4554 if (rs6000_sched_costly_dep_str)
4555 {
4556 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4557 rs6000_sched_costly_dep = no_dep_costly;
4558 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4559 rs6000_sched_costly_dep = all_deps_costly;
4560 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4561 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4562 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4563 rs6000_sched_costly_dep = store_to_load_dep_costly;
4564 else
4565 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4566 atoi (rs6000_sched_costly_dep_str));
4567 }
4568
4569 /* Handle -minsert-sched-nops option. */
4570 rs6000_sched_insert_nops
4571 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4572
4573 if (rs6000_sched_insert_nops_str)
4574 {
4575 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4576 rs6000_sched_insert_nops = sched_finish_none;
4577 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4578 rs6000_sched_insert_nops = sched_finish_pad_groups;
4579 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4580 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4581 else
4582 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4583 atoi (rs6000_sched_insert_nops_str));
4584 }
4585
4586 /* Handle the stack protector options. */
4587 if (!global_options_set.x_rs6000_stack_protector_guard)
4588 #ifdef TARGET_THREAD_SSP_OFFSET
4589 rs6000_stack_protector_guard = SSP_TLS;
4590 #else
4591 rs6000_stack_protector_guard = SSP_GLOBAL;
4592 #endif
4593
4594 #ifdef TARGET_THREAD_SSP_OFFSET
4595 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4596 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4597 #endif
4598
4599 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4600 {
4601 char *endp;
4602 const char *str = rs6000_stack_protector_guard_offset_str;
4603
4604 errno = 0;
4605 long offset = strtol (str, &endp, 0);
4606 if (!*str || *endp || errno)
4607 error ("%qs is not a valid number in %qs", str,
4608 "-mstack-protector-guard-offset=");
4609
4610 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4611 || (TARGET_64BIT && (offset & 3)))
4612 error ("%qs is not a valid offset in %qs", str,
4613 "-mstack-protector-guard-offset=");
4614
4615 rs6000_stack_protector_guard_offset = offset;
4616 }
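/* E.g. -mstack-protector-guard-offset=0x28 (a hypothetical value for
   illustration) places the canary 0x28 bytes past the guard base
   register; the value must fit in a 16-bit signed D-field offset and,
   for 64-bit, be a multiple of 4, as checked above.  */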
4617
4618 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4619 {
4620 const char *str = rs6000_stack_protector_guard_reg_str;
4621 int reg = decode_reg_name (str);
4622
4623 if (!IN_RANGE (reg, 1, 31))
4624 error ("%qs is not a valid base register in %qs", str,
4625 "-mstack-protector-guard-reg=");
4626
4627 rs6000_stack_protector_guard_reg = reg;
4628 }
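/* E.g. -mstack-protector-guard-reg=r13 names the 64-bit TLS pointer
   register, matching the TARGET_THREAD_SSP_OFFSET default chosen
   above.  */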
4629
4630 if (rs6000_stack_protector_guard == SSP_TLS
4631 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4632 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4633
4634 if (global_init_p)
4635 {
4636 #ifdef TARGET_REGNAMES
4637 /* If the user desires alternate register names, copy in the
4638 alternate names now. */
4639 if (TARGET_REGNAMES)
4640 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4641 #endif
4642
4643 /* Set aix_struct_return last, after the ABI is determined.
4644 If -maix-struct-return or -msvr4-struct-return was explicitly
4645 used, don't override with the ABI default. */
4646 if (!global_options_set.x_aix_struct_return)
4647 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4648
4649 #if 0
4650 /* IBM XL compiler defaults to unsigned bitfields. */
4651 if (TARGET_XL_COMPAT)
4652 flag_signed_bitfields = 0;
4653 #endif
4654
4655 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4656 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4657
4658 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4659
4660 /* We can only guarantee the availability of DI pseudo-ops when
4661 assembling for 64-bit targets. */
4662 if (!TARGET_64BIT)
4663 {
4664 targetm.asm_out.aligned_op.di = NULL;
4665 targetm.asm_out.unaligned_op.di = NULL;
4666 }
4667
4668
4669 /* Set branch target alignment, if not optimizing for size. */
4670 if (!optimize_size)
4671 {
4672 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
4673 8-byte aligned to avoid misprediction by the branch predictor. */
4674 if (rs6000_tune == PROCESSOR_TITAN
4675 || rs6000_tune == PROCESSOR_CELL)
4676 {
4677 if (flag_align_functions && !str_align_functions)
4678 str_align_functions = "8";
4679 if (flag_align_jumps && !str_align_jumps)
4680 str_align_jumps = "8";
4681 if (flag_align_loops && !str_align_loops)
4682 str_align_loops = "8";
4683 }
4684 if (rs6000_align_branch_targets)
4685 {
4686 if (flag_align_functions && !str_align_functions)
4687 str_align_functions = "16";
4688 if (flag_align_jumps && !str_align_jumps)
4689 str_align_jumps = "16";
4690 if (flag_align_loops && !str_align_loops)
4691 {
4692 can_override_loop_align = 1;
4693 str_align_loops = "16";
4694 }
4695 }
4696
4697 if (flag_align_jumps && !str_align_jumps)
4698 str_align_jumps = "16";
4699 if (flag_align_loops && !str_align_loops)
4700 str_align_loops = "16";
4701 }
4702
4703 /* Arrange to save and restore machine status around nested functions. */
4704 init_machine_status = rs6000_init_machine_status;
4705
4706 /* We should always be splitting complex arguments, but we can't break
4707 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4708 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4709 targetm.calls.split_complex_arg = NULL;
4710
4711 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4712 if (DEFAULT_ABI == ABI_AIX)
4713 targetm.calls.custom_function_descriptors = 0;
4714 }
4715
4716 /* Initialize rs6000_cost with the appropriate target costs. */
4717 if (optimize_size)
4718 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4719 else
4720 switch (rs6000_tune)
4721 {
4722 case PROCESSOR_RS64A:
4723 rs6000_cost = &rs64a_cost;
4724 break;
4725
4726 case PROCESSOR_MPCCORE:
4727 rs6000_cost = &mpccore_cost;
4728 break;
4729
4730 case PROCESSOR_PPC403:
4731 rs6000_cost = &ppc403_cost;
4732 break;
4733
4734 case PROCESSOR_PPC405:
4735 rs6000_cost = &ppc405_cost;
4736 break;
4737
4738 case PROCESSOR_PPC440:
4739 rs6000_cost = &ppc440_cost;
4740 break;
4741
4742 case PROCESSOR_PPC476:
4743 rs6000_cost = &ppc476_cost;
4744 break;
4745
4746 case PROCESSOR_PPC601:
4747 rs6000_cost = &ppc601_cost;
4748 break;
4749
4750 case PROCESSOR_PPC603:
4751 rs6000_cost = &ppc603_cost;
4752 break;
4753
4754 case PROCESSOR_PPC604:
4755 rs6000_cost = &ppc604_cost;
4756 break;
4757
4758 case PROCESSOR_PPC604e:
4759 rs6000_cost = &ppc604e_cost;
4760 break;
4761
4762 case PROCESSOR_PPC620:
4763 rs6000_cost = &ppc620_cost;
4764 break;
4765
4766 case PROCESSOR_PPC630:
4767 rs6000_cost = &ppc630_cost;
4768 break;
4769
4770 case PROCESSOR_CELL:
4771 rs6000_cost = &ppccell_cost;
4772 break;
4773
4774 case PROCESSOR_PPC750:
4775 case PROCESSOR_PPC7400:
4776 rs6000_cost = &ppc750_cost;
4777 break;
4778
4779 case PROCESSOR_PPC7450:
4780 rs6000_cost = &ppc7450_cost;
4781 break;
4782
4783 case PROCESSOR_PPC8540:
4784 case PROCESSOR_PPC8548:
4785 rs6000_cost = &ppc8540_cost;
4786 break;
4787
4788 case PROCESSOR_PPCE300C2:
4789 case PROCESSOR_PPCE300C3:
4790 rs6000_cost = &ppce300c2c3_cost;
4791 break;
4792
4793 case PROCESSOR_PPCE500MC:
4794 rs6000_cost = &ppce500mc_cost;
4795 break;
4796
4797 case PROCESSOR_PPCE500MC64:
4798 rs6000_cost = &ppce500mc64_cost;
4799 break;
4800
4801 case PROCESSOR_PPCE5500:
4802 rs6000_cost = &ppce5500_cost;
4803 break;
4804
4805 case PROCESSOR_PPCE6500:
4806 rs6000_cost = &ppce6500_cost;
4807 break;
4808
4809 case PROCESSOR_TITAN:
4810 rs6000_cost = &titan_cost;
4811 break;
4812
4813 case PROCESSOR_POWER4:
4814 case PROCESSOR_POWER5:
4815 rs6000_cost = &power4_cost;
4816 break;
4817
4818 case PROCESSOR_POWER6:
4819 rs6000_cost = &power6_cost;
4820 break;
4821
4822 case PROCESSOR_POWER7:
4823 rs6000_cost = &power7_cost;
4824 break;
4825
4826 case PROCESSOR_POWER8:
4827 rs6000_cost = &power8_cost;
4828 break;
4829
4830 case PROCESSOR_POWER9:
4831 rs6000_cost = &power9_cost;
4832 break;
4833
4834 case PROCESSOR_PPCA2:
4835 rs6000_cost = &ppca2_cost;
4836 break;
4837
4838 default:
4839 gcc_unreachable ();
4840 }
4841
4842 if (global_init_p)
4843 {
4844 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4845 rs6000_cost->simultaneous_prefetches,
4846 global_options.x_param_values,
4847 global_options_set.x_param_values);
4848 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4849 global_options.x_param_values,
4850 global_options_set.x_param_values);
4851 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4852 rs6000_cost->cache_line_size,
4853 global_options.x_param_values,
4854 global_options_set.x_param_values);
4855 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4856 global_options.x_param_values,
4857 global_options_set.x_param_values);
4858
4859 /* Increase loop peeling limits based on performance analysis. */
4860 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4861 global_options.x_param_values,
4862 global_options_set.x_param_values);
4863 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4864 global_options.x_param_values,
4865 global_options_set.x_param_values);
4866
4867 /* Use the 'model' -fsched-pressure algorithm by default. */
4868 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4869 SCHED_PRESSURE_MODEL,
4870 global_options.x_param_values,
4871 global_options_set.x_param_values);
4872
4873 /* If using typedef char *va_list, signal that
4874 __builtin_va_start (&ap, 0) can be optimized to
4875 ap = __builtin_next_arg (0). */
4876 if (DEFAULT_ABI != ABI_V4)
4877 targetm.expand_builtin_va_start = NULL;
4878 }
4879
4880 /* If not explicitly specified via option, decide whether to generate indexed
4881 load/store instructions. A value of -1 indicates that the
4882 initial value of this variable has not been overwritten. During
4883 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4884 if (TARGET_AVOID_XFORM == -1)
4885 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4886 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4887 need indexed accesses and the type used is the scalar type of the element
4888 being loaded or stored. */
4889 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4890 && !TARGET_ALTIVEC);
4891
4892 /* Set the -mrecip options. */
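/* For illustration: each comma-separated token of the -mrecip= argument
   is processed in turn below; a leading '!' clears the named mask bits
   in rs6000_recip_control, otherwise they are set.  The token "default"
   expands to RECIP_HIGH_PRECISION when TARGET_RECIP_PRECISION is set,
   and to RECIP_LOW_PRECISION otherwise.  */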
4893 if (rs6000_recip_name)
4894 {
4895 char *p = ASTRDUP (rs6000_recip_name);
4896 char *q;
4897 unsigned int mask, i;
4898 bool invert;
4899
4900 while ((q = strtok (p, ",")) != NULL)
4901 {
4902 p = NULL;
4903 if (*q == '!')
4904 {
4905 invert = true;
4906 q++;
4907 }
4908 else
4909 invert = false;
4910
4911 if (!strcmp (q, "default"))
4912 mask = ((TARGET_RECIP_PRECISION)
4913 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4914 else
4915 {
4916 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4917 if (!strcmp (q, recip_options[i].string))
4918 {
4919 mask = recip_options[i].mask;
4920 break;
4921 }
4922
4923 if (i == ARRAY_SIZE (recip_options))
4924 {
4925 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4926 invert = false;
4927 mask = 0;
4928 ret = false;
4929 }
4930 }
4931
4932 if (invert)
4933 rs6000_recip_control &= ~mask;
4934 else
4935 rs6000_recip_control |= mask;
4936 }
4937 }
4938
4939 /* Set the builtin mask of the various options used that could affect which
4940 builtins were used. In the past we used target_flags, but we've run out
4941 of bits, and some options are no longer in target_flags. */
4942 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4943 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4944 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4945 rs6000_builtin_mask);
4946
4947 /* Initialize all of the registers. */
4948 rs6000_init_hard_regno_mode_ok (global_init_p);
4949
4950 /* Save the initial options in case the user uses function-specific options. */
4951 if (global_init_p)
4952 target_option_default_node = target_option_current_node
4953 = build_target_option_node (&global_options);
4954
4955 /* If not explicitly specified via option, decide whether to generate the
4956 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4957 if (TARGET_LINK_STACK == -1)
4958 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4959
4960 /* Deprecate use of -mno-speculate-indirect-jumps. */
4961 if (!rs6000_speculate_indirect_jumps)
4962 warning (0, "%qs is deprecated and not recommended in any circumstances",
4963 "-mno-speculate-indirect-jumps");
4964
4965 return ret;
4966 }
4967
4968 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4969 define the target cpu type. */
4970
4971 static void
4972 rs6000_option_override (void)
4973 {
4974 (void) rs6000_option_override_internal (true);
4975 }
4976
4977 \f
4978 /* Implement targetm.vectorize.builtin_mask_for_load. */
4979 static tree
4980 rs6000_builtin_mask_for_load (void)
4981 {
4982 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4983 if ((TARGET_ALTIVEC && !TARGET_VSX)
4984 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4985 return altivec_builtin_mask_for_load;
4986 else
4987 return 0;
4988 }
4989
4990 /* Implement LOOP_ALIGN. */
4991 align_flags
4992 rs6000_loop_align (rtx label)
4993 {
4994 basic_block bb;
4995 int ninsns;
4996
4997 /* Don't override loop alignment if -falign-loops was specified. */
4998 if (!can_override_loop_align)
4999 return align_loops;
5000
5001 bb = BLOCK_FOR_INSN (label);
5002 ninsns = num_loop_insns (bb->loop_father);
5003
5004 /* Align small loops to 32 bytes (2^5) to fit in an icache sector; otherwise return the default alignment. */
5005 if (ninsns > 4 && ninsns <= 8
5006 && (rs6000_tune == PROCESSOR_POWER4
5007 || rs6000_tune == PROCESSOR_POWER5
5008 || rs6000_tune == PROCESSOR_POWER6
5009 || rs6000_tune == PROCESSOR_POWER7
5010 || rs6000_tune == PROCESSOR_POWER8))
5011 return align_flags (5);
5012 else
5013 return align_loops;
5014 }
5015
5016 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5017 after applying N iterations. This routine does not determine
5018 how many iterations are required to reach the desired alignment. */
5019
5020 static bool
5021 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5022 {
5023 if (is_packed)
5024 return false;
5025
5026 if (TARGET_32BIT)
5027 {
5028 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5029 return true;
5030
5031 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5032 return true;
5033
5034 return false;
5035 }
5036 else
5037 {
5038 if (TARGET_MACHO)
5039 return false;
5040
5041 /* Assume that all other types are naturally aligned. CHECKME! */
5042 return true;
5043 }
5044 }
5045
5046 /* Return true if the vector misalignment factor is supported by the
5047 target. */
5048 static bool
5049 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5050 const_tree type,
5051 int misalignment,
5052 bool is_packed)
5053 {
5054 if (TARGET_VSX)
5055 {
5056 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5057 return true;
5058
5059 /* Return false if the movmisalign pattern is not supported for this mode. */
5060 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5061 return false;
5062
5063 if (misalignment == -1)
5064 {
5065 /* Misalignment factor is unknown at compile time but we know
5066 it's word aligned. */
5067 if (rs6000_vector_alignment_reachable (type, is_packed))
5068 {
5069 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5070
5071 if (element_size == 64 || element_size == 32)
5072 return true;
5073 }
5074
5075 return false;
5076 }
5077
5078 /* VSX supports word-aligned vectors. */
5079 if (misalignment % 4 == 0)
5080 return true;
5081 }
5082 return false;
5083 }
5084
5085 /* Implement targetm.vectorize.builtin_vectorization_cost. */
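/* Worked examples of the costs below: with VSX and MOVMISALIGN but
   without efficient unaligned accesses, an unaligned_load of a
   4-element vector with misalign 4 (word aligned) costs 22.  A
   vec_construct of a 4-element integer vector costs 4-1+2 = 5 on
   POWER9 and 4-1+5 = 8 on earlier CPUs with direct moves.  */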
5086 static int
5087 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5088 tree vectype, int misalign)
5089 {
5090 unsigned elements;
5091 tree elem_type;
5092
5093 switch (type_of_cost)
5094 {
5095 case scalar_stmt:
5096 case scalar_load:
5097 case scalar_store:
5098 case vector_stmt:
5099 case vector_load:
5100 case vector_store:
5101 case vec_to_scalar:
5102 case scalar_to_vec:
5103 case cond_branch_not_taken:
5104 return 1;
5105
5106 case vec_perm:
5107 if (TARGET_VSX)
5108 return 3;
5109 else
5110 return 1;
5111
5112 case vec_promote_demote:
5113 if (TARGET_VSX)
5114 return 4;
5115 else
5116 return 1;
5117
5118 case cond_branch_taken:
5119 return 3;
5120
5121 case unaligned_load:
5122 case vector_gather_load:
5123 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5124 return 1;
5125
5126 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5127 {
5128 elements = TYPE_VECTOR_SUBPARTS (vectype);
5129 if (elements == 2)
5130 /* Double word aligned. */
5131 return 2;
5132
5133 if (elements == 4)
5134 {
5135 switch (misalign)
5136 {
5137 case 8:
5138 /* Double word aligned. */
5139 return 2;
5140
5141 case -1:
5142 /* Unknown misalignment. */
5143 case 4:
5144 case 12:
5145 /* Word aligned. */
5146 return 22;
5147
5148 default:
5149 gcc_unreachable ();
5150 }
5151 }
5152 }
5153
5154 if (TARGET_ALTIVEC)
5155 /* Misaligned loads are not supported. */
5156 gcc_unreachable ();
5157
5158 return 2;
5159
5160 case unaligned_store:
5161 case vector_scatter_store:
5162 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5163 return 1;
5164
5165 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5166 {
5167 elements = TYPE_VECTOR_SUBPARTS (vectype);
5168 if (elements == 2)
5169 /* Double word aligned. */
5170 return 2;
5171
5172 if (elements == 4)
5173 {
5174 switch (misalign)
5175 {
5176 case 8:
5177 /* Double word aligned. */
5178 return 2;
5179
5180 case -1:
5181 /* Unknown misalignment. */
5182 case 4:
5183 case 12:
5184 /* Word aligned. */
5185 return 23;
5186
5187 default:
5188 gcc_unreachable ();
5189 }
5190 }
5191 }
5192
5193 if (TARGET_ALTIVEC)
5194 /* Misaligned stores are not supported. */
5195 gcc_unreachable ();
5196
5197 return 2;
5198
5199 case vec_construct:
5200 /* This is a rough approximation assuming non-constant elements
5201 constructed into a vector via element insertion. FIXME:
5202 vec_construct is not granular enough for uniformly good
5203 decisions. If the initialization is a splat, this is
5204 cheaper than we estimate. Improve this someday. */
5205 elem_type = TREE_TYPE (vectype);
5206 /* 32-bit vectors loaded into registers are stored as double
5207 precision, so we need 2 permutes, 2 converts, and 1 merge
5208 to construct a vector of short floats from them. */
5209 if (SCALAR_FLOAT_TYPE_P (elem_type)
5210 && TYPE_PRECISION (elem_type) == 32)
5211 return 5;
5212 /* On POWER9, integer vector types are built up in GPRs and then
5213 use a direct move (2 cycles). For POWER8 this is even worse,
5214 as we need two direct moves and a merge, and the direct moves
5215 are five cycles. */
5216 else if (INTEGRAL_TYPE_P (elem_type))
5217 {
5218 if (TARGET_P9_VECTOR)
5219 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5220 else
5221 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5222 }
5223 else
5224 /* V2DFmode doesn't need a direct move. */
5225 return 2;
5226
5227 default:
5228 gcc_unreachable ();
5229 }
5230 }
5231
5232 /* Implement targetm.vectorize.preferred_simd_mode. */
5233
5234 static machine_mode
5235 rs6000_preferred_simd_mode (scalar_mode mode)
5236 {
5237 if (TARGET_VSX)
5238 switch (mode)
5239 {
5240 case E_DFmode:
5241 return V2DFmode;
5242 default:;
5243 }
5244 if (TARGET_ALTIVEC || TARGET_VSX)
5245 switch (mode)
5246 {
5247 case E_SFmode:
5248 return V4SFmode;
5249 case E_TImode:
5250 return V1TImode;
5251 case E_DImode:
5252 return V2DImode;
5253 case E_SImode:
5254 return V4SImode;
5255 case E_HImode:
5256 return V8HImode;
5257 case E_QImode:
5258 return V16QImode;
5259 default:;
5260 }
5261 return word_mode;
5262 }
5263
5264 typedef struct _rs6000_cost_data
5265 {
5266 struct loop *loop_info;
5267 unsigned cost[3];
5268 } rs6000_cost_data;
5269
5270 /* Test for likely overcommitment of vector hardware resources. If a
5271 loop iteration is relatively large, and too large a percentage of
5272 instructions in the loop are vectorized, the cost model may not
5273 adequately reflect delays from unavailable vector resources.
5274 Penalize the loop body cost for this case. */
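/* For illustration: with vec_cost = 90 and not_vec_cost = 10, the
   density is 90% (over the 85% threshold) and the loop size is 100
   (over the 70 threshold), so the vectorized body cost is raised to
   90 * 110 / 100 = 99.  */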
5275
5276 static void
5277 rs6000_density_test (rs6000_cost_data *data)
5278 {
5279 const int DENSITY_PCT_THRESHOLD = 85;
5280 const int DENSITY_SIZE_THRESHOLD = 70;
5281 const int DENSITY_PENALTY = 10;
5282 struct loop *loop = data->loop_info;
5283 basic_block *bbs = get_loop_body (loop);
5284 int nbbs = loop->num_nodes;
5285 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5286 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5287 int i, density_pct;
5288
5289 for (i = 0; i < nbbs; i++)
5290 {
5291 basic_block bb = bbs[i];
5292 gimple_stmt_iterator gsi;
5293
5294 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5295 {
5296 gimple *stmt = gsi_stmt (gsi);
5297 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5298
5299 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5300 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5301 not_vec_cost++;
5302 }
5303 }
5304
5305 free (bbs);
5306 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5307
5308 if (density_pct > DENSITY_PCT_THRESHOLD
5309 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5310 {
5311 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5312 if (dump_enabled_p ())
5313 dump_printf_loc (MSG_NOTE, vect_location,
5314 "density %d%%, cost %d exceeds threshold, penalizing "
5315 "loop body cost by %d%%", density_pct,
5316 vec_cost + not_vec_cost, DENSITY_PENALTY);
5317 }
5318 }
5319
5320 /* Implement targetm.vectorize.init_cost. */
5321
5322 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5323 instruction is needed by the vectorization. */
5324 static bool rs6000_vect_nonmem;
5325
5326 static void *
5327 rs6000_init_cost (struct loop *loop_info)
5328 {
5329 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5330 data->loop_info = loop_info;
5331 data->cost[vect_prologue] = 0;
5332 data->cost[vect_body] = 0;
5333 data->cost[vect_epilogue] = 0;
5334 rs6000_vect_nonmem = false;
5335 return data;
5336 }
5337
5338 /* Implement targetm.vectorize.add_stmt_cost. */
5339
5340 static unsigned
5341 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5342 struct _stmt_vec_info *stmt_info, int misalign,
5343 enum vect_cost_model_location where)
5344 {
5345 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5346 unsigned retval = 0;
5347
5348 if (flag_vect_cost_model)
5349 {
5350 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5351 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5352 misalign);
5353 /* Statements in an inner loop relative to the loop being
5354 vectorized are weighted more heavily. The value here is
5355 arbitrary and could potentially be improved with analysis. */
5356 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5357 count *= 50; /* FIXME. */
5358
5359 retval = (unsigned) (count * stmt_cost);
5360 cost_data->cost[where] += retval;
5361
5362 /* Check whether we're doing something other than just a copy loop.
5363 Not all such loops may be profitably vectorized; see
5364 rs6000_finish_cost. */
5365 if ((kind == vec_to_scalar || kind == vec_perm
5366 || kind == vec_promote_demote || kind == vec_construct
5367 || kind == scalar_to_vec)
5368 || (where == vect_body && kind == vector_stmt))
5369 rs6000_vect_nonmem = true;
5370 }
5371
5372 return retval;
5373 }
5374
5375 /* Implement targetm.vectorize.finish_cost. */
5376
5377 static void
5378 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5379 unsigned *body_cost, unsigned *epilogue_cost)
5380 {
5381 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5382
5383 if (cost_data->loop_info)
5384 rs6000_density_test (cost_data);
5385
5386 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5387 that require versioning for any reason. The vectorization is at
5388 best a wash inside the loop, and the versioning checks make
5389 profitability highly unlikely and potentially quite harmful. */
5390 if (cost_data->loop_info)
5391 {
5392 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5393 if (!rs6000_vect_nonmem
5394 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5395 && LOOP_REQUIRES_VERSIONING (vec_info))
5396 cost_data->cost[vect_body] += 10000;
5397 }
5398
5399 *prologue_cost = cost_data->cost[vect_prologue];
5400 *body_cost = cost_data->cost[vect_body];
5401 *epilogue_cost = cost_data->cost[vect_epilogue];
5402 }
5403
5404 /* Implement targetm.vectorize.destroy_cost_data. */
5405
5406 static void
5407 rs6000_destroy_cost_data (void *data)
5408 {
5409 free (data);
5410 }
5411
5412 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5413 library with vectorized intrinsics. */
5414
5415 static tree
5416 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5417 tree type_in)
5418 {
5419 char name[32];
5420 const char *suffix = NULL;
5421 tree fntype, new_fndecl, bdecl = NULL_TREE;
5422 int n_args = 1;
5423 const char *bname;
5424 machine_mode el_mode, in_mode;
5425 int n, in_n;
5426
5427 /* Libmass is suitable for unsafe math only as it does not correctly support
5428 parts of IEEE with the required precision such as denormals. Only support
5429 it if we have VSX to use the simd d2 or f4 functions.
5430 XXX: Add variable length support. */
5431 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5432 return NULL_TREE;
5433
5434 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5435 n = TYPE_VECTOR_SUBPARTS (type_out);
5436 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5437 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5438 if (el_mode != in_mode
5439 || n != in_n)
5440 return NULL_TREE;
5441
5442 switch (fn)
5443 {
5444 CASE_CFN_ATAN2:
5445 CASE_CFN_HYPOT:
5446 CASE_CFN_POW:
5447 n_args = 2;
5448 gcc_fallthrough ();
5449
5450 CASE_CFN_ACOS:
5451 CASE_CFN_ACOSH:
5452 CASE_CFN_ASIN:
5453 CASE_CFN_ASINH:
5454 CASE_CFN_ATAN:
5455 CASE_CFN_ATANH:
5456 CASE_CFN_CBRT:
5457 CASE_CFN_COS:
5458 CASE_CFN_COSH:
5459 CASE_CFN_ERF:
5460 CASE_CFN_ERFC:
5461 CASE_CFN_EXP2:
5462 CASE_CFN_EXP:
5463 CASE_CFN_EXPM1:
5464 CASE_CFN_LGAMMA:
5465 CASE_CFN_LOG10:
5466 CASE_CFN_LOG1P:
5467 CASE_CFN_LOG2:
5468 CASE_CFN_LOG:
5469 CASE_CFN_SIN:
5470 CASE_CFN_SINH:
5471 CASE_CFN_SQRT:
5472 CASE_CFN_TAN:
5473 CASE_CFN_TANH:
5474 if (el_mode == DFmode && n == 2)
5475 {
5476 bdecl = mathfn_built_in (double_type_node, fn);
5477 suffix = "d2"; /* pow -> powd2 */
5478 }
5479 else if (el_mode == SFmode && n == 4)
5480 {
5481 bdecl = mathfn_built_in (float_type_node, fn);
5482 suffix = "4"; /* powf -> powf4 */
5483 }
5484 else
5485 return NULL_TREE;
5486 if (!bdecl)
5487 return NULL_TREE;
5488 break;
5489
5490 default:
5491 return NULL_TREE;
5492 }
5493
5494 gcc_assert (suffix != NULL);
5495 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5496 if (!bname)
5497 return NULL_TREE;
5498
5499 strcpy (name, bname + sizeof ("__builtin_") - 1);
5500 strcat (name, suffix);
5501
5502 if (n_args == 1)
5503 fntype = build_function_type_list (type_out, type_in, NULL);
5504 else if (n_args == 2)
5505 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5506 else
5507 gcc_unreachable ();
5508
5509 /* Build a function declaration for the vectorized function. */
5510 new_fndecl = build_decl (BUILTINS_LOCATION,
5511 FUNCTION_DECL, get_identifier (name), fntype);
5512 TREE_PUBLIC (new_fndecl) = 1;
5513 DECL_EXTERNAL (new_fndecl) = 1;
5514 DECL_IS_NOVOPS (new_fndecl) = 1;
5515 TREE_READONLY (new_fndecl) = 1;
5516
5517 return new_fndecl;
5518 }
5519
5520 /* Returns a function decl for a vectorized version of the builtin function
5521 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5522 if it is not available. */
5523
5524 static tree
5525 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5526 tree type_in)
5527 {
5528 machine_mode in_mode, out_mode;
5529 int in_n, out_n;
5530
5531 if (TARGET_DEBUG_BUILTIN)
5532 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5533 combined_fn_name (combined_fn (fn)),
5534 GET_MODE_NAME (TYPE_MODE (type_out)),
5535 GET_MODE_NAME (TYPE_MODE (type_in)));
5536
5537 if (TREE_CODE (type_out) != VECTOR_TYPE
5538 || TREE_CODE (type_in) != VECTOR_TYPE)
5539 return NULL_TREE;
5540
5541 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5542 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5543 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5544 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5545
5546 switch (fn)
5547 {
5548 CASE_CFN_COPYSIGN:
5549 if (VECTOR_UNIT_VSX_P (V2DFmode)
5550 && out_mode == DFmode && out_n == 2
5551 && in_mode == DFmode && in_n == 2)
5552 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5553 if (VECTOR_UNIT_VSX_P (V4SFmode)
5554 && out_mode == SFmode && out_n == 4
5555 && in_mode == SFmode && in_n == 4)
5556 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5557 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5558 && out_mode == SFmode && out_n == 4
5559 && in_mode == SFmode && in_n == 4)
5560 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5561 break;
5562 CASE_CFN_CEIL:
5563 if (VECTOR_UNIT_VSX_P (V2DFmode)
5564 && out_mode == DFmode && out_n == 2
5565 && in_mode == DFmode && in_n == 2)
5566 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5567 if (VECTOR_UNIT_VSX_P (V4SFmode)
5568 && out_mode == SFmode && out_n == 4
5569 && in_mode == SFmode && in_n == 4)
5570 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5571 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5572 && out_mode == SFmode && out_n == 4
5573 && in_mode == SFmode && in_n == 4)
5574 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5575 break;
5576 CASE_CFN_FLOOR:
5577 if (VECTOR_UNIT_VSX_P (V2DFmode)
5578 && out_mode == DFmode && out_n == 2
5579 && in_mode == DFmode && in_n == 2)
5580 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5581 if (VECTOR_UNIT_VSX_P (V4SFmode)
5582 && out_mode == SFmode && out_n == 4
5583 && in_mode == SFmode && in_n == 4)
5584 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5585 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5586 && out_mode == SFmode && out_n == 4
5587 && in_mode == SFmode && in_n == 4)
5588 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5589 break;
5590 CASE_CFN_FMA:
5591 if (VECTOR_UNIT_VSX_P (V2DFmode)
5592 && out_mode == DFmode && out_n == 2
5593 && in_mode == DFmode && in_n == 2)
5594 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5595 if (VECTOR_UNIT_VSX_P (V4SFmode)
5596 && out_mode == SFmode && out_n == 4
5597 && in_mode == SFmode && in_n == 4)
5598 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5599 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5600 && out_mode == SFmode && out_n == 4
5601 && in_mode == SFmode && in_n == 4)
5602 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5603 break;
5604 CASE_CFN_TRUNC:
5605 if (VECTOR_UNIT_VSX_P (V2DFmode)
5606 && out_mode == DFmode && out_n == 2
5607 && in_mode == DFmode && in_n == 2)
5608 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5609 if (VECTOR_UNIT_VSX_P (V4SFmode)
5610 && out_mode == SFmode && out_n == 4
5611 && in_mode == SFmode && in_n == 4)
5612 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5613 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5614 && out_mode == SFmode && out_n == 4
5615 && in_mode == SFmode && in_n == 4)
5616 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5617 break;
5618 CASE_CFN_NEARBYINT:
5619 if (VECTOR_UNIT_VSX_P (V2DFmode)
5620 && flag_unsafe_math_optimizations
5621 && out_mode == DFmode && out_n == 2
5622 && in_mode == DFmode && in_n == 2)
5623 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5624 if (VECTOR_UNIT_VSX_P (V4SFmode)
5625 && flag_unsafe_math_optimizations
5626 && out_mode == SFmode && out_n == 4
5627 && in_mode == SFmode && in_n == 4)
5628 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5629 break;
5630 CASE_CFN_RINT:
5631 if (VECTOR_UNIT_VSX_P (V2DFmode)
5632 && !flag_trapping_math
5633 && out_mode == DFmode && out_n == 2
5634 && in_mode == DFmode && in_n == 2)
5635 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5636 if (VECTOR_UNIT_VSX_P (V4SFmode)
5637 && !flag_trapping_math
5638 && out_mode == SFmode && out_n == 4
5639 && in_mode == SFmode && in_n == 4)
5640 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5641 break;
5642 default:
5643 break;
5644 }
5645
5646 /* Generate calls to libmass if appropriate. */
5647 if (rs6000_veclib_handler)
5648 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5649
5650 return NULL_TREE;
5651 }
5652
5653 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5654
5655 static tree
5656 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5657 tree type_in)
5658 {
5659 machine_mode in_mode, out_mode;
5660 int in_n, out_n;
5661
5662 if (TARGET_DEBUG_BUILTIN)
5663 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5664 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5665 GET_MODE_NAME (TYPE_MODE (type_out)),
5666 GET_MODE_NAME (TYPE_MODE (type_in)));
5667
5668 if (TREE_CODE (type_out) != VECTOR_TYPE
5669 || TREE_CODE (type_in) != VECTOR_TYPE)
5670 return NULL_TREE;
5671
5672 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5673 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5674 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5675 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5676
5677 enum rs6000_builtins fn
5678 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5679 switch (fn)
5680 {
5681 case RS6000_BUILTIN_RSQRTF:
5682 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5683 && out_mode == SFmode && out_n == 4
5684 && in_mode == SFmode && in_n == 4)
5685 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5686 break;
5687 case RS6000_BUILTIN_RSQRT:
5688 if (VECTOR_UNIT_VSX_P (V2DFmode)
5689 && out_mode == DFmode && out_n == 2
5690 && in_mode == DFmode && in_n == 2)
5691 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5692 break;
5693 case RS6000_BUILTIN_RECIPF:
5694 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5695 && out_mode == SFmode && out_n == 4
5696 && in_mode == SFmode && in_n == 4)
5697 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5698 break;
5699 case RS6000_BUILTIN_RECIP:
5700 if (VECTOR_UNIT_VSX_P (V2DFmode)
5701 && out_mode == DFmode && out_n == 2
5702 && in_mode == DFmode && in_n == 2)
5703 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5704 break;
5705 default:
5706 break;
5707 }
5708 return NULL_TREE;
5709 }
5710 \f
5711 /* Default CPU string for rs6000*_file_start functions. */
5712 static const char *rs6000_default_cpu;
5713
5714 /* Do anything needed at the start of the asm file. */
5715
5716 static void
5717 rs6000_file_start (void)
5718 {
5719 char buffer[80];
5720 const char *start = buffer;
5721 FILE *file = asm_out_file;
5722
5723 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5724
5725 default_file_start ();
5726
5727 if (flag_verbose_asm)
5728 {
5729 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5730
5731 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5732 {
5733 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5734 start = "";
5735 }
5736
5737 if (global_options_set.x_rs6000_cpu_index)
5738 {
5739 fprintf (file, "%s -mcpu=%s", start,
5740 processor_target_table[rs6000_cpu_index].name);
5741 start = "";
5742 }
5743
5744 if (global_options_set.x_rs6000_tune_index)
5745 {
5746 fprintf (file, "%s -mtune=%s", start,
5747 processor_target_table[rs6000_tune_index].name);
5748 start = "";
5749 }
5750
5751 if (PPC405_ERRATUM77)
5752 {
5753 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5754 start = "";
5755 }
5756
5757 #ifdef USING_ELFOS_H
5758 switch (rs6000_sdata)
5759 {
5760 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5761 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5762 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5763 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5764 }
5765
5766 if (rs6000_sdata && g_switch_value)
5767 {
5768 fprintf (file, "%s -G %d", start,
5769 g_switch_value);
5770 start = "";
5771 }
5772 #endif
5773
5774 if (*start == '\0')
5775 putc ('\n', file);
5776 }
5777
5778 #ifdef USING_ELFOS_H
5779 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5780 && !global_options_set.x_rs6000_cpu_index)
5781 {
5782 fputs ("\t.machine ", asm_out_file);
5783 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5784 fputs ("power9\n", asm_out_file);
5785 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5786 fputs ("power8\n", asm_out_file);
5787 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5788 fputs ("power7\n", asm_out_file);
5789 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5790 fputs ("power6\n", asm_out_file);
5791 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5792 fputs ("power5\n", asm_out_file);
5793 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5794 fputs ("power4\n", asm_out_file);
5795 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5796 fputs ("ppc64\n", asm_out_file);
5797 else
5798 fputs ("ppc\n", asm_out_file);
5799 }
5800 #endif
5801
5802 if (DEFAULT_ABI == ABI_ELFv2)
5803 fprintf (file, "\t.abiversion 2\n");
5804 }
5805
5806 \f
5807 /* Return nonzero if this function is known to have a null epilogue. */
5808
5809 int
5810 direct_return (void)
5811 {
5812 if (reload_completed)
5813 {
5814 rs6000_stack_t *info = rs6000_stack_info ();
5815
5816 if (info->first_gp_reg_save == 32
5817 && info->first_fp_reg_save == 64
5818 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5819 && ! info->lr_save_p
5820 && ! info->cr_save_p
5821 && info->vrsave_size == 0
5822 && ! info->push_p)
5823 return 1;
5824 }
5825
5826 return 0;
5827 }
5828
5829 /* Return the number of instructions it takes to form a constant in an
5830 integer register. */
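/* For illustration: on a 64-bit target, 0x123456789abcdef0 costs five
   instructions by the accounting below: two for the high 32 bits
   (lis/ori), two for the sign-extended low 32 bits, plus one
   instruction to combine the two halves.  */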
5831
5832 int
5833 num_insns_constant_wide (HOST_WIDE_INT value)
5834 {
5835 /* signed constant loadable with addi */
5836 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5837 return 1;
5838
5839 /* constant loadable with addis */
5840 else if ((value & 0xffff) == 0
5841 && (value >> 31 == -1 || value >> 31 == 0))
5842 return 1;
5843
5844 else if (TARGET_POWERPC64)
5845 {
5846 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5847 HOST_WIDE_INT high = value >> 31;
5848
5849 if (high == 0 || high == -1)
5850 return 2;
5851
5852 high >>= 1;
5853
5854 if (low == 0)
5855 return num_insns_constant_wide (high) + 1;
5856 else if (high == 0)
5857 return num_insns_constant_wide (low) + 1;
5858 else
5859 return (num_insns_constant_wide (high)
5860 + num_insns_constant_wide (low) + 1);
5861 }
5862
5863 else
5864 return 2;
5865 }
5866
5867 int
5868 num_insns_constant (rtx op, machine_mode mode)
5869 {
5870 HOST_WIDE_INT low, high;
5871
5872 switch (GET_CODE (op))
5873 {
5874 case CONST_INT:
5875 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
5876 && rs6000_is_valid_and_mask (op, mode))
5877 return 2;
5878 else
5879 return num_insns_constant_wide (INTVAL (op));
5880
5881 case CONST_WIDE_INT:
5882 {
5883 int i;
5884 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
5885 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5886 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
5887 return ins;
5888 }
5889
5890 case CONST_DOUBLE:
5891 if (mode == SFmode || mode == SDmode)
5892 {
5893 long l;
5894
5895 if (DECIMAL_FLOAT_MODE_P (mode))
5896 REAL_VALUE_TO_TARGET_DECIMAL32
5897 (*CONST_DOUBLE_REAL_VALUE (op), l);
5898 else
5899 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5900 return num_insns_constant_wide ((HOST_WIDE_INT) l);
5901 }
5902
5903 long l[2];
5904 if (DECIMAL_FLOAT_MODE_P (mode))
5905 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
5906 else
5907 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5908 high = l[WORDS_BIG_ENDIAN == 0];
5909 low = l[WORDS_BIG_ENDIAN != 0];
5910
5911 if (TARGET_32BIT)
5912 return (num_insns_constant_wide (low)
5913 + num_insns_constant_wide (high));
5914 else
5915 {
5916 if ((high == 0 && low >= 0)
5917 || (high == -1 && low < 0))
5918 return num_insns_constant_wide (low);
5919
5920 else if (rs6000_is_valid_and_mask (op, mode))
5921 return 2;
5922
5923 else if (low == 0)
5924 return num_insns_constant_wide (high) + 1;
5925
5926 else
5927 return (num_insns_constant_wide (high)
5928 + num_insns_constant_wide (low) + 1);
5929 }
5930
5931 default:
5932 gcc_unreachable ();
5933 }
5934 }
5935
5936 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5937 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5938 corresponding element of the vector, but for V4SFmode, the
5939 corresponding "float" is interpreted as an SImode integer. */
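/* E.g., element 0 of a V4SFmode constant whose first element is 1.0f
   is returned as 0x3f800000, the IEEE single-precision bit pattern.  */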
5940
5941 HOST_WIDE_INT
5942 const_vector_elt_as_int (rtx op, unsigned int elt)
5943 {
5944 rtx tmp;
5945
5946 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5947 gcc_assert (GET_MODE (op) != V2DImode
5948 && GET_MODE (op) != V2DFmode);
5949
5950 tmp = CONST_VECTOR_ELT (op, elt);
5951 if (GET_MODE (op) == V4SFmode)
5952 tmp = gen_lowpart (SImode, tmp);
5953 return INTVAL (tmp);
5954 }
5955
5956 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5957 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5958 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5959 all items are set to the same value and contain COPIES replicas of the
5960 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5961 operand and the others are set to the value of the operand's msb. */
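/* For example, vspltisw 4 viewed as a V8HImode constant corresponds to
   STEP 2, COPIES 1: it produces {0,4,0,4,0,4,0,4} (big endian), where
   every second halfword is the operand and the rest hold its msb (0).
   vspltisb 4 viewed as V8HImode corresponds to STEP 1, COPIES 2: it
   produces {0x0404,...}, each halfword holding two copies of the byte
   operand.  */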
5962
5963 static bool
5964 vspltis_constant (rtx op, unsigned step, unsigned copies)
5965 {
5966 machine_mode mode = GET_MODE (op);
5967 machine_mode inner = GET_MODE_INNER (mode);
5968
5969 unsigned i;
5970 unsigned nunits;
5971 unsigned bitsize;
5972 unsigned mask;
5973
5974 HOST_WIDE_INT val;
5975 HOST_WIDE_INT splat_val;
5976 HOST_WIDE_INT msb_val;
5977
5978 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5979 return false;
5980
5981 nunits = GET_MODE_NUNITS (mode);
5982 bitsize = GET_MODE_BITSIZE (inner);
5983 mask = GET_MODE_MASK (inner);
5984
5985 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5986 splat_val = val;
5987 msb_val = val >= 0 ? 0 : -1;
5988
5989 /* Construct the value to be splatted, if possible. If not, return 0. */
5990 for (i = 2; i <= copies; i *= 2)
5991 {
5992 HOST_WIDE_INT small_val;
5993 bitsize /= 2;
5994 small_val = splat_val >> bitsize;
5995 mask >>= bitsize;
5996 if (splat_val != ((HOST_WIDE_INT)
5997 ((unsigned HOST_WIDE_INT) small_val << bitsize)
5998 | (small_val & mask)))
5999 return false;
6000 splat_val = small_val;
6001 }
6002
6003 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6004 if (EASY_VECTOR_15 (splat_val))
6005 ;
6006
6007 /* Also check if we can splat, and then add the result to itself. Do so if
6008 the value is positive, or if the splat instruction is using OP's mode;
6009 for splat_val < 0, the splat and the add should use the same mode. */
6010 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6011 && (splat_val >= 0 || (step == 1 && copies == 1)))
6012 ;
6013
6014 /* Also check if we are loading up the most significant bit which can be done by
6015 loading up -1 and shifting the value left by -1. */
6016 else if (EASY_VECTOR_MSB (splat_val, inner))
6017 ;
6018
6019 else
6020 return false;
6021
6022 /* Check if VAL is present in every STEP-th element, and the
6023 other elements are filled with its most significant bit. */
6024 for (i = 1; i < nunits; ++i)
6025 {
6026 HOST_WIDE_INT desired_val;
6027 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6028 if ((i & (step - 1)) == 0)
6029 desired_val = val;
6030 else
6031 desired_val = msb_val;
6032
6033 if (desired_val != const_vector_elt_as_int (op, elt))
6034 return false;
6035 }
6036
6037 return true;
6038 }
6039
6040 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6041 instruction, filling in the bottom elements with 0 or -1.
6042
6043 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6044 for the number of zeroes to shift in, or negative for the number of 0xff
6045 bytes to shift in.
6046
6047 OP is a CONST_VECTOR. */
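/* For example, the big-endian V4SImode constant {5,5,5,0} can be built
   as vspltisw 5 followed by a VSLDOI shifting in one zero word, so this
   function returns 4 (bytes of zeroes); for {5,5,-1,-1} it returns -8
   (two words of 0xff bytes).  */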
6048
6049 int
6050 vspltis_shifted (rtx op)
6051 {
6052 machine_mode mode = GET_MODE (op);
6053 machine_mode inner = GET_MODE_INNER (mode);
6054
6055 unsigned i, j;
6056 unsigned nunits;
6057 unsigned mask;
6058
6059 HOST_WIDE_INT val;
6060
6061 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6062 return 0;
6063
6064 /* We need to create pseudo registers to do the shift, so don't recognize
6065 shift vector constants after reload. */
6066 if (!can_create_pseudo_p ())
6067 return 0;
6068
6069 nunits = GET_MODE_NUNITS (mode);
6070 mask = GET_MODE_MASK (inner);
6071
6072 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6073
6074 /* Check if the value can really be the operand of a vspltis[bhw]. */
6075 if (EASY_VECTOR_15 (val))
6076 ;
6077
6078 /* Also check if we are loading up the most significant bit which can be done
6079 by loading up -1 and shifting the value left by -1. */
6080 else if (EASY_VECTOR_MSB (val, inner))
6081 ;
6082
6083 else
6084 return 0;
6085
6086 /* Check if VAL is present in every element until we find elements
6087 that are 0 or all 1 bits. */
6088 for (i = 1; i < nunits; ++i)
6089 {
6090 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6091 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6092
6093 /* If the value isn't the splat value, check for the remaining elements
6094 being 0/-1. */
6095 if (val != elt_val)
6096 {
6097 if (elt_val == 0)
6098 {
6099 for (j = i+1; j < nunits; ++j)
6100 {
6101 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6102 if (const_vector_elt_as_int (op, elt2) != 0)
6103 return 0;
6104 }
6105
6106 return (nunits - i) * GET_MODE_SIZE (inner);
6107 }
6108
6109 else if ((elt_val & mask) == mask)
6110 {
6111 for (j = i+1; j < nunits; ++j)
6112 {
6113 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6114 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6115 return 0;
6116 }
6117
6118 return -((nunits - i) * GET_MODE_SIZE (inner));
6119 }
6120
6121 else
6122 return 0;
6123 }
6124 }
6125
6126 /* If all elements are equal, we don't need to do VSLDOI. */
6127 return 0;
6128 }
6129
6130
6131 /* Return true if OP is of the given MODE and can be synthesized
6132 with a vspltisb, vspltish or vspltisw. */
6133
6134 bool
6135 easy_altivec_constant (rtx op, machine_mode mode)
6136 {
6137 unsigned step, copies;
6138
6139 if (mode == VOIDmode)
6140 mode = GET_MODE (op);
6141 else if (mode != GET_MODE (op))
6142 return false;
6143
6144 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6145 constants. */
6146 if (mode == V2DFmode)
6147 return zero_constant (op, mode);
6148
6149 else if (mode == V2DImode)
6150 {
6151 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6152 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6153 return false;
6154
6155 if (zero_constant (op, mode))
6156 return true;
6157
6158 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6159 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6160 return true;
6161
6162 return false;
6163 }
6164
6165 /* V1TImode is a special container for TImode. Ignore for now. */
6166 else if (mode == V1TImode)
6167 return false;
6168
6169 /* Start with a vspltisw. */
6170 step = GET_MODE_NUNITS (mode) / 4;
6171 copies = 1;
6172
6173 if (vspltis_constant (op, step, copies))
6174 return true;
6175
6176 /* Then try with a vspltish. */
6177 if (step == 1)
6178 copies <<= 1;
6179 else
6180 step >>= 1;
6181
6182 if (vspltis_constant (op, step, copies))
6183 return true;
6184
6185 /* And finally a vspltisb. */
6186 if (step == 1)
6187 copies <<= 1;
6188 else
6189 step >>= 1;
6190
6191 if (vspltis_constant (op, step, copies))
6192 return true;
6193
6194 if (vspltis_shifted (op) != 0)
6195 return true;
6196
6197 return false;
6198 }
6199
6200 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6201 result is OP. Abort if it is not possible. */
6202
6203 rtx
6204 gen_easy_altivec_constant (rtx op)
6205 {
6206 machine_mode mode = GET_MODE (op);
6207 int nunits = GET_MODE_NUNITS (mode);
6208 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6209 unsigned step = nunits / 4;
6210 unsigned copies = 1;
6211
6212 /* Start with a vspltisw. */
6213 if (vspltis_constant (op, step, copies))
6214 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6215
6216 /* Then try with a vspltish. */
6217 if (step == 1)
6218 copies <<= 1;
6219 else
6220 step >>= 1;
6221
6222 if (vspltis_constant (op, step, copies))
6223 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6224
6225 /* And finally a vspltisb. */
6226 if (step == 1)
6227 copies <<= 1;
6228 else
6229 step >>= 1;
6230
6231 if (vspltis_constant (op, step, copies))
6232 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6233
6234 gcc_unreachable ();
6235 }
6236
6237 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6238 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6239
6240 Return the number of instructions needed (1 or 2) through the address
6241 pointed to by NUM_INSNS_PTR.
6242
6243 Return the constant that is being split via CONSTANT_PTR. */
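/* For illustration: a V16QImode vector of 20s needs a single xxspltib;
   a V4SImode vector of 20s needs two instructions (xxspltib plus a
   sign extension such as vextsb2w); vectors of 0 or -1 always need
   just one.  */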
6244
6245 bool
6246 xxspltib_constant_p (rtx op,
6247 machine_mode mode,
6248 int *num_insns_ptr,
6249 int *constant_ptr)
6250 {
6251 size_t nunits = GET_MODE_NUNITS (mode);
6252 size_t i;
6253 HOST_WIDE_INT value;
6254 rtx element;
6255
6256 /* Set the returned values to out of bound values. */
6257 *num_insns_ptr = -1;
6258 *constant_ptr = 256;
6259
6260 if (!TARGET_P9_VECTOR)
6261 return false;
6262
6263 if (mode == VOIDmode)
6264 mode = GET_MODE (op);
6265
6266 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6267 return false;
6268
6269 /* Handle (vec_duplicate <constant>). */
6270 if (GET_CODE (op) == VEC_DUPLICATE)
6271 {
6272 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6273 && mode != V2DImode)
6274 return false;
6275
6276 element = XEXP (op, 0);
6277 if (!CONST_INT_P (element))
6278 return false;
6279
6280 value = INTVAL (element);
6281 if (!IN_RANGE (value, -128, 127))
6282 return false;
6283 }
6284
6285 /* Handle (const_vector [...]). */
6286 else if (GET_CODE (op) == CONST_VECTOR)
6287 {
6288 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6289 && mode != V2DImode)
6290 return false;
6291
6292 element = CONST_VECTOR_ELT (op, 0);
6293 if (!CONST_INT_P (element))
6294 return false;
6295
6296 value = INTVAL (element);
6297 if (!IN_RANGE (value, -128, 127))
6298 return false;
6299
6300 for (i = 1; i < nunits; i++)
6301 {
6302 element = CONST_VECTOR_ELT (op, i);
6303 if (!CONST_INT_P (element))
6304 return false;
6305
6306 if (value != INTVAL (element))
6307 return false;
6308 }
6309 }
6310
6311 /* Handle integer constants being loaded into the upper part of the VSX
6312 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6313 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6314 else if (CONST_INT_P (op))
6315 {
6316 if (!SCALAR_INT_MODE_P (mode))
6317 return false;
6318
6319 value = INTVAL (op);
6320 if (!IN_RANGE (value, -128, 127))
6321 return false;
6322
6323 if (!IN_RANGE (value, -1, 0))
6324 {
6325 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6326 return false;
6327
6328 if (EASY_VECTOR_15 (value))
6329 return false;
6330 }
6331 }
6332
6333 else
6334 return false;
6335
6336 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6337 sign extend. Special case 0/-1 to allow getting any VSX register instead
6338 of an Altivec register. */
6339 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6340 && EASY_VECTOR_15 (value))
6341 return false;
6342
6343 /* Return # of instructions and the constant byte for XXSPLTIB. */
6344 if (mode == V16QImode)
6345 *num_insns_ptr = 1;
6346
6347 else if (IN_RANGE (value, -1, 0))
6348 *num_insns_ptr = 1;
6349
6350 else
6351 *num_insns_ptr = 2;
6352
6353 *constant_ptr = (int) value;
6354 return true;
6355 }
6356
6357 const char *
6358 output_vec_const_move (rtx *operands)
6359 {
6360 int shift;
6361 machine_mode mode;
6362 rtx dest, vec;
6363
6364 dest = operands[0];
6365 vec = operands[1];
6366 mode = GET_MODE (dest);
6367
6368 if (TARGET_VSX)
6369 {
6370 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6371 int xxspltib_value = 256;
6372 int num_insns = -1;
6373
6374 if (zero_constant (vec, mode))
6375 {
6376 if (TARGET_P9_VECTOR)
6377 return "xxspltib %x0,0";
6378
6379 else if (dest_vmx_p)
6380 return "vspltisw %0,0";
6381
6382 else
6383 return "xxlxor %x0,%x0,%x0";
6384 }
6385
6386 if (all_ones_constant (vec, mode))
6387 {
6388 if (TARGET_P9_VECTOR)
6389 return "xxspltib %x0,255";
6390
6391 else if (dest_vmx_p)
6392 return "vspltisw %0,-1";
6393
6394 else if (TARGET_P8_VECTOR)
6395 return "xxlorc %x0,%x0,%x0";
6396
6397 else
6398 gcc_unreachable ();
6399 }
6400
6401 if (TARGET_P9_VECTOR
6402 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6403 {
6404 if (num_insns == 1)
6405 {
6406 operands[2] = GEN_INT (xxspltib_value & 0xff);
6407 return "xxspltib %x0,%2";
6408 }
6409
6410 return "#";
6411 }
6412 }
6413
6414 if (TARGET_ALTIVEC)
6415 {
6416 rtx splat_vec;
6417
6418 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6419 if (zero_constant (vec, mode))
6420 return "vspltisw %0,0";
6421
6422 if (all_ones_constant (vec, mode))
6423 return "vspltisw %0,-1";
6424
6425 /* Do we need to construct a value using VSLDOI? */
6426 shift = vspltis_shifted (vec);
6427 if (shift != 0)
6428 return "#";
6429
6430 splat_vec = gen_easy_altivec_constant (vec);
6431 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6432 operands[1] = XEXP (splat_vec, 0);
6433 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6434 return "#";
6435
6436 switch (GET_MODE (splat_vec))
6437 {
6438 case E_V4SImode:
6439 return "vspltisw %0,%1";
6440
6441 case E_V8HImode:
6442 return "vspltish %0,%1";
6443
6444 case E_V16QImode:
6445 return "vspltisb %0,%1";
6446
6447 default:
6448 gcc_unreachable ();
6449 }
6450 }
6451
6452 gcc_unreachable ();
6453 }
6454
6455 /* Initialize vector TARGET to VALS. */
6456
6457 void
6458 rs6000_expand_vector_init (rtx target, rtx vals)
6459 {
6460 machine_mode mode = GET_MODE (target);
6461 machine_mode inner_mode = GET_MODE_INNER (mode);
6462 int n_elts = GET_MODE_NUNITS (mode);
6463 int n_var = 0, one_var = -1;
6464 bool all_same = true, all_const_zero = true;
6465 rtx x, mem;
6466 int i;
6467
6468 for (i = 0; i < n_elts; ++i)
6469 {
6470 x = XVECEXP (vals, 0, i);
6471 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6472 ++n_var, one_var = i;
6473 else if (x != CONST0_RTX (inner_mode))
6474 all_const_zero = false;
6475
6476 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6477 all_same = false;
6478 }
6479
6480 if (n_var == 0)
6481 {
6482 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6483 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6484 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6485 {
6486 /* Zero register. */
6487 emit_move_insn (target, CONST0_RTX (mode));
6488 return;
6489 }
6490 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6491 {
6492 /* Splat immediate. */
6493 emit_insn (gen_rtx_SET (target, const_vec));
6494 return;
6495 }
6496 else
6497 {
6498 /* Load from constant pool. */
6499 emit_move_insn (target, const_vec);
6500 return;
6501 }
6502 }
6503
6504 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6505 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6506 {
6507 rtx op[2];
6508 size_t i;
6509 size_t num_elements = all_same ? 1 : 2;
6510 for (i = 0; i < num_elements; i++)
6511 {
6512 op[i] = XVECEXP (vals, 0, i);
6513 /* Just in case there is a SUBREG with a smaller mode, do a
6514 conversion. */
6515 if (GET_MODE (op[i]) != inner_mode)
6516 {
6517 rtx tmp = gen_reg_rtx (inner_mode);
6518 convert_move (tmp, op[i], 0);
6519 op[i] = tmp;
6520 }
6521 /* Allow load with splat double word. */
6522 else if (MEM_P (op[i]))
6523 {
6524 if (!all_same)
6525 op[i] = force_reg (inner_mode, op[i]);
6526 }
6527 else if (!REG_P (op[i]))
6528 op[i] = force_reg (inner_mode, op[i]);
6529 }
6530
6531 if (all_same)
6532 {
6533 if (mode == V2DFmode)
6534 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6535 else
6536 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6537 }
6538 else
6539 {
6540 if (mode == V2DFmode)
6541 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6542 else
6543 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6544 }
6545 return;
6546 }
6547
6548 /* Special case initializing vector int if we are on 64-bit systems with
6549 direct move or we have the ISA 3.0 instructions. */
6550 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6551 && TARGET_DIRECT_MOVE_64BIT)
6552 {
6553 if (all_same)
6554 {
6555 rtx element0 = XVECEXP (vals, 0, 0);
6556 if (MEM_P (element0))
6557 element0 = rs6000_address_for_fpconvert (element0);
6558 else
6559 element0 = force_reg (SImode, element0);
6560
6561 if (TARGET_P9_VECTOR)
6562 emit_insn (gen_vsx_splat_v4si (target, element0));
6563 else
6564 {
6565 rtx tmp = gen_reg_rtx (DImode);
6566 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6567 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6568 }
6569 return;
6570 }
6571 else
6572 {
6573 rtx elements[4];
6574 size_t i;
6575
6576 for (i = 0; i < 4; i++)
6577 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6578
6579 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6580 elements[2], elements[3]));
6581 return;
6582 }
6583 }
6584
6585 /* With single precision floating point on VSX, we know that internally it
6586 is actually represented as a double, so either make 2 V2DF
6587 vectors and convert these vectors to single precision, or do one
6588 conversion and splat the result to the other elements. */
6589 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6590 {
6591 if (all_same)
6592 {
6593 rtx element0 = XVECEXP (vals, 0, 0);
6594
6595 if (TARGET_P9_VECTOR)
6596 {
6597 if (MEM_P (element0))
6598 element0 = rs6000_address_for_fpconvert (element0);
6599
6600 emit_insn (gen_vsx_splat_v4sf (target, element0));
6601 }
6602
6603 else
6604 {
6605 rtx freg = gen_reg_rtx (V4SFmode);
6606 rtx sreg = force_reg (SFmode, element0);
6607 rtx cvt = (TARGET_XSCVDPSPN
6608 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6609 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6610
6611 emit_insn (cvt);
6612 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6613 const0_rtx));
6614 }
6615 }
6616 else
6617 {
6618 rtx dbl_even = gen_reg_rtx (V2DFmode);
6619 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6620 rtx flt_even = gen_reg_rtx (V4SFmode);
6621 rtx flt_odd = gen_reg_rtx (V4SFmode);
6622 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6623 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6624 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6625 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6626
6627 /* Use VMRGEW if we can instead of doing a permute. */
6628 if (TARGET_P8_VECTOR)
6629 {
6630 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6631 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6632 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6633 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6634 if (BYTES_BIG_ENDIAN)
6635 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6636 else
6637 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6638 }
6639 else
6640 {
6641 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6642 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6643 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6644 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6645 rs6000_expand_extract_even (target, flt_even, flt_odd);
6646 }
6647 }
6648 return;
6649 }
6650
6651 /* Special case initializing vector short/char that are splats if we are on
6652 64-bit systems with direct move. */
6653 if (all_same && TARGET_DIRECT_MOVE_64BIT
6654 && (mode == V16QImode || mode == V8HImode))
6655 {
6656 rtx op0 = XVECEXP (vals, 0, 0);
6657 rtx di_tmp = gen_reg_rtx (DImode);
6658
6659 if (!REG_P (op0))
6660 op0 = force_reg (GET_MODE_INNER (mode), op0);
6661
6662 if (mode == V16QImode)
6663 {
6664 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6665 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6666 return;
6667 }
6668
6669 if (mode == V8HImode)
6670 {
6671 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6672 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6673 return;
6674 }
6675 }
6676
6677 /* Store value to stack temp. Load vector element. Splat. However, splat
6678 of 64-bit items is not supported on Altivec. */
6679 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6680 {
6681 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6682 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6683 XVECEXP (vals, 0, 0));
6684 x = gen_rtx_UNSPEC (VOIDmode,
6685 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6686 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6687 gen_rtvec (2,
6688 gen_rtx_SET (target, mem),
6689 x)));
6690 x = gen_rtx_VEC_SELECT (inner_mode, target,
6691 gen_rtx_PARALLEL (VOIDmode,
6692 gen_rtvec (1, const0_rtx)));
6693 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6694 return;
6695 }
6696
6697 /* One field is non-constant. Load constant then overwrite
6698 varying field. */
6699 if (n_var == 1)
6700 {
6701 rtx copy = copy_rtx (vals);
6702
6703 /* Load constant part of vector, substitute neighboring value for
6704 varying element. */
6705 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6706 rs6000_expand_vector_init (target, copy);
6707
6708 /* Insert variable. */
6709 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6710 return;
6711 }
6712
6713 /* Construct the vector in memory one field at a time
6714 and load the whole vector. */
6715 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6716 for (i = 0; i < n_elts; i++)
6717 emit_move_insn (adjust_address_nv (mem, inner_mode,
6718 i * GET_MODE_SIZE (inner_mode)),
6719 XVECEXP (vals, 0, i));
6720 emit_move_insn (target, mem);
6721 }
6722
6723 /* Set field ELT of TARGET to VAL. */
6724
6725 void
6726 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6727 {
6728 machine_mode mode = GET_MODE (target);
6729 machine_mode inner_mode = GET_MODE_INNER (mode);
6730 rtx reg = gen_reg_rtx (mode);
6731 rtx mask, mem, x;
6732 int width = GET_MODE_SIZE (inner_mode);
6733 int i;
6734
6735 val = force_reg (GET_MODE (val), val);
6736
6737 if (VECTOR_MEM_VSX_P (mode))
6738 {
6739 rtx insn = NULL_RTX;
6740 rtx elt_rtx = GEN_INT (elt);
6741
6742 if (mode == V2DFmode)
6743 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6744
6745 else if (mode == V2DImode)
6746 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6747
6748 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6749 {
6750 if (mode == V4SImode)
6751 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6752 else if (mode == V8HImode)
6753 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6754 else if (mode == V16QImode)
6755 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6756 else if (mode == V4SFmode)
6757 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6758 }
6759
6760 if (insn)
6761 {
6762 emit_insn (insn);
6763 return;
6764 }
6765 }
6766
6767 /* Simplify setting single element vectors like V1TImode. */
6768 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6769 {
6770 emit_move_insn (target, gen_lowpart (mode, val));
6771 return;
6772 }
6773
6774 /* Load single variable value. */
6775 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6776 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6777 x = gen_rtx_UNSPEC (VOIDmode,
6778 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6779 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6780 gen_rtvec (2,
6781 gen_rtx_SET (reg, mem),
6782 x)));
6783
6784 /* Linear sequence. */
6785 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6786 for (i = 0; i < 16; ++i)
6787 XVECEXP (mask, 0, i) = GEN_INT (i);
6788
6789 /* Set permute mask to insert element into target. */
6790 for (i = 0; i < width; ++i)
6791 XVECEXP (mask, 0, elt*width + i)
6792 = GEN_INT (i + 0x10);
6793 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6794
6795 if (BYTES_BIG_ENDIAN)
6796 x = gen_rtx_UNSPEC (mode,
6797 gen_rtvec (3, target, reg,
6798 force_reg (V16QImode, x)),
6799 UNSPEC_VPERM);
6800 else
6801 {
6802 if (TARGET_P9_VECTOR)
6803 x = gen_rtx_UNSPEC (mode,
6804 gen_rtvec (3, reg, target,
6805 force_reg (V16QImode, x)),
6806 UNSPEC_VPERMR);
6807 else
6808 {
6809 /* Invert selector. We prefer to generate VNAND on P8 so
6810 that future fusion opportunities can kick in, but must
6811 generate VNOR elsewhere. */
6812 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6813 rtx iorx = (TARGET_P8_VECTOR
6814 ? gen_rtx_IOR (V16QImode, notx, notx)
6815 : gen_rtx_AND (V16QImode, notx, notx));
6816 rtx tmp = gen_reg_rtx (V16QImode);
6817 emit_insn (gen_rtx_SET (tmp, iorx));
6818
6819 /* Permute with operands reversed and adjusted selector. */
6820 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6821 UNSPEC_VPERM);
6822 }
6823 }
6824
6825 emit_insn (gen_rtx_SET (target, x));
6826 }
6827
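/* A sketch of the selector built above, assuming V4SImode, ELT == 1 and
   big endian: WIDTH is 4, so MASK becomes

     { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 12, 13, 14, 15 }

   and the vperm pulls bytes 16..19 (the first word of REG, which holds
   VAL) into element 1 of TARGET while leaving the other elements
   unchanged.  */
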
6828 /* Extract field ELT from VEC into TARGET. */
6829
6830 void
6831 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6832 {
6833 machine_mode mode = GET_MODE (vec);
6834 machine_mode inner_mode = GET_MODE_INNER (mode);
6835 rtx mem;
6836
6837 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6838 {
6839 switch (mode)
6840 {
6841 default:
6842 break;
6843 case E_V1TImode:
6844 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
6845 emit_move_insn (target, gen_lowpart (TImode, vec));
6846 break;
6847 case E_V2DFmode:
6848 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6849 return;
6850 case E_V2DImode:
6851 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6852 return;
6853 case E_V4SFmode:
6854 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6855 return;
6856 case E_V16QImode:
6857 if (TARGET_DIRECT_MOVE_64BIT)
6858 {
6859 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6860 return;
6861 }
6862 else
6863 break;
6864 case E_V8HImode:
6865 if (TARGET_DIRECT_MOVE_64BIT)
6866 {
6867 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6868 return;
6869 }
6870 else
6871 break;
6872 case E_V4SImode:
6873 if (TARGET_DIRECT_MOVE_64BIT)
6874 {
6875 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6876 return;
6877 }
6878 break;
6879 }
6880 }
6881 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6882 && TARGET_DIRECT_MOVE_64BIT)
6883 {
6884 if (GET_MODE (elt) != DImode)
6885 {
6886 rtx tmp = gen_reg_rtx (DImode);
6887 convert_move (tmp, elt, 0);
6888 elt = tmp;
6889 }
6890 else if (!REG_P (elt))
6891 elt = force_reg (DImode, elt);
6892
6893 switch (mode)
6894 {
6895 case E_V2DFmode:
6896 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6897 return;
6898
6899 case E_V2DImode:
6900 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6901 return;
6902
6903 case E_V4SFmode:
6904 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6905 return;
6906
6907 case E_V4SImode:
6908 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6909 return;
6910
6911 case E_V8HImode:
6912 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6913 return;
6914
6915 case E_V16QImode:
6916 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6917 return;
6918
6919 default:
6920 gcc_unreachable ();
6921 }
6922 }
6923
6924 gcc_assert (CONST_INT_P (elt));
6925
6926 /* Allocate mode-sized buffer. */
6927 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6928
6929 emit_move_insn (mem, vec);
6930
6931 /* Add offset to field within buffer matching vector element. */
6932 mem = adjust_address_nv (mem, inner_mode,
6933 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
6934
6935 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6936 }
6937
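/* A sketch of the memory fall-back above, assuming V4SImode and ELT ==
   (const_int 2): the vector is spilled to a 16-byte stack temporary,
   and the SImode word at byte offset 2 * 4 == 8 is loaded back into
   TARGET.  */
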
6938 /* Helper function to return the register number of an RTX.  */
6939 static inline int
6940 regno_or_subregno (rtx op)
6941 {
6942 if (REG_P (op))
6943 return REGNO (op);
6944 else if (SUBREG_P (op))
6945 return subreg_regno (op);
6946 else
6947 gcc_unreachable ();
6948 }
6949
6950 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6951 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6952 temporary (BASE_TMP) to fix up the address.  Return the new memory address
6953 that is valid for reads or writes to a given register (SCALAR_REG). */
6954
6955 rtx
6956 rs6000_adjust_vec_address (rtx scalar_reg,
6957 rtx mem,
6958 rtx element,
6959 rtx base_tmp,
6960 machine_mode scalar_mode)
6961 {
6962 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
6963 rtx addr = XEXP (mem, 0);
6964 rtx element_offset;
6965 rtx new_addr;
6966 bool valid_addr_p;
6967
6968 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
6969 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
6970
6971 /* Calculate what we need to add to the address to get the element
6972 address. */
6973 if (CONST_INT_P (element))
6974 element_offset = GEN_INT (INTVAL (element) * scalar_size);
6975 else
6976 {
6977 int byte_shift = exact_log2 (scalar_size);
6978 gcc_assert (byte_shift >= 0);
6979
6980 if (byte_shift == 0)
6981 element_offset = element;
6982
6983 else
6984 {
6985 if (TARGET_POWERPC64)
6986 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
6987 else
6988 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
6989
6990 element_offset = base_tmp;
6991 }
6992 }
6993
6994 /* Create the new address pointing to the element within the vector. If we
6995 are adding 0, we don't have to change the address. */
6996 if (element_offset == const0_rtx)
6997 new_addr = addr;
6998
6999 /* A simple indirect address can be converted into a reg + offset
7000 address. */
7001 else if (REG_P (addr) || SUBREG_P (addr))
7002 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7003
7004 /* Optimize D-FORM addresses with a constant offset and a constant element
7005 number, folding the element offset into the address directly. */
7006 else if (GET_CODE (addr) == PLUS)
7007 {
7008 rtx op0 = XEXP (addr, 0);
7009 rtx op1 = XEXP (addr, 1);
7010 rtx insn;
7011
7012 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7013 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7014 {
7015 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7016 rtx offset_rtx = GEN_INT (offset);
7017
7018 if (IN_RANGE (offset, -32768, 32767)
7019 && (scalar_size < 8 || (offset & 0x3) == 0))
7020 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7021 else
7022 {
7023 emit_move_insn (base_tmp, offset_rtx);
7024 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7025 }
7026 }
7027 else
7028 {
7029 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7030 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7031
7032 /* Note that ADDI requires the register being added to be a base
7033 register. If the register is R0, load it into the temporary
7034 and do the add. */
7035 if (op1_reg_p
7036 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7037 {
7038 insn = gen_add3_insn (base_tmp, op1, element_offset);
7039 gcc_assert (insn != NULL_RTX);
7040 emit_insn (insn);
7041 }
7042
7043 else if (ele_reg_p
7044 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7045 {
7046 insn = gen_add3_insn (base_tmp, element_offset, op1);
7047 gcc_assert (insn != NULL_RTX);
7048 emit_insn (insn);
7049 }
7050
7051 else
7052 {
7053 emit_move_insn (base_tmp, op1);
7054 emit_insn (gen_add2_insn (base_tmp, element_offset));
7055 }
7056
7057 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7058 }
7059 }
7060
7061 else
7062 {
7063 emit_move_insn (base_tmp, addr);
7064 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7065 }
7066
7067 /* If we have a PLUS, we need to see whether the particular register class
7068 allows for D-FORM or X-FORM addressing. */
7069 if (GET_CODE (new_addr) == PLUS)
7070 {
7071 rtx op1 = XEXP (new_addr, 1);
7072 addr_mask_type addr_mask;
7073 int scalar_regno = regno_or_subregno (scalar_reg);
7074
7075 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7076 if (INT_REGNO_P (scalar_regno))
7077 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7078
7079 else if (FP_REGNO_P (scalar_regno))
7080 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7081
7082 else if (ALTIVEC_REGNO_P (scalar_regno))
7083 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7084
7085 else
7086 gcc_unreachable ();
7087
7088 if (REG_P (op1) || SUBREG_P (op1))
7089 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7090 else
7091 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7092 }
7093
7094 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7095 valid_addr_p = true;
7096
7097 else
7098 valid_addr_p = false;
7099
7100 if (!valid_addr_p)
7101 {
7102 emit_move_insn (base_tmp, new_addr);
7103 new_addr = base_tmp;
7104 }
7105
7106 return change_address (mem, scalar_mode, new_addr);
7107 }
7108
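/* A worked example, assuming MEM's address is (plus r9 (const_int 16)),
   ELEMENT is (const_int 3) and SCALAR_MODE is SImode: ELEMENT_OFFSET is
   GEN_INT (12), the two constants fold, and the result is

     (mem:SI (plus r9 (const_int 28)))

   with BASE_TMP left unused since the combined offset fits in the
   16-bit displacement field.  */
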
7109 /* Split a variable vec_extract operation into the component instructions. */
7110
7111 void
7112 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7113 rtx tmp_altivec)
7114 {
7115 machine_mode mode = GET_MODE (src);
7116 machine_mode scalar_mode = GET_MODE (dest);
7117 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7118 int byte_shift = exact_log2 (scalar_size);
7119
7120 gcc_assert (byte_shift >= 0);
7121
7122 /* If we are given a memory address, optimize to load just the element. We
7123 don't have to adjust the vector element number on little endian
7124 systems. */
7125 if (MEM_P (src))
7126 {
7127 gcc_assert (REG_P (tmp_gpr));
7128 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7129 tmp_gpr, scalar_mode));
7130 return;
7131 }
7132
7133 else if (REG_P (src) || SUBREG_P (src))
7134 {
7135 int bit_shift = byte_shift + 3;
7136 rtx element2;
7137 int dest_regno = regno_or_subregno (dest);
7138 int src_regno = regno_or_subregno (src);
7139 int element_regno = regno_or_subregno (element);
7140
7141 gcc_assert (REG_P (tmp_gpr));
7142
7143 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7144 a general purpose register. */
7145 if (TARGET_P9_VECTOR
7146 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7147 && INT_REGNO_P (dest_regno)
7148 && ALTIVEC_REGNO_P (src_regno)
7149 && INT_REGNO_P (element_regno))
7150 {
7151 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7152 rtx element_si = gen_rtx_REG (SImode, element_regno);
7153
7154 if (mode == V16QImode)
7155 emit_insn (BYTES_BIG_ENDIAN
7156 ? gen_vextublx (dest_si, element_si, src)
7157 : gen_vextubrx (dest_si, element_si, src));
7158
7159 else if (mode == V8HImode)
7160 {
7161 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7162 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7163 emit_insn (BYTES_BIG_ENDIAN
7164 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7165 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7166 }
7167
7168
7169 else
7170 {
7171 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7172 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7173 emit_insn (BYTES_BIG_ENDIAN
7174 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7175 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7176 }
7177
7178 return;
7179 }
7180
7181
7182 gcc_assert (REG_P (tmp_altivec));
7183
7184 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7185 an XOR, otherwise we need to subtract. The shift amount is chosen so
7186 that VSLO will shift the element into the upper position (adding 3
7187 converts a byte shift into a bit shift). */
7188 if (scalar_size == 8)
7189 {
7190 if (!BYTES_BIG_ENDIAN)
7191 {
7192 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7193 element2 = tmp_gpr;
7194 }
7195 else
7196 element2 = element;
7197
7198 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7199 bit. */
7200 emit_insn (gen_rtx_SET (tmp_gpr,
7201 gen_rtx_AND (DImode,
7202 gen_rtx_ASHIFT (DImode,
7203 element2,
7204 GEN_INT (6)),
7205 GEN_INT (64))));
7206 }
7207 else
7208 {
7209 if (!BYTES_BIG_ENDIAN)
7210 {
7211 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7212
7213 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7214 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7215 element2 = tmp_gpr;
7216 }
7217 else
7218 element2 = element;
7219
7220 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7221 }
7222
7223 /* Get the value into the lower byte of the Altivec register where VSLO
7224 expects it. */
7225 if (TARGET_P9_VECTOR)
7226 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7227 else if (can_create_pseudo_p ())
7228 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7229 else
7230 {
7231 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7232 emit_move_insn (tmp_di, tmp_gpr);
7233 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7234 }
7235
7236 /* Do the VSLO to get the value into the final location. */
7237 switch (mode)
7238 {
7239 case E_V2DFmode:
7240 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7241 return;
7242
7243 case E_V2DImode:
7244 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7245 return;
7246
7247 case E_V4SFmode:
7248 {
7249 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7250 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7251 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7252 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7253 tmp_altivec));
7254
7255 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7256 return;
7257 }
7258
7259 case E_V4SImode:
7260 case E_V8HImode:
7261 case E_V16QImode:
7262 {
7263 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7264 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7265 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7266 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7267 tmp_altivec));
7268 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7269 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7270 GEN_INT (64 - (8 * scalar_size))));
7271 return;
7272 }
7273
7274 default:
7275 gcc_unreachable ();
7276 }
7277
7278 return;
7279 }
7280 else
7281 gcc_unreachable ();
7282 }
7283
7284 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7285 selects whether the alignment is ABI-mandated, optional, or
7286 both ABI-mandated and optional alignment. */
7287
7288 unsigned int
7289 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7290 {
7291 if (how != align_opt)
7292 {
7293 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7294 align = 128;
7295 }
7296
7297 if (how != align_abi)
7298 {
7299 if (TREE_CODE (type) == ARRAY_TYPE
7300 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7301 {
7302 if (align < BITS_PER_WORD)
7303 align = BITS_PER_WORD;
7304 }
7305 }
7306
7307 return align;
7308 }
7309
7310 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7311 instructions simply ignore the low bits; VSX memory instructions
7312 are aligned to 4 or 8 bytes. */
7313
7314 static bool
7315 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7316 {
7317 return (STRICT_ALIGNMENT
7318 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7319 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7320 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7321 && (int) align < VECTOR_ALIGN (mode)))));
7322 }
7323
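/* For example, assuming !STRICT_ALIGNMENT and
   !TARGET_EFFICIENT_UNALIGNED_VSX: a V4SImode access known to be only
   64-bit aligned is reported slow (64 < VECTOR_ALIGN of 128), as is a
   DFmode access at less than 32-bit alignment.  */
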
7324 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7325
7326 bool
7327 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7328 {
7329 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7330 {
7331 if (computed != 128)
7332 {
7333 static bool warned;
7334 if (!warned && warn_psabi)
7335 {
7336 warned = true;
7337 inform (input_location,
7338 "the layout of aggregates containing vectors with"
7339 " %d-byte alignment has changed in GCC 5",
7340 computed / BITS_PER_UNIT);
7341 }
7342 }
7343 /* In current GCC there is no special case. */
7344 return false;
7345 }
7346
7347 return false;
7348 }
7349
7350 /* AIX increases natural record alignment to doubleword if the first
7351 field is an FP double while the FP fields remain word aligned. */
7352
7353 unsigned int
7354 rs6000_special_round_type_align (tree type, unsigned int computed,
7355 unsigned int specified)
7356 {
7357 unsigned int align = MAX (computed, specified);
7358 tree field = TYPE_FIELDS (type);
7359
7360 /* Skip all non-field decls.  */
7361 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7362 field = DECL_CHAIN (field);
7363
7364 if (field != NULL && field != type)
7365 {
7366 type = TREE_TYPE (field);
7367 while (TREE_CODE (type) == ARRAY_TYPE)
7368 type = TREE_TYPE (type);
7369
7370 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7371 align = MAX (align, 64);
7372 }
7373
7374 return align;
7375 }
7376
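/* A sketch: on AIX

     struct s { double d; int i; };

   is rounded up to 64-bit alignment here because its first field has
   DFmode, whereas

     struct t { int i; double d; };

   keeps its computed word alignment.  */
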
7377 /* Darwin increases record alignment to the natural alignment of
7378 the first field. */
7379
7380 unsigned int
7381 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7382 unsigned int specified)
7383 {
7384 unsigned int align = MAX (computed, specified);
7385
7386 if (TYPE_PACKED (type))
7387 return align;
7388
7389 /* Find the first field, looking down into aggregates. */
7390 do {
7391 tree field = TYPE_FIELDS (type);
7392 /* Skip all non-field decls.  */
7393 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7394 field = DECL_CHAIN (field);
7395 if (! field)
7396 break;
7397 /* A packed field does not contribute any extra alignment. */
7398 if (DECL_PACKED (field))
7399 return align;
7400 type = TREE_TYPE (field);
7401 while (TREE_CODE (type) == ARRAY_TYPE)
7402 type = TREE_TYPE (type);
7403 } while (AGGREGATE_TYPE_P (type));
7404
7405 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7406 align = MAX (align, TYPE_ALIGN (type));
7407
7408 return align;
7409 }
7410
7411 /* Return 1 for an operand in small memory on V.4/eabi. */
7412
7413 int
7414 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7415 machine_mode mode ATTRIBUTE_UNUSED)
7416 {
7417 #if TARGET_ELF
7418 rtx sym_ref;
7419
7420 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7421 return 0;
7422
7423 if (DEFAULT_ABI != ABI_V4)
7424 return 0;
7425
7426 if (GET_CODE (op) == SYMBOL_REF)
7427 sym_ref = op;
7428
7429 else if (GET_CODE (op) != CONST
7430 || GET_CODE (XEXP (op, 0)) != PLUS
7431 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7432 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7433 return 0;
7434
7435 else
7436 {
7437 rtx sum = XEXP (op, 0);
7438 HOST_WIDE_INT summand;
7439
7440 /* We have to be careful here, because it is the referenced address
7441 that must be 32k from _SDA_BASE_, not just the symbol. */
7442 summand = INTVAL (XEXP (sum, 1));
7443 if (summand < 0 || summand > g_switch_value)
7444 return 0;
7445
7446 sym_ref = XEXP (sum, 0);
7447 }
7448
7449 return SYMBOL_REF_SMALL_P (sym_ref);
7450 #else
7451 return 0;
7452 #endif
7453 }
7454
7455 /* Return true if either operand is a general purpose register. */
7456
7457 bool
7458 gpr_or_gpr_p (rtx op0, rtx op1)
7459 {
7460 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7461 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7462 }
7463
7464 /* Return true if this is a move direct operation between GPR registers and
7465 floating point/VSX registers. */
7466
7467 bool
7468 direct_move_p (rtx op0, rtx op1)
7469 {
7470 int regno0, regno1;
7471
7472 if (!REG_P (op0) || !REG_P (op1))
7473 return false;
7474
7475 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7476 return false;
7477
7478 regno0 = REGNO (op0);
7479 regno1 = REGNO (op1);
7480 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7481 return false;
7482
7483 if (INT_REGNO_P (regno0))
7484 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7485
7486 else if (INT_REGNO_P (regno1))
7487 {
7488 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7489 return true;
7490
7491 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7492 return true;
7493 }
7494
7495 return false;
7496 }
7497
7498 /* Return true if the OFFSET is valid for the quad address instructions that
7499 use d-form (register + offset) addressing. */
7500
7501 static inline bool
7502 quad_address_offset_p (HOST_WIDE_INT offset)
7503 {
7504 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7505 }
7506
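/* For example, OFFSET 32752 (0x7ff0) is accepted (in range and 16-byte
   aligned), 32760 fails the low-nibble test, and 32768 fails the range
   test.  */
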
7507 /* Return true if ADDR is an acceptable address for a quad memory
7508 operation of mode MODE (either LQ/STQ for general purpose registers, or
7509 LXV/STXV for vector registers under ISA 3.0). STRICT is true if the
7510 address must satisfy strict RTL checking, i.e. only hard registers are
7511 acceptable as base registers. */
7512
7513 bool
7514 quad_address_p (rtx addr, machine_mode mode, bool strict)
7515 {
7516 rtx op0, op1;
7517
7518 if (GET_MODE_SIZE (mode) != 16)
7519 return false;
7520
7521 if (legitimate_indirect_address_p (addr, strict))
7522 return true;
7523
7524 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7525 return false;
7526
7527 if (GET_CODE (addr) != PLUS)
7528 return false;
7529
7530 op0 = XEXP (addr, 0);
7531 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7532 return false;
7533
7534 op1 = XEXP (addr, 1);
7535 if (!CONST_INT_P (op1))
7536 return false;
7537
7538 return quad_address_offset_p (INTVAL (op1));
7539 }
7540
7541 /* Return true if this is a load or store quad operation. This function does
7542 not handle the atomic quad memory instructions. */
7543
7544 bool
7545 quad_load_store_p (rtx op0, rtx op1)
7546 {
7547 bool ret;
7548
7549 if (!TARGET_QUAD_MEMORY)
7550 ret = false;
7551
7552 else if (REG_P (op0) && MEM_P (op1))
7553 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7554 && quad_memory_operand (op1, GET_MODE (op1))
7555 && !reg_overlap_mentioned_p (op0, op1));
7556
7557 else if (MEM_P (op0) && REG_P (op1))
7558 ret = (quad_memory_operand (op0, GET_MODE (op0))
7559 && quad_int_reg_operand (op1, GET_MODE (op1)));
7560
7561 else
7562 ret = false;
7563
7564 if (TARGET_DEBUG_ADDR)
7565 {
7566 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7567 ret ? "true" : "false");
7568 debug_rtx (gen_rtx_SET (op0, op1));
7569 }
7570
7571 return ret;
7572 }
7573
7574 /* Given an address, return a constant offset term if one exists. */
7575
7576 static rtx
7577 address_offset (rtx op)
7578 {
7579 if (GET_CODE (op) == PRE_INC
7580 || GET_CODE (op) == PRE_DEC)
7581 op = XEXP (op, 0);
7582 else if (GET_CODE (op) == PRE_MODIFY
7583 || GET_CODE (op) == LO_SUM)
7584 op = XEXP (op, 1);
7585
7586 if (GET_CODE (op) == CONST)
7587 op = XEXP (op, 0);
7588
7589 if (GET_CODE (op) == PLUS)
7590 op = XEXP (op, 1);
7591
7592 if (CONST_INT_P (op))
7593 return op;
7594
7595 return NULL_RTX;
7596 }
7597
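/* For example, (plus (reg r3) (const_int 24)) yields (const_int 24);
   (lo_sum (reg r9) (const (plus (symbol_ref s) (const_int 8)))) yields
   (const_int 8); a bare (reg r3) yields NULL_RTX.  */
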
7598 /* Return true if the MEM operand is a memory operand suitable for use
7599 with a (full width, possibly multiple) gpr load/store. On
7600 powerpc64 this means the offset must be divisible by 4.
7601 Implements 'Y' constraint.
7602
7603 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7604 a constraint function we know the operand has satisfied a suitable
7605 memory predicate. Also accept some odd rtl generated by reload
7606 (see rs6000_legitimize_reload_address for various forms). It is
7607 important that reload rtl be accepted by appropriate constraints
7608 but not by the operand predicate.
7609
7610 Offsetting a lo_sum should not be allowed, except where we know by
7611 alignment that a 32k boundary is not crossed, but see the ???
7612 comment in rs6000_legitimize_reload_address. Note that by
7613 "offsetting" here we mean a further offset to access parts of the
7614 MEM. It's fine to have a lo_sum where the inner address is offset
7615 from a sym, since the same sym+offset will appear in the high part
7616 of the address calculation. */
7617
7618 bool
7619 mem_operand_gpr (rtx op, machine_mode mode)
7620 {
7621 unsigned HOST_WIDE_INT offset;
7622 int extra;
7623 rtx addr = XEXP (op, 0);
7624
7625 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7626 if (TARGET_UPDATE
7627 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7628 && mode_supports_pre_incdec_p (mode)
7629 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7630 return true;
7631
7632 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7633 if (!rs6000_offsettable_memref_p (op, mode, false))
7634 return false;
7635
7636 op = address_offset (addr);
7637 if (op == NULL_RTX)
7638 return true;
7639
7640 offset = INTVAL (op);
7641 if (TARGET_POWERPC64 && (offset & 3) != 0)
7642 return false;
7643
7644 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7645 if (extra < 0)
7646 extra = 0;
7647
7648 if (GET_CODE (addr) == LO_SUM)
7649 /* For lo_sum addresses, we must allow any offset except one that
7650 causes a wrap, so test only the low 16 bits. */
7651 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7652
7653 return offset + 0x8000 < 0x10000u - extra;
7654 }
7655
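/* A worked example, assuming powerpc64 and TImode: EXTRA is 16 - 8 == 8,
   so the offset must be a multiple of 4 and satisfy offset + 0x8000 <
   0x10000 - 8, i.e. lie in [-32768, 32756], leaving room for the second
   doubleword access at offset + 8 within the 16-bit displacement.  */
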
7656 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7657 enforce an offset divisible by 4 even for 32-bit. */
7658
7659 bool
7660 mem_operand_ds_form (rtx op, machine_mode mode)
7661 {
7662 unsigned HOST_WIDE_INT offset;
7663 int extra;
7664 rtx addr = XEXP (op, 0);
7665
7666 if (!offsettable_address_p (false, mode, addr))
7667 return false;
7668
7669 op = address_offset (addr);
7670 if (op == NULL_RTX)
7671 return true;
7672
7673 offset = INTVAL (op);
7674 if ((offset & 3) != 0)
7675 return false;
7676
7677 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7678 if (extra < 0)
7679 extra = 0;
7680
7681 if (GET_CODE (addr) == LO_SUM)
7682 /* For lo_sum addresses, we must allow any offset except one that
7683 causes a wrap, so test only the low 16 bits. */
7684 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7685
7686 return offset + 0x8000 < 0x10000u - extra;
7687 }
7688 \f
7689 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7690
7691 static bool
7692 reg_offset_addressing_ok_p (machine_mode mode)
7693 {
7694 switch (mode)
7695 {
7696 case E_V16QImode:
7697 case E_V8HImode:
7698 case E_V4SFmode:
7699 case E_V4SImode:
7700 case E_V2DFmode:
7701 case E_V2DImode:
7702 case E_V1TImode:
7703 case E_TImode:
7704 case E_TFmode:
7705 case E_KFmode:
7706 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7707 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7708 a vector mode, if we want to use the VSX registers to move it around,
7709 we need to restrict ourselves to reg+reg addressing. Similarly for
7710 IEEE 128-bit floating point that is passed in a single vector
7711 register. */
7712 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7713 return mode_supports_dq_form (mode);
7714 break;
7715
7716 case E_SDmode:
7717 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7718 addressing for the LFIWZX and STFIWX instructions. */
7719 if (TARGET_NO_SDMODE_STACK)
7720 return false;
7721 break;
7722
7723 default:
7724 break;
7725 }
7726
7727 return true;
7728 }
7729
7730 static bool
7731 virtual_stack_registers_memory_p (rtx op)
7732 {
7733 int regnum;
7734
7735 if (GET_CODE (op) == REG)
7736 regnum = REGNO (op);
7737
7738 else if (GET_CODE (op) == PLUS
7739 && GET_CODE (XEXP (op, 0)) == REG
7740 && GET_CODE (XEXP (op, 1)) == CONST_INT)
7741 regnum = REGNO (XEXP (op, 0));
7742
7743 else
7744 return false;
7745
7746 return (regnum >= FIRST_VIRTUAL_REGISTER
7747 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7748 }
7749
7750 /* Return true if a MODE-sized memory access to OP plus OFFSET
7751 is known not to straddle a 32k boundary. This function is used
7752 to determine whether -mcmodel=medium code can use TOC pointer
7753 relative addressing for OP. This means the alignment of the TOC
7754 pointer must also be taken into account, and unfortunately that is
7755 only 8 bytes. */
7756
7757 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7758 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7759 #endif
7760
7761 static bool
7762 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7763 machine_mode mode)
7764 {
7765 tree decl;
7766 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7767
7768 if (GET_CODE (op) != SYMBOL_REF)
7769 return false;
7770
7771 /* ISA 3.0 vector d-form addressing is restricted, so don't allow
7772 SYMBOL_REF. */
7773 if (mode_supports_dq_form (mode))
7774 return false;
7775
7776 dsize = GET_MODE_SIZE (mode);
7777 decl = SYMBOL_REF_DECL (op);
7778 if (!decl)
7779 {
7780 if (dsize == 0)
7781 return false;
7782
7783 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7784 replacing memory addresses with an anchor plus offset. We
7785 could find the decl by rummaging around in the block->objects
7786 VEC for the given offset but that seems like too much work. */
7787 dalign = BITS_PER_UNIT;
7788 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7789 && SYMBOL_REF_ANCHOR_P (op)
7790 && SYMBOL_REF_BLOCK (op) != NULL)
7791 {
7792 struct object_block *block = SYMBOL_REF_BLOCK (op);
7793
7794 dalign = block->alignment;
7795 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7796 }
7797 else if (CONSTANT_POOL_ADDRESS_P (op))
7798 {
7799 /* It would be nice to have get_pool_align ()...  */
7800 machine_mode cmode = get_pool_mode (op);
7801
7802 dalign = GET_MODE_ALIGNMENT (cmode);
7803 }
7804 }
7805 else if (DECL_P (decl))
7806 {
7807 dalign = DECL_ALIGN (decl);
7808
7809 if (dsize == 0)
7810 {
7811 /* Allow BLKmode when the entire object is known to not
7812 cross a 32k boundary. */
7813 if (!DECL_SIZE_UNIT (decl))
7814 return false;
7815
7816 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7817 return false;
7818
7819 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7820 if (dsize > 32768)
7821 return false;
7822
7823 dalign /= BITS_PER_UNIT;
7824 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7825 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7826 return dalign >= dsize;
7827 }
7828 }
7829 else
7830 gcc_unreachable ();
7831
7832 /* Find how many bits of the alignment we know for this access. */
7833 dalign /= BITS_PER_UNIT;
7834 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7835 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7836 mask = dalign - 1;
7837 lsb = offset & -offset;
7838 mask &= lsb - 1;
7839 dalign = mask + 1;
7840
7841 return dalign >= dsize;
7842 }
7843
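/* A worked example of the mask computation above, assuming dalign == 8
   (the TOC pointer alignment) and offset == 20:

     mask = 8 - 1;        => 7, bits guaranteed by the alignment
     lsb  = 20 & -20;     => 4, lowest set bit of the offset
     mask &= lsb - 1;     => 7 & 3 == 3
     dalign = mask + 1;   => 4

   so only 4 bytes of alignment are known for the access, and an 8-byte
   access (dsize == 8) would be rejected.  */
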
7844 static bool
7845 constant_pool_expr_p (rtx op)
7846 {
7847 rtx base, offset;
7848
7849 split_const (op, &base, &offset);
7850 return (GET_CODE (base) == SYMBOL_REF
7851 && CONSTANT_POOL_ADDRESS_P (base)
7852 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7853 }
7854
7855 /* These are only used to pass through from print_operand/print_operand_address
7856 to rs6000_output_addr_const_extra over the intervening function
7857 output_addr_const which is not target code. */
7858 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7859
7860 /* Return true if OP is a toc pointer relative address (the output
7861 of create_TOC_reference). If STRICT, do not match non-split
7862 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7863 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7864 TOCREL_OFFSET_RET respectively. */
7865
7866 bool
7867 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7868 const_rtx *tocrel_offset_ret)
7869 {
7870 if (!TARGET_TOC)
7871 return false;
7872
7873 if (TARGET_CMODEL != CMODEL_SMALL)
7874 {
7875 /* When strict, ensure we have everything tidy. */
7876 if (strict
7877 && !(GET_CODE (op) == LO_SUM
7878 && REG_P (XEXP (op, 0))
7879 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7880 return false;
7881
7882 /* When not strict, allow non-split TOC addresses and also allow
7883 (lo_sum (high ..)) TOC addresses created during reload. */
7884 if (GET_CODE (op) == LO_SUM)
7885 op = XEXP (op, 1);
7886 }
7887
7888 const_rtx tocrel_base = op;
7889 const_rtx tocrel_offset = const0_rtx;
7890
7891 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7892 {
7893 tocrel_base = XEXP (op, 0);
7894 tocrel_offset = XEXP (op, 1);
7895 }
7896
7897 if (tocrel_base_ret)
7898 *tocrel_base_ret = tocrel_base;
7899 if (tocrel_offset_ret)
7900 *tocrel_offset_ret = tocrel_offset;
7901
7902 return (GET_CODE (tocrel_base) == UNSPEC
7903 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7904 && REG_P (XVECEXP (tocrel_base, 0, 1))
7905 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7906 }
7907
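/* The shape matched here (a sketch, assuming 64-bit and -mcmodel=small)
   is

     (plus (unspec:DI [(symbol_ref) (reg 2)] UNSPEC_TOCREL)
           (const_int 8))

   with the UNSPEC placed in *TOCREL_BASE_RET and the (const_int 8) in
   *TOCREL_OFFSET_RET; a bare UNSPEC leaves the offset as const0_rtx.  */
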
7908 /* Return true if X is a constant pool address, and also for cmodel=medium
7909 if X is a toc-relative address known to be offsettable within MODE. */
7910
7911 bool
7912 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7913 bool strict)
7914 {
7915 const_rtx tocrel_base, tocrel_offset;
7916 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7917 && (TARGET_CMODEL != CMODEL_MEDIUM
7918 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7919 || mode == QImode
7920 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7921 INTVAL (tocrel_offset), mode)));
7922 }
7923
7924 static bool
7925 legitimate_small_data_p (machine_mode mode, rtx x)
7926 {
7927 return (DEFAULT_ABI == ABI_V4
7928 && !flag_pic && !TARGET_TOC
7929 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
7930 && small_data_operand (x, mode));
7931 }
7932
7933 bool
7934 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7935 bool strict, bool worst_case)
7936 {
7937 unsigned HOST_WIDE_INT offset;
7938 unsigned int extra;
7939
7940 if (GET_CODE (x) != PLUS)
7941 return false;
7942 if (!REG_P (XEXP (x, 0)))
7943 return false;
7944 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7945 return false;
7946 if (mode_supports_dq_form (mode))
7947 return quad_address_p (x, mode, strict);
7948 if (!reg_offset_addressing_ok_p (mode))
7949 return virtual_stack_registers_memory_p (x);
7950 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7951 return true;
7952 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
7953 return false;
7954
7955 offset = INTVAL (XEXP (x, 1));
7956 extra = 0;
7957 switch (mode)
7958 {
7959 case E_DFmode:
7960 case E_DDmode:
7961 case E_DImode:
7962 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7963 addressing. */
7964 if (VECTOR_MEM_VSX_P (mode))
7965 return false;
7966
7967 if (!worst_case)
7968 break;
7969 if (!TARGET_POWERPC64)
7970 extra = 4;
7971 else if (offset & 3)
7972 return false;
7973 break;
7974
7975 case E_TFmode:
7976 case E_IFmode:
7977 case E_KFmode:
7978 case E_TDmode:
7979 case E_TImode:
7980 case E_PTImode:
7981 extra = 8;
7982 if (!worst_case)
7983 break;
7984 if (!TARGET_POWERPC64)
7985 extra = 12;
7986 else if (offset & 3)
7987 return false;
7988 break;
7989
7990 default:
7991 break;
7992 }
7993
7994 offset += 0x8000;
7995 return offset < 0x10000 - extra;
7996 }
7997
7998 bool
7999 legitimate_indexed_address_p (rtx x, int strict)
8000 {
8001 rtx op0, op1;
8002
8003 if (GET_CODE (x) != PLUS)
8004 return false;
8005
8006 op0 = XEXP (x, 0);
8007 op1 = XEXP (x, 1);
8008
8009 return (REG_P (op0) && REG_P (op1)
8010 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8011 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8012 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8013 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8014 }
8015
8016 bool
8017 avoiding_indexed_address_p (machine_mode mode)
8018 {
8019 /* Avoid indexed addressing for modes that have non-indexed
8020 load/store instruction forms. */
8021 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8022 }
8023
8024 bool
8025 legitimate_indirect_address_p (rtx x, int strict)
8026 {
8027 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8028 }
8029
8030 bool
8031 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8032 {
8033 if (!TARGET_MACHO || !flag_pic
8034 || mode != SImode || GET_CODE (x) != MEM)
8035 return false;
8036 x = XEXP (x, 0);
8037
8038 if (GET_CODE (x) != LO_SUM)
8039 return false;
8040 if (GET_CODE (XEXP (x, 0)) != REG)
8041 return false;
8042 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8043 return false;
8044 x = XEXP (x, 1);
8045
8046 return CONSTANT_P (x);
8047 }
8048
8049 static bool
8050 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8051 {
8052 if (GET_CODE (x) != LO_SUM)
8053 return false;
8054 if (GET_CODE (XEXP (x, 0)) != REG)
8055 return false;
8056 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8057 return false;
8058 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8059 if (mode_supports_dq_form (mode))
8060 return false;
8061 x = XEXP (x, 1);
8062
8063 if (TARGET_ELF || TARGET_MACHO)
8064 {
8065 bool large_toc_ok;
8066
8067 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8068 return false;
8069 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8070 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8071 recognizes some LO_SUM addresses as valid although this
8072 function says the opposite. In most cases LRA can, through its
8073 transformations, generate correct code for address reloads; only
8074 a few LO_SUM cases remain that it cannot manage. So we add code
8075 here, analogous to that in rs6000_legitimize_reload_address for
8076 LO_SUM, saying that some addresses are still valid. */
8077 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8078 && small_toc_ref (x, VOIDmode));
8079 if (TARGET_TOC && ! large_toc_ok)
8080 return false;
8081 if (GET_MODE_NUNITS (mode) != 1)
8082 return false;
8083 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8084 && !(/* ??? Assume floating point reg based on mode? */
8085 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8086 return false;
8087
8088 return CONSTANT_P (x) || large_toc_ok;
8089 }
8090
8091 return false;
8092 }
8093
8094
8095 /* Try machine-dependent ways of modifying an illegitimate address
8096 to be legitimate. If we find one, return the new, valid address.
8097 This is used from only one place: `memory_address' in explow.c.
8098
8099 OLDX is the address as it was before break_out_memory_refs was
8100 called. In some cases it is useful to look at this to decide what
8101 needs to be done.
8102
8103 It is always safe for this function to do nothing. It exists to
8104 recognize opportunities to optimize the output.
8105
8106 On RS/6000, first check for the sum of a register with a constant
8107 integer that is out of range. If so, generate code to add the
8108 constant with the low-order 16 bits masked to the register and force
8109 this result into another register (this can be done with `cau').
8110 Then generate an address of REG+(CONST&0xffff), allowing for the
8111 possibility of bit 16 being a one.
8112
8113 Then check for the sum of a register and something not constant; try to
8114 load the non-constant operand into a register and return the sum. */
8115
8116 static rtx
8117 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8118 machine_mode mode)
8119 {
8120 unsigned int extra;
8121
8122 if (!reg_offset_addressing_ok_p (mode)
8123 || mode_supports_dq_form (mode))
8124 {
8125 if (virtual_stack_registers_memory_p (x))
8126 return x;
8127
8128 /* In theory we should not be seeing addresses of the form reg+0,
8129 but just in case it is generated, optimize it away. */
8130 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8131 return force_reg (Pmode, XEXP (x, 0));
8132
8133 /* For TImode with load/store quad, restrict addresses to just a single
8134 pointer, so it works with both GPRs and VSX registers. */
8135 /* Make sure both operands are registers. */
8136 else if (GET_CODE (x) == PLUS
8137 && (mode != TImode || !TARGET_VSX))
8138 return gen_rtx_PLUS (Pmode,
8139 force_reg (Pmode, XEXP (x, 0)),
8140 force_reg (Pmode, XEXP (x, 1)));
8141 else
8142 return force_reg (Pmode, x);
8143 }
8144 if (GET_CODE (x) == SYMBOL_REF)
8145 {
8146 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8147 if (model != 0)
8148 return rs6000_legitimize_tls_address (x, model);
8149 }
8150
8151 extra = 0;
8152 switch (mode)
8153 {
8154 case E_TFmode:
8155 case E_TDmode:
8156 case E_TImode:
8157 case E_PTImode:
8158 case E_IFmode:
8159 case E_KFmode:
8160 /* As in legitimate_offset_address_p we do not assume
8161 worst-case. The mode here is just a hint as to the registers
8162 used. A TImode is usually in gprs, but may actually be in
8163 fprs. Leave worst-case scenario for reload to handle via
8164 insn constraints. PTImode is only GPRs. */
8165 extra = 8;
8166 break;
8167 default:
8168 break;
8169 }
8170
8171 if (GET_CODE (x) == PLUS
8172 && GET_CODE (XEXP (x, 0)) == REG
8173 && GET_CODE (XEXP (x, 1)) == CONST_INT
8174 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8175 >= 0x10000 - extra))
8176 {
8177 HOST_WIDE_INT high_int, low_int;
8178 rtx sum;
8179 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8180 if (low_int >= 0x8000 - extra)
8181 low_int = 0;
8182 high_int = INTVAL (XEXP (x, 1)) - low_int;
8183 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8184 GEN_INT (high_int)), 0);
8185 return plus_constant (Pmode, sum, low_int);
8186 }
8187 else if (GET_CODE (x) == PLUS
8188 && GET_CODE (XEXP (x, 0)) == REG
8189 && GET_CODE (XEXP (x, 1)) != CONST_INT
8190 && GET_MODE_NUNITS (mode) == 1
8191 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8192 || (/* ??? Assume floating point reg based on mode? */
8193 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8194 && !avoiding_indexed_address_p (mode))
8195 {
8196 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8197 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8198 }
8199 else if ((TARGET_ELF
8200 #if TARGET_MACHO
8201 || !MACHO_DYNAMIC_NO_PIC_P
8202 #endif
8203 )
8204 && TARGET_32BIT
8205 && TARGET_NO_TOC
8206 && ! flag_pic
8207 && GET_CODE (x) != CONST_INT
8208 && GET_CODE (x) != CONST_WIDE_INT
8209 && GET_CODE (x) != CONST_DOUBLE
8210 && CONSTANT_P (x)
8211 && GET_MODE_NUNITS (mode) == 1
8212 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8213 || (/* ??? Assume floating point reg based on mode? */
8214 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8215 {
8216 rtx reg = gen_reg_rtx (Pmode);
8217 if (TARGET_ELF)
8218 emit_insn (gen_elf_high (reg, x));
8219 else
8220 emit_insn (gen_macho_high (reg, x));
8221 return gen_rtx_LO_SUM (Pmode, reg, x);
8222 }
8223 else if (TARGET_TOC
8224 && GET_CODE (x) == SYMBOL_REF
8225 && constant_pool_expr_p (x)
8226 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8227 return create_TOC_reference (x, NULL_RTX);
8228 else
8229 return x;
8230 }
8231
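/* A worked example of the high/low split above, assuming X is
   (plus r3 (const_int 0x12348)) with SImode (EXTRA == 0): LOW_INT is
   ((0x2348 ^ 0x8000) - 0x8000) == 0x2348 and HIGH_INT is 0x10000, so an
   add of 0x10000 is forced into a new register and the returned address
   is (plus new_reg (const_int 0x2348)), a valid 16-bit offset.  */
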
8232 /* Debug version of rs6000_legitimize_address. */
8233 static rtx
8234 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8235 {
8236 rtx ret;
8237 rtx_insn *insns;
8238
8239 start_sequence ();
8240 ret = rs6000_legitimize_address (x, oldx, mode);
8241 insns = get_insns ();
8242 end_sequence ();
8243
8244 if (ret != x)
8245 {
8246 fprintf (stderr,
8247 "\nrs6000_legitimize_address: mode %s, old code %s, "
8248 "new code %s, modified\n",
8249 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8250 GET_RTX_NAME (GET_CODE (ret)));
8251
8252 fprintf (stderr, "Original address:\n");
8253 debug_rtx (x);
8254
8255 fprintf (stderr, "oldx:\n");
8256 debug_rtx (oldx);
8257
8258 fprintf (stderr, "New address:\n");
8259 debug_rtx (ret);
8260
8261 if (insns)
8262 {
8263 fprintf (stderr, "Insns added:\n");
8264 debug_rtx_list (insns, 20);
8265 }
8266 }
8267 else
8268 {
8269 fprintf (stderr,
8270 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8271 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8272
8273 debug_rtx (x);
8274 }
8275
8276 if (insns)
8277 emit_insn (insns);
8278
8279 return ret;
8280 }
8281
8282 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8283 We need to emit DTP-relative relocations. */
8284
8285 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8286 static void
8287 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8288 {
8289 switch (size)
8290 {
8291 case 4:
8292 fputs ("\t.long\t", file);
8293 break;
8294 case 8:
8295 fputs (DOUBLE_INT_ASM_OP, file);
8296 break;
8297 default:
8298 gcc_unreachable ();
8299 }
8300 output_addr_const (file, x);
8301 if (TARGET_ELF)
8302 fputs ("@dtprel+0x8000", file);
8303 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8304 {
8305 switch (SYMBOL_REF_TLS_MODEL (x))
8306 {
8307 case 0:
8308 break;
8309 case TLS_MODEL_LOCAL_EXEC:
8310 fputs ("@le", file);
8311 break;
8312 case TLS_MODEL_INITIAL_EXEC:
8313 fputs ("@ie", file);
8314 break;
8315 case TLS_MODEL_GLOBAL_DYNAMIC:
8316 case TLS_MODEL_LOCAL_DYNAMIC:
8317 fputs ("@m", file);
8318 break;
8319 default:
8320 gcc_unreachable ();
8321 }
8322 }
8323 }
8324
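/* For example, with SIZE == 8 on 64-bit ELF this typically emits

     .quad sym@dtprel+0x8000

   where the 0x8000 bias matches the offset convention used for
   DTP-relative accesses on PowerPC.  */
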
8325 /* Return true if X is a symbol that refers to real (rather than emulated)
8326 TLS. */
8327
8328 static bool
8329 rs6000_real_tls_symbol_ref_p (rtx x)
8330 {
8331 return (GET_CODE (x) == SYMBOL_REF
8332 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8333 }
8334
8335 /* In the name of slightly smaller debug output, and to cater to
8336 general assembler lossage, recognize various UNSPEC sequences
8337 and turn them back into a direct symbol reference. */
8338
8339 static rtx
8340 rs6000_delegitimize_address (rtx orig_x)
8341 {
8342 rtx x, y, offset;
8343
8344 orig_x = delegitimize_mem_from_attrs (orig_x);
8345 x = orig_x;
8346 if (MEM_P (x))
8347 x = XEXP (x, 0);
8348
8349 y = x;
8350 if (TARGET_CMODEL != CMODEL_SMALL
8351 && GET_CODE (y) == LO_SUM)
8352 y = XEXP (y, 1);
8353
8354 offset = NULL_RTX;
8355 if (GET_CODE (y) == PLUS
8356 && GET_MODE (y) == Pmode
8357 && CONST_INT_P (XEXP (y, 1)))
8358 {
8359 offset = XEXP (y, 1);
8360 y = XEXP (y, 0);
8361 }
8362
8363 if (GET_CODE (y) == UNSPEC
8364 && XINT (y, 1) == UNSPEC_TOCREL)
8365 {
8366 y = XVECEXP (y, 0, 0);
8367
8368 #ifdef HAVE_AS_TLS
8369 /* Do not associate thread-local symbols with the original
8370 constant pool symbol. */
8371 if (TARGET_XCOFF
8372 && GET_CODE (y) == SYMBOL_REF
8373 && CONSTANT_POOL_ADDRESS_P (y)
8374 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8375 return orig_x;
8376 #endif
8377
8378 if (offset != NULL_RTX)
8379 y = gen_rtx_PLUS (Pmode, y, offset);
8380 if (!MEM_P (orig_x))
8381 return y;
8382 else
8383 return replace_equiv_address_nv (orig_x, y);
8384 }
8385
8386 if (TARGET_MACHO
8387 && GET_CODE (orig_x) == LO_SUM
8388 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8389 {
8390 y = XEXP (XEXP (orig_x, 1), 0);
8391 if (GET_CODE (y) == UNSPEC
8392 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8393 return XVECEXP (y, 0, 0);
8394 }
8395
8396 return orig_x;
8397 }
8398
8399 /* Return true if X shouldn't be emitted into the debug info.
8400 The linker doesn't like .toc section references from
8401 .debug_* sections, so reject .toc section symbols. */
8402
8403 static bool
8404 rs6000_const_not_ok_for_debug_p (rtx x)
8405 {
8406 if (GET_CODE (x) == UNSPEC)
8407 return true;
8408 if (GET_CODE (x) == SYMBOL_REF
8409 && CONSTANT_POOL_ADDRESS_P (x))
8410 {
8411 rtx c = get_pool_constant (x);
8412 machine_mode cmode = get_pool_mode (x);
8413 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8414 return true;
8415 }
8416
8417 return false;
8418 }
8419
8420
8421 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8422
8423 static bool
8424 rs6000_legitimate_combined_insn (rtx_insn *insn)
8425 {
8426 int icode = INSN_CODE (insn);
8427
8428 /* Reject creating doloop insns. Combine should not be allowed
8429 to create these for a number of reasons:
8430 1) In a nested loop, if combine creates one of these in an
8431 outer loop and the register allocator happens to allocate ctr
8432 to the outer loop insn, then the inner loop can't use ctr.
8433 Inner loops ought to be more highly optimized.
8434 2) Combine often wants to create one of these from what was
8435 originally a three insn sequence, first combining the three
8436 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8437 allocated ctr, the splitter takes us back to the three insn
8438 sequence. It's better to stop combine at the two insn
8439 sequence.
8440 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8441 insns, the register allocator sometimes uses floating point
8442 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8443 jump insn and output reloads are not implemented for jumps,
8444 the ctrsi/ctrdi splitters need to handle all possible cases.
8445 That's a pain, and it gets to be seriously difficult when a
8446 splitter that runs after reload needs memory to transfer from
8447 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8448 for the difficult case. It's better to not create problems
8449 in the first place. */
8450 if (icode != CODE_FOR_nothing
8451 && (icode == CODE_FOR_bdz_si
8452 || icode == CODE_FOR_bdz_di
8453 || icode == CODE_FOR_bdnz_si
8454 || icode == CODE_FOR_bdnz_di
8455 || icode == CODE_FOR_bdztf_si
8456 || icode == CODE_FOR_bdztf_di
8457 || icode == CODE_FOR_bdnztf_si
8458 || icode == CODE_FOR_bdnztf_di))
8459 return false;
8460
8461 return true;
8462 }
8463
8464 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8465
8466 static GTY(()) rtx rs6000_tls_symbol;
8467 static rtx
8468 rs6000_tls_get_addr (void)
8469 {
8470 if (!rs6000_tls_symbol)
8471 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8472
8473 return rs6000_tls_symbol;
8474 }
8475
8476 /* Construct the SYMBOL_REF for TLS GOT references. */
8477
8478 static GTY(()) rtx rs6000_got_symbol;
8479 static rtx
8480 rs6000_got_sym (void)
8481 {
8482 if (!rs6000_got_symbol)
8483 {
8484 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8485 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8486 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8487 }
8488
8489 return rs6000_got_symbol;
8490 }
8491
8492 /* AIX Thread-Local Address support. */
8493
8494 static rtx
8495 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8496 {
8497 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8498 const char *name;
8499 char *tlsname;
8500
8501 name = XSTR (addr, 0);
8502 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
8503 or the symbol will be placed in the TLS private data section. */
8504 if (name[strlen (name) - 1] != ']'
8505 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8506 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8507 {
8508 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8509 strcpy (tlsname, name);
8510 strcat (tlsname,
8511 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8512 tlsaddr = copy_rtx (addr);
8513 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8514 }
8515 else
8516 tlsaddr = addr;
8517
8518 /* Place addr into TOC constant pool. */
8519 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8520
8521 /* Output the TOC entry and create the MEM referencing the value. */
8522 if (constant_pool_expr_p (XEXP (sym, 0))
8523 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8524 {
8525 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8526 mem = gen_const_mem (Pmode, tocref);
8527 set_mem_alias_set (mem, get_TOC_alias_set ());
8528 }
8529 else
8530 return sym;
8531
8532 /* Use global-dynamic for local-dynamic. */
8533 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8534 || model == TLS_MODEL_LOCAL_DYNAMIC)
8535 {
8536 /* Create new TOC reference for @m symbol. */
8537 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8538 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8539 strcpy (tlsname, "*LCM");
8540 strcat (tlsname, name + 3);
8541 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8542 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8543 tocref = create_TOC_reference (modaddr, NULL_RTX);
8544 rtx modmem = gen_const_mem (Pmode, tocref);
8545 set_mem_alias_set (modmem, get_TOC_alias_set ());
8546
8547 rtx modreg = gen_reg_rtx (Pmode);
8548 emit_insn (gen_rtx_SET (modreg, modmem));
8549
8550 tmpreg = gen_reg_rtx (Pmode);
8551 emit_insn (gen_rtx_SET (tmpreg, mem));
8552
8553 dest = gen_reg_rtx (Pmode);
8554 if (TARGET_32BIT)
8555 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8556 else
8557 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8558 return dest;
8559 }
8560 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8561 else if (TARGET_32BIT)
8562 {
8563 tlsreg = gen_reg_rtx (SImode);
8564 emit_insn (gen_tls_get_tpointer (tlsreg));
8565 }
8566 else
8567 tlsreg = gen_rtx_REG (DImode, 13);
8568
8569 /* Load the TOC value into temporary register. */
8570 tmpreg = gen_reg_rtx (Pmode);
8571 emit_insn (gen_rtx_SET (tmpreg, mem));
8572 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8573 gen_rtx_MINUS (Pmode, addr, tlsreg));
8574
8575 /* Add TOC symbol value to TLS pointer. */
8576 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8577
8578 return dest;
8579 }
8580
8581 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8582 this (thread-local) address. */
8583
8584 static rtx
8585 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8586 {
8587 rtx dest, insn;
8588
8589 if (TARGET_XCOFF)
8590 return rs6000_legitimize_tls_address_aix (addr, model);
8591
8592 dest = gen_reg_rtx (Pmode);
8593 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8594 {
8595 rtx tlsreg;
8596
8597 if (TARGET_64BIT)
8598 {
8599 tlsreg = gen_rtx_REG (Pmode, 13);
8600 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8601 }
8602 else
8603 {
8604 tlsreg = gen_rtx_REG (Pmode, 2);
8605 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8606 }
8607 emit_insn (insn);
8608 }
8609 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8610 {
8611 rtx tlsreg, tmp;
8612
8613 tmp = gen_reg_rtx (Pmode);
8614 if (TARGET_64BIT)
8615 {
8616 tlsreg = gen_rtx_REG (Pmode, 13);
8617 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8618 }
8619 else
8620 {
8621 tlsreg = gen_rtx_REG (Pmode, 2);
8622 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8623 }
8624 emit_insn (insn);
8625 if (TARGET_64BIT)
8626 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8627 else
8628 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8629 emit_insn (insn);
8630 }
8631 else
8632 {
8633 rtx r3, got, tga, tmp1, tmp2, call_insn;
8634
8635 /* We currently use relocations like @got@tlsgd for tls, which
8636 means the linker will handle allocation of tls entries, placing
8637 them in the .got section. So use a pointer to the .got section,
8638 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8639 or to secondary GOT sections used by 32-bit -fPIC. */
8640 if (TARGET_64BIT)
8641 got = gen_rtx_REG (Pmode, 2);
8642 else
8643 {
8644 if (flag_pic == 1)
8645 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8646 else
8647 {
8648 rtx gsym = rs6000_got_sym ();
8649 got = gen_reg_rtx (Pmode);
8650 if (flag_pic == 0)
8651 rs6000_emit_move (got, gsym, Pmode);
8652 else
8653 {
8654 rtx mem, lab;
8655
8656 tmp1 = gen_reg_rtx (Pmode);
8657 tmp2 = gen_reg_rtx (Pmode);
8658 mem = gen_const_mem (Pmode, tmp1);
8659 lab = gen_label_rtx ();
8660 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8661 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8662 if (TARGET_LINK_STACK)
8663 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8664 emit_move_insn (tmp2, mem);
8665 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8666 set_unique_reg_note (last, REG_EQUAL, gsym);
8667 }
8668 }
8669 }
8670
8671 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8672 {
8673 tga = rs6000_tls_get_addr ();
8674 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8675 const0_rtx, Pmode);
8676
8677 r3 = gen_rtx_REG (Pmode, 3);
8678 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8679 {
8680 if (TARGET_64BIT)
8681 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
8682 else
8683 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
8684 }
8685 else if (DEFAULT_ABI == ABI_V4)
8686 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
8687 else
8688 gcc_unreachable ();
8689 call_insn = last_call_insn ();
8690 PATTERN (call_insn) = insn;
8691 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8692 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8693 pic_offset_table_rtx);
8694 }
8695 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8696 {
8697 tga = rs6000_tls_get_addr ();
8698 tmp1 = gen_reg_rtx (Pmode);
8699 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8700 const0_rtx, Pmode);
8701
8702 r3 = gen_rtx_REG (Pmode, 3);
8703 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8704 {
8705 if (TARGET_64BIT)
8706 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
8707 else
8708 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
8709 }
8710 else if (DEFAULT_ABI == ABI_V4)
8711 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
8712 else
8713 gcc_unreachable ();
8714 call_insn = last_call_insn ();
8715 PATTERN (call_insn) = insn;
8716 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8717 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8718 pic_offset_table_rtx);
8719
8720 if (rs6000_tls_size == 16)
8721 {
8722 if (TARGET_64BIT)
8723 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8724 else
8725 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8726 }
8727 else if (rs6000_tls_size == 32)
8728 {
8729 tmp2 = gen_reg_rtx (Pmode);
8730 if (TARGET_64BIT)
8731 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8732 else
8733 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8734 emit_insn (insn);
8735 if (TARGET_64BIT)
8736 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8737 else
8738 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8739 }
8740 else
8741 {
8742 tmp2 = gen_reg_rtx (Pmode);
8743 if (TARGET_64BIT)
8744 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8745 else
8746 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8747 emit_insn (insn);
8748 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8749 }
8750 emit_insn (insn);
8751 }
8752 else
8753 {
8754 /* IE, or 64-bit offset LE. */
8755 tmp2 = gen_reg_rtx (Pmode);
8756 if (TARGET_64BIT)
8757 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8758 else
8759 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8760 emit_insn (insn);
8761 if (TARGET_64BIT)
8762 insn = gen_tls_tls_64 (dest, tmp2, addr);
8763 else
8764 insn = gen_tls_tls_32 (dest, tmp2, addr);
8765 emit_insn (insn);
8766 }
8767 }
8768
8769 return dest;
8770 }
8771
8772 /* Only create the global variable for the stack protect guard if we are using
8773 the global flavor of that guard. */
8774 static tree
8775 rs6000_init_stack_protect_guard (void)
8776 {
8777 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8778 return default_stack_protect_guard ();
8779
8780 return NULL_TREE;
8781 }
8782
8783 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8784
8785 static bool
8786 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8787 {
8788 if (GET_CODE (x) == HIGH
8789 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8790 return true;
8791
8792 /* A TLS symbol in the TOC cannot contain a sum. */
8793 if (GET_CODE (x) == CONST
8794 && GET_CODE (XEXP (x, 0)) == PLUS
8795 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8796 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8797 return true;
8798
8799 /* Do not place an ELF TLS symbol in the constant pool. */
8800 return TARGET_ELF && tls_referenced_p (x);
8801 }
8802
8803 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8804 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8805 can be addressed relative to the toc pointer. */
8806
8807 static bool
8808 use_toc_relative_ref (rtx sym, machine_mode mode)
8809 {
8810 return ((constant_pool_expr_p (sym)
8811 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8812 get_pool_mode (sym)))
8813 || (TARGET_CMODEL == CMODEL_MEDIUM
8814 && SYMBOL_REF_LOCAL_P (sym)
8815 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8816 }
8817
8818 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8819 replace the input X, or the original X if no replacement is called for.
8820 The output parameter *WIN is 1 if the calling macro should goto WIN,
8821 0 if it should not.
8822
8823 For RS/6000, we wish to handle large displacements off a base
8824 register by splitting the addend across an addi/addis and the mem insn.
8825 This cuts the number of extra insns needed from 3 to 1.
8826
8827 On Darwin, we use this to generate code for floating point constants.
8828 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8829 The Darwin code is inside #if TARGET_MACHO because only then are the
8830 machopic_* functions defined. */
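/* As a standalone sketch (illustrative names, not part of GCC), the
   signed 16-bit split performed below for a large displacement VAL
   can be written as:  */

#include <assert.h>
#include <stdint.h>

/* Split VAL into HIGH + LOW such that LOW fits in a signed 16-bit
   immediate (addi) and HIGH is a multiple of 0x10000 (addis).  */
static void
split_displacement (int64_t val, int64_t *high, int64_t *low)
{
  *low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  *high = (((val - *low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
}

int
main (void)
{
  int64_t high, low;
  split_displacement (0x12345678, &high, &low);
  assert (high + low == 0x12345678);
  assert (low >= -0x8000 && low < 0x8000);
  return 0;
}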
8831 static rtx
8832 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8833 int opnum, int type,
8834 int ind_levels ATTRIBUTE_UNUSED, int *win)
8835 {
8836 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8837 bool quad_offset_p = mode_supports_dq_form (mode);
8838
8839 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8840 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8841 if (reg_offset_p
8842 && opnum == 1
8843 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8844 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8845 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8846 && TARGET_P9_VECTOR)
8847 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8848 && TARGET_P9_VECTOR)))
8849 reg_offset_p = false;
8850
8851 /* We must recognize output that we have already generated ourselves. */
8852 if (GET_CODE (x) == PLUS
8853 && GET_CODE (XEXP (x, 0)) == PLUS
8854 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8855 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8856 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8857 {
8858 if (TARGET_DEBUG_ADDR)
8859 {
8860 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8861 debug_rtx (x);
8862 }
8863 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8864 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8865 opnum, (enum reload_type) type);
8866 *win = 1;
8867 return x;
8868 }
8869
8870 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8871 if (GET_CODE (x) == LO_SUM
8872 && GET_CODE (XEXP (x, 0)) == HIGH)
8873 {
8874 if (TARGET_DEBUG_ADDR)
8875 {
8876 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8877 debug_rtx (x);
8878 }
8879 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8880 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8881 opnum, (enum reload_type) type);
8882 *win = 1;
8883 return x;
8884 }
8885
8886 #if TARGET_MACHO
8887 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8888 && GET_CODE (x) == LO_SUM
8889 && GET_CODE (XEXP (x, 0)) == PLUS
8890 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8891 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8892 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8893 && machopic_operand_p (XEXP (x, 1)))
8894 {
8895 /* Result of previous invocation of this function on Darwin
8896 floating point constant. */
8897 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8898 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8899 opnum, (enum reload_type) type);
8900 *win = 1;
8901 return x;
8902 }
8903 #endif
8904
8905 if (TARGET_CMODEL != CMODEL_SMALL
8906 && reg_offset_p
8907 && !quad_offset_p
8908 && small_toc_ref (x, VOIDmode))
8909 {
8910 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8911 x = gen_rtx_LO_SUM (Pmode, hi, x);
8912 if (TARGET_DEBUG_ADDR)
8913 {
8914 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8915 debug_rtx (x);
8916 }
8917 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8918 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8919 opnum, (enum reload_type) type);
8920 *win = 1;
8921 return x;
8922 }
8923
8924 if (GET_CODE (x) == PLUS
8925 && REG_P (XEXP (x, 0))
8926 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
8927 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
8928 && CONST_INT_P (XEXP (x, 1))
8929 && reg_offset_p
8930 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
8931 {
8932 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
8933 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
8934 HOST_WIDE_INT high
8935 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8936
8937 /* Check for 32-bit overflow or quad addresses with one of the
8938 four least significant bits set. */
8939 if (high + low != val
8940 || (quad_offset_p && (low & 0xf)))
8941 {
8942 *win = 0;
8943 return x;
8944 }
8945
8946 /* Reload the high part into a base reg; leave the low part
8947 in the mem directly. */
8948
8949 x = gen_rtx_PLUS (GET_MODE (x),
8950 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8951 GEN_INT (high)),
8952 GEN_INT (low));
8953
8954 if (TARGET_DEBUG_ADDR)
8955 {
8956 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
8957 debug_rtx (x);
8958 }
8959 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8960 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8961 opnum, (enum reload_type) type);
8962 *win = 1;
8963 return x;
8964 }
8965
8966 if (GET_CODE (x) == SYMBOL_REF
8967 && reg_offset_p
8968 && !quad_offset_p
8969 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
8970 #if TARGET_MACHO
8971 && DEFAULT_ABI == ABI_DARWIN
8972 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
8973 && machopic_symbol_defined_p (x)
8974 #else
8975 && DEFAULT_ABI == ABI_V4
8976 && !flag_pic
8977 #endif
8978 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
8979 The same goes for DImode without 64-bit gprs and DFmode and DDmode
8980 without fprs.
8981 ??? Assume floating point reg based on mode? This assumption is
8982 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
8983 where reload ends up doing a DFmode load of a constant from
8984 mem using two gprs. Unfortunately, at this point reload
8985 hasn't yet selected regs so poking around in reload data
8986 won't help and even if we could figure out the regs reliably,
8987 we'd still want to allow this transformation when the mem is
8988 naturally aligned. Since we say the address is good here, we
8989 can't disable offsets from LO_SUMs in mem_operand_gpr.
8990 FIXME: Allow offset from lo_sum for other modes too, when
8991 mem is sufficiently aligned.
8992
8993 Also disallow this if the type can go in VMX/Altivec registers, since
8994 those registers do not have d-form (reg+offset) address modes. */
8995 && !reg_addr[mode].scalar_in_vmx_p
8996 && mode != TFmode
8997 && mode != TDmode
8998 && mode != IFmode
8999 && mode != KFmode
9000 && (mode != TImode || !TARGET_VSX)
9001 && mode != PTImode
9002 && (mode != DImode || TARGET_POWERPC64)
9003 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9004 || TARGET_HARD_FLOAT))
9005 {
9006 #if TARGET_MACHO
9007 if (flag_pic)
9008 {
9009 rtx offset = machopic_gen_offset (x);
9010 x = gen_rtx_LO_SUM (GET_MODE (x),
9011 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9012 gen_rtx_HIGH (Pmode, offset)), offset);
9013 }
9014 else
9015 #endif
9016 x = gen_rtx_LO_SUM (GET_MODE (x),
9017 gen_rtx_HIGH (Pmode, x), x);
9018
9019 if (TARGET_DEBUG_ADDR)
9020 {
9021 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9022 debug_rtx (x);
9023 }
9024 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9025 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9026 opnum, (enum reload_type) type);
9027 *win = 1;
9028 return x;
9029 }
9030
9031 /* Reload an offset address wrapped by an AND that represents the
9032 masking of the lower bits. Strip the outer AND and let reload
9033 convert the offset address into an indirect address. For VSX,
9034 force reload to create the address with an AND in a separate
9035 register, because we can't guarantee an altivec register will
9036 be used. */
9037 if (VECTOR_MEM_ALTIVEC_P (mode)
9038 && GET_CODE (x) == AND
9039 && GET_CODE (XEXP (x, 0)) == PLUS
9040 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9041 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9042 && GET_CODE (XEXP (x, 1)) == CONST_INT
9043 && INTVAL (XEXP (x, 1)) == -16)
9044 {
9045 x = XEXP (x, 0);
9046 *win = 1;
9047 return x;
9048 }
9049
9050 if (TARGET_TOC
9051 && reg_offset_p
9052 && !quad_offset_p
9053 && GET_CODE (x) == SYMBOL_REF
9054 && use_toc_relative_ref (x, mode))
9055 {
9056 x = create_TOC_reference (x, NULL_RTX);
9057 if (TARGET_CMODEL != CMODEL_SMALL)
9058 {
9059 if (TARGET_DEBUG_ADDR)
9060 {
9061 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9062 debug_rtx (x);
9063 }
9064 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9065 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9066 opnum, (enum reload_type) type);
9067 }
9068 *win = 1;
9069 return x;
9070 }
9071 *win = 0;
9072 return x;
9073 }
9074
9075 /* Debug version of rs6000_legitimize_reload_address. */
9076 static rtx
9077 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9078 int opnum, int type,
9079 int ind_levels, int *win)
9080 {
9081 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9082 ind_levels, win);
9083 fprintf (stderr,
9084 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9085 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9086 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9087 debug_rtx (x);
9088
9089 if (x == ret)
9090 fprintf (stderr, "Same address returned\n");
9091 else if (!ret)
9092 fprintf (stderr, "NULL returned\n");
9093 else
9094 {
9095 fprintf (stderr, "New address:\n");
9096 debug_rtx (ret);
9097 }
9098
9099 return ret;
9100 }
9101
9102 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9103 that is a valid memory address for an instruction.
9104 The MODE argument is the machine mode for the MEM expression
9105 that wants to use this address.
9106
9107 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9108 refers to a constant pool entry of an address (or the sum of it
9109 plus a constant), a short (16-bit signed) constant plus a register,
9110 the sum of two registers, or a register indirect, possibly with an
9111 auto-increment. For DFmode, DDmode and DImode with a constant plus
9112 register, we must ensure that both words are addressable, or on
9113 PowerPC64 that the offset is word aligned.
9114
9115 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9116 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9117 because adjacent memory cells are accessed by adding word-sized offsets
9118 during assembly output. */
9119 static bool
9120 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9121 {
9122 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9123 bool quad_offset_p = mode_supports_dq_form (mode);
9124
9125 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9126 if (VECTOR_MEM_ALTIVEC_P (mode)
9127 && GET_CODE (x) == AND
9128 && GET_CODE (XEXP (x, 1)) == CONST_INT
9129 && INTVAL (XEXP (x, 1)) == -16)
9130 x = XEXP (x, 0);
9131
9132 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9133 return 0;
9134 if (legitimate_indirect_address_p (x, reg_ok_strict))
9135 return 1;
9136 if (TARGET_UPDATE
9137 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9138 && mode_supports_pre_incdec_p (mode)
9139 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9140 return 1;
9141 /* Handle restricted vector d-form offsets in ISA 3.0. */
9142 if (quad_offset_p)
9143 {
9144 if (quad_address_p (x, mode, reg_ok_strict))
9145 return 1;
9146 }
9147 else if (virtual_stack_registers_memory_p (x))
9148 return 1;
9149
9150 else if (reg_offset_p)
9151 {
9152 if (legitimate_small_data_p (mode, x))
9153 return 1;
9154 if (legitimate_constant_pool_address_p (x, mode,
9155 reg_ok_strict || lra_in_progress))
9156 return 1;
9157 }
9158
9159 /* For TImode, if we have TImode in VSX registers, only allow register
9160 indirect addresses. This will allow the values to go in either GPRs
9161 or VSX registers without reloading. The vector types would tend to
9162 go into VSX registers, so we allow REG+REG, while TImode seems
9163 somewhat split, in that some uses are GPR based, and some VSX based. */
9164 /* FIXME: We could loosen this by changing the following to
9165 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9166 but currently we cannot allow REG+REG addressing for TImode. See
9167 PR72827 for complete details on how this ends up hoodwinking DSE. */
9168 if (mode == TImode && TARGET_VSX)
9169 return 0;
9170 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9171 if (! reg_ok_strict
9172 && reg_offset_p
9173 && GET_CODE (x) == PLUS
9174 && GET_CODE (XEXP (x, 0)) == REG
9175 && (XEXP (x, 0) == virtual_stack_vars_rtx
9176 || XEXP (x, 0) == arg_pointer_rtx)
9177 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9178 return 1;
9179 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9180 return 1;
9181 if (!FLOAT128_2REG_P (mode)
9182 && (TARGET_HARD_FLOAT
9183 || TARGET_POWERPC64
9184 || (mode != DFmode && mode != DDmode))
9185 && (TARGET_POWERPC64 || mode != DImode)
9186 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9187 && mode != PTImode
9188 && !avoiding_indexed_address_p (mode)
9189 && legitimate_indexed_address_p (x, reg_ok_strict))
9190 return 1;
9191 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9192 && mode_supports_pre_modify_p (mode)
9193 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9194 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9195 reg_ok_strict, false)
9196 || (!avoiding_indexed_address_p (mode)
9197 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9198 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9199 return 1;
9200 if (reg_offset_p && !quad_offset_p
9201 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9202 return 1;
9203 return 0;
9204 }
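/* Schematically, the accepted shapes enumerated in the comment above
   look like (operand modes elided):

     (reg)                           ; register indirect
     (pre_inc (reg))                 ; with -mupdate, modes permitting
     (plus (reg) (const_int 16))     ; 16-bit signed displacement
     (plus (reg) (reg))              ; indexed
     (lo_sum (reg) (symbol_ref))     ; TOC / constant-pool reference

   subject to the per-mode restrictions checked above. */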
9205
9206 /* Debug version of rs6000_legitimate_address_p. */
9207 static bool
9208 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9209 bool reg_ok_strict)
9210 {
9211 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9212 fprintf (stderr,
9213 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9214 "strict = %d, reload = %s, code = %s\n",
9215 ret ? "true" : "false",
9216 GET_MODE_NAME (mode),
9217 reg_ok_strict,
9218 (reload_completed ? "after" : "before"),
9219 GET_RTX_NAME (GET_CODE (x)));
9220 debug_rtx (x);
9221
9222 return ret;
9223 }
9224
9225 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9226
9227 static bool
9228 rs6000_mode_dependent_address_p (const_rtx addr,
9229 addr_space_t as ATTRIBUTE_UNUSED)
9230 {
9231 return rs6000_mode_dependent_address_ptr (addr);
9232 }
9233
9234 /* Return true if ADDR (a legitimate address expression)
9235 has an effect that depends on the machine mode it is used for.
9236
9237 On the RS/6000 this is true of all integral offsets (since AltiVec
9238 and VSX modes don't allow them) or is a pre-increment or decrement.
9239
9240 ??? Except that due to conceptual problems in offsettable_address_p
9241 we can't really report the problems of integral offsets. So leave
9242 this assuming that the adjustable offset must be valid for the
9243 sub-words of a TFmode operand, which is what we had before. */
9244
9245 static bool
9246 rs6000_mode_dependent_address (const_rtx addr)
9247 {
9248 switch (GET_CODE (addr))
9249 {
9250 case PLUS:
9251 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9252 is considered a legitimate address before reload, so there
9253 are no offset restrictions in that case. Note that this
9254 condition is safe in strict mode because any address involving
9255 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9256 been rejected as illegitimate. */
9257 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9258 && XEXP (addr, 0) != arg_pointer_rtx
9259 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9260 {
9261 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9262 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9263 }
9264 break;
9265
9266 case LO_SUM:
9267 /* Anything in the constant pool is sufficiently aligned that
9268 all bytes have the same high part address. */
9269 return !legitimate_constant_pool_address_p (addr, QImode, false);
9270
9271 /* Auto-increment cases are now treated generically in recog.c. */
9272 case PRE_MODIFY:
9273 return TARGET_UPDATE;
9274
9275 /* AND is only allowed in Altivec loads. */
9276 case AND:
9277 return true;
9278
9279 default:
9280 break;
9281 }
9282
9283 return false;
9284 }
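/* Concretely, the PLUS case assumes the widest access is 16 bytes, so
   the last word of the value sits at offset + 8 (64-bit) or + 12
   (32-bit); an offset is mode-independent only if both ends fit a
   signed 16-bit displacement.  A standalone sketch of that bound
   (hypothetical helper name):  */

#include <assert.h>
#include <stdint.h>

static int
offset_is_mode_dependent (uint64_t val, int powerpc64)
{
  return val + 0x8000 >= 0x10000 - (powerpc64 ? 8 : 12);
}

int
main (void)
{
  assert (!offset_is_mode_dependent (0x7ff0, 1));         /* 0x7ff0 + 8 fits */
  assert (offset_is_mode_dependent (0x7ff8, 1));          /* 0x7ff8 + 8 does not */
  assert (!offset_is_mode_dependent ((uint64_t) -16, 1)); /* negative offsets fit */
  return 0;
}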
9285
9286 /* Debug version of rs6000_mode_dependent_address. */
9287 static bool
9288 rs6000_debug_mode_dependent_address (const_rtx addr)
9289 {
9290 bool ret = rs6000_mode_dependent_address (addr);
9291
9292 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9293 ret ? "true" : "false");
9294 debug_rtx (addr);
9295
9296 return ret;
9297 }
9298
9299 /* Implement FIND_BASE_TERM. */
9300
9301 rtx
9302 rs6000_find_base_term (rtx op)
9303 {
9304 rtx base;
9305
9306 base = op;
9307 if (GET_CODE (base) == CONST)
9308 base = XEXP (base, 0);
9309 if (GET_CODE (base) == PLUS)
9310 base = XEXP (base, 0);
9311 if (GET_CODE (base) == UNSPEC)
9312 switch (XINT (base, 1))
9313 {
9314 case UNSPEC_TOCREL:
9315 case UNSPEC_MACHOPIC_OFFSET:
9316 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9317 for aliasing purposes. */
9318 return XVECEXP (base, 0, 0);
9319 }
9320
9321 return op;
9322 }
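/* For example (schematically; remaining unspec operands elided), a
   toc-relative reference such as

     (const:DI (plus:DI (unspec:DI [(symbol_ref:DI ("x")) ...]
                                   UNSPEC_TOCREL)
                        (const_int 8)))

   is peeled down to the SYMBOL_REF for "x", which then serves as the
   base term for alias analysis. */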
9323
9324 /* More elaborate version of recog's offsettable_memref_p predicate
9325 that works around the ??? note of rs6000_mode_dependent_address.
9326 In particular it accepts
9327
9328 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9329
9330 in 32-bit mode, that the recog predicate rejects. */
9331
9332 static bool
9333 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9334 {
9335 bool worst_case;
9336
9337 if (!MEM_P (op))
9338 return false;
9339
9340 /* First mimic offsettable_memref_p. */
9341 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9342 return true;
9343
9344 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9345 the latter predicate knows nothing about the mode of the memory
9346 reference and, therefore, assumes that it is the largest supported
9347 mode (TFmode). As a consequence, legitimate offsettable memory
9348 references are rejected. rs6000_legitimate_offset_address_p contains
9349 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9350 at least with a little bit of help here given that we know the
9351 actual registers used. */
9352 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9353 || GET_MODE_SIZE (reg_mode) == 4);
9354 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9355 strict, worst_case);
9356 }
9357
9358 /* Determine the reassociation width to be used in reassociate_bb.
9359 This takes into account how many parallel operations we
9360 can actually do of a given type, and also the latency.
9361 P8:
9362 int add/sub 6/cycle
9363 mul 2/cycle
9364 vect add/sub/mul 2/cycle
9365 fp add/sub/mul 2/cycle
9366 dfp 1/cycle
9367 */
9368
9369 static int
9370 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9371 machine_mode mode)
9372 {
9373 switch (rs6000_tune)
9374 {
9375 case PROCESSOR_POWER8:
9376 case PROCESSOR_POWER9:
9377 if (DECIMAL_FLOAT_MODE_P (mode))
9378 return 1;
9379 if (VECTOR_MODE_P (mode))
9380 return 4;
9381 if (INTEGRAL_MODE_P (mode))
9382 return 1;
9383 if (FLOAT_MODE_P (mode))
9384 return 4;
9385 break;
9386 default:
9387 break;
9388 }
9389 return 1;
9390 }
9391
9392 /* Change register usage conditional on target flags. */
9393 static void
9394 rs6000_conditional_register_usage (void)
9395 {
9396 int i;
9397
9398 if (TARGET_DEBUG_TARGET)
9399 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9400
9401 /* Set MQ register fixed (already call_used) so that it will not be
9402 allocated. */
9403 fixed_regs[64] = 1;
9404
9405 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9406 if (TARGET_64BIT)
9407 fixed_regs[13] = call_used_regs[13]
9408 = call_really_used_regs[13] = 1;
9409
9410 /* Conditionally disable FPRs. */
9411 if (TARGET_SOFT_FLOAT)
9412 for (i = 32; i < 64; i++)
9413 fixed_regs[i] = call_used_regs[i]
9414 = call_really_used_regs[i] = 1;
9415
9416 /* The TOC register is not killed across calls in a way that is
9417 visible to the compiler. */
9418 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9419 call_really_used_regs[2] = 0;
9420
9421 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9422 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9423
9424 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9425 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9426 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9427 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9428
9429 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9430 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9431 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9432 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9433
9434 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9435 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9436 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9437
9438 if (!TARGET_ALTIVEC && !TARGET_VSX)
9439 {
9440 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9441 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9442 call_really_used_regs[VRSAVE_REGNO] = 1;
9443 }
9444
9445 if (TARGET_ALTIVEC || TARGET_VSX)
9446 global_regs[VSCR_REGNO] = 1;
9447
9448 if (TARGET_ALTIVEC_ABI)
9449 {
9450 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9451 call_used_regs[i] = call_really_used_regs[i] = 1;
9452
9453 /* AIX reserves VR20:31 in non-extended ABI mode. */
9454 if (TARGET_XCOFF)
9455 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9456 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9457 }
9458 }
9459
9460 \f
9461 /* Output insns to set DEST equal to the constant SOURCE as a series of
9462 lis, ori and shl instructions and return TRUE. */
9463
9464 bool
9465 rs6000_emit_set_const (rtx dest, rtx source)
9466 {
9467 machine_mode mode = GET_MODE (dest);
9468 rtx temp, set;
9469 rtx_insn *insn;
9470 HOST_WIDE_INT c;
9471
9472 gcc_checking_assert (CONST_INT_P (source));
9473 c = INTVAL (source);
9474 switch (mode)
9475 {
9476 case E_QImode:
9477 case E_HImode:
9478 emit_insn (gen_rtx_SET (dest, source));
9479 return true;
9480
9481 case E_SImode:
9482 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9483
9484 emit_insn (gen_rtx_SET (copy_rtx (temp),
9485 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9486 emit_insn (gen_rtx_SET (dest,
9487 gen_rtx_IOR (SImode, copy_rtx (temp),
9488 GEN_INT (c & 0xffff))));
9489 break;
9490
9491 case E_DImode:
9492 if (!TARGET_POWERPC64)
9493 {
9494 rtx hi, lo;
9495
9496 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9497 DImode);
9498 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9499 DImode);
9500 emit_move_insn (hi, GEN_INT (c >> 32));
9501 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9502 emit_move_insn (lo, GEN_INT (c));
9503 }
9504 else
9505 rs6000_emit_set_long_const (dest, c);
9506 break;
9507
9508 default:
9509 gcc_unreachable ();
9510 }
9511
9512 insn = get_last_insn ();
9513 set = single_set (insn);
9514 if (! CONSTANT_P (SET_SRC (set)))
9515 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9516
9517 return true;
9518 }
9519
9520 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9521 Output insns to set DEST equal to the constant C as a series of
9522 lis, ori and shl instructions. */
9523
9524 static void
9525 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9526 {
9527 rtx temp;
9528 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9529
9530 ud1 = c & 0xffff;
9531 c = c >> 16;
9532 ud2 = c & 0xffff;
9533 c = c >> 16;
9534 ud3 = c & 0xffff;
9535 c = c >> 16;
9536 ud4 = c & 0xffff;
9537
9538 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9539 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9540 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9541
9542 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9543 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9544 {
9545 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9546
9547 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9548 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9549 if (ud1 != 0)
9550 emit_move_insn (dest,
9551 gen_rtx_IOR (DImode, copy_rtx (temp),
9552 GEN_INT (ud1)));
9553 }
9554 else if (ud3 == 0 && ud4 == 0)
9555 {
9556 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9557
9558 gcc_assert (ud2 & 0x8000);
9559 emit_move_insn (copy_rtx (temp),
9560 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9561 if (ud1 != 0)
9562 emit_move_insn (copy_rtx (temp),
9563 gen_rtx_IOR (DImode, copy_rtx (temp),
9564 GEN_INT (ud1)));
9565 emit_move_insn (dest,
9566 gen_rtx_ZERO_EXTEND (DImode,
9567 gen_lowpart (SImode,
9568 copy_rtx (temp))));
9569 }
9570 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9571 || (ud4 == 0 && ! (ud3 & 0x8000)))
9572 {
9573 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9574
9575 emit_move_insn (copy_rtx (temp),
9576 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9577 if (ud2 != 0)
9578 emit_move_insn (copy_rtx (temp),
9579 gen_rtx_IOR (DImode, copy_rtx (temp),
9580 GEN_INT (ud2)));
9581 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9582 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9583 GEN_INT (16)));
9584 if (ud1 != 0)
9585 emit_move_insn (dest,
9586 gen_rtx_IOR (DImode, copy_rtx (temp),
9587 GEN_INT (ud1)));
9588 }
9589 else
9590 {
9591 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9592
9593 emit_move_insn (copy_rtx (temp),
9594 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9595 if (ud3 != 0)
9596 emit_move_insn (copy_rtx (temp),
9597 gen_rtx_IOR (DImode, copy_rtx (temp),
9598 GEN_INT (ud3)));
9599
9600 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9601 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9602 GEN_INT (32)));
9603 if (ud2 != 0)
9604 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9605 gen_rtx_IOR (DImode, copy_rtx (temp),
9606 GEN_INT (ud2 << 16)));
9607 if (ud1 != 0)
9608 emit_move_insn (dest,
9609 gen_rtx_IOR (DImode, copy_rtx (temp),
9610 GEN_INT (ud1)));
9611 }
9612 }
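/* For a constant with all four halfwords significant, the final arm
   amounts to the classic five-instruction lis/ori/sldi/oris/ori
   sequence.  A standalone C sketch of that decomposition for one
   sample value (a positive high halfword is chosen so the lis
   sign-extension is a no-op):  */

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int64_t r;
  r = (int32_t) (0x1234 << 16);   /* lis  r,0x1234   */
  r |= 0x5678;                    /* ori  r,r,0x5678 */
  r <<= 32;                       /* sldi r,r,32     */
  r |= (int64_t) 0x9abc << 16;    /* oris r,r,0x9abc */
  r |= 0xdef0;                    /* ori  r,r,0xdef0 */
  assert (r == 0x123456789abcdef0LL);
  return 0;
}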
9613
9614 /* Helper for the following. Get rid of [r+r] memory refs
9615 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9616
9617 static void
9618 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9619 {
9620 if (GET_CODE (operands[0]) == MEM
9621 && GET_CODE (XEXP (operands[0], 0)) != REG
9622 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9623 GET_MODE (operands[0]), false))
9624 operands[0]
9625 = replace_equiv_address (operands[0],
9626 copy_addr_to_reg (XEXP (operands[0], 0)));
9627
9628 if (GET_CODE (operands[1]) == MEM
9629 && GET_CODE (XEXP (operands[1], 0)) != REG
9630 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9631 GET_MODE (operands[1]), false))
9632 operands[1]
9633 = replace_equiv_address (operands[1],
9634 copy_addr_to_reg (XEXP (operands[1], 0)));
9635 }
9636
9637 /* Generate a vector of constants to permute MODE for a little-endian
9638 storage operation by swapping the two halves of a vector. */
9639 static rtvec
9640 rs6000_const_vec (machine_mode mode)
9641 {
9642 int i, subparts;
9643 rtvec v;
9644
9645 switch (mode)
9646 {
9647 case E_V1TImode:
9648 subparts = 1;
9649 break;
9650 case E_V2DFmode:
9651 case E_V2DImode:
9652 subparts = 2;
9653 break;
9654 case E_V4SFmode:
9655 case E_V4SImode:
9656 subparts = 4;
9657 break;
9658 case E_V8HImode:
9659 subparts = 8;
9660 break;
9661 case E_V16QImode:
9662 subparts = 16;
9663 break;
9664 default:
9665 gcc_unreachable ();
9666 }
9667
9668 v = rtvec_alloc (subparts);
9669
9670 for (i = 0; i < subparts / 2; ++i)
9671 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9672 for (i = subparts / 2; i < subparts; ++i)
9673 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9674
9675 return v;
9676 }
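/* For V4SImode, for example, this yields the selector { 2, 3, 0, 1 }:
   the two doubleword halves of the vector trade places while element
   order within each half is preserved. */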
9677
9678 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9679 store operation. */
9680 void
9681 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9682 {
9683 /* Scalar permutations are easier to express in integer modes rather than
9684 floating-point modes, so cast them here. We use V1TImode instead
9685 of TImode to ensure that the values don't go through GPRs. */
9686 if (FLOAT128_VECTOR_P (mode))
9687 {
9688 dest = gen_lowpart (V1TImode, dest);
9689 source = gen_lowpart (V1TImode, source);
9690 mode = V1TImode;
9691 }
9692
9693 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9694 scalar. */
9695 if (mode == TImode || mode == V1TImode)
9696 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9697 GEN_INT (64))));
9698 else
9699 {
9700 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9701 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9702 }
9703 }
9704
9705 /* Emit a little-endian load from vector memory location SOURCE to VSX
9706 register DEST in mode MODE. The load is done with two permuting
9707 insns that represent an lxvd2x and xxpermdi. */
9708 void
9709 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9710 {
9711 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9712 V1TImode). */
9713 if (mode == TImode || mode == V1TImode)
9714 {
9715 mode = V2DImode;
9716 dest = gen_lowpart (V2DImode, dest);
9717 source = adjust_address (source, V2DImode, 0);
9718 }
9719
9720 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9721 rs6000_emit_le_vsx_permute (tmp, source, mode);
9722 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9723 }
9724
9725 /* Emit a little-endian store to vector memory location DEST from VSX
9726 register SOURCE in mode MODE. The store is done with two permuting
9727 insns that represent an xxpermdi and an stxvd2x. */
9728 void
9729 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9730 {
9731 /* This should never be called during or after LRA, because it does
9732 not re-permute the source register. It is intended only for use
9733 during expand. */
9734 gcc_assert (!lra_in_progress && !reload_completed);
9735
9736 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9737 V1TImode). */
9738 if (mode == TImode || mode == V1TImode)
9739 {
9740 mode = V2DImode;
9741 dest = adjust_address (dest, V2DImode, 0);
9742 source = gen_lowpart (V2DImode, source);
9743 }
9744
9745 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9746 rs6000_emit_le_vsx_permute (tmp, source, mode);
9747 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9748 }
9749
9750 /* Emit a sequence representing a little-endian VSX load or store,
9751 moving data from SOURCE to DEST in mode MODE. This is done
9752 separately from rs6000_emit_move to ensure it is called only
9753 during expand. LE VSX loads and stores introduced later are
9754 handled with a split. The expand-time RTL generation allows
9755 us to optimize away redundant pairs of register-permutes. */
9756 void
9757 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9758 {
9759 gcc_assert (!BYTES_BIG_ENDIAN
9760 && VECTOR_MEM_VSX_P (mode)
9761 && !TARGET_P9_VECTOR
9762 && !gpr_or_gpr_p (dest, source)
9763 && (MEM_P (source) ^ MEM_P (dest)));
9764
9765 if (MEM_P (source))
9766 {
9767 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
9768 rs6000_emit_le_vsx_load (dest, source, mode);
9769 }
9770 else
9771 {
9772 if (!REG_P (source))
9773 source = force_reg (mode, source);
9774 rs6000_emit_le_vsx_store (dest, source, mode);
9775 }
9776 }
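/* lxvd2x and stxvd2x transfer the two doublewords in swapped order on
   little-endian, so each helper pairs the memory access with one
   compensating xxpermdi; two half-swaps compose to the identity.  A
   trivial standalone model of that invariant:  */

#include <assert.h>
#include <stdint.h>

static void
swap_halves (uint64_t v[2])
{
  uint64_t t = v[0];
  v[0] = v[1];
  v[1] = t;
}

int
main (void)
{
  uint64_t v[2] = { 0x0001020304050607ULL, 0x08090a0b0c0d0e0fULL };
  swap_halves (v);  /* the permuting load/store (lxvd2x/stxvd2x) */
  swap_halves (v);  /* the compensating xxpermdi */
  assert (v[0] == 0x0001020304050607ULL);
  assert (v[1] == 0x08090a0b0c0d0e0fULL);
  return 0;
}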
9777
9778 /* Return whether a SFmode or SImode move can be done without converting one
9779 mode to another. This arises when we have:
9780
9781 (SUBREG:SF (REG:SI ...))
9782 (SUBREG:SI (REG:SF ...))
9783
9784 and one of the values is in a floating point/vector register, where SFmode
9785 scalars are stored in DFmode format. */
9786
9787 bool
9788 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9789 {
9790 if (TARGET_ALLOW_SF_SUBREG)
9791 return true;
9792
9793 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9794 return true;
9795
9796 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9797 return true;
9798
9799 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9800 if (SUBREG_P (dest))
9801 {
9802 rtx dest_subreg = SUBREG_REG (dest);
9803 rtx src_subreg = SUBREG_REG (src);
9804 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9805 }
9806
9807 return false;
9808 }
9809
9810
9811 /* Helper function to change moves with:
9812
9813 (SUBREG:SF (REG:SI)) and
9814 (SUBREG:SI (REG:SF))
9815
9816 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9817 values are stored as DFmode values in the VSX registers. We need to convert
9818 the bits before we can use a direct move or operate on the bits in the
9819 vector register as an integer type.
9820
9821 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9822
9823 static bool
9824 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9825 {
9826 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
9827 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9828 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9829 {
9830 rtx inner_source = SUBREG_REG (source);
9831 machine_mode inner_mode = GET_MODE (inner_source);
9832
9833 if (mode == SImode && inner_mode == SFmode)
9834 {
9835 emit_insn (gen_movsi_from_sf (dest, inner_source));
9836 return true;
9837 }
9838
9839 if (mode == SFmode && inner_mode == SImode)
9840 {
9841 emit_insn (gen_movsf_from_si (dest, inner_source));
9842 return true;
9843 }
9844 }
9845
9846 return false;
9847 }
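/* At the source level the subregs above ask for the raw 32-bit pattern
   of a float, as in the standalone sketch below; the UNSPEC insns are
   needed because VSX registers keep SFmode values in DFmode format, so
   the bits must be converted before moving them as an integer.  */

#include <stdint.h>
#include <string.h>

static uint32_t
float_bits (float f)
{
  uint32_t u;
  memcpy (&u, &f, sizeof u);  /* the bit-pattern reinterpretation */
  return u;
}

int
main (void)
{
  return float_bits (1.0f) == 0x3f800000 ? 0 : 1;
}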
9848
9849 /* Emit a move from SOURCE to DEST in mode MODE. */
9850 void
9851 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9852 {
9853 rtx operands[2];
9854 operands[0] = dest;
9855 operands[1] = source;
9856
9857 if (TARGET_DEBUG_ADDR)
9858 {
9859 fprintf (stderr,
9860 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9861 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9862 GET_MODE_NAME (mode),
9863 lra_in_progress,
9864 reload_completed,
9865 can_create_pseudo_p ());
9866 debug_rtx (dest);
9867 fprintf (stderr, "source:\n");
9868 debug_rtx (source);
9869 }
9870
9871 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
9872 if (CONST_WIDE_INT_P (operands[1])
9873 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9874 {
9875 /* This should be fixed with the introduction of CONST_WIDE_INT. */
9876 gcc_unreachable ();
9877 }
9878
9879 #ifdef HAVE_AS_GNU_ATTRIBUTE
9880 /* If we use a long double type, set the flags in .gnu_attribute that say
9881 what the long double type is. This is to allow the linker's warning
9882 message for the wrong long double to be useful, even if the function does
9883 not do a call (for example, doing a 128-bit add on power9 if the long
9884 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
9885 are used and they aren't the default long double type. */
9886 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9887 {
9888 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9889 rs6000_passes_float = rs6000_passes_long_double = true;
9890
9891 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9892 rs6000_passes_float = rs6000_passes_long_double = true;
9893 }
9894 #endif
9895
9896 /* See if we need to special case SImode/SFmode SUBREG moves. */
9897 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9898 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9899 return;
9900
9901 /* Check if GCC is setting up a block move that will end up using FP
9902 registers as temporaries. We must make sure this is acceptable. */
9903 if (GET_CODE (operands[0]) == MEM
9904 && GET_CODE (operands[1]) == MEM
9905 && mode == DImode
9906 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9907 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9908 && ! (rs6000_slow_unaligned_access (SImode,
9909 (MEM_ALIGN (operands[0]) > 32
9910 ? 32 : MEM_ALIGN (operands[0])))
9911 || rs6000_slow_unaligned_access (SImode,
9912 (MEM_ALIGN (operands[1]) > 32
9913 ? 32 : MEM_ALIGN (operands[1]))))
9914 && ! MEM_VOLATILE_P (operands [0])
9915 && ! MEM_VOLATILE_P (operands [1]))
9916 {
9917 emit_move_insn (adjust_address (operands[0], SImode, 0),
9918 adjust_address (operands[1], SImode, 0));
9919 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9920 adjust_address (copy_rtx (operands[1]), SImode, 4));
9921 return;
9922 }
9923
9924 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
9925 && !gpc_reg_operand (operands[1], mode))
9926 operands[1] = force_reg (mode, operands[1]);
9927
9928 /* Recognize the case where operand[1] is a reference to thread-local
9929 data and load its address to a register. */
9930 if (tls_referenced_p (operands[1]))
9931 {
9932 enum tls_model model;
9933 rtx tmp = operands[1];
9934 rtx addend = NULL;
9935
9936 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9937 {
9938 addend = XEXP (XEXP (tmp, 0), 1);
9939 tmp = XEXP (XEXP (tmp, 0), 0);
9940 }
9941
9942 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
9943 model = SYMBOL_REF_TLS_MODEL (tmp);
9944 gcc_assert (model != 0);
9945
9946 tmp = rs6000_legitimize_tls_address (tmp, model);
9947 if (addend)
9948 {
9949 tmp = gen_rtx_PLUS (mode, tmp, addend);
9950 tmp = force_operand (tmp, operands[0]);
9951 }
9952 operands[1] = tmp;
9953 }
9954
9955 /* 128-bit constant floating-point values on Darwin should really be loaded
9956 as two parts. However, this premature splitting is a problem when DFmode
9957 values can go into Altivec registers. */
9958 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
9959 && GET_CODE (operands[1]) == CONST_DOUBLE)
9960 {
9961 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9962 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9963 DFmode);
9964 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9965 GET_MODE_SIZE (DFmode)),
9966 simplify_gen_subreg (DFmode, operands[1], mode,
9967 GET_MODE_SIZE (DFmode)),
9968 DFmode);
9969 return;
9970 }
9971
9972 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9973 p1:SD) if p1 is not of floating point class and p0 is spilled as
9974 we can have no analogous movsd_store for this. */
9975 if (lra_in_progress && mode == DDmode
9976 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
9977 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9978 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
9979 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9980 {
9981 enum reg_class cl;
9982 int regno = REGNO (SUBREG_REG (operands[1]));
9983
9984 if (regno >= FIRST_PSEUDO_REGISTER)
9985 {
9986 cl = reg_preferred_class (regno);
9987 regno = reg_renumber[regno];
9988 if (regno < 0)
9989 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9990 }
9991 if (regno >= 0 && ! FP_REGNO_P (regno))
9992 {
9993 mode = SDmode;
9994 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
9995 operands[1] = SUBREG_REG (operands[1]);
9996 }
9997 }
9998 if (lra_in_progress
9999 && mode == SDmode
10000 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10001 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10002 && (REG_P (operands[1])
10003 || (GET_CODE (operands[1]) == SUBREG
10004 && REG_P (SUBREG_REG (operands[1])))))
10005 {
10006 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10007 ? SUBREG_REG (operands[1]) : operands[1]);
10008 enum reg_class cl;
10009
10010 if (regno >= FIRST_PSEUDO_REGISTER)
10011 {
10012 cl = reg_preferred_class (regno);
10013 gcc_assert (cl != NO_REGS);
10014 regno = reg_renumber[regno];
10015 if (regno < 0)
10016 regno = ira_class_hard_regs[cl][0];
10017 }
10018 if (FP_REGNO_P (regno))
10019 {
10020 if (GET_MODE (operands[0]) != DDmode)
10021 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10022 emit_insn (gen_movsd_store (operands[0], operands[1]));
10023 }
10024 else if (INT_REGNO_P (regno))
10025 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10026 else
10027 gcc_unreachable ();
10028 return;
10029 }
10030 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10031 p1:DD)) if p0 is not of floating point class and p1 is spilled as
10032 we can have no analogous movsd_load for this. */
10033 if (lra_in_progress && mode == DDmode
10034 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10035 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10036 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10037 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10038 {
10039 enum reg_class cl;
10040 int regno = REGNO (SUBREG_REG (operands[0]));
10041
10042 if (regno >= FIRST_PSEUDO_REGISTER)
10043 {
10044 cl = reg_preferred_class (regno);
10045 regno = reg_renumber[regno];
10046 if (regno < 0)
10047 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10048 }
10049 if (regno >= 0 && ! FP_REGNO_P (regno))
10050 {
10051 mode = SDmode;
10052 operands[0] = SUBREG_REG (operands[0]);
10053 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10054 }
10055 }
10056 if (lra_in_progress
10057 && mode == SDmode
10058 && (REG_P (operands[0])
10059 || (GET_CODE (operands[0]) == SUBREG
10060 && REG_P (SUBREG_REG (operands[0]))))
10061 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10062 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10063 {
10064 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10065 ? SUBREG_REG (operands[0]) : operands[0]);
10066 enum reg_class cl;
10067
10068 if (regno >= FIRST_PSEUDO_REGISTER)
10069 {
10070 cl = reg_preferred_class (regno);
10071 gcc_assert (cl != NO_REGS);
10072 regno = reg_renumber[regno];
10073 if (regno < 0)
10074 regno = ira_class_hard_regs[cl][0];
10075 }
10076 if (FP_REGNO_P (regno))
10077 {
10078 if (GET_MODE (operands[1]) != DDmode)
10079 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10080 emit_insn (gen_movsd_load (operands[0], operands[1]));
10081 }
10082 else if (INT_REGNO_P (regno))
10083 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10084 else
10085 gcc_unreachable ();
10086 return;
10087 }
10088
10089 /* FIXME: In the long term, this switch statement should go away
10090 and be replaced by a sequence of tests based on things like
10091 mode == Pmode. */
10092 switch (mode)
10093 {
10094 case E_HImode:
10095 case E_QImode:
10096 if (CONSTANT_P (operands[1])
10097 && GET_CODE (operands[1]) != CONST_INT)
10098 operands[1] = force_const_mem (mode, operands[1]);
10099 break;
10100
10101 case E_TFmode:
10102 case E_TDmode:
10103 case E_IFmode:
10104 case E_KFmode:
10105 if (FLOAT128_2REG_P (mode))
10106 rs6000_eliminate_indexed_memrefs (operands);
10107 /* fall through */
10108
10109 case E_DFmode:
10110 case E_DDmode:
10111 case E_SFmode:
10112 case E_SDmode:
10113 if (CONSTANT_P (operands[1])
10114 && ! easy_fp_constant (operands[1], mode))
10115 operands[1] = force_const_mem (mode, operands[1]);
10116 break;
10117
10118 case E_V16QImode:
10119 case E_V8HImode:
10120 case E_V4SFmode:
10121 case E_V4SImode:
10122 case E_V2DFmode:
10123 case E_V2DImode:
10124 case E_V1TImode:
10125 if (CONSTANT_P (operands[1])
10126 && !easy_vector_constant (operands[1], mode))
10127 operands[1] = force_const_mem (mode, operands[1]);
10128 break;
10129
10130 case E_SImode:
10131 case E_DImode:
10132 /* Use default pattern for address of ELF small data. */
10133 if (TARGET_ELF
10134 && mode == Pmode
10135 && DEFAULT_ABI == ABI_V4
10136 && (GET_CODE (operands[1]) == SYMBOL_REF
10137 || GET_CODE (operands[1]) == CONST)
10138 && small_data_operand (operands[1], mode))
10139 {
10140 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10141 return;
10142 }
10143
10144 if (DEFAULT_ABI == ABI_V4
10145 && mode == Pmode && mode == SImode
10146 && flag_pic == 1 && got_operand (operands[1], mode))
10147 {
10148 emit_insn (gen_movsi_got (operands[0], operands[1]));
10149 return;
10150 }
10151
10152 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10153 && TARGET_NO_TOC
10154 && ! flag_pic
10155 && mode == Pmode
10156 && CONSTANT_P (operands[1])
10157 && GET_CODE (operands[1]) != HIGH
10158 && GET_CODE (operands[1]) != CONST_INT)
10159 {
10160 rtx target = (!can_create_pseudo_p ()
10161 ? operands[0]
10162 : gen_reg_rtx (mode));
10163
10164 /* If this is a function address on -mcall-aixdesc,
10165 convert it to the address of the descriptor. */
10166 if (DEFAULT_ABI == ABI_AIX
10167 && GET_CODE (operands[1]) == SYMBOL_REF
10168 && XSTR (operands[1], 0)[0] == '.')
10169 {
10170 const char *name = XSTR (operands[1], 0);
10171 rtx new_ref;
10172 while (*name == '.')
10173 name++;
10174 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10175 CONSTANT_POOL_ADDRESS_P (new_ref)
10176 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10177 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10178 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10179 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10180 operands[1] = new_ref;
10181 }
10182
10183 if (DEFAULT_ABI == ABI_DARWIN)
10184 {
10185 #if TARGET_MACHO
10186 if (MACHO_DYNAMIC_NO_PIC_P)
10187 {
10188 /* Take care of any required data indirection. */
10189 operands[1] = rs6000_machopic_legitimize_pic_address (
10190 operands[1], mode, operands[0]);
10191 if (operands[0] != operands[1])
10192 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10193 return;
10194 }
10195 #endif
10196 emit_insn (gen_macho_high (target, operands[1]));
10197 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10198 return;
10199 }
10200
10201 emit_insn (gen_elf_high (target, operands[1]));
10202 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10203 return;
10204 }
10205
10206 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10207 and we have put it in the TOC, we just need to make a TOC-relative
10208 reference to it. */
10209 if (TARGET_TOC
10210 && GET_CODE (operands[1]) == SYMBOL_REF
10211 && use_toc_relative_ref (operands[1], mode))
10212 operands[1] = create_TOC_reference (operands[1], operands[0]);
10213 else if (mode == Pmode
10214 && CONSTANT_P (operands[1])
10215 && GET_CODE (operands[1]) != HIGH
10216 && ((GET_CODE (operands[1]) != CONST_INT
10217 && ! easy_fp_constant (operands[1], mode))
10218 || (GET_CODE (operands[1]) == CONST_INT
10219 && (num_insns_constant (operands[1], mode)
10220 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10221 || (GET_CODE (operands[0]) == REG
10222 && FP_REGNO_P (REGNO (operands[0]))))
10223 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10224 && (TARGET_CMODEL == CMODEL_SMALL
10225 || can_create_pseudo_p ()
10226 || (REG_P (operands[0])
10227 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10228 {
10229
10230 #if TARGET_MACHO
10231 /* Darwin uses a special PIC legitimizer. */
10232 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10233 {
10234 operands[1] =
10235 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10236 operands[0]);
10237 if (operands[0] != operands[1])
10238 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10239 return;
10240 }
10241 #endif
10242
10243 /* If we are to limit the number of things we put in the TOC and
10244 this is a symbol plus a constant we can add in one insn,
10245 just put the symbol in the TOC and add the constant. */
10246 if (GET_CODE (operands[1]) == CONST
10247 && TARGET_NO_SUM_IN_TOC
10248 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10249 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10250 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10251 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10252 && ! side_effects_p (operands[0]))
10253 {
10254 rtx sym =
10255 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10256 rtx other = XEXP (XEXP (operands[1], 0), 1);
10257
10258 sym = force_reg (mode, sym);
10259 emit_insn (gen_add3_insn (operands[0], sym, other));
10260 return;
10261 }
10262
10263 operands[1] = force_const_mem (mode, operands[1]);
10264
10265 if (TARGET_TOC
10266 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10267 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10268 {
10269 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10270 operands[0]);
10271 operands[1] = gen_const_mem (mode, tocref);
10272 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10273 }
10274 }
10275 break;
10276
10277 case E_TImode:
10278 if (!VECTOR_MEM_VSX_P (TImode))
10279 rs6000_eliminate_indexed_memrefs (operands);
10280 break;
10281
10282 case E_PTImode:
10283 rs6000_eliminate_indexed_memrefs (operands);
10284 break;
10285
10286 default:
10287 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10288 }
10289
10290 /* Above, we may have called force_const_mem which may have returned
10291 an invalid address. If we can, fix this up; otherwise, reload will
10292 have to deal with it. */
10293 if (GET_CODE (operands[1]) == MEM)
10294 operands[1] = validize_mem (operands[1]);
10295
10296 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10297 }
10298 \f
10299 /* Nonzero if we can use a floating-point register to pass this arg. */
10300 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10301 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10302 && (CUM)->fregno <= FP_ARG_MAX_REG \
10303 && TARGET_HARD_FLOAT)
10304
10305 /* Nonzero if we can use an AltiVec register to pass this arg. */
10306 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10307 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10308 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10309 && TARGET_ALTIVEC_ABI \
10310 && (NAMED))
10311
10312 /* Walk down the type tree of TYPE counting consecutive base elements.
10313 If *MODEP is VOIDmode, then set it to the first valid floating point
10314 or vector type. If a non-floating point or vector type is found, or
10315 if a floating point or vector type that doesn't match a non-VOIDmode
10316 *MODEP is found, then return -1, otherwise return the count in the
10317 sub-tree. */
10318
10319 static int
10320 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10321 {
10322 machine_mode mode;
10323 HOST_WIDE_INT size;
10324
10325 switch (TREE_CODE (type))
10326 {
10327 case REAL_TYPE:
10328 mode = TYPE_MODE (type);
10329 if (!SCALAR_FLOAT_MODE_P (mode))
10330 return -1;
10331
10332 if (*modep == VOIDmode)
10333 *modep = mode;
10334
10335 if (*modep == mode)
10336 return 1;
10337
10338 break;
10339
10340 case COMPLEX_TYPE:
10341 mode = TYPE_MODE (TREE_TYPE (type));
10342 if (!SCALAR_FLOAT_MODE_P (mode))
10343 return -1;
10344
10345 if (*modep == VOIDmode)
10346 *modep = mode;
10347
10348 if (*modep == mode)
10349 return 2;
10350
10351 break;
10352
10353 case VECTOR_TYPE:
10354 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10355 return -1;
10356
10357 /* Use V4SImode as representative of all 128-bit vector types. */
10358 size = int_size_in_bytes (type);
10359 switch (size)
10360 {
10361 case 16:
10362 mode = V4SImode;
10363 break;
10364 default:
10365 return -1;
10366 }
10367
10368 if (*modep == VOIDmode)
10369 *modep = mode;
10370
10371 /* Vector modes are considered to be opaque: two vectors are
10372 equivalent for the purposes of being homogeneous aggregates
10373 if they are the same size. */
10374 if (*modep == mode)
10375 return 1;
10376
10377 break;
10378
10379 case ARRAY_TYPE:
10380 {
10381 int count;
10382 tree index = TYPE_DOMAIN (type);
10383
10384 /* Can't handle incomplete types nor sizes that are not
10385 fixed. */
10386 if (!COMPLETE_TYPE_P (type)
10387 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10388 return -1;
10389
10390 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10391 if (count == -1
10392 || !index
10393 || !TYPE_MAX_VALUE (index)
10394 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10395 || !TYPE_MIN_VALUE (index)
10396 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10397 || count < 0)
10398 return -1;
10399
10400 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10401 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10402
10403 /* There must be no padding. */
10404 if (wi::to_wide (TYPE_SIZE (type))
10405 != count * GET_MODE_BITSIZE (*modep))
10406 return -1;
10407
10408 return count;
10409 }
10410
10411 case RECORD_TYPE:
10412 {
10413 int count = 0;
10414 int sub_count;
10415 tree field;
10416
10417 /* Can't handle incomplete types nor sizes that are not
10418 fixed. */
10419 if (!COMPLETE_TYPE_P (type)
10420 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10421 return -1;
10422
10423 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10424 {
10425 if (TREE_CODE (field) != FIELD_DECL)
10426 continue;
10427
10428 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10429 if (sub_count < 0)
10430 return -1;
10431 count += sub_count;
10432 }
10433
10434 /* There must be no padding. */
10435 if (wi::to_wide (TYPE_SIZE (type))
10436 != count * GET_MODE_BITSIZE (*modep))
10437 return -1;
10438
10439 return count;
10440 }
10441
10442 case UNION_TYPE:
10443 case QUAL_UNION_TYPE:
10444 {
10445 /* These aren't very interesting except in a degenerate case. */
10446 int count = 0;
10447 int sub_count;
10448 tree field;
10449
10450 /* Can't handle incomplete types nor sizes that are not
10451 fixed. */
10452 if (!COMPLETE_TYPE_P (type)
10453 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10454 return -1;
10455
10456 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10457 {
10458 if (TREE_CODE (field) != FIELD_DECL)
10459 continue;
10460
10461 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10462 if (sub_count < 0)
10463 return -1;
10464 count = MAX (count, sub_count);
10465 }
10466
10467 /* There must be no padding. */
10468 if (wi::to_wide (TYPE_SIZE (type))
10469 != count * GET_MODE_BITSIZE (*modep))
10470 return -1;
10471
10472 return count;
10473 }
10474
10475 default:
10476 break;
10477 }
10478
10479 return -1;
10480 }
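/* For example, given struct { double a, b; } the RECORD_TYPE case above
   sums two REAL_TYPE fields and yields a count of 2 with *MODEP set to
   DFmode, whereas struct { double a; float b; } yields -1 because the
   second field's SFmode does not match the DFmode already recorded in
   *MODEP.  */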
10481
10482 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10483 float or vector aggregate that shall be passed in FP/vector registers
10484 according to the ELFv2 ABI, return the homogeneous element mode in
10485 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10486
10487 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10488
10489 static bool
10490 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10491 machine_mode *elt_mode,
10492 int *n_elts)
10493 {
10494 /* Note that we do not accept complex types at the top level as
10495 homogeneous aggregates; these types are handled via the
10496 targetm.calls.split_complex_arg mechanism. Complex types
10497 can be elements of homogeneous aggregates, however. */
10498 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10499 && AGGREGATE_TYPE_P (type))
10500 {
10501 machine_mode field_mode = VOIDmode;
10502 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10503
10504 if (field_count > 0)
10505 {
10506 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10507 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10508
10509 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10510 up to AGGR_ARG_NUM_REG registers. */
10511 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10512 {
10513 if (elt_mode)
10514 *elt_mode = field_mode;
10515 if (n_elts)
10516 *n_elts = field_count;
10517 return true;
10518 }
10519 }
10520 }
10521
10522 if (elt_mode)
10523 *elt_mode = mode;
10524 if (n_elts)
10525 *n_elts = 1;
10526 return false;
10527 }
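/* The size check above in numbers: DFmode elements give reg_size == 8 and
   field_size == 8, so at most AGGR_ARG_NUM_REG doubles qualify; likewise
   at most AGGR_ARG_NUM_REG 16-byte vector elements.  Larger aggregates
   are passed like any other aggregate.  */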
10528
10529 /* Return a nonzero value to say to return the function value in
10530 memory, just as large structures are always returned. TYPE will be
10531 the data type of the value, and FNTYPE will be the type of the
10532 function doing the returning, or @code{NULL} for libcalls.
10533
10534 The AIX ABI for the RS/6000 specifies that all structures are
10535 returned in memory. The Darwin ABI does the same.
10536
10537 For the Darwin 64 Bit ABI, a function result can be returned in
10538 registers or in memory, depending on the size of the return data
10539 type. If it is returned in registers, the value occupies the same
10540 registers as it would if it were the first and only function
10541 argument. Otherwise, the function places its result in memory at
10542 the location pointed to by GPR3.
10543
10544 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10545 but a draft put them in memory, and GCC used to implement the draft
10546 instead of the final standard. Therefore, aix_struct_return
10547 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10548 compatibility can change DRAFT_V4_STRUCT_RET to override the
10549 default, and -m switches get the final word. See
10550 rs6000_option_override_internal for more details.
10551
10552 The PPC32 SVR4 ABI uses IEEE 128-bit floating point for long double, if
10553 128-bit long double support is enabled. These values are returned in memory.
10554
10555 int_size_in_bytes returns -1 for variable size objects, which go in
10556 memory always. The cast to unsigned makes -1 > 8. */
10557
10558 static bool
10559 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10560 {
10561 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10562 if (TARGET_MACHO
10563 && rs6000_darwin64_abi
10564 && TREE_CODE (type) == RECORD_TYPE
10565 && int_size_in_bytes (type) > 0)
10566 {
10567 CUMULATIVE_ARGS valcum;
10568 rtx valret;
10569
10570 valcum.words = 0;
10571 valcum.fregno = FP_ARG_MIN_REG;
10572 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10573 /* Do a trial code generation as if this were going to be passed
10574 as an argument; if any part goes in memory, we return NULL. */
10575 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10576 if (valret)
10577 return false;
10578 /* Otherwise fall through to more conventional ABI rules. */
10579 }
10580
10581 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers.  */
10582 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10583 NULL, NULL))
10584 return false;
10585
10586 /* The ELFv2 ABI returns aggregates up to 16 bytes in registers.  */
10587 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10588 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10589 return false;
10590
10591 if (AGGREGATE_TYPE_P (type)
10592 && (aix_struct_return
10593 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10594 return true;
10595
10596 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10597 modes only exist for GCC vector types if -maltivec. */
10598 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10599 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10600 return false;
10601
10602 /* Return synthetic vectors in memory. */
10603 if (TREE_CODE (type) == VECTOR_TYPE
10604 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10605 {
10606 static bool warned_for_return_big_vectors = false;
10607 if (!warned_for_return_big_vectors)
10608 {
10609 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10610 "non-standard ABI extension with no compatibility "
10611 "guarantee");
10612 warned_for_return_big_vectors = true;
10613 }
10614 return true;
10615 }
10616
10617 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10618 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10619 return true;
10620
10621 return false;
10622 }
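/* Illustrations of the ELFv2 checks above: struct { double a, b; } is a
   homogeneous aggregate and is returned in registers; so is a 16-byte
   struct of four ints; a 24-byte struct fails both tests and is returned
   in memory.  */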
10623
10624 /* Specify whether values returned in registers should be at the most
10625 significant end of a register. We want aggregates returned by
10626 value to match the way aggregates are passed to functions. */
10627
10628 static bool
10629 rs6000_return_in_msb (const_tree valtype)
10630 {
10631 return (DEFAULT_ABI == ABI_ELFv2
10632 && BYTES_BIG_ENDIAN
10633 && AGGREGATE_TYPE_P (valtype)
10634 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10635 == PAD_UPWARD));
10636 }
10637
10638 #ifdef HAVE_AS_GNU_ATTRIBUTE
10639 /* Return TRUE if a call to function FNDECL may be one that
10640 potentially affects the function calling ABI of the object file. */
10641
10642 static bool
10643 call_ABI_of_interest (tree fndecl)
10644 {
10645 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10646 {
10647 struct cgraph_node *c_node;
10648
10649 /* Libcalls are always interesting. */
10650 if (fndecl == NULL_TREE)
10651 return true;
10652
10653 /* Any call to an external function is interesting. */
10654 if (DECL_EXTERNAL (fndecl))
10655 return true;
10656
10657 /* Interesting functions that we are emitting in this object file. */
10658 c_node = cgraph_node::get (fndecl);
10659 c_node = c_node->ultimate_alias_target ();
10660 return !c_node->only_called_directly_p ();
10661 }
10662 return false;
10663 }
10664 #endif
10665
10666 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10667 for a call to a function whose data type is FNTYPE.
10668 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
10669
10670 For incoming args we set the number of arguments in the prototype large
10671 so we never return a PARALLEL. */
10672
10673 void
10674 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10675 rtx libname ATTRIBUTE_UNUSED, int incoming,
10676 int libcall, int n_named_args,
10677 tree fndecl ATTRIBUTE_UNUSED,
10678 machine_mode return_mode ATTRIBUTE_UNUSED)
10679 {
10680 static CUMULATIVE_ARGS zero_cumulative;
10681
10682 *cum = zero_cumulative;
10683 cum->words = 0;
10684 cum->fregno = FP_ARG_MIN_REG;
10685 cum->vregno = ALTIVEC_ARG_MIN_REG;
10686 cum->prototype = (fntype && prototype_p (fntype));
10687 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10688 ? CALL_LIBCALL : CALL_NORMAL);
10689 cum->sysv_gregno = GP_ARG_MIN_REG;
10690 cum->stdarg = stdarg_p (fntype);
10691 cum->libcall = libcall;
10692
10693 cum->nargs_prototype = 0;
10694 if (incoming || cum->prototype)
10695 cum->nargs_prototype = n_named_args;
10696
10697 /* Check for a longcall attribute. */
10698 if ((!fntype && rs6000_default_long_calls)
10699 || (fntype
10700 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10701 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10702 cum->call_cookie |= CALL_LONG;
10703
10704 if (TARGET_DEBUG_ARG)
10705 {
10706 fprintf (stderr, "\ninit_cumulative_args:");
10707 if (fntype)
10708 {
10709 tree ret_type = TREE_TYPE (fntype);
10710 fprintf (stderr, " ret code = %s,",
10711 get_tree_code_name (TREE_CODE (ret_type)));
10712 }
10713
10714 if (cum->call_cookie & CALL_LONG)
10715 fprintf (stderr, " longcall,");
10716
10717 fprintf (stderr, " proto = %d, nargs = %d\n",
10718 cum->prototype, cum->nargs_prototype);
10719 }
10720
10721 #ifdef HAVE_AS_GNU_ATTRIBUTE
10722 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10723 {
10724 cum->escapes = call_ABI_of_interest (fndecl);
10725 if (cum->escapes)
10726 {
10727 tree return_type;
10728
10729 if (fntype)
10730 {
10731 return_type = TREE_TYPE (fntype);
10732 return_mode = TYPE_MODE (return_type);
10733 }
10734 else
10735 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10736
10737 if (return_type != NULL)
10738 {
10739 if (TREE_CODE (return_type) == RECORD_TYPE
10740 && TYPE_TRANSPARENT_AGGR (return_type))
10741 {
10742 return_type = TREE_TYPE (first_field (return_type));
10743 return_mode = TYPE_MODE (return_type);
10744 }
10745 if (AGGREGATE_TYPE_P (return_type)
10746 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10747 <= 8))
10748 rs6000_returns_struct = true;
10749 }
10750 if (SCALAR_FLOAT_MODE_P (return_mode))
10751 {
10752 rs6000_passes_float = true;
10753 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10754 && (FLOAT128_IBM_P (return_mode)
10755 || FLOAT128_IEEE_P (return_mode)
10756 || (return_type != NULL
10757 && (TYPE_MAIN_VARIANT (return_type)
10758 == long_double_type_node))))
10759 rs6000_passes_long_double = true;
10760
10761 /* Note if we pass or return an IEEE 128-bit type. We changed
10762 the mangling for these types, and we may need to make an alias
10763 with the old mangling. */
10764 if (FLOAT128_IEEE_P (return_mode))
10765 rs6000_passes_ieee128 = true;
10766 }
10767 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10768 rs6000_passes_vector = true;
10769 }
10770 }
10771 #endif
10772
10773 if (fntype
10774 && !TARGET_ALTIVEC
10775 && TARGET_ALTIVEC_ABI
10776 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10777 {
10778 error ("cannot return value in vector register because"
10779 " altivec instructions are disabled, use %qs"
10780 " to enable them", "-maltivec");
10781 }
10782 }
10783 \f
10784 /* The mode the ABI uses for a word. This is not the same as word_mode
10785 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10786
10787 static scalar_int_mode
10788 rs6000_abi_word_mode (void)
10789 {
10790 return TARGET_32BIT ? SImode : DImode;
10791 }
10792
10793 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10794 static char *
10795 rs6000_offload_options (void)
10796 {
10797 if (TARGET_64BIT)
10798 return xstrdup ("-foffload-abi=lp64");
10799 else
10800 return xstrdup ("-foffload-abi=ilp32");
10801 }
10802
10803 /* On rs6000, function arguments are promoted, as are function return
10804 values. */
10805
10806 static machine_mode
10807 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10808 machine_mode mode,
10809 int *punsignedp ATTRIBUTE_UNUSED,
10810 const_tree, int)
10811 {
10812 PROMOTE_MODE (mode, *punsignedp, type);
10813
10814 return mode;
10815 }
10816
10817 /* Return true if TYPE must be passed on the stack and not in registers. */
10818
10819 static bool
10820 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10821 {
10822 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10823 return must_pass_in_stack_var_size (mode, type);
10824 else
10825 return must_pass_in_stack_var_size_or_pad (mode, type);
10826 }
10827
10828 static inline bool
10829 is_complex_IBM_long_double (machine_mode mode)
10830 {
10831 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10832 }
10833
10834 /* Whether ABI_V4 passes MODE args to a function in floating point
10835 registers. */
10836
10837 static bool
10838 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10839 {
10840 if (!TARGET_HARD_FLOAT)
10841 return false;
10842 if (mode == DFmode)
10843 return true;
10844 if (mode == SFmode && named)
10845 return true;
10846 /* ABI_V4 passes complex IBM long double in 8 gprs.
10847 Stupid, but we can't change the ABI now. */
10848 if (is_complex_IBM_long_double (mode))
10849 return false;
10850 if (FLOAT128_2REG_P (mode))
10851 return true;
10852 if (DECIMAL_FLOAT_MODE_P (mode))
10853 return true;
10854 return false;
10855 }
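/* In short: under ABI_V4 with hard float, DFmode and named SFmode
   arguments go in FPRs, as do decimal float and IBM 128-bit formats,
   while complex IBM long double stays in GPRs as noted above.  */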
10856
10857 /* Implement TARGET_FUNCTION_ARG_PADDING.
10858
10859 For the AIX ABI structs are always stored left shifted in their
10860 argument slot. */
10861
10862 static pad_direction
10863 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10864 {
10865 #ifndef AGGREGATE_PADDING_FIXED
10866 #define AGGREGATE_PADDING_FIXED 0
10867 #endif
10868 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10869 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10870 #endif
10871
10872 if (!AGGREGATE_PADDING_FIXED)
10873 {
10874 /* GCC used to pass structures of the same size as integer types as
10875 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10876 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10877 passed padded downward, except that -mstrict-align further
10878 muddied the water in that multi-component structures of 2 and 4
10879 bytes in size were passed padded upward.
10880
10881 The following arranges for best compatibility with previous
10882 versions of gcc, but removes the -mstrict-align dependency. */
10883 if (BYTES_BIG_ENDIAN)
10884 {
10885 HOST_WIDE_INT size = 0;
10886
10887 if (mode == BLKmode)
10888 {
10889 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10890 size = int_size_in_bytes (type);
10891 }
10892 else
10893 size = GET_MODE_SIZE (mode);
10894
10895 if (size == 1 || size == 2 || size == 4)
10896 return PAD_DOWNWARD;
10897 }
10898 return PAD_UPWARD;
10899 }
10900
10901 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10902 {
10903 if (type != 0 && AGGREGATE_TYPE_P (type))
10904 return PAD_UPWARD;
10905 }
10906
10907 /* Fall back to the default. */
10908 return default_function_arg_padding (mode, type);
10909 }
10910
10911 /* If defined, a C expression that gives the alignment boundary, in bits,
10912 of an argument with the specified mode and type. If it is not defined,
10913 PARM_BOUNDARY is used for all arguments.
10914
10915 V.4 wants long longs and doubles to be double word aligned. Just
10916 testing the mode size is a boneheaded way to do this as it means
10917 that other types such as complex int are also double word aligned.
10918 However, we're stuck with this because changing the ABI might break
10919 existing library interfaces.
10920
10921 Quadword align Altivec/VSX vectors.
10922 Quadword align large synthetic vector types. */
10923
10924 static unsigned int
10925 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10926 {
10927 machine_mode elt_mode;
10928 int n_elts;
10929
10930 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10931
10932 if (DEFAULT_ABI == ABI_V4
10933 && (GET_MODE_SIZE (mode) == 8
10934 || (TARGET_HARD_FLOAT
10935 && !is_complex_IBM_long_double (mode)
10936 && FLOAT128_2REG_P (mode))))
10937 return 64;
10938 else if (FLOAT128_VECTOR_P (mode))
10939 return 128;
10940 else if (type && TREE_CODE (type) == VECTOR_TYPE
10941 && int_size_in_bytes (type) >= 8
10942 && int_size_in_bytes (type) < 16)
10943 return 64;
10944 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10945 || (type && TREE_CODE (type) == VECTOR_TYPE
10946 && int_size_in_bytes (type) >= 16))
10947 return 128;
10948
10949 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10950 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10951 -mcompat-align-parm is used. */
10952 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10953 || DEFAULT_ABI == ABI_ELFv2)
10954 && type && TYPE_ALIGN (type) > 64)
10955 {
10956 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10957 or homogeneous float/vector aggregates here. We already handled
10958 vector aggregates above, but still need to check for float here. */
10959 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10960 && !SCALAR_FLOAT_MODE_P (elt_mode));
10961
10962 /* We used to check for BLKmode instead of the above aggregate type
10963 check. Warn when this results in any difference to the ABI. */
10964 if (aggregate_p != (mode == BLKmode))
10965 {
10966 static bool warned;
10967 if (!warned && warn_psabi)
10968 {
10969 warned = true;
10970 inform (input_location,
10971 "the ABI of passing aggregates with %d-byte alignment"
10972 " has changed in GCC 5",
10973 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10974 }
10975 }
10976
10977 if (aggregate_p)
10978 return 128;
10979 }
10980
10981 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10982 implement the "aggregate type" check as a BLKmode check here; this
10983 means certain aggregate types are in fact not aligned. */
10984 if (TARGET_MACHO && rs6000_darwin64_abi
10985 && mode == BLKmode
10986 && type && TYPE_ALIGN (type) > 64)
10987 return 128;
10988
10989 return PARM_BOUNDARY;
10990 }
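/* Putting these rules together: under ABI_V4, doubles and other 8-byte
   scalars are doubleword aligned; AltiVec/VSX vectors and IEEE 128-bit
   floats are quadword aligned; aggregates with alignment above 64 bits
   get 128 bits under ELFv2 (and AIX without -mcompat-align-parm); and
   everything else falls back to PARM_BOUNDARY.  */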
10991
10992 /* The offset in words to the start of the parameter save area. */
10993
10994 static unsigned int
10995 rs6000_parm_offset (void)
10996 {
10997 return (DEFAULT_ABI == ABI_V4 ? 2
10998 : DEFAULT_ABI == ABI_ELFv2 ? 4
10999 : 6);
11000 }
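/* These offsets are the fixed link areas the ABIs place below the
   parameter save area: 2 words (back chain, LR save) for V.4, 4
   doublewords (back chain, CR, LR, TOC saves) for ELFv2, and 6 words or
   doublewords (back chain, CR, LR, two reserved slots, TOC) for the
   AIX-style ABIs.  */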
11001
11002 /* For a function parm of MODE and TYPE, return the starting word in
11003 the parameter area. NWORDS of the parameter area are already used. */
11004
11005 static unsigned int
11006 rs6000_parm_start (machine_mode mode, const_tree type,
11007 unsigned int nwords)
11008 {
11009 unsigned int align;
11010
11011 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
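/* The return value below rounds NWORDS up so that the start word plus
   rs6000_parm_offset () is a multiple of ALIGN + 1.  For example, under
   ELFv2 (offset 4), a quadword-aligned argument (ALIGN == 1) with
   NWORDS == 3 starts at word 4, since 4 + 4 == 8 doublewords is 16-byte
   aligned.  */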
11012 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11013 }
11014
11015 /* Compute the size (in words) of a function argument. */
11016
11017 static unsigned long
11018 rs6000_arg_size (machine_mode mode, const_tree type)
11019 {
11020 unsigned long size;
11021
11022 if (mode != BLKmode)
11023 size = GET_MODE_SIZE (mode);
11024 else
11025 size = int_size_in_bytes (type);
11026
11027 if (TARGET_32BIT)
11028 return (size + 3) >> 2;
11029 else
11030 return (size + 7) >> 3;
11031 }
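/* For instance, a 10-byte BLKmode argument occupies (10 + 3) >> 2 == 3
   words on 32-bit targets and (10 + 7) >> 3 == 2 words on 64-bit ones.  */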
11032 \f
11033 /* Use this to flush pending int fields. */
11034
11035 static void
11036 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11037 HOST_WIDE_INT bitpos, int final)
11038 {
11039 unsigned int startbit, endbit;
11040 int intregs, intoffset;
11041
11042 /* Handle the situation where a float is taking up the first half
11043 of the GPR, and the other half is empty (typically due to
11044 alignment restrictions). We can detect this by an 8-byte-aligned
11045 int field, or by seeing that this is the final flush for this
11046 argument. Count the word and continue on. */
11047 if (cum->floats_in_gpr == 1
11048 && (cum->intoffset % 64 == 0
11049 || (cum->intoffset == -1 && final)))
11050 {
11051 cum->words++;
11052 cum->floats_in_gpr = 0;
11053 }
11054
11055 if (cum->intoffset == -1)
11056 return;
11057
11058 intoffset = cum->intoffset;
11059 cum->intoffset = -1;
11060 cum->floats_in_gpr = 0;
11061
11062 if (intoffset % BITS_PER_WORD != 0)
11063 {
11064 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11065 if (!int_mode_for_size (bits, 0).exists ())
11066 {
11067 /* We couldn't find an appropriate mode, which happens,
11068 e.g., in packed structs when there are 3 bytes to load.
11069 Move intoffset back to the beginning of the word in this
11070 case. */
11071 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11072 }
11073 }
11074
11075 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11076 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11077 intregs = (endbit - startbit) / BITS_PER_WORD;
11078 cum->words += intregs;
11079 /* words should be unsigned. */
11080 if ((unsigned) cum->words < endbit / BITS_PER_WORD)
11081 {
11082 int pad = endbit / BITS_PER_WORD - cum->words;
11083 cum->words += pad;
11084 }
11085 }
11086
11087 /* The darwin64 ABI calls for us to recurse down through structs,
11088 looking for elements passed in registers. Unfortunately, we have
11089 to track int register count here also because of misalignments
11090 in powerpc alignment mode. */
11091
11092 static void
11093 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11094 const_tree type,
11095 HOST_WIDE_INT startbitpos)
11096 {
11097 tree f;
11098
11099 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11100 if (TREE_CODE (f) == FIELD_DECL)
11101 {
11102 HOST_WIDE_INT bitpos = startbitpos;
11103 tree ftype = TREE_TYPE (f);
11104 machine_mode mode;
11105 if (ftype == error_mark_node)
11106 continue;
11107 mode = TYPE_MODE (ftype);
11108
11109 if (DECL_SIZE (f) != 0
11110 && tree_fits_uhwi_p (bit_position (f)))
11111 bitpos += int_bit_position (f);
11112
11113 /* ??? FIXME: else assume zero offset. */
11114
11115 if (TREE_CODE (ftype) == RECORD_TYPE)
11116 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11117 else if (USE_FP_FOR_ARG_P (cum, mode))
11118 {
11119 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11120 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11121 cum->fregno += n_fpregs;
11122 /* Single-precision floats present a special problem for
11123 us, because they are smaller than an 8-byte GPR, and so
11124 the structure-packing rules combined with the standard
11125 varargs behavior mean that we want to pack float/float
11126 and float/int combinations into a single register's
11127 space. This is complicated by the arg advance flushing,
11128 which works on arbitrarily large groups of int-type
11129 fields. */
11130 if (mode == SFmode)
11131 {
11132 if (cum->floats_in_gpr == 1)
11133 {
11134 /* Two floats in a word; count the word and reset
11135 the float count. */
11136 cum->words++;
11137 cum->floats_in_gpr = 0;
11138 }
11139 else if (bitpos % 64 == 0)
11140 {
11141 /* A float at the beginning of an 8-byte word;
11142 count it and put off adjusting cum->words until
11143 we see if an arg advance flush is going to do it
11144 for us. */
11145 cum->floats_in_gpr++;
11146 }
11147 else
11148 {
11149 /* The float is at the end of a word, preceded
11150 by integer fields, so the arg advance flush
11151 just above has already set cum->words and
11152 everything is taken care of. */
11153 }
11154 }
11155 else
11156 cum->words += n_fpregs;
11157 }
11158 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11159 {
11160 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11161 cum->vregno++;
11162 cum->words += 2;
11163 }
11164 else if (cum->intoffset == -1)
11165 cum->intoffset = bitpos;
11166 }
11167 }
11168
11169 /* Check for an item that needs to be considered specially under the Darwin
11170 64-bit ABI. These are record types where the mode is BLKmode or the
11171 structure is 8 bytes in size. */
11172 static int
11173 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11174 {
11175 return rs6000_darwin64_abi
11176 && ((mode == BLKmode
11177 && TREE_CODE (type) == RECORD_TYPE
11178 && int_size_in_bytes (type) > 0)
11179 || (type && TREE_CODE (type) == RECORD_TYPE
11180 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11181 }
11182
11183 /* Update the data in CUM to advance over an argument
11184 of mode MODE and data type TYPE.
11185 (TYPE is null for libcalls where that information may not be available.)
11186
11187 Note that for args passed by reference, function_arg will be called
11188 with MODE and TYPE set to that of the pointer to the arg, not the arg
11189 itself. */
11190
11191 static void
11192 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11193 const_tree type, bool named, int depth)
11194 {
11195 machine_mode elt_mode;
11196 int n_elts;
11197
11198 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11199
11200 /* Only tick off an argument if we're not recursing. */
11201 if (depth == 0)
11202 cum->nargs_prototype--;
11203
11204 #ifdef HAVE_AS_GNU_ATTRIBUTE
11205 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11206 && cum->escapes)
11207 {
11208 if (SCALAR_FLOAT_MODE_P (mode))
11209 {
11210 rs6000_passes_float = true;
11211 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11212 && (FLOAT128_IBM_P (mode)
11213 || FLOAT128_IEEE_P (mode)
11214 || (type != NULL
11215 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11216 rs6000_passes_long_double = true;
11217
11218 /* Note if we pass or return an IEEE 128-bit type. We changed the
11219 mangling for these types, and we may need to make an alias with
11220 the old mangling. */
11221 if (FLOAT128_IEEE_P (mode))
11222 rs6000_passes_ieee128 = true;
11223 }
11224 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11225 rs6000_passes_vector = true;
11226 }
11227 #endif
11228
11229 if (TARGET_ALTIVEC_ABI
11230 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11231 || (type && TREE_CODE (type) == VECTOR_TYPE
11232 && int_size_in_bytes (type) == 16)))
11233 {
11234 bool stack = false;
11235
11236 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11237 {
11238 cum->vregno += n_elts;
11239
11240 if (!TARGET_ALTIVEC)
11241 error ("cannot pass argument in vector register because"
11242 " altivec instructions are disabled, use %qs"
11243 " to enable them", "-maltivec");
11244
11245 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11246 even if it is going to be passed in a vector register.
11247 Darwin does the same for variable-argument functions. */
11248 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11249 && TARGET_64BIT)
11250 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11251 stack = true;
11252 }
11253 else
11254 stack = true;
11255
11256 if (stack)
11257 {
11258 int align;
11259
11260 /* Vector parameters must be 16-byte aligned. In 32-bit
11261 mode this means we need to take into account the offset
11262 to the parameter save area. In 64-bit mode, they just
11263 have to start on an even word, since the parameter save
11264 area is 16-byte aligned. */
11265 if (TARGET_32BIT)
11266 align = -(rs6000_parm_offset () + cum->words) & 3;
11267 else
11268 align = cum->words & 1;
11269 cum->words += align + rs6000_arg_size (mode, type);
11270
11271 if (TARGET_DEBUG_ARG)
11272 {
11273 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11274 cum->words, align);
11275 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11276 cum->nargs_prototype, cum->prototype,
11277 GET_MODE_NAME (mode));
11278 }
11279 }
11280 }
11281 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11282 {
11283 int size = int_size_in_bytes (type);
11284 /* Variable sized types have size == -1 and are
11285 treated as if consisting entirely of ints.
11286 Pad to 16 byte boundary if needed. */
11287 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11288 && (cum->words % 2) != 0)
11289 cum->words++;
11290 /* For varargs, we can just go up by the size of the struct. */
11291 if (!named)
11292 cum->words += (size + 7) / 8;
11293 else
11294 {
11295 /* It is tempting to say int register count just goes up by
11296 sizeof(type)/8, but this is wrong in a case such as
11297 { int; double; int; } [powerpc alignment]. We have to
11298 grovel through the fields for these too. */
11299 cum->intoffset = 0;
11300 cum->floats_in_gpr = 0;
11301 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11302 rs6000_darwin64_record_arg_advance_flush (cum,
11303 size * BITS_PER_UNIT, 1);
11304 }
11305 if (TARGET_DEBUG_ARG)
11306 {
11307 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11308 cum->words, TYPE_ALIGN (type), size);
11309 fprintf (stderr,
11310 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11311 cum->nargs_prototype, cum->prototype,
11312 GET_MODE_NAME (mode));
11313 }
11314 }
11315 else if (DEFAULT_ABI == ABI_V4)
11316 {
11317 if (abi_v4_pass_in_fpr (mode, named))
11318 {
11319 /* _Decimal128 must use an even/odd register pair. This assumes
11320 that the register number is odd when fregno is odd. */
11321 if (mode == TDmode && (cum->fregno % 2) == 1)
11322 cum->fregno++;
11323
11324 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11325 <= FP_ARG_V4_MAX_REG)
11326 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11327 else
11328 {
11329 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11330 if (mode == DFmode || FLOAT128_IBM_P (mode)
11331 || mode == DDmode || mode == TDmode)
11332 cum->words += cum->words & 1;
11333 cum->words += rs6000_arg_size (mode, type);
11334 }
11335 }
11336 else
11337 {
11338 int n_words = rs6000_arg_size (mode, type);
11339 int gregno = cum->sysv_gregno;
11340
11341 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11342 So is any other 2-word item, such as complex int, due to a
11343 historical mistake. */
11344 if (n_words == 2)
11345 gregno += (1 - gregno) & 1;
11346
11347 /* Multi-reg args are not split between registers and stack. */
11348 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11349 {
11350 /* Long long is aligned on the stack. So are other 2 word
11351 items such as complex int due to a historical mistake. */
11352 if (n_words == 2)
11353 cum->words += cum->words & 1;
11354 cum->words += n_words;
11355 }
11356
11357 /* Note: we continue to accumulate gregno even after we've started
11358 spilling to the stack; this tells expand_builtin_saveregs that
11359 spilling to the stack has started. */
11360 cum->sysv_gregno = gregno + n_words;
11361 }
11362
11363 if (TARGET_DEBUG_ARG)
11364 {
11365 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11366 cum->words, cum->fregno);
11367 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11368 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11369 fprintf (stderr, "mode = %4s, named = %d\n",
11370 GET_MODE_NAME (mode), named);
11371 }
11372 }
11373 else
11374 {
11375 int n_words = rs6000_arg_size (mode, type);
11376 int start_words = cum->words;
11377 int align_words = rs6000_parm_start (mode, type, start_words);
11378
11379 cum->words = align_words + n_words;
11380
11381 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11382 {
11383 /* _Decimal128 must be passed in an even/odd float register pair.
11384 This assumes that the register number is odd when fregno is
11385 odd. */
11386 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11387 cum->fregno++;
11388 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11389 }
11390
11391 if (TARGET_DEBUG_ARG)
11392 {
11393 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11394 cum->words, cum->fregno);
11395 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11396 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11397 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11398 named, align_words - start_words, depth);
11399 }
11400 }
11401 }
11402
11403 static void
11404 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11405 const_tree type, bool named)
11406 {
11407 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11408 0);
11409 }
11410
11411 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11412 structure between cum->intoffset and bitpos to integer registers. */
11413
11414 static void
11415 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11416 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11417 {
11418 machine_mode mode;
11419 unsigned int regno;
11420 unsigned int startbit, endbit;
11421 int this_regno, intregs, intoffset;
11422 rtx reg;
11423
11424 if (cum->intoffset == -1)
11425 return;
11426
11427 intoffset = cum->intoffset;
11428 cum->intoffset = -1;
11429
11430 /* If this is the trailing part of a word, try to only load that
11431 much into the register. Otherwise load the whole register. Note
11432 that in the latter case we may pick up unwanted bits. It's not a
11433 problem at the moment, but we may wish to revisit this. */
11434
11435 if (intoffset % BITS_PER_WORD != 0)
11436 {
11437 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11438 if (!int_mode_for_size (bits, 0).exists (&mode))
11439 {
11440 /* We couldn't find an appropriate mode, which happens,
11441 e.g., in packed structs when there are 3 bytes to load.
11442 Move intoffset back to the beginning of the word in this
11443 case. */
11444 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11445 mode = word_mode;
11446 }
11447 }
11448 else
11449 mode = word_mode;
11450
11451 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11452 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11453 intregs = (endbit - startbit) / BITS_PER_WORD;
11454 this_regno = cum->words + intoffset / BITS_PER_WORD;
11455
11456 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11457 cum->use_stack = 1;
11458
11459 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11460 if (intregs <= 0)
11461 return;
11462
11463 intoffset /= BITS_PER_UNIT;
11464 do
11465 {
11466 regno = GP_ARG_MIN_REG + this_regno;
11467 reg = gen_rtx_REG (mode, regno);
11468 rvec[(*k)++] =
11469 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11470
11471 this_regno += 1;
11472 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11473 mode = word_mode;
11474 intregs -= 1;
11475 }
11476 while (intregs > 0);
11477 }
11478
11479 /* Recursive workhorse for the following. */
11480
11481 static void
11482 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11483 HOST_WIDE_INT startbitpos, rtx rvec[],
11484 int *k)
11485 {
11486 tree f;
11487
11488 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11489 if (TREE_CODE (f) == FIELD_DECL)
11490 {
11491 HOST_WIDE_INT bitpos = startbitpos;
11492 tree ftype = TREE_TYPE (f);
11493 machine_mode mode;
11494 if (ftype == error_mark_node)
11495 continue;
11496 mode = TYPE_MODE (ftype);
11497
11498 if (DECL_SIZE (f) != 0
11499 && tree_fits_uhwi_p (bit_position (f)))
11500 bitpos += int_bit_position (f);
11501
11502 /* ??? FIXME: else assume zero offset. */
11503
11504 if (TREE_CODE (ftype) == RECORD_TYPE)
11505 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11506 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11507 {
11508 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11509 #if 0
11510 switch (mode)
11511 {
11512 case E_SCmode: mode = SFmode; break;
11513 case E_DCmode: mode = DFmode; break;
11514 case E_TCmode: mode = TFmode; break;
11515 default: break;
11516 }
11517 #endif
11518 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11519 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11520 {
11521 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11522 && (mode == TFmode || mode == TDmode));
11523 /* Long double or _Decimal128 split over regs and memory. */
11524 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11525 cum->use_stack = 1;
11526 }
11527 rvec[(*k)++]
11528 = gen_rtx_EXPR_LIST (VOIDmode,
11529 gen_rtx_REG (mode, cum->fregno++),
11530 GEN_INT (bitpos / BITS_PER_UNIT));
11531 if (FLOAT128_2REG_P (mode))
11532 cum->fregno++;
11533 }
11534 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11535 {
11536 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11537 rvec[(*k)++]
11538 = gen_rtx_EXPR_LIST (VOIDmode,
11539 gen_rtx_REG (mode, cum->vregno++),
11540 GEN_INT (bitpos / BITS_PER_UNIT));
11541 }
11542 else if (cum->intoffset == -1)
11543 cum->intoffset = bitpos;
11544 }
11545 }
11546
11547 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11548 the register(s) to be used for each field and subfield of a struct
11549 being passed by value, along with the offset of where the
11550 register's value may be found in the block. FP fields go in FP
11551 register, vector fields go in vector registers, and everything
11552 else goes in int registers, packed as in memory.
11553
11554 This code is also used for function return values. RETVAL indicates
11555 whether this is the case.
11556
11557 Much of this is taken from the SPARC V9 port, which has a similar
11558 calling convention. */
11559
11560 static rtx
11561 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11562 bool named, bool retval)
11563 {
11564 rtx rvec[FIRST_PSEUDO_REGISTER];
11565 int k = 1, kbase = 1;
11566 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11567 /* This is a copy; modifications are not visible to our caller. */
11568 CUMULATIVE_ARGS copy_cum = *orig_cum;
11569 CUMULATIVE_ARGS *cum = &copy_cum;
11570
11571 /* Pad to 16 byte boundary if needed. */
11572 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11573 && (cum->words % 2) != 0)
11574 cum->words++;
11575
11576 cum->intoffset = 0;
11577 cum->use_stack = 0;
11578 cum->named = named;
11579
11580 /* Put entries into rvec[] for individual FP and vector fields, and
11581 for the chunks of memory that go in int regs. Note we start at
11582 element 1; 0 is reserved for an indication of using memory, and
11583 may or may not be filled in below. */
11584 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11585 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11586
11587 /* If any part of the struct went on the stack put all of it there.
11588 This hack is because the generic code for
11589 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11590 parts of the struct are not at the beginning. */
11591 if (cum->use_stack)
11592 {
11593 if (retval)
11594 return NULL_RTX; /* doesn't go in registers at all */
11595 kbase = 0;
11596 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11597 }
11598 if (k > 1 || cum->use_stack)
11599 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11600 else
11601 return NULL_RTX;
11602 }
11603
11604 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11605
11606 static rtx
11607 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11608 int align_words)
11609 {
11610 int n_units;
11611 int i, k;
11612 rtx rvec[GP_ARG_NUM_REG + 1];
11613
11614 if (align_words >= GP_ARG_NUM_REG)
11615 return NULL_RTX;
11616
11617 n_units = rs6000_arg_size (mode, type);
11618
11619 /* Optimize the simple case where the arg fits in one gpr, except in
11620 the case of BLKmode due to assign_parms assuming that registers are
11621 BITS_PER_WORD wide. */
11622 if (n_units == 0
11623 || (n_units == 1 && mode != BLKmode))
11624 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11625
11626 k = 0;
11627 if (align_words + n_units > GP_ARG_NUM_REG)
11628 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11629 using a magic NULL_RTX component.
11630 This is not strictly correct. Only some of the arg belongs in
11631 memory, not all of it. However, the normal scheme using
11632 function_arg_partial_nregs can result in unusual subregs, eg.
11633 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11634 store the whole arg to memory is often more efficient than code
11635 to store pieces, and we know that space is available in the right
11636 place for the whole arg. */
11637 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11638
11639 i = 0;
11640 do
11641 {
11642 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11643 rtx off = GEN_INT (i++ * 4);
11644 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11645 }
11646 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11647
11648 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11649 }
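/* A sketch of the PARALLEL built above: a DFmode argument arriving with
   align_words == 7 (one GPR left) has n_units == 2, so the result pairs
   the NULL_RTX memory marker with (reg:SI 10) at offset 0; the first
   half travels in r10 and the rest goes to memory.  */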
11650
11651 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11652 but must also be copied into the parameter save area starting at
11653 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11654 to the GPRs and/or memory. Return the number of elements used. */
11655
11656 static int
11657 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11658 int align_words, rtx *rvec)
11659 {
11660 int k = 0;
11661
11662 if (align_words < GP_ARG_NUM_REG)
11663 {
11664 int n_words = rs6000_arg_size (mode, type);
11665
11666 if (align_words + n_words > GP_ARG_NUM_REG
11667 || mode == BLKmode
11668 || (TARGET_32BIT && TARGET_POWERPC64))
11669 {
11670 /* If this is partially on the stack, then we only
11671 include the portion actually in registers here. */
11672 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11673 int i = 0;
11674
11675 if (align_words + n_words > GP_ARG_NUM_REG)
11676 {
11677 /* Not all of the arg fits in gprs. Say that it goes in memory
11678 too, using a magic NULL_RTX component. Also see comment in
11679 rs6000_mixed_function_arg for why the normal
11680 function_arg_partial_nregs scheme doesn't work in this case. */
11681 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11682 }
11683
11684 do
11685 {
11686 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11687 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11688 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11689 }
11690 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11691 }
11692 else
11693 {
11694 /* The whole arg fits in gprs. */
11695 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11696 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11697 }
11698 }
11699 else
11700 {
11701 /* It's entirely in memory. */
11702 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11703 }
11704
11705 return k;
11706 }
11707
11708 /* RVEC is a vector of K components of an argument of mode MODE.
11709 Construct the final function_arg return value from it. */
11710
11711 static rtx
11712 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11713 {
11714 gcc_assert (k >= 1);
11715
11716 /* Avoid returning a PARALLEL in the trivial cases. */
11717 if (k == 1)
11718 {
11719 if (XEXP (rvec[0], 0) == NULL_RTX)
11720 return NULL_RTX;
11721
11722 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11723 return XEXP (rvec[0], 0);
11724 }
11725
11726 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11727 }
11728
11729 /* Determine where to put an argument to a function.
11730 Value is zero to push the argument on the stack,
11731 or a hard register in which to store the argument.
11732
11733 MODE is the argument's machine mode.
11734 TYPE is the data type of the argument (as a tree).
11735 This is null for libcalls where that information may
11736 not be available.
11737 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11738 the preceding args and about the function being called. It is
11739 not modified in this routine.
11740 NAMED is nonzero if this argument is a named parameter
11741 (otherwise it is an extra parameter matching an ellipsis).
11742
11743 On RS/6000 the first eight words of non-FP args are normally in registers
11744 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11745 Under V.4, the first 8 FP args are in registers.
11746
11747 If this is floating-point and no prototype is specified, we use
11748 both an FP and integer register (or possibly FP reg and stack). Library
11749 functions (when CALL_LIBCALL is set) always have the proper types for args,
11750 so we can pass the FP value just in one register. emit_library_function
11751 doesn't support PARALLEL anyway.
11752
11753 Note that for args passed by reference, function_arg will be called
11754 with MODE and TYPE set to that of the pointer to the arg, not the arg
11755 itself. */
11756
11757 static rtx
11758 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11759 const_tree type, bool named)
11760 {
11761 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11762 enum rs6000_abi abi = DEFAULT_ABI;
11763 machine_mode elt_mode;
11764 int n_elts;
11765
11766 /* Return a marker to indicate whether the bit in CR1 that V.4 uses
11767 to say fp args were passed in registers needs to be set or cleared.
11768 Assume that we don't need the marker for software floating point,
11769 or compiler generated library calls. */
11770 if (mode == VOIDmode)
11771 {
11772 if (abi == ABI_V4
11773 && (cum->call_cookie & CALL_LIBCALL) == 0
11774 && (cum->stdarg
11775 || (cum->nargs_prototype < 0
11776 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11777 && TARGET_HARD_FLOAT)
11778 return GEN_INT (cum->call_cookie
11779 | ((cum->fregno == FP_ARG_MIN_REG)
11780 ? CALL_V4_SET_FP_ARGS
11781 : CALL_V4_CLEAR_FP_ARGS));
11782
11783 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11784 }
11785
11786 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11787
11788 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11789 {
11790 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11791 if (rslt != NULL_RTX)
11792 return rslt;
11793 /* Else fall through to usual handling. */
11794 }
11795
11796 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11797 {
11798 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11799 rtx r, off;
11800 int i, k = 0;
11801
11802 /* Do we also need to pass this argument in the parameter save area?
11803 Library support functions for IEEE 128-bit are assumed to not need the
11804 value passed both in GPRs and in vector registers. */
11805 if (TARGET_64BIT && !cum->prototype
11806 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11807 {
11808 int align_words = ROUND_UP (cum->words, 2);
11809 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11810 }
11811
11812 /* Describe where this argument goes in the vector registers. */
11813 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11814 {
11815 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11816 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11817 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11818 }
11819
11820 return rs6000_finish_function_arg (mode, rvec, k);
11821 }
11822 else if (TARGET_ALTIVEC_ABI
11823 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11824 || (type && TREE_CODE (type) == VECTOR_TYPE
11825 && int_size_in_bytes (type) == 16)))
11826 {
11827 if (named || abi == ABI_V4)
11828 return NULL_RTX;
11829 else
11830 {
11831 /* Vector parameters to varargs functions under AIX or Darwin
11832 get passed in memory and possibly also in GPRs. */
11833 int align, align_words, n_words;
11834 machine_mode part_mode;
11835
11836 /* Vector parameters must be 16-byte aligned. In 32-bit
11837 mode this means we need to take into account the offset
11838 to the parameter save area. In 64-bit mode, they just
11839 have to start on an even word, since the parameter save
11840 area is 16-byte aligned. */
11841 if (TARGET_32BIT)
11842 align = -(rs6000_parm_offset () + cum->words) & 3;
11843 else
11844 align = cum->words & 1;
11845 align_words = cum->words + align;
11846
11847 /* Out of registers? Memory, then. */
11848 if (align_words >= GP_ARG_NUM_REG)
11849 return NULL_RTX;
11850
11851 if (TARGET_32BIT && TARGET_POWERPC64)
11852 return rs6000_mixed_function_arg (mode, type, align_words);
11853
11854 /* The vector value goes in GPRs. Only the part of the
11855 value in GPRs is reported here. */
11856 part_mode = mode;
11857 n_words = rs6000_arg_size (mode, type);
11858 if (align_words + n_words > GP_ARG_NUM_REG)
11859 /* Fortunately, there are only two possibilities, the value
11860 is either wholly in GPRs or half in GPRs and half not. */
11861 part_mode = DImode;
11862
11863 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11864 }
11865 }
11866
11867 else if (abi == ABI_V4)
11868 {
11869 if (abi_v4_pass_in_fpr (mode, named))
11870 {
11871 /* _Decimal128 must use an even/odd register pair. This assumes
11872 that the register number is odd when fregno is odd. */
11873 if (mode == TDmode && (cum->fregno % 2) == 1)
11874 cum->fregno++;
11875
11876 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11877 <= FP_ARG_V4_MAX_REG)
11878 return gen_rtx_REG (mode, cum->fregno);
11879 else
11880 return NULL_RTX;
11881 }
11882 else
11883 {
11884 int n_words = rs6000_arg_size (mode, type);
11885 int gregno = cum->sysv_gregno;
11886
11887 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11888 So is any other 2-word item, such as complex int, due to a
11889 historical mistake. */
11890 if (n_words == 2)
11891 gregno += (1 - gregno) & 1;
11892
11893 /* Multi-reg args are not split between registers and stack. */
11894 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11895 return NULL_RTX;
11896
11897 if (TARGET_32BIT && TARGET_POWERPC64)
11898 return rs6000_mixed_function_arg (mode, type,
11899 gregno - GP_ARG_MIN_REG);
11900 return gen_rtx_REG (mode, gregno);
11901 }
11902 }
11903 else
11904 {
11905 int align_words = rs6000_parm_start (mode, type, cum->words);
11906
11907 /* _Decimal128 must be passed in an even/odd float register pair.
11908 This assumes that the register number is odd when fregno is odd. */
11909 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11910 cum->fregno++;
11911
11912 if (USE_FP_FOR_ARG_P (cum, elt_mode))
11913 {
11914 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11915 rtx r, off;
11916 int i, k = 0;
11917 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11918 int fpr_words;
11919
11920 /* Do we also need to pass this argument in the parameter
11921 save area? */
11922 if (type && (cum->nargs_prototype <= 0
11923 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11924 && TARGET_XL_COMPAT
11925 && align_words >= GP_ARG_NUM_REG)))
11926 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11927
11928 /* Describe where this argument goes in the fprs. */
11929 for (i = 0; i < n_elts
11930 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11931 {
11932 /* Check if the argument is split over registers and memory.
11933 This can only ever happen for long double or _Decimal128;
11934 complex types are handled via split_complex_arg. */
11935 machine_mode fmode = elt_mode;
11936 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11937 {
11938 gcc_assert (FLOAT128_2REG_P (fmode));
11939 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11940 }
11941
11942 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11943 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11944 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11945 }
11946
11947 /* If there were not enough FPRs to hold the argument, the rest
11948 usually goes into memory. However, if the current position
11949 is still within the register parameter area, a portion may
11950 actually have to go into GPRs.
11951
11952 Note that it may happen that the portion of the argument
11953 passed in the first "half" of the first GPR was already
11954 passed in the last FPR as well.
11955
11956 For unnamed arguments, we already set up GPRs to cover the
11957 whole argument in rs6000_psave_function_arg, so there is
11958 nothing further to do at this point. */
11959 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11960 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11961 && cum->nargs_prototype > 0)
11962 {
11963 static bool warned;
11964
11965 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11966 int n_words = rs6000_arg_size (mode, type);
11967
11968 align_words += fpr_words;
11969 n_words -= fpr_words;
11970
11971 do
11972 {
11973 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11974 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11975 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11976 }
11977 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11978
11979 if (!warned && warn_psabi)
11980 {
11981 warned = true;
11982 inform (input_location,
11983 "the ABI of passing homogeneous float aggregates"
11984 " has changed in GCC 5");
11985 }
11986 }
11987
11988 return rs6000_finish_function_arg (mode, rvec, k);
11989 }
11990 else if (align_words < GP_ARG_NUM_REG)
11991 {
11992 if (TARGET_32BIT && TARGET_POWERPC64)
11993 return rs6000_mixed_function_arg (mode, type, align_words);
11994
11995 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11996 }
11997 else
11998 return NULL_RTX;
11999 }
12000 }
12001 \f
12002 /* For an arg passed partly in registers and partly in memory, this is
12003 the number of bytes passed in registers. For args passed entirely in
12004 registers or entirely in memory, zero. When an arg is described by a
12005 PARALLEL, perhaps using more than one register type, this function
12006 returns the number of bytes used by the first element of the PARALLEL. */
12007
12008 static int
12009 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12010 tree type, bool named)
12011 {
12012 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12013 bool passed_in_gprs = true;
12014 int ret = 0;
12015 int align_words;
12016 machine_mode elt_mode;
12017 int n_elts;
12018
12019 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12020
12021 if (DEFAULT_ABI == ABI_V4)
12022 return 0;
12023
12024 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12025 {
12026 /* If we are passing this arg in the fixed parameter save area (gprs or
12027 memory) as well as VRs, we do not use the partial bytes mechanism;
12028 instead, rs6000_function_arg will return a PARALLEL including a memory
12029 element as necessary. Library support functions for IEEE 128-bit are
12030 assumed to not need the value passed both in GPRs and in vector
12031 registers. */
12032 if (TARGET_64BIT && !cum->prototype
12033 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12034 return 0;
12035
12036 /* Otherwise, we pass in VRs only. Check for partial copies. */
12037 passed_in_gprs = false;
12038 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12039 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12040 }
12041
12042 /* In this complicated case we just disable the partial_nregs code. */
12043 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12044 return 0;
12045
12046 align_words = rs6000_parm_start (mode, type, cum->words);
12047
12048 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12049 {
12050 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12051
12052 /* If we are passing this arg in the fixed parameter save area
12053 (gprs or memory) as well as FPRs, we do not use the partial
12054 bytes mechanism; instead, rs6000_function_arg will return a
12055 PARALLEL including a memory element as necessary. */
12056 if (type
12057 && (cum->nargs_prototype <= 0
12058 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12059 && TARGET_XL_COMPAT
12060 && align_words >= GP_ARG_NUM_REG)))
12061 return 0;
12062
12063 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12064 passed_in_gprs = false;
12065 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12066 {
12067 /* Compute number of bytes / words passed in FPRs. If there
12068 is still space available in the register parameter area
12069 *after* that amount, a part of the argument will be passed
12070 in GPRs. In that case, the total amount passed in any
12071 registers is equal to the amount that would have been passed
12072 in GPRs if everything were passed there, so we fall back to
12073 the GPR code below to compute the appropriate value. */
12074 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12075 * MIN (8, GET_MODE_SIZE (elt_mode)));
12076 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12077
12078 if (align_words + fpr_words < GP_ARG_NUM_REG)
12079 passed_in_gprs = true;
12080 else
12081 ret = fpr;
12082 }
12083 }
12084
12085 if (passed_in_gprs
12086 && align_words < GP_ARG_NUM_REG
12087 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12088 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12089
12090 if (ret != 0 && TARGET_DEBUG_ARG)
12091 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12092
12093 return ret;
12094 }
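
/* A worked example of the computation above (illustrative only; it
   assumes a 64-bit AIX/ELFv2 target, so GP_ARG_NUM_REG == 8 and a word
   is 8 bytes).  For a 16-byte argument whose first word lands in the
   last GPR slot (align_words == 7):

     ret = (GP_ARG_NUM_REG - align_words) * 8
         = (8 - 7) * 8
         = 8;

   i.e. 8 bytes travel in r10 and the remaining 8 bytes go to memory.  */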
12095 \f
12096 /* A C expression that indicates when an argument must be passed by
12097 reference. If nonzero for an argument, a copy of that argument is
12098 made in memory and a pointer to the argument is passed instead of
12099 the argument itself. The pointer is passed in whatever way is
12100 appropriate for passing a pointer to that type.
12101
12102 Under V.4, aggregates and long double are passed by reference.
12103
12104 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12105 reference unless the AltiVec vector extension ABI is in force.
12106
12107 As an extension to all ABIs, variable sized types are passed by
12108 reference. */
12109
12110 static bool
12111 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12112 machine_mode mode, const_tree type,
12113 bool named ATTRIBUTE_UNUSED)
12114 {
12115 if (!type)
12116 return 0;
12117
12118 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12119 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12120 {
12121 if (TARGET_DEBUG_ARG)
12122 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12123 return 1;
12124 }
12125
12126 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12127 {
12128 if (TARGET_DEBUG_ARG)
12129 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12130 return 1;
12131 }
12132
12133 if (int_size_in_bytes (type) < 0)
12134 {
12135 if (TARGET_DEBUG_ARG)
12136 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12137 return 1;
12138 }
12139
12140 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12141 modes only exist for GCC vector types if -maltivec. */
12142 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12143 {
12144 if (TARGET_DEBUG_ARG)
12145 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12146 return 1;
12147 }
12148
12149 /* Pass synthetic vectors in memory. */
12150 if (TREE_CODE (type) == VECTOR_TYPE
12151 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12152 {
12153 static bool warned_for_pass_big_vectors = false;
12154 if (TARGET_DEBUG_ARG)
12155 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12156 if (!warned_for_pass_big_vectors)
12157 {
12158 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12159 "non-standard ABI extension with no compatibility "
12160 "guarantee");
12161 warned_for_pass_big_vectors = true;
12162 }
12163 return 1;
12164 }
12165
12166 return 0;
12167 }
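
/* Caller-side illustration (a sketch, not compiled code): when the
   predicate above returns true (e.g. for any aggregate under the V.4
   ABI), a call such as

     void f (struct s arg);
     f (x);

   is lowered roughly as

     struct s tmp = x;   // caller makes its own copy
     f (&tmp);           // only the address is passed

   with the pointer itself passed in the usual GPR/stack manner.  */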
12168
12169 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12170 already processed. Return true if the parameter must be passed
12171 (fully or partially) on the stack. */
12172
12173 static bool
12174 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12175 {
12176 machine_mode mode;
12177 int unsignedp;
12178 rtx entry_parm;
12179
12180 /* Catch errors. */
12181 if (type == NULL || type == error_mark_node)
12182 return true;
12183
12184 /* Handle types with no storage requirement. */
12185 if (TYPE_MODE (type) == VOIDmode)
12186 return false;
12187
12188 /* Handle complex types. */
12189 if (TREE_CODE (type) == COMPLEX_TYPE)
12190 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12191 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12192
12193 /* Handle transparent aggregates. */
12194 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12195 && TYPE_TRANSPARENT_AGGR (type))
12196 type = TREE_TYPE (first_field (type));
12197
12198 /* See if this arg was passed by invisible reference. */
12199 if (pass_by_reference (get_cumulative_args (args_so_far),
12200 TYPE_MODE (type), type, true))
12201 type = build_pointer_type (type);
12202
12203 /* Find mode as it is passed by the ABI. */
12204 unsignedp = TYPE_UNSIGNED (type);
12205 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12206
12207 /* If we must pass in stack, we need a stack. */
12208 if (rs6000_must_pass_in_stack (mode, type))
12209 return true;
12210
12211 /* If there is no incoming register, we need a stack. */
12212 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12213 if (entry_parm == NULL)
12214 return true;
12215
12216 /* Likewise if we need to pass both in registers and on the stack. */
12217 if (GET_CODE (entry_parm) == PARALLEL
12218 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12219 return true;
12220
12221 /* Also true if we're partially in registers and partially not. */
12222 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12223 return true;
12224
12225 /* Update info on where next arg arrives in registers. */
12226 rs6000_function_arg_advance (args_so_far, mode, type, true);
12227 return false;
12228 }
12229
12230 /* Return true if FUN has no prototype, has a variable argument
12231 list, or passes any parameter in memory. */
12232
12233 static bool
12234 rs6000_function_parms_need_stack (tree fun, bool incoming)
12235 {
12236 tree fntype, result;
12237 CUMULATIVE_ARGS args_so_far_v;
12238 cumulative_args_t args_so_far;
12239
12240 if (!fun)
12241 /* Must be a libcall, all of which only use reg parms. */
12242 return false;
12243
12244 fntype = fun;
12245 if (!TYPE_P (fun))
12246 fntype = TREE_TYPE (fun);
12247
12248 /* Varargs functions need the parameter save area. */
12249 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12250 return true;
12251
12252 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12253 args_so_far = pack_cumulative_args (&args_so_far_v);
12254
12255 /* When incoming, we will have been passed the function decl.
12256 It is necessary to use the decl to handle K&R style functions,
12257 where TYPE_ARG_TYPES may not be available. */
12258 if (incoming)
12259 {
12260 gcc_assert (DECL_P (fun));
12261 result = DECL_RESULT (fun);
12262 }
12263 else
12264 result = TREE_TYPE (fntype);
12265
12266 if (result && aggregate_value_p (result, fntype))
12267 {
12268 if (!TYPE_P (result))
12269 result = TREE_TYPE (result);
12270 result = build_pointer_type (result);
12271 rs6000_parm_needs_stack (args_so_far, result);
12272 }
12273
12274 if (incoming)
12275 {
12276 tree parm;
12277
12278 for (parm = DECL_ARGUMENTS (fun);
12279 parm && parm != void_list_node;
12280 parm = TREE_CHAIN (parm))
12281 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12282 return true;
12283 }
12284 else
12285 {
12286 function_args_iterator args_iter;
12287 tree arg_type;
12288
12289 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12290 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12291 return true;
12292 }
12293
12294 return false;
12295 }
12296
12297 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12298 usually a constant depending on the ABI. However, in the ELFv2 ABI
12299 the register parameter area is optional when calling a function that
12300 has a prototype in scope, has no variable argument list, and passes
12301 all parameters in registers. */
12302
12303 int
12304 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12305 {
12306 int reg_parm_stack_space;
12307
12308 switch (DEFAULT_ABI)
12309 {
12310 default:
12311 reg_parm_stack_space = 0;
12312 break;
12313
12314 case ABI_AIX:
12315 case ABI_DARWIN:
12316 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12317 break;
12318
12319 case ABI_ELFv2:
12320 /* ??? Recomputing this every time is a bit expensive. Is there
12321 a place to cache this information? */
12322 if (rs6000_function_parms_need_stack (fun, incoming))
12323 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12324 else
12325 reg_parm_stack_space = 0;
12326 break;
12327 }
12328
12329 return reg_parm_stack_space;
12330 }
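
/* For reference, the 64/32 constants above are the eight argument GPRs
   (r3..r10) times the word size: 8 * 8 == 64 bytes in 64-bit mode and
   8 * 4 == 32 bytes in 32-bit mode, i.e. the size of the fixed
   parameter save area these ABIs reserve in the caller's frame.  */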
12331
12332 static void
12333 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12334 {
12335 int i;
12336 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12337
12338 if (nregs == 0)
12339 return;
12340
12341 for (i = 0; i < nregs; i++)
12342 {
12343 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12344 if (reload_completed)
12345 {
12346 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12347 tem = NULL_RTX;
12348 else
12349 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12350 i * GET_MODE_SIZE (reg_mode));
12351 }
12352 else
12353 tem = replace_equiv_address (tem, XEXP (tem, 0));
12354
12355 gcc_assert (tem);
12356
12357 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12358 }
12359 }
12360 \f
12361 /* Perform any actions needed for a function that is receiving a
12362 variable number of arguments.
12363
12364 CUM is as above.
12365
12366 MODE and TYPE are the mode and type of the current parameter.
12367
12368 PRETEND_SIZE is a variable that should be set to the amount of stack
12369 that must be pushed by the prolog to pretend that our caller pushed
12370 it.
12371
12372 Normally, this macro will push all remaining incoming registers on the
12373 stack and set PRETEND_SIZE to the length of the registers pushed. */
12374
12375 static void
12376 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12377 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12378 int no_rtl)
12379 {
12380 CUMULATIVE_ARGS next_cum;
12381 int reg_size = TARGET_32BIT ? 4 : 8;
12382 rtx save_area = NULL_RTX, mem;
12383 int first_reg_offset;
12384 alias_set_type set;
12385
12386 /* Skip the last named argument. */
12387 next_cum = *get_cumulative_args (cum);
12388 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12389
12390 if (DEFAULT_ABI == ABI_V4)
12391 {
12392 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12393
12394 if (! no_rtl)
12395 {
12396 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12397 HOST_WIDE_INT offset = 0;
12398
12399 /* Try to optimize the size of the varargs save area.
12400 The ABI requires that ap.reg_save_area is doubleword
12401 aligned, but we don't need to allocate space for all
12402 the bytes, only those to which we will actually save
12403 anything. */
12404 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12405 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12406 if (TARGET_HARD_FLOAT
12407 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12408 && cfun->va_list_fpr_size)
12409 {
12410 if (gpr_reg_num)
12411 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12412 * UNITS_PER_FP_WORD;
12413 if (cfun->va_list_fpr_size
12414 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12415 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12416 else
12417 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12418 * UNITS_PER_FP_WORD;
12419 }
12420 if (gpr_reg_num)
12421 {
12422 offset = -((first_reg_offset * reg_size) & ~7);
12423 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12424 {
12425 gpr_reg_num = cfun->va_list_gpr_size;
12426 if (reg_size == 4 && (first_reg_offset & 1))
12427 gpr_reg_num++;
12428 }
12429 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12430 }
12431 else if (fpr_size)
12432 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12433 * UNITS_PER_FP_WORD
12434 - (int) (GP_ARG_NUM_REG * reg_size);
12435
12436 if (gpr_size + fpr_size)
12437 {
12438 rtx reg_save_area
12439 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12440 gcc_assert (GET_CODE (reg_save_area) == MEM);
12441 reg_save_area = XEXP (reg_save_area, 0);
12442 if (GET_CODE (reg_save_area) == PLUS)
12443 {
12444 gcc_assert (XEXP (reg_save_area, 0)
12445 == virtual_stack_vars_rtx);
12446 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12447 offset += INTVAL (XEXP (reg_save_area, 1));
12448 }
12449 else
12450 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12451 }
12452
12453 cfun->machine->varargs_save_offset = offset;
12454 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12455 }
12456 }
12457 else
12458 {
12459 first_reg_offset = next_cum.words;
12460 save_area = crtl->args.internal_arg_pointer;
12461
12462 if (targetm.calls.must_pass_in_stack (mode, type))
12463 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12464 }
12465
12466 set = get_varargs_alias_set ();
12467 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12468 && cfun->va_list_gpr_size)
12469 {
12470 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12471
12472 if (va_list_gpr_counter_field)
12473 /* V4 va_list_gpr_size counts number of registers needed. */
12474 n_gpr = cfun->va_list_gpr_size;
12475 else
12476 /* char * va_list instead counts number of bytes needed. */
12477 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12478
12479 if (nregs > n_gpr)
12480 nregs = n_gpr;
12481
12482 mem = gen_rtx_MEM (BLKmode,
12483 plus_constant (Pmode, save_area,
12484 first_reg_offset * reg_size));
12485 MEM_NOTRAP_P (mem) = 1;
12486 set_mem_alias_set (mem, set);
12487 set_mem_align (mem, BITS_PER_WORD);
12488
12489 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12490 nregs);
12491 }
12492
12493 /* Save FP registers if needed. */
12494 if (DEFAULT_ABI == ABI_V4
12495 && TARGET_HARD_FLOAT
12496 && ! no_rtl
12497 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12498 && cfun->va_list_fpr_size)
12499 {
12500 int fregno = next_cum.fregno, nregs;
12501 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12502 rtx lab = gen_label_rtx ();
12503 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12504 * UNITS_PER_FP_WORD);
12505
12506 emit_jump_insn
12507 (gen_rtx_SET (pc_rtx,
12508 gen_rtx_IF_THEN_ELSE (VOIDmode,
12509 gen_rtx_NE (VOIDmode, cr1,
12510 const0_rtx),
12511 gen_rtx_LABEL_REF (VOIDmode, lab),
12512 pc_rtx)));
12513
12514 for (nregs = 0;
12515 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12516 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12517 {
12518 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12519 plus_constant (Pmode, save_area, off));
12520 MEM_NOTRAP_P (mem) = 1;
12521 set_mem_alias_set (mem, set);
12522 set_mem_align (mem, GET_MODE_ALIGNMENT (
12523 TARGET_HARD_FLOAT ? DFmode : SFmode));
12524 emit_move_insn (mem, gen_rtx_REG (
12525 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12526 }
12527
12528 emit_label (lab);
12529 }
12530 }
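
/* Sketch of the V.4 register save area laid out above (offsets read off
   the code; reg_size == 4 for 32-bit, UNITS_PER_FP_WORD == 8):

     reg_save_area +  0 .. 31   r3 .. r10  (GP_ARG_NUM_REG * reg_size)
     reg_save_area + 32 .. 39   f1
     reg_save_area + 40 .. 47   f2
     ...                        up to f8

   The conditional branch on cr1 emitted before the FP stores implements
   the SVR4 convention that the caller sets CR bit 6 when it passes
   floating-point arguments in registers; when that bit is clear, the
   callee can skip dumping the FPRs entirely.  */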
12531
12532 /* Create the va_list data type. */
12533
12534 static tree
12535 rs6000_build_builtin_va_list (void)
12536 {
12537 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12538
12539 /* For AIX, prefer 'char *' because that's what the system
12540 header files like. */
12541 if (DEFAULT_ABI != ABI_V4)
12542 return build_pointer_type (char_type_node);
12543
12544 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12545 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12546 get_identifier ("__va_list_tag"), record);
12547
12548 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12549 unsigned_char_type_node);
12550 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12551 unsigned_char_type_node);
12552 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12553 every user file. */
12554 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12555 get_identifier ("reserved"), short_unsigned_type_node);
12556 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12557 get_identifier ("overflow_arg_area"),
12558 ptr_type_node);
12559 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12560 get_identifier ("reg_save_area"),
12561 ptr_type_node);
12562
12563 va_list_gpr_counter_field = f_gpr;
12564 va_list_fpr_counter_field = f_fpr;
12565
12566 DECL_FIELD_CONTEXT (f_gpr) = record;
12567 DECL_FIELD_CONTEXT (f_fpr) = record;
12568 DECL_FIELD_CONTEXT (f_res) = record;
12569 DECL_FIELD_CONTEXT (f_ovf) = record;
12570 DECL_FIELD_CONTEXT (f_sav) = record;
12571
12572 TYPE_STUB_DECL (record) = type_decl;
12573 TYPE_NAME (record) = type_decl;
12574 TYPE_FIELDS (record) = f_gpr;
12575 DECL_CHAIN (f_gpr) = f_fpr;
12576 DECL_CHAIN (f_fpr) = f_res;
12577 DECL_CHAIN (f_res) = f_ovf;
12578 DECL_CHAIN (f_ovf) = f_sav;
12579
12580 layout_type (record);
12581
12582 /* The correct type is an array type of one element. */
12583 return build_array_type (record, build_index_type (size_zero_node));
12584 }
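
/* For reference, the record built above corresponds to this C-level
   declaration (a sketch; the field comments are a reading of the code,
   not quoted from the ABI document):

     typedef struct __va_list_tag
     {
       unsigned char gpr;         // index of the next gpr to use
       unsigned char fpr;         // index of the next fpr to use
       unsigned short reserved;   // named padding; keeps -Wpadded quiet
       void *overflow_arg_area;   // arguments that went to the stack
       void *reg_save_area;       // where the prologue dumped the regs
     } __va_list_tag;

   va_list itself is a one-element array of this record, so that it
   decays to a pointer when handed to another function.  */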
12585
12586 /* Implement va_start. */
12587
12588 static void
12589 rs6000_va_start (tree valist, rtx nextarg)
12590 {
12591 HOST_WIDE_INT words, n_gpr, n_fpr;
12592 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12593 tree gpr, fpr, ovf, sav, t;
12594
12595 /* Only SVR4 needs something special. */
12596 if (DEFAULT_ABI != ABI_V4)
12597 {
12598 std_expand_builtin_va_start (valist, nextarg);
12599 return;
12600 }
12601
12602 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12603 f_fpr = DECL_CHAIN (f_gpr);
12604 f_res = DECL_CHAIN (f_fpr);
12605 f_ovf = DECL_CHAIN (f_res);
12606 f_sav = DECL_CHAIN (f_ovf);
12607
12608 valist = build_simple_mem_ref (valist);
12609 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12610 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12611 f_fpr, NULL_TREE);
12612 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12613 f_ovf, NULL_TREE);
12614 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12615 f_sav, NULL_TREE);
12616
12617 /* Count number of gp and fp argument registers used. */
12618 words = crtl->args.info.words;
12619 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12620 GP_ARG_NUM_REG);
12621 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12622 FP_ARG_NUM_REG);
12623
12624 if (TARGET_DEBUG_ARG)
12625 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12626 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12627 words, n_gpr, n_fpr);
12628
12629 if (cfun->va_list_gpr_size)
12630 {
12631 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12632 build_int_cst (NULL_TREE, n_gpr));
12633 TREE_SIDE_EFFECTS (t) = 1;
12634 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12635 }
12636
12637 if (cfun->va_list_fpr_size)
12638 {
12639 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12640 build_int_cst (NULL_TREE, n_fpr));
12641 TREE_SIDE_EFFECTS (t) = 1;
12642 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12643
12644 #ifdef HAVE_AS_GNU_ATTRIBUTE
12645 if (call_ABI_of_interest (cfun->decl))
12646 rs6000_passes_float = true;
12647 #endif
12648 }
12649
12650 /* Find the overflow area. */
12651 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12652 if (words != 0)
12653 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12654 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12655 TREE_SIDE_EFFECTS (t) = 1;
12656 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12657
12658 /* If there were no va_arg invocations, don't set up the register
12659 save area. */
12660 if (!cfun->va_list_gpr_size
12661 && !cfun->va_list_fpr_size
12662 && n_gpr < GP_ARG_NUM_REG
12663 && n_fpr < FP_ARG_V4_MAX_REG)
12664 return;
12665
12666 /* Find the register save area. */
12667 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12668 if (cfun->machine->varargs_save_offset)
12669 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12670 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12671 TREE_SIDE_EFFECTS (t) = 1;
12672 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12673 }
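
/* Illustrative values (a sketch assuming 32-bit V.4 with hard float):
   for a function declared

     void f (int a, ...)

   the expansion above initializes the va_list roughly as

     ap->gpr = 1;                        // r3 held the named 'a'
     ap->fpr = 0;                        // no named FP arguments
     ap->overflow_arg_area = <arg ptr>;  // words == 0, nothing on stack
     ap->reg_save_area = <frame address computed for the save area>;

   The gpr/fpr counters start past the named arguments so that va_arg
   resumes exactly where the fixed parameters left off.  */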
12674
12675 /* Implement va_arg. */
12676
12677 static tree
12678 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12679 gimple_seq *post_p)
12680 {
12681 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12682 tree gpr, fpr, ovf, sav, reg, t, u;
12683 int size, rsize, n_reg, sav_ofs, sav_scale;
12684 tree lab_false, lab_over, addr;
12685 int align;
12686 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12687 int regalign = 0;
12688 gimple *stmt;
12689
12690 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12691 {
12692 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12693 return build_va_arg_indirect_ref (t);
12694 }
12695
12696 /* We need to deal with the fact that the Darwin ppc64 ABI is defined by an
12697 earlier version of GCC, with the property that it always applied alignment
12698 adjustments to the va-args (even for zero-sized types). The cheapest way
12699 to deal with this is to replicate the effect of the part of
12700 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12701 of relevance.
12702 We don't need to check for pass-by-reference because of the test above.
12703 We can return a simplified answer, since we know there's no offset to add. */
12704
12705 if (((TARGET_MACHO
12706 && rs6000_darwin64_abi)
12707 || DEFAULT_ABI == ABI_ELFv2
12708 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12709 && integer_zerop (TYPE_SIZE (type)))
12710 {
12711 unsigned HOST_WIDE_INT align, boundary;
12712 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12713 align = PARM_BOUNDARY / BITS_PER_UNIT;
12714 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12715 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12716 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12717 boundary /= BITS_PER_UNIT;
12718 if (boundary > align)
12719 {
12720 tree t;
12721 /* This updates arg ptr by the amount that would be necessary
12722 to align the zero-sized (but not zero-alignment) item. */
12723 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12724 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12725 gimplify_and_add (t, pre_p);
12726
12727 t = fold_convert (sizetype, valist_tmp);
12728 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12729 fold_convert (TREE_TYPE (valist),
12730 fold_build2 (BIT_AND_EXPR, sizetype, t,
12731 size_int (-boundary))));
12732 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12733 gimplify_and_add (t, pre_p);
12734 }
12735 /* Since it is zero-sized there's no increment for the item itself. */
12736 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12737 return build_va_arg_indirect_ref (valist_tmp);
12738 }
12739
12740 if (DEFAULT_ABI != ABI_V4)
12741 {
12742 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12743 {
12744 tree elem_type = TREE_TYPE (type);
12745 machine_mode elem_mode = TYPE_MODE (elem_type);
12746 int elem_size = GET_MODE_SIZE (elem_mode);
12747
12748 if (elem_size < UNITS_PER_WORD)
12749 {
12750 tree real_part, imag_part;
12751 gimple_seq post = NULL;
12752
12753 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12754 &post);
12755 /* Copy the value into a temporary, lest the formal temporary
12756 be reused out from under us. */
12757 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12758 gimple_seq_add_seq (pre_p, post);
12759
12760 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12761 post_p);
12762
12763 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12764 }
12765 }
12766
12767 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12768 }
12769
12770 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12771 f_fpr = DECL_CHAIN (f_gpr);
12772 f_res = DECL_CHAIN (f_fpr);
12773 f_ovf = DECL_CHAIN (f_res);
12774 f_sav = DECL_CHAIN (f_ovf);
12775
12776 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12777 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12778 f_fpr, NULL_TREE);
12779 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12780 f_ovf, NULL_TREE);
12781 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12782 f_sav, NULL_TREE);
12783
12784 size = int_size_in_bytes (type);
12785 rsize = (size + 3) / 4;
12786 int pad = 4 * rsize - size;
12787 align = 1;
12788
12789 machine_mode mode = TYPE_MODE (type);
12790 if (abi_v4_pass_in_fpr (mode, false))
12791 {
12792 /* FP args go in FP registers, if present. */
12793 reg = fpr;
12794 n_reg = (size + 7) / 8;
12795 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12796 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12797 if (mode != SFmode && mode != SDmode)
12798 align = 8;
12799 }
12800 else
12801 {
12802 /* Otherwise into GP registers. */
12803 reg = gpr;
12804 n_reg = rsize;
12805 sav_ofs = 0;
12806 sav_scale = 4;
12807 if (n_reg == 2)
12808 align = 8;
12809 }
12810
12811 /* Pull the value out of the saved registers.... */
12812
12813 lab_over = NULL;
12814 addr = create_tmp_var (ptr_type_node, "addr");
12815
12816 /* AltiVec vectors never go in registers when -mabi=altivec. */
12817 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12818 align = 16;
12819 else
12820 {
12821 lab_false = create_artificial_label (input_location);
12822 lab_over = create_artificial_label (input_location);
12823
12824 /* Long long is aligned in the registers. So is any other 2-gpr
12825 item such as complex int, due to a historical mistake. */
12826 u = reg;
12827 if (n_reg == 2 && reg == gpr)
12828 {
12829 regalign = 1;
12830 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12831 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12832 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12833 unshare_expr (reg), u);
12834 }
12835 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12836 reg number is 0 for f1, so we want to make it odd. */
12837 else if (reg == fpr && mode == TDmode)
12838 {
12839 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12840 build_int_cst (TREE_TYPE (reg), 1));
12841 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12842 }
12843
12844 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12845 t = build2 (GE_EXPR, boolean_type_node, u, t);
12846 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12847 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12848 gimplify_and_add (t, pre_p);
12849
12850 t = sav;
12851 if (sav_ofs)
12852 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12853
12854 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12855 build_int_cst (TREE_TYPE (reg), n_reg));
12856 u = fold_convert (sizetype, u);
12857 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12858 t = fold_build_pointer_plus (t, u);
12859
12860 /* _Decimal32 varargs are located in the second word of the 64-bit
12861 FP register for 32-bit binaries. */
12862 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12863 t = fold_build_pointer_plus_hwi (t, size);
12864
12865 /* Args are passed right-aligned. */
12866 if (BYTES_BIG_ENDIAN)
12867 t = fold_build_pointer_plus_hwi (t, pad);
12868
12869 gimplify_assign (addr, t, pre_p);
12870
12871 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12872
12873 stmt = gimple_build_label (lab_false);
12874 gimple_seq_add_stmt (pre_p, stmt);
12875
12876 if ((n_reg == 2 && !regalign) || n_reg > 2)
12877 {
12878 /* Ensure that we don't find any more args in regs.
12879 Alignment has been taken care of for the special cases. */
12880 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12881 }
12882 }
12883
12884 /* ... otherwise out of the overflow area. */
12885
12886 /* Care for on-stack alignment if needed. */
12887 t = ovf;
12888 if (align != 1)
12889 {
12890 t = fold_build_pointer_plus_hwi (t, align - 1);
12891 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12892 build_int_cst (TREE_TYPE (t), -align));
12893 }
12894
12895 /* Args are passed right-aligned. */
12896 if (BYTES_BIG_ENDIAN)
12897 t = fold_build_pointer_plus_hwi (t, pad);
12898
12899 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12900
12901 gimplify_assign (unshare_expr (addr), t, pre_p);
12902
12903 t = fold_build_pointer_plus_hwi (t, size);
12904 gimplify_assign (unshare_expr (ovf), t, pre_p);
12905
12906 if (lab_over)
12907 {
12908 stmt = gimple_build_label (lab_over);
12909 gimple_seq_add_stmt (pre_p, stmt);
12910 }
12911
12912 if (STRICT_ALIGNMENT
12913 && (TYPE_ALIGN (type)
12914 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12915 {
12916 /* The value (of type complex double, for example) may not be
12917 aligned in memory in the saved registers, so copy via a
12918 temporary. (This is the same code as used for SPARC.) */
12919 tree tmp = create_tmp_var (type, "va_arg_tmp");
12920 tree dest_addr = build_fold_addr_expr (tmp);
12921
12922 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12923 3, dest_addr, addr, size_int (rsize * 4));
12924 TREE_ADDRESSABLE (tmp) = 1;
12925
12926 gimplify_and_add (copy, pre_p);
12927 addr = dest_addr;
12928 }
12929
12930 addr = fold_convert (ptrtype, addr);
12931 return build_va_arg_indirect_ref (addr);
12932 }
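
/* A pseudo-C rendering of the gimple produced above for
   va_arg (ap, double) on 32-bit V.4 with hard float (a sketch of our
   reading, with n_reg == 1, sav_ofs == 32, sav_scale == 8, align == 8):

     if (ap->fpr >= 8)                          // all FPR slots used up
       goto overflow;
     addr = ap->reg_save_area + 32              // skip the 8 saved GPRs
            + ap->fpr++ * 8;
     goto done;
   overflow:
     addr = (ap->overflow_arg_area + 7) & -8;   // align the stack slot
     ap->overflow_arg_area = addr + 8;          // consume 8 bytes
   done:
     result = *(double *) addr;  */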
12933
12934 /* Builtins. */
12935
12936 static void
12937 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12938 {
12939 tree t;
12940 unsigned classify = rs6000_builtin_info[(int)code].attr;
12941 const char *attr_string = "";
12942
12943 gcc_assert (name != NULL);
12944 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
12945
12946 if (rs6000_builtin_decls[(int)code])
12947 fatal_error (input_location,
12948 "internal error: builtin function %qs already processed",
12949 name);
12950
12951 rs6000_builtin_decls[(int)code] = t =
12952 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12953
12954 /* Set any special attributes. */
12955 if ((classify & RS6000_BTC_CONST) != 0)
12956 {
12957 /* const function, function only depends on the inputs. */
12958 TREE_READONLY (t) = 1;
12959 TREE_NOTHROW (t) = 1;
12960 attr_string = ", const";
12961 }
12962 else if ((classify & RS6000_BTC_PURE) != 0)
12963 {
12964 /* pure function, function can read global memory, but does not set any
12965 external state. */
12966 DECL_PURE_P (t) = 1;
12967 TREE_NOTHROW (t) = 1;
12968 attr_string = ", pure";
12969 }
12970 else if ((classify & RS6000_BTC_FP) != 0)
12971 {
12972 /* Function is a math function. If -frounding-math is in effect, treat the
12973 function as not reading global memory, but it can have arbitrary side
12974 effects. If it is off, then assume the function is a const function.
12975 This mimics the ATTR_MATHFN_FPROUNDING attribute in
12976 builtin-attribute.def that is used for the math functions. */
12977 TREE_NOTHROW (t) = 1;
12978 if (flag_rounding_math)
12979 {
12980 DECL_PURE_P (t) = 1;
12981 DECL_IS_NOVOPS (t) = 1;
12982 attr_string = ", fp, pure";
12983 }
12984 else
12985 {
12986 TREE_READONLY (t) = 1;
12987 attr_string = ", fp, const";
12988 }
12989 }
12990 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12991 gcc_unreachable ();
12992
12993 if (TARGET_DEBUG_BUILTIN)
12994 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
12995 (int)code, name, attr_string);
12996 }
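
/* Hypothetical usage of def_builtin (the name, type node and enum shown
   are placeholders, not real table entries):

     def_builtin ("__builtin_example", v4sf_ftype_v4sf_v4sf,
                  EXAMPLE_BUILTIN);

   This registers the function with the middle end through
   add_builtin_function, stores the resulting decl in
   rs6000_builtin_decls, and tags the decl const/pure/fp according to
   the RS6000_BTC_* attribute bits recorded for that builtin code.  */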
12997
12998 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
12999
13000 #undef RS6000_BUILTIN_0
13001 #undef RS6000_BUILTIN_1
13002 #undef RS6000_BUILTIN_2
13003 #undef RS6000_BUILTIN_3
13004 #undef RS6000_BUILTIN_A
13005 #undef RS6000_BUILTIN_D
13006 #undef RS6000_BUILTIN_H
13007 #undef RS6000_BUILTIN_P
13008 #undef RS6000_BUILTIN_X
13009
13010 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13011 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13012 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13013 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13014 { MASK, ICODE, NAME, ENUM },
13015
13016 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13017 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13018 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13019 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13020 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13021
13022 static const struct builtin_description bdesc_3arg[] =
13023 {
13024 #include "rs6000-builtin.def"
13025 };
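
/* How the #undef/#define dance above works (an explanatory sketch; the
   entry shown is schematic, not copied from rs6000-builtin.def): every
   RS6000_BUILTIN_* macro except RS6000_BUILTIN_3 expands to nothing, so
   re-including the .def file emits initializers only for the ternary
   builtins.  A line of the form

     RS6000_BUILTIN_3 (SOME_ENUM, "__builtin_some_name", MASK, ATTR, ICODE)

   contributes exactly one table entry:

     { MASK, ICODE, "__builtin_some_name", SOME_ENUM },

   The same trick is repeated below, leaving a different macro "live"
   for each table.  */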
13026
13027 /* DST operations: void foo (void *, const int, const char). */
13028
13029 #undef RS6000_BUILTIN_0
13030 #undef RS6000_BUILTIN_1
13031 #undef RS6000_BUILTIN_2
13032 #undef RS6000_BUILTIN_3
13033 #undef RS6000_BUILTIN_A
13034 #undef RS6000_BUILTIN_D
13035 #undef RS6000_BUILTIN_H
13036 #undef RS6000_BUILTIN_P
13037 #undef RS6000_BUILTIN_X
13038
13039 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13040 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13041 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13042 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13043 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13044 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13045 { MASK, ICODE, NAME, ENUM },
13046
13047 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13048 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13049 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13050
13051 static const struct builtin_description bdesc_dst[] =
13052 {
13053 #include "rs6000-builtin.def"
13054 };
13055
13056 /* Simple binary operations: VECc = foo (VECa, VECb). */
13057
13058 #undef RS6000_BUILTIN_0
13059 #undef RS6000_BUILTIN_1
13060 #undef RS6000_BUILTIN_2
13061 #undef RS6000_BUILTIN_3
13062 #undef RS6000_BUILTIN_A
13063 #undef RS6000_BUILTIN_D
13064 #undef RS6000_BUILTIN_H
13065 #undef RS6000_BUILTIN_P
13066 #undef RS6000_BUILTIN_X
13067
13068 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13069 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13070 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13071 { MASK, ICODE, NAME, ENUM },
13072
13073 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13074 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13075 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13076 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13077 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13078 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13079
13080 static const struct builtin_description bdesc_2arg[] =
13081 {
13082 #include "rs6000-builtin.def"
13083 };
13084
13085 #undef RS6000_BUILTIN_0
13086 #undef RS6000_BUILTIN_1
13087 #undef RS6000_BUILTIN_2
13088 #undef RS6000_BUILTIN_3
13089 #undef RS6000_BUILTIN_A
13090 #undef RS6000_BUILTIN_D
13091 #undef RS6000_BUILTIN_H
13092 #undef RS6000_BUILTIN_P
13093 #undef RS6000_BUILTIN_X
13094
13095 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13096 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13097 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13098 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13099 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13100 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13101 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13102 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13103 { MASK, ICODE, NAME, ENUM },
13104
13105 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13106
13107 /* AltiVec predicates. */
13108
13109 static const struct builtin_description bdesc_altivec_preds[] =
13110 {
13111 #include "rs6000-builtin.def"
13112 };
13113
13114 /* ABS* operations. */
13115
13116 #undef RS6000_BUILTIN_0
13117 #undef RS6000_BUILTIN_1
13118 #undef RS6000_BUILTIN_2
13119 #undef RS6000_BUILTIN_3
13120 #undef RS6000_BUILTIN_A
13121 #undef RS6000_BUILTIN_D
13122 #undef RS6000_BUILTIN_H
13123 #undef RS6000_BUILTIN_P
13124 #undef RS6000_BUILTIN_X
13125
13126 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13127 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13128 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13129 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13130 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13131 { MASK, ICODE, NAME, ENUM },
13132
13133 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13134 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13135 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13136 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13137
13138 static const struct builtin_description bdesc_abs[] =
13139 {
13140 #include "rs6000-builtin.def"
13141 };
13142
13143 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13144 foo (VECa). */
13145
13146 #undef RS6000_BUILTIN_0
13147 #undef RS6000_BUILTIN_1
13148 #undef RS6000_BUILTIN_2
13149 #undef RS6000_BUILTIN_3
13150 #undef RS6000_BUILTIN_A
13151 #undef RS6000_BUILTIN_D
13152 #undef RS6000_BUILTIN_H
13153 #undef RS6000_BUILTIN_P
13154 #undef RS6000_BUILTIN_X
13155
13156 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13157 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13158 { MASK, ICODE, NAME, ENUM },
13159
13160 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13161 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13162 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13163 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13164 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13165 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13166 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13167
13168 static const struct builtin_description bdesc_1arg[] =
13169 {
13170 #include "rs6000-builtin.def"
13171 };
13172
13173 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13174
13175 #undef RS6000_BUILTIN_0
13176 #undef RS6000_BUILTIN_1
13177 #undef RS6000_BUILTIN_2
13178 #undef RS6000_BUILTIN_3
13179 #undef RS6000_BUILTIN_A
13180 #undef RS6000_BUILTIN_D
13181 #undef RS6000_BUILTIN_H
13182 #undef RS6000_BUILTIN_P
13183 #undef RS6000_BUILTIN_X
13184
13185 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13186 { MASK, ICODE, NAME, ENUM },
13187
13188 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13189 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13190 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13191 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13192 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13193 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13194 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13195 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13196
13197 static const struct builtin_description bdesc_0arg[] =
13198 {
13199 #include "rs6000-builtin.def"
13200 };
13201
13202 /* HTM builtins. */
13203 #undef RS6000_BUILTIN_0
13204 #undef RS6000_BUILTIN_1
13205 #undef RS6000_BUILTIN_2
13206 #undef RS6000_BUILTIN_3
13207 #undef RS6000_BUILTIN_A
13208 #undef RS6000_BUILTIN_D
13209 #undef RS6000_BUILTIN_H
13210 #undef RS6000_BUILTIN_P
13211 #undef RS6000_BUILTIN_X
13212
13213 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13214 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13215 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13216 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13217 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13218 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13219 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13220 { MASK, ICODE, NAME, ENUM },
13221
13222 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13223 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13224
13225 static const struct builtin_description bdesc_htm[] =
13226 {
13227 #include "rs6000-builtin.def"
13228 };
13229
13230 #undef RS6000_BUILTIN_0
13231 #undef RS6000_BUILTIN_1
13232 #undef RS6000_BUILTIN_2
13233 #undef RS6000_BUILTIN_3
13234 #undef RS6000_BUILTIN_A
13235 #undef RS6000_BUILTIN_D
13236 #undef RS6000_BUILTIN_H
13237 #undef RS6000_BUILTIN_P
13238
13239 /* Return true if a builtin function is overloaded. */
13240 bool
13241 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13242 {
13243 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13244 }
13245
13246 const char *
13247 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13248 {
13249 return rs6000_builtin_info[(int)fncode].name;
13250 }
13251
13252 /* Expand an expression EXP that calls a builtin without arguments. */
13253 static rtx
13254 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13255 {
13256 rtx pat;
13257 machine_mode tmode = insn_data[icode].operand[0].mode;
13258
13259 if (icode == CODE_FOR_nothing)
13260 /* Builtin not supported on this processor. */
13261 return 0;
13262
13263 if (target == 0
13264 || GET_MODE (target) != tmode
13265 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13266 target = gen_reg_rtx (tmode);
13267
13268 pat = GEN_FCN (icode) (target);
13269 if (! pat)
13270 return 0;
13271 emit_insn (pat);
13272
13273 return target;
13274 }
13275
13276
13277 static rtx
13278 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13279 {
13280 rtx pat;
13281 tree arg0 = CALL_EXPR_ARG (exp, 0);
13282 tree arg1 = CALL_EXPR_ARG (exp, 1);
13283 rtx op0 = expand_normal (arg0);
13284 rtx op1 = expand_normal (arg1);
13285 machine_mode mode0 = insn_data[icode].operand[0].mode;
13286 machine_mode mode1 = insn_data[icode].operand[1].mode;
13287
13288 if (icode == CODE_FOR_nothing)
13289 /* Builtin not supported on this processor. */
13290 return 0;
13291
13292 /* If we got invalid arguments bail out before generating bad rtl. */
13293 if (arg0 == error_mark_node || arg1 == error_mark_node)
13294 return const0_rtx;
13295
13296 if (GET_CODE (op0) != CONST_INT
13297 || INTVAL (op0) > 255
13298 || INTVAL (op0) < 0)
13299 {
13300 error ("argument 1 must be an 8-bit field value");
13301 return const0_rtx;
13302 }
13303
13304 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13305 op0 = copy_to_mode_reg (mode0, op0);
13306
13307 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13308 op1 = copy_to_mode_reg (mode1, op1);
13309
13310 pat = GEN_FCN (icode) (op0, op1);
13311 if (! pat)
13312 return const0_rtx;
13313 emit_insn (pat);
13314
13315 return NULL_RTX;
13316 }
13317
13318 static rtx
13319 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13320 {
13321 rtx pat;
13322 tree arg0 = CALL_EXPR_ARG (exp, 0);
13323 rtx op0 = expand_normal (arg0);
13324 machine_mode tmode = insn_data[icode].operand[0].mode;
13325 machine_mode mode0 = insn_data[icode].operand[1].mode;
13326
13327 if (icode == CODE_FOR_nothing)
13328 /* Builtin not supported on this processor. */
13329 return 0;
13330
13331 /* If we got invalid arguments bail out before generating bad rtl. */
13332 if (arg0 == error_mark_node)
13333 return const0_rtx;
13334
13335 if (icode == CODE_FOR_altivec_vspltisb
13336 || icode == CODE_FOR_altivec_vspltish
13337 || icode == CODE_FOR_altivec_vspltisw)
13338 {
13339 /* Only allow 5-bit *signed* literals. */
13340 if (GET_CODE (op0) != CONST_INT
13341 || INTVAL (op0) > 15
13342 || INTVAL (op0) < -16)
13343 {
13344 error ("argument 1 must be a 5-bit signed literal");
13345 return CONST0_RTX (tmode);
13346 }
13347 }
13348
13349 if (target == 0
13350 || GET_MODE (target) != tmode
13351 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13352 target = gen_reg_rtx (tmode);
13353
13354 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13355 op0 = copy_to_mode_reg (mode0, op0);
13356
13357 pat = GEN_FCN (icode) (target, op0);
13358 if (! pat)
13359 return 0;
13360 emit_insn (pat);
13361
13362 return target;
13363 }
13364
13365 static rtx
13366 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13367 {
13368 rtx pat, scratch1, scratch2;
13369 tree arg0 = CALL_EXPR_ARG (exp, 0);
13370 rtx op0 = expand_normal (arg0);
13371 machine_mode tmode = insn_data[icode].operand[0].mode;
13372 machine_mode mode0 = insn_data[icode].operand[1].mode;
13373
13374 /* If we have invalid arguments, bail out before generating bad rtl. */
13375 if (arg0 == error_mark_node)
13376 return const0_rtx;
13377
13378 if (target == 0
13379 || GET_MODE (target) != tmode
13380 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13381 target = gen_reg_rtx (tmode);
13382
13383 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13384 op0 = copy_to_mode_reg (mode0, op0);
13385
13386 scratch1 = gen_reg_rtx (mode0);
13387 scratch2 = gen_reg_rtx (mode0);
13388
13389 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13390 if (! pat)
13391 return 0;
13392 emit_insn (pat);
13393
13394 return target;
13395 }
13396
13397 static rtx
13398 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13399 {
13400 rtx pat;
13401 tree arg0 = CALL_EXPR_ARG (exp, 0);
13402 tree arg1 = CALL_EXPR_ARG (exp, 1);
13403 rtx op0 = expand_normal (arg0);
13404 rtx op1 = expand_normal (arg1);
13405 machine_mode tmode = insn_data[icode].operand[0].mode;
13406 machine_mode mode0 = insn_data[icode].operand[1].mode;
13407 machine_mode mode1 = insn_data[icode].operand[2].mode;
13408
13409 if (icode == CODE_FOR_nothing)
13410 /* Builtin not supported on this processor. */
13411 return 0;
13412
13413 /* If we got invalid arguments bail out before generating bad rtl. */
13414 if (arg0 == error_mark_node || arg1 == error_mark_node)
13415 return const0_rtx;
13416
13417 if (icode == CODE_FOR_unpackv1ti
13418 || icode == CODE_FOR_unpackkf
13419 || icode == CODE_FOR_unpacktf
13420 || icode == CODE_FOR_unpackif
13421 || icode == CODE_FOR_unpacktd)
13422 {
13423 /* Only allow 1-bit unsigned literals. */
13424 STRIP_NOPS (arg1);
13425 if (TREE_CODE (arg1) != INTEGER_CST
13426 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13427 {
13428 error ("argument 2 must be a 1-bit unsigned literal");
13429 return CONST0_RTX (tmode);
13430 }
13431 }
13432 else if (icode == CODE_FOR_altivec_vspltw)
13433 {
13434 /* Only allow 2-bit unsigned literals. */
13435 STRIP_NOPS (arg1);
13436 if (TREE_CODE (arg1) != INTEGER_CST
13437 || TREE_INT_CST_LOW (arg1) & ~3)
13438 {
13439 error ("argument 2 must be a 2-bit unsigned literal");
13440 return CONST0_RTX (tmode);
13441 }
13442 }
13443 else if (icode == CODE_FOR_altivec_vsplth)
13444 {
13445 /* Only allow 3-bit unsigned literals. */
13446 STRIP_NOPS (arg1);
13447 if (TREE_CODE (arg1) != INTEGER_CST
13448 || TREE_INT_CST_LOW (arg1) & ~7)
13449 {
13450 error ("argument 2 must be a 3-bit unsigned literal");
13451 return CONST0_RTX (tmode);
13452 }
13453 }
13454 else if (icode == CODE_FOR_altivec_vspltb)
13455 {
13456 /* Only allow 4-bit unsigned literals. */
13457 STRIP_NOPS (arg1);
13458 if (TREE_CODE (arg1) != INTEGER_CST
13459 || TREE_INT_CST_LOW (arg1) & ~15)
13460 {
13461 error ("argument 2 must be a 4-bit unsigned literal");
13462 return CONST0_RTX (tmode);
13463 }
13464 }
13465 else if (icode == CODE_FOR_altivec_vcfux
13466 || icode == CODE_FOR_altivec_vcfsx
13467 || icode == CODE_FOR_altivec_vctsxs
13468 || icode == CODE_FOR_altivec_vctuxs)
13469 {
13470 /* Only allow 5-bit unsigned literals. */
13471 STRIP_NOPS (arg1);
13472 if (TREE_CODE (arg1) != INTEGER_CST
13473 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13474 {
13475 error ("argument 2 must be a 5-bit unsigned literal");
13476 return CONST0_RTX (tmode);
13477 }
13478 }
13479 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13480 || icode == CODE_FOR_dfptstsfi_lt_dd
13481 || icode == CODE_FOR_dfptstsfi_gt_dd
13482 || icode == CODE_FOR_dfptstsfi_unordered_dd
13483 || icode == CODE_FOR_dfptstsfi_eq_td
13484 || icode == CODE_FOR_dfptstsfi_lt_td
13485 || icode == CODE_FOR_dfptstsfi_gt_td
13486 || icode == CODE_FOR_dfptstsfi_unordered_td)
13487 {
13488 /* Only allow 6-bit unsigned literals. */
13489 STRIP_NOPS (arg0);
13490 if (TREE_CODE (arg0) != INTEGER_CST
13491 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13492 {
13493 error ("argument 1 must be a 6-bit unsigned literal");
13494 return CONST0_RTX (tmode);
13495 }
13496 }
13497 else if (icode == CODE_FOR_xststdcqp_kf
13498 || icode == CODE_FOR_xststdcqp_tf
13499 || icode == CODE_FOR_xststdcdp
13500 || icode == CODE_FOR_xststdcsp
13501 || icode == CODE_FOR_xvtstdcdp
13502 || icode == CODE_FOR_xvtstdcsp)
13503 {
13504 /* Only allow 7-bit unsigned literals. */
13505 STRIP_NOPS (arg1);
13506 if (TREE_CODE (arg1) != INTEGER_CST
13507 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13508 {
13509 error ("argument 2 must be a 7-bit unsigned literal");
13510 return CONST0_RTX (tmode);
13511 }
13512 }
13513
13514 if (target == 0
13515 || GET_MODE (target) != tmode
13516 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13517 target = gen_reg_rtx (tmode);
13518
13519 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13520 op0 = copy_to_mode_reg (mode0, op0);
13521 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13522 op1 = copy_to_mode_reg (mode1, op1);
13523
13524 pat = GEN_FCN (icode) (target, op0, op1);
13525 if (! pat)
13526 return 0;
13527 emit_insn (pat);
13528
13529 return target;
13530 }
13531
13532 static rtx
13533 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13534 {
13535 rtx pat, scratch;
13536 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13537 tree arg0 = CALL_EXPR_ARG (exp, 1);
13538 tree arg1 = CALL_EXPR_ARG (exp, 2);
13539 rtx op0 = expand_normal (arg0);
13540 rtx op1 = expand_normal (arg1);
13541 machine_mode tmode = SImode;
13542 machine_mode mode0 = insn_data[icode].operand[1].mode;
13543 machine_mode mode1 = insn_data[icode].operand[2].mode;
13544 int cr6_form_int;
13545
13546 if (TREE_CODE (cr6_form) != INTEGER_CST)
13547 {
13548 error ("argument 1 of %qs must be a constant",
13549 "__builtin_altivec_predicate");
13550 return const0_rtx;
13551 }
13552 else
13553 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13554
13555 gcc_assert (mode0 == mode1);
13556
13557 /* If we have invalid arguments, bail out before generating bad rtl. */
13558 if (arg0 == error_mark_node || arg1 == error_mark_node)
13559 return const0_rtx;
13560
13561 if (target == 0
13562 || GET_MODE (target) != tmode
13563 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13564 target = gen_reg_rtx (tmode);
13565
13566 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13567 op0 = copy_to_mode_reg (mode0, op0);
13568 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13569 op1 = copy_to_mode_reg (mode1, op1);
13570
13571 /* Note that for many of the relevant operations (e.g. cmpne or
13572 cmpeq) with float or double operands, it makes more sense for the
13573 mode of the allocated scratch register to select a vector of
13574 integers. But the choice to copy the mode of operand 0 was made
13575 long ago and there are no plans to change it. */
13576 scratch = gen_reg_rtx (mode0);
13577
13578 pat = GEN_FCN (icode) (scratch, op0, op1);
13579 if (! pat)
13580 return 0;
13581 emit_insn (pat);
13582
13583 /* The vec_any* and vec_all* predicates use the same opcodes for two
13584 different operations, but the bits in CR6 will be different
13585 depending on what information we want. So we have to play tricks
13586 with CR6 to get the right bits out.
13587
13588 If you think this is disgusting, look at the specs for the
13589 AltiVec predicates. */
13590
13591 switch (cr6_form_int)
13592 {
13593 case 0:
13594 emit_insn (gen_cr6_test_for_zero (target));
13595 break;
13596 case 1:
13597 emit_insn (gen_cr6_test_for_zero_reverse (target));
13598 break;
13599 case 2:
13600 emit_insn (gen_cr6_test_for_lt (target));
13601 break;
13602 case 3:
13603 emit_insn (gen_cr6_test_for_lt_reverse (target));
13604 break;
13605 default:
13606 error ("argument 1 of %qs is out of range",
13607 "__builtin_altivec_predicate");
13608 break;
13609 }
13610
13611 return target;
13612 }
13613
13614 rtx
13615 swap_endian_selector_for_mode (machine_mode mode)
13616 {
13617 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13618 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13619 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13620 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13621
13622 unsigned int *swaparray, i;
13623 rtx perm[16];
13624
13625 switch (mode)
13626 {
13627 case E_V1TImode:
13628 swaparray = swap1;
13629 break;
13630 case E_V2DFmode:
13631 case E_V2DImode:
13632 swaparray = swap2;
13633 break;
13634 case E_V4SFmode:
13635 case E_V4SImode:
13636 swaparray = swap4;
13637 break;
13638 case E_V8HImode:
13639 swaparray = swap8;
13640 break;
13641 default:
13642 gcc_unreachable ();
13643 }
13644
13645 for (i = 0; i < 16; ++i)
13646 perm[i] = GEN_INT (swaparray[i]);
13647
13648 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13649 gen_rtvec_v (16, perm)));
13650 }
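
/* Worked example: for V4SImode the selector built above is the vperm
   control {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}, which reverses
   the bytes within each 4-byte lane.  A register holding the byte
   sequence

     00 01 02 03 | 04 05 06 07 | 08 09 0a 0b | 0c 0d 0e 0f

   permutes to

     03 02 01 00 | 07 06 05 04 | 0b 0a 09 08 | 0f 0e 0d 0c

   i.e. each 32-bit element is byte-swapped while element order is
   preserved.  */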
13651
13652 static rtx
13653 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13654 {
13655 rtx pat, addr;
13656 tree arg0 = CALL_EXPR_ARG (exp, 0);
13657 tree arg1 = CALL_EXPR_ARG (exp, 1);
13658 machine_mode tmode = insn_data[icode].operand[0].mode;
13659 machine_mode mode0 = Pmode;
13660 machine_mode mode1 = Pmode;
13661 rtx op0 = expand_normal (arg0);
13662 rtx op1 = expand_normal (arg1);
13663
13664 if (icode == CODE_FOR_nothing)
13665 /* Builtin not supported on this processor. */
13666 return 0;
13667
13668 /* If we got invalid arguments bail out before generating bad rtl. */
13669 if (arg0 == error_mark_node || arg1 == error_mark_node)
13670 return const0_rtx;
13671
13672 if (target == 0
13673 || GET_MODE (target) != tmode
13674 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13675 target = gen_reg_rtx (tmode);
13676
13677 op1 = copy_to_mode_reg (mode1, op1);
13678
13679 /* For LVX, express the RTL accurately by ANDing the address with -16.
13680 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13681 so the raw address is fine. */
13682 if (icode == CODE_FOR_altivec_lvx_v1ti
13683 || icode == CODE_FOR_altivec_lvx_v2df
13684 || icode == CODE_FOR_altivec_lvx_v2di
13685 || icode == CODE_FOR_altivec_lvx_v4sf
13686 || icode == CODE_FOR_altivec_lvx_v4si
13687 || icode == CODE_FOR_altivec_lvx_v8hi
13688 || icode == CODE_FOR_altivec_lvx_v16qi)
13689 {
13690 rtx rawaddr;
13691 if (op0 == const0_rtx)
13692 rawaddr = op1;
13693 else
13694 {
13695 op0 = copy_to_mode_reg (mode0, op0);
13696 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13697 }
13698 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13699 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13700
13701 emit_insn (gen_rtx_SET (target, addr));
13702 }
13703 else
13704 {
13705 if (op0 == const0_rtx)
13706 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13707 else
13708 {
13709 op0 = copy_to_mode_reg (mode0, op0);
13710 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13711 gen_rtx_PLUS (Pmode, op1, op0));
13712 }
13713
13714 pat = GEN_FCN (icode) (target, addr);
13715 if (! pat)
13716 return 0;
13717 emit_insn (pat);
13718 }
13719
13720 return target;
13721 }
13722
13723 static rtx
13724 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13725 {
13726 rtx pat;
13727 tree arg0 = CALL_EXPR_ARG (exp, 0);
13728 tree arg1 = CALL_EXPR_ARG (exp, 1);
13729 tree arg2 = CALL_EXPR_ARG (exp, 2);
13730 rtx op0 = expand_normal (arg0);
13731 rtx op1 = expand_normal (arg1);
13732 rtx op2 = expand_normal (arg2);
13733 machine_mode mode0 = insn_data[icode].operand[0].mode;
13734 machine_mode mode1 = insn_data[icode].operand[1].mode;
13735 machine_mode mode2 = insn_data[icode].operand[2].mode;
13736
13737 if (icode == CODE_FOR_nothing)
13738 /* Builtin not supported on this processor. */
13739 return NULL_RTX;
13740
13741 /* If we got invalid arguments bail out before generating bad rtl. */
13742 if (arg0 == error_mark_node
13743 || arg1 == error_mark_node
13744 || arg2 == error_mark_node)
13745 return NULL_RTX;
13746
13747 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13748 op0 = copy_to_mode_reg (mode0, op0);
13749 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13750 op1 = copy_to_mode_reg (mode1, op1);
13751 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13752 op2 = copy_to_mode_reg (mode2, op2);
13753
13754 pat = GEN_FCN (icode) (op0, op1, op2);
13755 if (pat)
13756 emit_insn (pat);
13757
13758 return NULL_RTX;
13759 }
13760
13761 static rtx
13762 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13763 {
13764 tree arg0 = CALL_EXPR_ARG (exp, 0);
13765 tree arg1 = CALL_EXPR_ARG (exp, 1);
13766 tree arg2 = CALL_EXPR_ARG (exp, 2);
13767 rtx op0 = expand_normal (arg0);
13768 rtx op1 = expand_normal (arg1);
13769 rtx op2 = expand_normal (arg2);
13770 rtx pat, addr, rawaddr;
13771 machine_mode tmode = insn_data[icode].operand[0].mode;
13772 machine_mode smode = insn_data[icode].operand[1].mode;
13773 machine_mode mode1 = Pmode;
13774 machine_mode mode2 = Pmode;
13775
13776 /* Invalid arguments.  Bail out before doing anything stupid! */
13777 if (arg0 == error_mark_node
13778 || arg1 == error_mark_node
13779 || arg2 == error_mark_node)
13780 return const0_rtx;
13781
13782 op2 = copy_to_mode_reg (mode2, op2);
13783
13784 /* For STVX, express the RTL accurately by ANDing the address with -16.
13785 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13786 so the raw address is fine. */
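/* Illustratively, for V4SI this branch emits
     (set (mem:V4SI (and:P (plus:P op2 op1) (const_int -16))) op0)
   mirroring how stvx masks the low four bits of the effective
   address.  */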
13787 if (icode == CODE_FOR_altivec_stvx_v2df
13788 || icode == CODE_FOR_altivec_stvx_v2di
13789 || icode == CODE_FOR_altivec_stvx_v4sf
13790 || icode == CODE_FOR_altivec_stvx_v4si
13791 || icode == CODE_FOR_altivec_stvx_v8hi
13792 || icode == CODE_FOR_altivec_stvx_v16qi)
13793 {
13794 if (op1 == const0_rtx)
13795 rawaddr = op2;
13796 else
13797 {
13798 op1 = copy_to_mode_reg (mode1, op1);
13799 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13800 }
13801
13802 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13803 addr = gen_rtx_MEM (tmode, addr);
13804
13805 op0 = copy_to_mode_reg (tmode, op0);
13806
13807 emit_insn (gen_rtx_SET (addr, op0));
13808 }
13809 else
13810 {
13811 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13812 op0 = copy_to_mode_reg (smode, op0);
13813
13814 if (op1 == const0_rtx)
13815 addr = gen_rtx_MEM (tmode, op2);
13816 else
13817 {
13818 op1 = copy_to_mode_reg (mode1, op1);
13819 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13820 }
13821
13822 pat = GEN_FCN (icode) (addr, op0);
13823 if (pat)
13824 emit_insn (pat);
13825 }
13826
13827 return NULL_RTX;
13828 }
13829
13830 /* Return the appropriate SPR number associated with the given builtin. */
13831 static inline HOST_WIDE_INT
13832 htm_spr_num (enum rs6000_builtins code)
13833 {
13834 if (code == HTM_BUILTIN_GET_TFHAR
13835 || code == HTM_BUILTIN_SET_TFHAR)
13836 return TFHAR_SPR;
13837 else if (code == HTM_BUILTIN_GET_TFIAR
13838 || code == HTM_BUILTIN_SET_TFIAR)
13839 return TFIAR_SPR;
13840 else if (code == HTM_BUILTIN_GET_TEXASR
13841 || code == HTM_BUILTIN_SET_TEXASR)
13842 return TEXASR_SPR;
13843 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13844 || code == HTM_BUILTIN_SET_TEXASRU);
13845 return TEXASRU_SPR;
13846 }
13847
13848 /* Return the appropriate SPR regno associated with the given builtin. */
13849 static inline HOST_WIDE_INT
13850 htm_spr_regno (enum rs6000_builtins code)
13851 {
13852 if (code == HTM_BUILTIN_GET_TFHAR
13853 || code == HTM_BUILTIN_SET_TFHAR)
13854 return TFHAR_REGNO;
13855 else if (code == HTM_BUILTIN_GET_TFIAR
13856 || code == HTM_BUILTIN_SET_TFIAR)
13857 return TFIAR_REGNO;
13858 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
13859 || code == HTM_BUILTIN_SET_TEXASR
13860 || code == HTM_BUILTIN_GET_TEXASRU
13861 || code == HTM_BUILTIN_SET_TEXASRU);
13862 return TEXASR_REGNO;
13863 }
13864
13865 /* Return the correct ICODE value depending on whether we are
13866 setting or reading the HTM SPRs. */
13867 static inline enum insn_code
13868 rs6000_htm_spr_icode (bool nonvoid)
13869 {
13870 if (nonvoid)
13871 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
13872 else
13873 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
13874 }
13875
13876 /* Expand the HTM builtin in EXP and store the result in TARGET.
13877 Store true in *EXPANDEDP if we found a builtin to expand. */
13878 static rtx
13879 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
13880 {
13881 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13882 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
13883 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13884 const struct builtin_description *d;
13885 size_t i;
13886
13887 *expandedp = true;
13888
13889 if (!TARGET_POWERPC64
13890 && (fcode == HTM_BUILTIN_TABORTDC
13891 || fcode == HTM_BUILTIN_TABORTDCI))
13892 {
13893 size_t uns_fcode = (size_t) fcode;
13894 const char *name = rs6000_builtin_info[uns_fcode].name;
13895 error ("builtin %qs is only valid in 64-bit mode", name);
13896 return const0_rtx;
13897 }
13898
13899 /* Expand the HTM builtins. */
13900 d = bdesc_htm;
13901 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
13902 if (d->code == fcode)
13903 {
13904 rtx op[MAX_HTM_OPERANDS], pat;
13905 int nopnds = 0;
13906 tree arg;
13907 call_expr_arg_iterator iter;
13908 unsigned attr = rs6000_builtin_info[fcode].attr;
13909 enum insn_code icode = d->icode;
13910 const struct insn_operand_data *insn_op;
13911 bool uses_spr = (attr & RS6000_BTC_SPR);
13912 rtx cr = NULL_RTX;
13913
13914 if (uses_spr)
13915 icode = rs6000_htm_spr_icode (nonvoid);
13916 insn_op = &insn_data[icode].operand[0];
13917
13918 if (nonvoid)
13919 {
13920 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
13921 if (!target
13922 || GET_MODE (target) != tmode
13923 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
13924 target = gen_reg_rtx (tmode);
13925 if (uses_spr)
13926 op[nopnds++] = target;
13927 }
13928
13929 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
13930 {
13931 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
13932 return const0_rtx;
13933
13934 insn_op = &insn_data[icode].operand[nopnds];
13935
13936 op[nopnds] = expand_normal (arg);
13937
13938 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
13939 {
13940 if (!strcmp (insn_op->constraint, "n"))
13941 {
13942 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
13943 if (!CONST_INT_P (op[nopnds]))
13944 error ("argument %d must be an unsigned literal", arg_num);
13945 else
13946 error ("argument %d is an unsigned literal that is "
13947 "out of range", arg_num);
13948 return const0_rtx;
13949 }
13950 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
13951 }
13952
13953 nopnds++;
13954 }
13955
13956 /* Handle the builtins for extended mnemonics. These accept
13957 no arguments, but map to builtins that take arguments. */
13958 switch (fcode)
13959 {
13960 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
13961 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
13962 op[nopnds++] = GEN_INT (1);
13963 if (flag_checking)
13964 attr |= RS6000_BTC_UNARY;
13965 break;
13966 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
13967 op[nopnds++] = GEN_INT (0);
13968 if (flag_checking)
13969 attr |= RS6000_BTC_UNARY;
13970 break;
13971 default:
13972 break;
13973 }
13974
13975 /* If this builtin accesses SPRs, then pass in the appropriate
13976 SPR number and SPR regno as the last two operands. */
13977 if (uses_spr)
13978 {
13979 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
13980 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
13981 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
13982 }
13983 /* If this builtin accesses a CR, then pass in a scratch
13984 CR as the last operand. */
13985 else if (attr & RS6000_BTC_CR)
13986 {
13987 op[nopnds++] = cr = gen_reg_rtx (CCmode);
13988 }
13989
13990 if (flag_checking)
13991 {
13992 int expected_nopnds = 0;
13993 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
13994 expected_nopnds = 1;
13995 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
13996 expected_nopnds = 2;
13997 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
13998 expected_nopnds = 3;
13999 if (!(attr & RS6000_BTC_VOID))
14000 expected_nopnds += 1;
14001 if (uses_spr)
14002 expected_nopnds += 2;
14003
14004 gcc_assert (nopnds == expected_nopnds
14005 && nopnds <= MAX_HTM_OPERANDS);
14006 }
14007
14008 switch (nopnds)
14009 {
14010 case 1:
14011 pat = GEN_FCN (icode) (op[0]);
14012 break;
14013 case 2:
14014 pat = GEN_FCN (icode) (op[0], op[1]);
14015 break;
14016 case 3:
14017 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14018 break;
14019 case 4:
14020 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14021 break;
14022 default:
14023 gcc_unreachable ();
14024 }
14025 if (!pat)
14026 return NULL_RTX;
14027 emit_insn (pat);
14028
14029 if (attr & RS6000_BTC_CR)
14030 {
14031 if (fcode == HTM_BUILTIN_TBEGIN)
14032 {
14033 /* Emit code to set TARGET to true or false depending on
14034 whether the tbegin. instruction succeeded or failed
14035 to start a transaction.  We do this by placing the 1's
14036 complement of CR's EQ bit into TARGET. */
14037 rtx scratch = gen_reg_rtx (SImode);
14038 emit_insn (gen_rtx_SET (scratch,
14039 gen_rtx_EQ (SImode, cr,
14040 const0_rtx)));
14041 emit_insn (gen_rtx_SET (target,
14042 gen_rtx_XOR (SImode, scratch,
14043 GEN_INT (1))));
14044 }
14045 else
14046 {
14047 /* Emit code to copy the 4-bit condition register field
14048 CR into the least significant end of register TARGET. */
14049 rtx scratch1 = gen_reg_rtx (SImode);
14050 rtx scratch2 = gen_reg_rtx (SImode);
14051 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14052 emit_insn (gen_movcc (subreg, cr));
14053 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14054 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
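/* The two insns above compute target = (scratch1 >> 28) & 0xf,
   i.e. they extract the 4-bit CR field from the most significant
   bits of scratch1.  */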
14055 }
14056 }
14057
14058 if (nonvoid)
14059 return target;
14060 return const0_rtx;
14061 }
14062
14063 *expandedp = false;
14064 return NULL_RTX;
14065 }
14066
14067 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14068
14069 static rtx
14070 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14071 rtx target)
14072 {
14073 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14074 if (fcode == RS6000_BUILTIN_CPU_INIT)
14075 return const0_rtx;
14076
14077 if (target == 0 || GET_MODE (target) != SImode)
14078 target = gen_reg_rtx (SImode);
14079
14080 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14081 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14082 /* Handling of target clones creates an ARRAY_REF instead of a STRING_CST;
14083 convert it back to a STRING_CST. */
14084 if (TREE_CODE (arg) == ARRAY_REF
14085 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14086 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14087 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14088 arg = TREE_OPERAND (arg, 0);
14089
14090 if (TREE_CODE (arg) != STRING_CST)
14091 {
14092 error ("builtin %qs only accepts a string argument",
14093 rs6000_builtin_info[(size_t) fcode].name);
14094 return const0_rtx;
14095 }
14096
14097 if (fcode == RS6000_BUILTIN_CPU_IS)
14098 {
14099 const char *cpu = TREE_STRING_POINTER (arg);
14100 rtx cpuid = NULL_RTX;
14101 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14102 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14103 {
14104 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14105 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14106 break;
14107 }
14108 if (cpuid == NULL_RTX)
14109 {
14110 /* Invalid CPU argument. */
14111 error ("cpu %qs is an invalid argument to builtin %qs",
14112 cpu, rs6000_builtin_info[(size_t) fcode].name);
14113 return const0_rtx;
14114 }
14115
14116 rtx platform = gen_reg_rtx (SImode);
14117 rtx tcbmem = gen_const_mem (SImode,
14118 gen_rtx_PLUS (Pmode,
14119 gen_rtx_REG (Pmode, TLS_REGNUM),
14120 GEN_INT (TCB_PLATFORM_OFFSET)));
14121 emit_move_insn (platform, tcbmem);
14122 emit_insn (gen_eqsi3 (target, platform, cpuid));
14123 }
14124 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14125 {
14126 const char *hwcap = TREE_STRING_POINTER (arg);
14127 rtx mask = NULL_RTX;
14128 int hwcap_offset;
14129 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14130 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14131 {
14132 mask = GEN_INT (cpu_supports_info[i].mask);
14133 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14134 break;
14135 }
14136 if (mask == NULL_RTX)
14137 {
14138 /* Invalid HWCAP argument. */
14139 error ("%s %qs is an invalid argument to builtin %qs",
14140 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14141 return const0_rtx;
14142 }
14143
14144 rtx tcb_hwcap = gen_reg_rtx (SImode);
14145 rtx tcbmem = gen_const_mem (SImode,
14146 gen_rtx_PLUS (Pmode,
14147 gen_rtx_REG (Pmode, TLS_REGNUM),
14148 GEN_INT (hwcap_offset)));
14149 emit_move_insn (tcb_hwcap, tcbmem);
14150 rtx scratch1 = gen_reg_rtx (SImode);
14151 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14152 rtx scratch2 = gen_reg_rtx (SImode);
14153 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14154 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14155 }
14156 else
14157 gcc_unreachable ();
14158
14159 /* Record that we have expanded a CPU builtin, so that we can later
14160 emit a reference to the special symbol exported by LIBC to ensure we
14161 do not link against an old LIBC that doesn't support this feature. */
14162 cpu_builtin_p = true;
14163
14164 #else
14165 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14166 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14167
14168 /* For old LIBCs, always return FALSE. */
14169 emit_move_insn (target, GEN_INT (0));
14170 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14171
14172 return target;
14173 }
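/* Illustrative usage of the builtins expanded above; "power9" and
   "vsx" are examples of valid argument strings:

     if (__builtin_cpu_is ("power9"))
       ...
     if (__builtin_cpu_supports ("vsx"))
       ...

   Each expands to a load from the thread control block plus a compare
   or mask, with no library call at run time.  */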
14174
14175 static rtx
14176 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14177 {
14178 rtx pat;
14179 tree arg0 = CALL_EXPR_ARG (exp, 0);
14180 tree arg1 = CALL_EXPR_ARG (exp, 1);
14181 tree arg2 = CALL_EXPR_ARG (exp, 2);
14182 rtx op0 = expand_normal (arg0);
14183 rtx op1 = expand_normal (arg1);
14184 rtx op2 = expand_normal (arg2);
14185 machine_mode tmode = insn_data[icode].operand[0].mode;
14186 machine_mode mode0 = insn_data[icode].operand[1].mode;
14187 machine_mode mode1 = insn_data[icode].operand[2].mode;
14188 machine_mode mode2 = insn_data[icode].operand[3].mode;
14189
14190 if (icode == CODE_FOR_nothing)
14191 /* Builtin not supported on this processor. */
14192 return 0;
14193
14194 /* If we got invalid arguments, bail out before generating bad rtl. */
14195 if (arg0 == error_mark_node
14196 || arg1 == error_mark_node
14197 || arg2 == error_mark_node)
14198 return const0_rtx;
14199
14200 /* Check and prepare the arguments depending on the instruction code.
14201
14202 Note that a switch statement instead of this sequence of tests
14203 would be incorrect, as many of the CODE_FOR values could be
14204 CODE_FOR_nothing, which would yield multiple case labels with
14205 identical values; C does not allow that.  We would never reach
14206 here at runtime in that case anyway. */
14207 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14208 || icode == CODE_FOR_altivec_vsldoi_v2df
14209 || icode == CODE_FOR_altivec_vsldoi_v4si
14210 || icode == CODE_FOR_altivec_vsldoi_v8hi
14211 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14212 {
14213 /* Only allow 4-bit unsigned literals. */
14214 STRIP_NOPS (arg2);
14215 if (TREE_CODE (arg2) != INTEGER_CST
14216 || TREE_INT_CST_LOW (arg2) & ~0xf)
14217 {
14218 error ("argument 3 must be a 4-bit unsigned literal");
14219 return CONST0_RTX (tmode);
14220 }
14221 }
14222 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14223 || icode == CODE_FOR_vsx_xxpermdi_v2di
14224 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14225 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14226 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14227 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14228 || icode == CODE_FOR_vsx_xxpermdi_v4si
14229 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14230 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14231 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14232 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14233 || icode == CODE_FOR_vsx_xxsldwi_v4si
14234 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14235 || icode == CODE_FOR_vsx_xxsldwi_v2di
14236 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14237 {
14238 /* Only allow 2-bit unsigned literals. */
14239 STRIP_NOPS (arg2);
14240 if (TREE_CODE (arg2) != INTEGER_CST
14241 || TREE_INT_CST_LOW (arg2) & ~0x3)
14242 {
14243 error ("argument 3 must be a 2-bit unsigned literal");
14244 return CONST0_RTX (tmode);
14245 }
14246 }
14247 else if (icode == CODE_FOR_vsx_set_v2df
14248 || icode == CODE_FOR_vsx_set_v2di
14249 || icode == CODE_FOR_bcdadd
14250 || icode == CODE_FOR_bcdadd_lt
14251 || icode == CODE_FOR_bcdadd_eq
14252 || icode == CODE_FOR_bcdadd_gt
14253 || icode == CODE_FOR_bcdsub
14254 || icode == CODE_FOR_bcdsub_lt
14255 || icode == CODE_FOR_bcdsub_eq
14256 || icode == CODE_FOR_bcdsub_gt)
14257 {
14258 /* Only allow 1-bit unsigned literals. */
14259 STRIP_NOPS (arg2);
14260 if (TREE_CODE (arg2) != INTEGER_CST
14261 || TREE_INT_CST_LOW (arg2) & ~0x1)
14262 {
14263 error ("argument 3 must be a 1-bit unsigned literal");
14264 return CONST0_RTX (tmode);
14265 }
14266 }
14267 else if (icode == CODE_FOR_dfp_ddedpd_dd
14268 || icode == CODE_FOR_dfp_ddedpd_td)
14269 {
14270 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14271 STRIP_NOPS (arg0);
14272 if (TREE_CODE (arg0) != INTEGER_CST
14273 || TREE_INT_CST_LOW (arg0) & ~0x3)
14274 {
14275 error ("argument 1 must be 0 or 2");
14276 return CONST0_RTX (tmode);
14277 }
14278 }
14279 else if (icode == CODE_FOR_dfp_denbcd_dd
14280 || icode == CODE_FOR_dfp_denbcd_td)
14281 {
14282 /* Only allow 1-bit unsigned literals. */
14283 STRIP_NOPS (arg0);
14284 if (TREE_CODE (arg0) != INTEGER_CST
14285 || TREE_INT_CST_LOW (arg0) & ~0x1)
14286 {
14287 error ("argument 1 must be a 1-bit unsigned literal");
14288 return CONST0_RTX (tmode);
14289 }
14290 }
14291 else if (icode == CODE_FOR_dfp_dscli_dd
14292 || icode == CODE_FOR_dfp_dscli_td
14293 || icode == CODE_FOR_dfp_dscri_dd
14294 || icode == CODE_FOR_dfp_dscri_td)
14295 {
14296 /* Only allow 6-bit unsigned literals. */
14297 STRIP_NOPS (arg1);
14298 if (TREE_CODE (arg1) != INTEGER_CST
14299 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14300 {
14301 error ("argument 2 must be a 6-bit unsigned literal");
14302 return CONST0_RTX (tmode);
14303 }
14304 }
14305 else if (icode == CODE_FOR_crypto_vshasigmaw
14306 || icode == CODE_FOR_crypto_vshasigmad)
14307 {
14308 /* Check whether the 2nd and 3rd arguments are integer constants and in
14309 range and prepare arguments. */
14310 STRIP_NOPS (arg1);
14311 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14312 {
14313 error ("argument 2 must be 0 or 1");
14314 return CONST0_RTX (tmode);
14315 }
14316
14317 STRIP_NOPS (arg2);
14318 if (TREE_CODE (arg2) != INTEGER_CST
14319 || wi::geu_p (wi::to_wide (arg2), 16))
14320 {
14321 error ("argument 3 must be in the range 0..15");
14322 return CONST0_RTX (tmode);
14323 }
14324 }
14325
14326 if (target == 0
14327 || GET_MODE (target) != tmode
14328 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14329 target = gen_reg_rtx (tmode);
14330
14331 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14332 op0 = copy_to_mode_reg (mode0, op0);
14333 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14334 op1 = copy_to_mode_reg (mode1, op1);
14335 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14336 op2 = copy_to_mode_reg (mode2, op2);
14337
14338 pat = GEN_FCN (icode) (target, op0, op1, op2);
14339 if (! pat)
14340 return 0;
14341 emit_insn (pat);
14342
14343 return target;
14344 }
14345
14346
14347 /* Expand the dst builtins. */
14348 static rtx
14349 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14350 bool *expandedp)
14351 {
14352 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14353 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14354 tree arg0, arg1, arg2;
14355 machine_mode mode0, mode1;
14356 rtx pat, op0, op1, op2;
14357 const struct builtin_description *d;
14358 size_t i;
14359
14360 *expandedp = false;
14361
14362 /* Handle DST variants. */
14363 d = bdesc_dst;
14364 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14365 if (d->code == fcode)
14366 {
14367 arg0 = CALL_EXPR_ARG (exp, 0);
14368 arg1 = CALL_EXPR_ARG (exp, 1);
14369 arg2 = CALL_EXPR_ARG (exp, 2);
14370 op0 = expand_normal (arg0);
14371 op1 = expand_normal (arg1);
14372 op2 = expand_normal (arg2);
14373 mode0 = insn_data[d->icode].operand[0].mode;
14374 mode1 = insn_data[d->icode].operand[1].mode;
14375
14376 /* Invalid arguments, bail out before generating bad rtl. */
14377 if (arg0 == error_mark_node
14378 || arg1 == error_mark_node
14379 || arg2 == error_mark_node)
14380 return const0_rtx;
14381
14382 *expandedp = true;
14383 STRIP_NOPS (arg2);
14384 if (TREE_CODE (arg2) != INTEGER_CST
14385 || TREE_INT_CST_LOW (arg2) & ~0x3)
14386 {
14387 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14388 return const0_rtx;
14389 }
14390
14391 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14392 op0 = copy_to_mode_reg (Pmode, op0);
14393 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14394 op1 = copy_to_mode_reg (mode1, op1);
14395
14396 pat = GEN_FCN (d->icode) (op0, op1, op2);
14397 if (pat != 0)
14398 emit_insn (pat);
14399
14400 return NULL_RTX;
14401 }
14402
14403 return NULL_RTX;
14404 }
14405
14406 /* Expand vec_init builtin. */
14407 static rtx
14408 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14409 {
14410 machine_mode tmode = TYPE_MODE (type);
14411 machine_mode inner_mode = GET_MODE_INNER (tmode);
14412 int i, n_elt = GET_MODE_NUNITS (tmode);
14413
14414 gcc_assert (VECTOR_MODE_P (tmode));
14415 gcc_assert (n_elt == call_expr_nargs (exp));
14416
14417 if (!target || !register_operand (target, tmode))
14418 target = gen_reg_rtx (tmode);
14419
14420 /* If we have a vector comprised of a single element, such as V1TImode, do
14421 the initialization directly. */
14422 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14423 {
14424 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14425 emit_move_insn (target, gen_lowpart (tmode, x));
14426 }
14427 else
14428 {
14429 rtvec v = rtvec_alloc (n_elt);
14430
14431 for (i = 0; i < n_elt; ++i)
14432 {
14433 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14434 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14435 }
14436
14437 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14438 }
14439
14440 return target;
14441 }
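/* Illustrative: for a V4SI result, n_elt == 4, so the CALL_EXPR must
   carry exactly four scalar arguments, one per vector lane; this is
   what the assertion above enforces.  */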
14442
14443 /* Return the integer constant in ARG. Constrain it to be in the range
14444 of the subparts of VEC_TYPE; issue an error if not. */
14445
14446 static int
14447 get_element_number (tree vec_type, tree arg)
14448 {
14449 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14450
14451 if (!tree_fits_uhwi_p (arg)
14452 || (elt = tree_to_uhwi (arg), elt > max))
14453 {
14454 error ("selector must be an integer constant in the range 0..%wi", max);
14455 return 0;
14456 }
14457
14458 return elt;
14459 }
14460
14461 /* Expand vec_set builtin. */
14462 static rtx
14463 altivec_expand_vec_set_builtin (tree exp)
14464 {
14465 machine_mode tmode, mode1;
14466 tree arg0, arg1, arg2;
14467 int elt;
14468 rtx op0, op1;
14469
14470 arg0 = CALL_EXPR_ARG (exp, 0);
14471 arg1 = CALL_EXPR_ARG (exp, 1);
14472 arg2 = CALL_EXPR_ARG (exp, 2);
14473
14474 tmode = TYPE_MODE (TREE_TYPE (arg0));
14475 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14476 gcc_assert (VECTOR_MODE_P (tmode));
14477
14478 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14479 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14480 elt = get_element_number (TREE_TYPE (arg0), arg2);
14481
14482 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14483 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14484
14485 op0 = force_reg (tmode, op0);
14486 op1 = force_reg (mode1, op1);
14487
14488 rs6000_expand_vector_set (op0, op1, elt);
14489
14490 return op0;
14491 }
14492
14493 /* Expand vec_ext builtin. */
14494 static rtx
14495 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14496 {
14497 machine_mode tmode, mode0;
14498 tree arg0, arg1;
14499 rtx op0;
14500 rtx op1;
14501
14502 arg0 = CALL_EXPR_ARG (exp, 0);
14503 arg1 = CALL_EXPR_ARG (exp, 1);
14504
14505 op0 = expand_normal (arg0);
14506 op1 = expand_normal (arg1);
14507
14508 /* Call get_element_number to validate arg1 if it is a constant. */
14509 if (TREE_CODE (arg1) == INTEGER_CST)
14510 (void) get_element_number (TREE_TYPE (arg0), arg1);
14511
14512 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14513 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14514 gcc_assert (VECTOR_MODE_P (mode0));
14515
14516 op0 = force_reg (mode0, op0);
14517
14518 if (optimize || !target || !register_operand (target, tmode))
14519 target = gen_reg_rtx (tmode);
14520
14521 rs6000_expand_vector_extract (target, op0, op1);
14522
14523 return target;
14524 }
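/* Illustrative (assumed front-end mapping): a source-level call such
   as vec_extract (v, 2) on a vector int arrives here as
   ALTIVEC_BUILTIN_VEC_EXT_V4SI with ARG1 == 2.  */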
14525
14526 /* Expand the builtin in EXP and store the result in TARGET. Store
14527 true in *EXPANDEDP if we found a builtin to expand. */
14528 static rtx
14529 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14530 {
14531 const struct builtin_description *d;
14532 size_t i;
14533 enum insn_code icode;
14534 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14535 tree arg0, arg1, arg2;
14536 rtx op0, pat;
14537 machine_mode tmode, mode0;
14538 enum rs6000_builtins fcode
14539 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14540
14541 if (rs6000_overloaded_builtin_p (fcode))
14542 {
14543 *expandedp = true;
14544 error ("unresolved overload for Altivec builtin %qF", fndecl);
14545
14546 /* Given it is invalid, just generate a normal call. */
14547 return expand_call (exp, target, false);
14548 }
14549
14550 target = altivec_expand_dst_builtin (exp, target, expandedp);
14551 if (*expandedp)
14552 return target;
14553
14554 *expandedp = true;
14555
14556 switch (fcode)
14557 {
14558 case ALTIVEC_BUILTIN_STVX_V2DF:
14559 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14560 case ALTIVEC_BUILTIN_STVX_V2DI:
14561 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14562 case ALTIVEC_BUILTIN_STVX_V4SF:
14563 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14564 case ALTIVEC_BUILTIN_STVX:
14565 case ALTIVEC_BUILTIN_STVX_V4SI:
14566 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14567 case ALTIVEC_BUILTIN_STVX_V8HI:
14568 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14569 case ALTIVEC_BUILTIN_STVX_V16QI:
14570 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14571 case ALTIVEC_BUILTIN_STVEBX:
14572 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14573 case ALTIVEC_BUILTIN_STVEHX:
14574 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14575 case ALTIVEC_BUILTIN_STVEWX:
14576 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14577 case ALTIVEC_BUILTIN_STVXL_V2DF:
14578 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14579 case ALTIVEC_BUILTIN_STVXL_V2DI:
14580 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14581 case ALTIVEC_BUILTIN_STVXL_V4SF:
14582 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14583 case ALTIVEC_BUILTIN_STVXL:
14584 case ALTIVEC_BUILTIN_STVXL_V4SI:
14585 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14586 case ALTIVEC_BUILTIN_STVXL_V8HI:
14587 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14588 case ALTIVEC_BUILTIN_STVXL_V16QI:
14589 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14590
14591 case ALTIVEC_BUILTIN_STVLX:
14592 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14593 case ALTIVEC_BUILTIN_STVLXL:
14594 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14595 case ALTIVEC_BUILTIN_STVRX:
14596 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14597 case ALTIVEC_BUILTIN_STVRXL:
14598 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14599
14600 case P9V_BUILTIN_STXVL:
14601 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14602
14603 case P9V_BUILTIN_XST_LEN_R:
14604 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14605
14606 case VSX_BUILTIN_STXVD2X_V1TI:
14607 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14608 case VSX_BUILTIN_STXVD2X_V2DF:
14609 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14610 case VSX_BUILTIN_STXVD2X_V2DI:
14611 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14612 case VSX_BUILTIN_STXVW4X_V4SF:
14613 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14614 case VSX_BUILTIN_STXVW4X_V4SI:
14615 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14616 case VSX_BUILTIN_STXVW4X_V8HI:
14617 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14618 case VSX_BUILTIN_STXVW4X_V16QI:
14619 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14620
14621 /* For the following on big-endian, it's OK to use any appropriate
14622 unaligned-supporting store, so use a generic expander. For
14623 little-endian, the exact element-reversing instruction must
14624 be used. */
14625 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14626 {
14627 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14628 : CODE_FOR_vsx_st_elemrev_v1ti);
14629 return altivec_expand_stv_builtin (code, exp);
14630 }
14631 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14632 {
14633 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14634 : CODE_FOR_vsx_st_elemrev_v2df);
14635 return altivec_expand_stv_builtin (code, exp);
14636 }
14637 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14638 {
14639 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14640 : CODE_FOR_vsx_st_elemrev_v2di);
14641 return altivec_expand_stv_builtin (code, exp);
14642 }
14643 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14644 {
14645 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14646 : CODE_FOR_vsx_st_elemrev_v4sf);
14647 return altivec_expand_stv_builtin (code, exp);
14648 }
14649 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14650 {
14651 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14652 : CODE_FOR_vsx_st_elemrev_v4si);
14653 return altivec_expand_stv_builtin (code, exp);
14654 }
14655 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14656 {
14657 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14658 : CODE_FOR_vsx_st_elemrev_v8hi);
14659 return altivec_expand_stv_builtin (code, exp);
14660 }
14661 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14662 {
14663 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14664 : CODE_FOR_vsx_st_elemrev_v16qi);
14665 return altivec_expand_stv_builtin (code, exp);
14666 }
14667
14668 case ALTIVEC_BUILTIN_MFVSCR:
14669 icode = CODE_FOR_altivec_mfvscr;
14670 tmode = insn_data[icode].operand[0].mode;
14671
14672 if (target == 0
14673 || GET_MODE (target) != tmode
14674 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14675 target = gen_reg_rtx (tmode);
14676
14677 pat = GEN_FCN (icode) (target);
14678 if (! pat)
14679 return 0;
14680 emit_insn (pat);
14681 return target;
14682
14683 case ALTIVEC_BUILTIN_MTVSCR:
14684 icode = CODE_FOR_altivec_mtvscr;
14685 arg0 = CALL_EXPR_ARG (exp, 0);
14686 op0 = expand_normal (arg0);
14687 mode0 = insn_data[icode].operand[0].mode;
14688
14689 /* If we got invalid arguments, bail out before generating bad rtl. */
14690 if (arg0 == error_mark_node)
14691 return const0_rtx;
14692
14693 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14694 op0 = copy_to_mode_reg (mode0, op0);
14695
14696 pat = GEN_FCN (icode) (op0);
14697 if (pat)
14698 emit_insn (pat);
14699 return NULL_RTX;
14700
14701 case ALTIVEC_BUILTIN_DSSALL:
14702 emit_insn (gen_altivec_dssall ());
14703 return NULL_RTX;
14704
14705 case ALTIVEC_BUILTIN_DSS:
14706 icode = CODE_FOR_altivec_dss;
14707 arg0 = CALL_EXPR_ARG (exp, 0);
14708 STRIP_NOPS (arg0);
14709 op0 = expand_normal (arg0);
14710 mode0 = insn_data[icode].operand[0].mode;
14711
14712 /* If we got invalid arguments, bail out before generating bad rtl. */
14713 if (arg0 == error_mark_node)
14714 return const0_rtx;
14715
14716 if (TREE_CODE (arg0) != INTEGER_CST
14717 || TREE_INT_CST_LOW (arg0) & ~0x3)
14718 {
14719 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14720 return const0_rtx;
14721 }
14722
14723 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14724 op0 = copy_to_mode_reg (mode0, op0);
14725
14726 emit_insn (gen_altivec_dss (op0));
14727 return NULL_RTX;
14728
14729 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14730 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14731 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14732 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14733 case VSX_BUILTIN_VEC_INIT_V2DF:
14734 case VSX_BUILTIN_VEC_INIT_V2DI:
14735 case VSX_BUILTIN_VEC_INIT_V1TI:
14736 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14737
14738 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14739 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14740 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14741 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14742 case VSX_BUILTIN_VEC_SET_V2DF:
14743 case VSX_BUILTIN_VEC_SET_V2DI:
14744 case VSX_BUILTIN_VEC_SET_V1TI:
14745 return altivec_expand_vec_set_builtin (exp);
14746
14747 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14748 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14749 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14750 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14751 case VSX_BUILTIN_VEC_EXT_V2DF:
14752 case VSX_BUILTIN_VEC_EXT_V2DI:
14753 case VSX_BUILTIN_VEC_EXT_V1TI:
14754 return altivec_expand_vec_ext_builtin (exp, target);
14755
14756 case P9V_BUILTIN_VEC_EXTRACT4B:
14757 arg1 = CALL_EXPR_ARG (exp, 1);
14758 STRIP_NOPS (arg1);
14759
14760 /* Generate a normal call if it is invalid. */
14761 if (arg1 == error_mark_node)
14762 return expand_call (exp, target, false);
14763
14764 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14765 {
14766 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14767 return expand_call (exp, target, false);
14768 }
14769 break;
14770
14771 case P9V_BUILTIN_VEC_INSERT4B:
14772 arg2 = CALL_EXPR_ARG (exp, 2);
14773 STRIP_NOPS (arg2);
14774
14775 /* Generate a normal call if it is invalid. */
14776 if (arg2 == error_mark_node)
14777 return expand_call (exp, target, false);
14778
14779 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
14780 {
14781 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
14782 return expand_call (exp, target, false);
14783 }
14784 break;
14785
14786 default:
14787 /* Fall through to the expanders below the switch. */
14788 break;
14789 }
14790
14791 /* Expand abs* operations. */
14792 d = bdesc_abs;
14793 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14794 if (d->code == fcode)
14795 return altivec_expand_abs_builtin (d->icode, exp, target);
14796
14797 /* Expand the AltiVec predicates. */
14798 d = bdesc_altivec_preds;
14799 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14800 if (d->code == fcode)
14801 return altivec_expand_predicate_builtin (d->icode, exp, target);
14802
14803 /* The LV* builtins are funky: they are initialized differently from the table-driven builtins above, so expand them here explicitly. */
14804 switch (fcode)
14805 {
14806 case ALTIVEC_BUILTIN_LVSL:
14807 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14808 exp, target, false);
14809 case ALTIVEC_BUILTIN_LVSR:
14810 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14811 exp, target, false);
14812 case ALTIVEC_BUILTIN_LVEBX:
14813 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14814 exp, target, false);
14815 case ALTIVEC_BUILTIN_LVEHX:
14816 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14817 exp, target, false);
14818 case ALTIVEC_BUILTIN_LVEWX:
14819 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14820 exp, target, false);
14821 case ALTIVEC_BUILTIN_LVXL_V2DF:
14822 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14823 exp, target, false);
14824 case ALTIVEC_BUILTIN_LVXL_V2DI:
14825 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14826 exp, target, false);
14827 case ALTIVEC_BUILTIN_LVXL_V4SF:
14828 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14829 exp, target, false);
14830 case ALTIVEC_BUILTIN_LVXL:
14831 case ALTIVEC_BUILTIN_LVXL_V4SI:
14832 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14833 exp, target, false);
14834 case ALTIVEC_BUILTIN_LVXL_V8HI:
14835 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14836 exp, target, false);
14837 case ALTIVEC_BUILTIN_LVXL_V16QI:
14838 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14839 exp, target, false);
14840 case ALTIVEC_BUILTIN_LVX_V1TI:
14841 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
14842 exp, target, false);
14843 case ALTIVEC_BUILTIN_LVX_V2DF:
14844 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
14845 exp, target, false);
14846 case ALTIVEC_BUILTIN_LVX_V2DI:
14847 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
14848 exp, target, false);
14849 case ALTIVEC_BUILTIN_LVX_V4SF:
14850 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
14851 exp, target, false);
14852 case ALTIVEC_BUILTIN_LVX:
14853 case ALTIVEC_BUILTIN_LVX_V4SI:
14854 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
14855 exp, target, false);
14856 case ALTIVEC_BUILTIN_LVX_V8HI:
14857 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
14858 exp, target, false);
14859 case ALTIVEC_BUILTIN_LVX_V16QI:
14860 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
14861 exp, target, false);
14862 case ALTIVEC_BUILTIN_LVLX:
14863 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
14864 exp, target, true);
14865 case ALTIVEC_BUILTIN_LVLXL:
14866 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
14867 exp, target, true);
14868 case ALTIVEC_BUILTIN_LVRX:
14869 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
14870 exp, target, true);
14871 case ALTIVEC_BUILTIN_LVRXL:
14872 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
14873 exp, target, true);
14874 case VSX_BUILTIN_LXVD2X_V1TI:
14875 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
14876 exp, target, false);
14877 case VSX_BUILTIN_LXVD2X_V2DF:
14878 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
14879 exp, target, false);
14880 case VSX_BUILTIN_LXVD2X_V2DI:
14881 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
14882 exp, target, false);
14883 case VSX_BUILTIN_LXVW4X_V4SF:
14884 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
14885 exp, target, false);
14886 case VSX_BUILTIN_LXVW4X_V4SI:
14887 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
14888 exp, target, false);
14889 case VSX_BUILTIN_LXVW4X_V8HI:
14890 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
14891 exp, target, false);
14892 case VSX_BUILTIN_LXVW4X_V16QI:
14893 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
14894 exp, target, false);
14895 /* For the following on big-endian, it's OK to use any appropriate
14896 unaligned-supporting load, so use a generic expander. For
14897 little-endian, the exact element-reversing instruction must
14898 be used. */
14899 case VSX_BUILTIN_LD_ELEMREV_V2DF:
14900 {
14901 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
14902 : CODE_FOR_vsx_ld_elemrev_v2df);
14903 return altivec_expand_lv_builtin (code, exp, target, false);
14904 }
14905 case VSX_BUILTIN_LD_ELEMREV_V1TI:
14906 {
14907 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
14908 : CODE_FOR_vsx_ld_elemrev_v1ti);
14909 return altivec_expand_lv_builtin (code, exp, target, false);
14910 }
14911 case VSX_BUILTIN_LD_ELEMREV_V2DI:
14912 {
14913 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
14914 : CODE_FOR_vsx_ld_elemrev_v2di);
14915 return altivec_expand_lv_builtin (code, exp, target, false);
14916 }
14917 case VSX_BUILTIN_LD_ELEMREV_V4SF:
14918 {
14919 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
14920 : CODE_FOR_vsx_ld_elemrev_v4sf);
14921 return altivec_expand_lv_builtin (code, exp, target, false);
14922 }
14923 case VSX_BUILTIN_LD_ELEMREV_V4SI:
14924 {
14925 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
14926 : CODE_FOR_vsx_ld_elemrev_v4si);
14927 return altivec_expand_lv_builtin (code, exp, target, false);
14928 }
14929 case VSX_BUILTIN_LD_ELEMREV_V8HI:
14930 {
14931 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
14932 : CODE_FOR_vsx_ld_elemrev_v8hi);
14933 return altivec_expand_lv_builtin (code, exp, target, false);
14934 }
14935 case VSX_BUILTIN_LD_ELEMREV_V16QI:
14936 {
14937 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
14938 : CODE_FOR_vsx_ld_elemrev_v16qi);
14939 return altivec_expand_lv_builtin (code, exp, target, false);
14940 }
14941 default:
14942 /* Fall through to the code after the switch. */
14943 break;
14945 }
14946
14947 *expandedp = false;
14948 return NULL_RTX;
14949 }
14950
14951 /* Check whether a builtin function is supported in this target
14952 configuration. */
14953 bool
14954 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
14955 {
14956 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
14957 /* The builtin is supported iff every target bit it requires is set
14958 in the current builtin mask. */
14959 return (fnmask & rs6000_builtin_mask) == fnmask;
14961 }
14962
14963 /* Raise an error message for a builtin function that is called without the
14964 appropriate target options being set. */
14965
14966 static void
14967 rs6000_invalid_builtin (enum rs6000_builtins fncode)
14968 {
14969 size_t uns_fncode = (size_t) fncode;
14970 const char *name = rs6000_builtin_info[uns_fncode].name;
14971 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
14972
14973 gcc_assert (name != NULL);
14974 if ((fnmask & RS6000_BTM_CELL) != 0)
14975 error ("builtin function %qs is only valid for the cell processor", name);
14976 else if ((fnmask & RS6000_BTM_VSX) != 0)
14977 error ("builtin function %qs requires the %qs option", name, "-mvsx");
14978 else if ((fnmask & RS6000_BTM_HTM) != 0)
14979 error ("builtin function %qs requires the %qs option", name, "-mhtm");
14980 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
14981 error ("builtin function %qs requires the %qs option", name, "-maltivec");
14982 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14983 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14984 error ("builtin function %qs requires the %qs and %qs options",
14985 name, "-mhard-dfp", "-mpower8-vector");
14986 else if ((fnmask & RS6000_BTM_DFP) != 0)
14987 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
14988 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
14989 error ("builtin function %qs requires the %qs option", name,
14990 "-mpower8-vector");
14991 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14992 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14993 error ("builtin function %qs requires the %qs and %qs options",
14994 name, "-mcpu=power9", "-m64");
14995 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
14996 error ("builtin function %qs requires the %qs option", name,
14997 "-mcpu=power9");
14998 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14999 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15000 error ("builtin function %qs requires the %qs and %qs options",
15001 name, "-mcpu=power9", "-m64");
15002 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15003 error ("builtin function %qs requires the %qs option", name,
15004 "-mcpu=power9");
15005 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15006 {
15007 if (!TARGET_HARD_FLOAT)
15008 error ("builtin function %qs requires the %qs option", name,
15009 "-mhard-float");
15010 else
15011 error ("builtin function %qs requires the %qs option", name,
15012 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15013 }
15014 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15015 error ("builtin function %qs requires the %qs option", name,
15016 "-mhard-float");
15017 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15018 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15019 name);
15020 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15021 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
15022 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15023 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15024 error ("builtin function %qs requires the %qs (or newer), and "
15025 "%qs or %qs options",
15026 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15027 else
15028 error ("builtin function %qs is not supported with the current options",
15029 name);
15030 }
15031
15032 /* Target hook for early folding of built-ins, shamelessly stolen
15033 from ia64.c. */
15034
15035 static tree
15036 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15037 int n_args ATTRIBUTE_UNUSED,
15038 tree *args ATTRIBUTE_UNUSED,
15039 bool ignore ATTRIBUTE_UNUSED)
15040 {
15041 #ifdef SUBTARGET_FOLD_BUILTIN
15042 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15043 #else
15044 return NULL_TREE;
15045 #endif
15046 }
15047
15048 /* Helper function to sort out which built-ins may be valid without having
15049 a LHS. */
15050 static bool
15051 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15052 {
15053 switch (fn_code)
15054 {
15055 case ALTIVEC_BUILTIN_STVX_V16QI:
15056 case ALTIVEC_BUILTIN_STVX_V8HI:
15057 case ALTIVEC_BUILTIN_STVX_V4SI:
15058 case ALTIVEC_BUILTIN_STVX_V4SF:
15059 case ALTIVEC_BUILTIN_STVX_V2DI:
15060 case ALTIVEC_BUILTIN_STVX_V2DF:
15061 case VSX_BUILTIN_STXVW4X_V16QI:
15062 case VSX_BUILTIN_STXVW4X_V8HI:
15063 case VSX_BUILTIN_STXVW4X_V4SF:
15064 case VSX_BUILTIN_STXVW4X_V4SI:
15065 case VSX_BUILTIN_STXVD2X_V2DF:
15066 case VSX_BUILTIN_STXVD2X_V2DI:
15067 return true;
15068 default:
15069 return false;
15070 }
15071 }
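/* All of the cases above are stores: their side effect is the memory
   write itself, so such calls remain meaningful even when the result
   is unused.  */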
15072
15073 /* Helper function to handle the gimple folding of a vector compare
15074 operation. This sets up true/false vectors, and uses the
15075 VEC_COND_EXPR operation.
15076 CODE indicates which comparison is to be made. (EQ, GT, ...).
15077 TYPE indicates the type of the result. */
15078 static tree
15079 fold_build_vec_cmp (tree_code code, tree type,
15080 tree arg0, tree arg1)
15081 {
15082 tree cmp_type = build_same_sized_truth_vector_type (type);
15083 tree zero_vec = build_zero_cst (type);
15084 tree minus_one_vec = build_minus_one_cst (type);
15085 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15086 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15087 }
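/* For example, folding a vector int comparison with CODE == GT_EXPR
   builds
     cmp = arg0 > arg1;   (a boolean vector)
     result = VEC_COND_EXPR (cmp, {-1,...,-1}, {0,...,0});
   so lanes that compare true become all-ones and false lanes become
   zero.  */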
15088
15089 /* Helper function to handle the in-between steps for the
15090 vector compare built-ins. */
15091 static void
15092 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15093 {
15094 tree arg0 = gimple_call_arg (stmt, 0);
15095 tree arg1 = gimple_call_arg (stmt, 1);
15096 tree lhs = gimple_call_lhs (stmt);
15097 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15098 gimple *g = gimple_build_assign (lhs, cmp);
15099 gimple_set_location (g, gimple_location (stmt));
15100 gsi_replace (gsi, g, true);
15101 }
15102
15103 /* Helper function to handle the vector merge[hl] built-ins. The
15104 implementation difference between the h and l versions is in the
15105 values used when building the permute vector for high-word versus
15106 low-word merge. The variance is keyed off the use_high parameter. */
15107 static void
15108 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15109 {
15110 tree arg0 = gimple_call_arg (stmt, 0);
15111 tree arg1 = gimple_call_arg (stmt, 1);
15112 tree lhs = gimple_call_lhs (stmt);
15113 tree lhs_type = TREE_TYPE (lhs);
15114 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15115 int midpoint = n_elts / 2;
15116 int offset = 0;
15117
15118 if (use_high == 1)
15119 offset = midpoint;
15120
15121 /* The permute_type will match the lhs for integral types. For double and
15122 float types, the permute type needs to map to the integer vector type
15123 (V2DI or V4SI) of matching size. */
15124 tree permute_type;
15125 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs_type)))
15126 permute_type = lhs_type;
15127 else
15128 {
15129 if (types_compatible_p (TREE_TYPE (lhs_type),
15130 TREE_TYPE (V2DF_type_node)))
15131 permute_type = V2DI_type_node;
15132 else if (types_compatible_p (TREE_TYPE (lhs_type),
15133 TREE_TYPE (V4SF_type_node)))
15134 permute_type = V4SI_type_node;
15135 else
15136 gcc_unreachable ();
15137 }
15138 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15139
15140 for (int i = 0; i < midpoint; i++)
15141 {
15142 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15143 offset + i));
15144 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15145 offset + n_elts + i));
15146 }
15147
15148 tree permute = elts.build ();
15149
15150 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15151 gimple_set_location (g, gimple_location (stmt));
15152 gsi_replace (gsi, g, true);
15153 }
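/* Worked example of the loop above: for V4SI, n_elts == 4 and
   midpoint == 2, so use_high == 0 builds the permute selector
   {0, 4, 1, 5} and use_high == 1 builds {2, 6, 3, 7}.  */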
15154
15155 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15156 a constant, use rs6000_fold_builtin.) */
15157
15158 bool
15159 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15160 {
15161 gimple *stmt = gsi_stmt (*gsi);
15162 tree fndecl = gimple_call_fndecl (stmt);
15163 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15164 enum rs6000_builtins fn_code
15165 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15166 tree arg0, arg1, lhs, temp;
15167 gimple *g;
15168
15169 size_t uns_fncode = (size_t) fn_code;
15170 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15171 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15172 const char *fn_name2 = (icode != CODE_FOR_nothing)
15173 ? get_insn_name ((int) icode)
15174 : "nothing";
15175
15176 if (TARGET_DEBUG_BUILTIN)
15177 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15178 fn_code, fn_name1, fn_name2);
15179
15180 if (!rs6000_fold_gimple)
15181 return false;
15182
15183 /* Prevent gimple folding for code that does not have a LHS, unless it is
15184 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15185 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15186 return false;
15187
15188 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
15189 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15190 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15191 if (!func_valid_p)
15192 return false;
15193
15194 switch (fn_code)
15195 {
15196 /* Flavors of vec_add. We deliberately don't expand
15197 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15198 TImode, resulting in much poorer code generation. */
15199 case ALTIVEC_BUILTIN_VADDUBM:
15200 case ALTIVEC_BUILTIN_VADDUHM:
15201 case ALTIVEC_BUILTIN_VADDUWM:
15202 case P8V_BUILTIN_VADDUDM:
15203 case ALTIVEC_BUILTIN_VADDFP:
15204 case VSX_BUILTIN_XVADDDP:
15205 arg0 = gimple_call_arg (stmt, 0);
15206 arg1 = gimple_call_arg (stmt, 1);
15207 lhs = gimple_call_lhs (stmt);
15208 g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
15209 gimple_set_location (g, gimple_location (stmt));
15210 gsi_replace (gsi, g, true);
15211 return true;
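/* Illustratively, lhs = vec_add (a, b) on vector ints arrives here as
   ALTIVEC_BUILTIN_VADDUWM and is replaced by the single GIMPLE
   statement lhs = a + b;  */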
15212 /* Flavors of vec_sub. We deliberately don't expand
15213 P8V_BUILTIN_VSUBUQM. */
15214 case ALTIVEC_BUILTIN_VSUBUBM:
15215 case ALTIVEC_BUILTIN_VSUBUHM:
15216 case ALTIVEC_BUILTIN_VSUBUWM:
15217 case P8V_BUILTIN_VSUBUDM:
15218 case ALTIVEC_BUILTIN_VSUBFP:
15219 case VSX_BUILTIN_XVSUBDP:
15220 arg0 = gimple_call_arg (stmt, 0);
15221 arg1 = gimple_call_arg (stmt, 1);
15222 lhs = gimple_call_lhs (stmt);
15223 g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
15224 gimple_set_location (g, gimple_location (stmt));
15225 gsi_replace (gsi, g, true);
15226 return true;
15227 case VSX_BUILTIN_XVMULSP:
15228 case VSX_BUILTIN_XVMULDP:
15229 arg0 = gimple_call_arg (stmt, 0);
15230 arg1 = gimple_call_arg (stmt, 1);
15231 lhs = gimple_call_lhs (stmt);
15232 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15233 gimple_set_location (g, gimple_location (stmt));
15234 gsi_replace (gsi, g, true);
15235 return true;
15236 /* Even element flavors of vec_mul (signed). */
15237 case ALTIVEC_BUILTIN_VMULESB:
15238 case ALTIVEC_BUILTIN_VMULESH:
15239 case P8V_BUILTIN_VMULESW:
15240 /* Even element flavors of vec_mul (unsigned). */
15241 case ALTIVEC_BUILTIN_VMULEUB:
15242 case ALTIVEC_BUILTIN_VMULEUH:
15243 case P8V_BUILTIN_VMULEUW:
15244 arg0 = gimple_call_arg (stmt, 0);
15245 arg1 = gimple_call_arg (stmt, 1);
15246 lhs = gimple_call_lhs (stmt);
15247 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15248 gimple_set_location (g, gimple_location (stmt));
15249 gsi_replace (gsi, g, true);
15250 return true;
15251 /* Odd element flavors of vec_mul (signed). */
15252 case ALTIVEC_BUILTIN_VMULOSB:
15253 case ALTIVEC_BUILTIN_VMULOSH:
15254 case P8V_BUILTIN_VMULOSW:
15255 /* Odd element flavors of vec_mul (unsigned). */
15256 case ALTIVEC_BUILTIN_VMULOUB:
15257 case ALTIVEC_BUILTIN_VMULOUH:
15258 case P8V_BUILTIN_VMULOUW:
15259 arg0 = gimple_call_arg (stmt, 0);
15260 arg1 = gimple_call_arg (stmt, 1);
15261 lhs = gimple_call_lhs (stmt);
15262 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15263 gimple_set_location (g, gimple_location (stmt));
15264 gsi_replace (gsi, g, true);
15265 return true;
15266 /* Flavors of vec_div (Integer). */
15267 case VSX_BUILTIN_DIV_V2DI:
15268 case VSX_BUILTIN_UDIV_V2DI:
15269 arg0 = gimple_call_arg (stmt, 0);
15270 arg1 = gimple_call_arg (stmt, 1);
15271 lhs = gimple_call_lhs (stmt);
15272 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15273 gimple_set_location (g, gimple_location (stmt));
15274 gsi_replace (gsi, g, true);
15275 return true;
15276 /* Flavors of vec_div (Float). */
15277 case VSX_BUILTIN_XVDIVSP:
15278 case VSX_BUILTIN_XVDIVDP:
15279 arg0 = gimple_call_arg (stmt, 0);
15280 arg1 = gimple_call_arg (stmt, 1);
15281 lhs = gimple_call_lhs (stmt);
15282 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15283 gimple_set_location (g, gimple_location (stmt));
15284 gsi_replace (gsi, g, true);
15285 return true;
15286 /* Flavors of vec_and. */
15287 case ALTIVEC_BUILTIN_VAND:
15288 arg0 = gimple_call_arg (stmt, 0);
15289 arg1 = gimple_call_arg (stmt, 1);
15290 lhs = gimple_call_lhs (stmt);
15291 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15292 gimple_set_location (g, gimple_location (stmt));
15293 gsi_replace (gsi, g, true);
15294 return true;
15295 /* Flavors of vec_andc. */
15296 case ALTIVEC_BUILTIN_VANDC:
15297 arg0 = gimple_call_arg (stmt, 0);
15298 arg1 = gimple_call_arg (stmt, 1);
15299 lhs = gimple_call_lhs (stmt);
15300 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15301 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15302 gimple_set_location (g, gimple_location (stmt));
15303 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15304 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15305 gimple_set_location (g, gimple_location (stmt));
15306 gsi_replace (gsi, g, true);
15307 return true;
15308 /* Flavors of vec_nand. */
15309 case P8V_BUILTIN_VEC_NAND:
15310 case P8V_BUILTIN_NAND_V16QI:
15311 case P8V_BUILTIN_NAND_V8HI:
15312 case P8V_BUILTIN_NAND_V4SI:
15313 case P8V_BUILTIN_NAND_V4SF:
15314 case P8V_BUILTIN_NAND_V2DF:
15315 case P8V_BUILTIN_NAND_V2DI:
15316 arg0 = gimple_call_arg (stmt, 0);
15317 arg1 = gimple_call_arg (stmt, 1);
15318 lhs = gimple_call_lhs (stmt);
15319 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15320 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15321 gimple_set_location (g, gimple_location (stmt));
15322 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15323 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15324 gimple_set_location (g, gimple_location (stmt));
15325 gsi_replace (gsi, g, true);
15326 return true;
15327 /* Flavors of vec_or. */
15328 case ALTIVEC_BUILTIN_VOR:
15329 arg0 = gimple_call_arg (stmt, 0);
15330 arg1 = gimple_call_arg (stmt, 1);
15331 lhs = gimple_call_lhs (stmt);
15332 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15333 gimple_set_location (g, gimple_location (stmt));
15334 gsi_replace (gsi, g, true);
15335 return true;
15336 /* Flavors of vec_orc. */
15337 case P8V_BUILTIN_ORC_V16QI:
15338 case P8V_BUILTIN_ORC_V8HI:
15339 case P8V_BUILTIN_ORC_V4SI:
15340 case P8V_BUILTIN_ORC_V4SF:
15341 case P8V_BUILTIN_ORC_V2DF:
15342 case P8V_BUILTIN_ORC_V2DI:
15343 arg0 = gimple_call_arg (stmt, 0);
15344 arg1 = gimple_call_arg (stmt, 1);
15345 lhs = gimple_call_lhs (stmt);
15346 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15347 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15348 gimple_set_location (g, gimple_location (stmt));
15349 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15350 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15351 gimple_set_location (g, gimple_location (stmt));
15352 gsi_replace (gsi, g, true);
15353 return true;
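      /* A sketch of the vec_orc fold above:
	   r = vec_orc (a, b);
	 becomes
	   t = ~b;
	   r = a | t;  */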
15354 /* Flavors of vec_xor. */
15355 case ALTIVEC_BUILTIN_VXOR:
15356 arg0 = gimple_call_arg (stmt, 0);
15357 arg1 = gimple_call_arg (stmt, 1);
15358 lhs = gimple_call_lhs (stmt);
15359 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15360 gimple_set_location (g, gimple_location (stmt));
15361 gsi_replace (gsi, g, true);
15362 return true;
15363 /* Flavors of vec_nor. */
15364 case ALTIVEC_BUILTIN_VNOR:
15365 arg0 = gimple_call_arg (stmt, 0);
15366 arg1 = gimple_call_arg (stmt, 1);
15367 lhs = gimple_call_lhs (stmt);
15368 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15369 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15370 gimple_set_location (g, gimple_location (stmt));
15371 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15372 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15373 gimple_set_location (g, gimple_location (stmt));
15374 gsi_replace (gsi, g, true);
15375 return true;
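      /* A sketch of the vec_nor fold above:
	   r = vec_nor (a, b);
	 becomes
	   t = a | b;
	   r = ~t;  */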
15376 /* Flavors of vec_abs. */
15377 case ALTIVEC_BUILTIN_ABS_V16QI:
15378 case ALTIVEC_BUILTIN_ABS_V8HI:
15379 case ALTIVEC_BUILTIN_ABS_V4SI:
15380 case ALTIVEC_BUILTIN_ABS_V4SF:
15381 case P8V_BUILTIN_ABS_V2DI:
15382 case VSX_BUILTIN_XVABSDP:
15383 arg0 = gimple_call_arg (stmt, 0);
15384 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15385 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15386 return false;
15387 lhs = gimple_call_lhs (stmt);
15388 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15389 gimple_set_location (g, gimple_location (stmt));
15390 gsi_replace (gsi, g, true);
15391 return true;
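      /* A sketch of the guard above: for a signed integral element type
	 whose overflow does not wrap (e.g. vector signed int without
	 -fwrapv), ABS_EXPR of the minimum value would be undefined in
	 GIMPLE, so the fold is declined and the builtin is expanded
	 normally; otherwise v = vec_abs (v) simply becomes
	 v = ABS_EXPR <v>.  */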
15392 /* Flavors of vec_min. */
15393 case VSX_BUILTIN_XVMINDP:
15394 case P8V_BUILTIN_VMINSD:
15395 case P8V_BUILTIN_VMINUD:
15396 case ALTIVEC_BUILTIN_VMINSB:
15397 case ALTIVEC_BUILTIN_VMINSH:
15398 case ALTIVEC_BUILTIN_VMINSW:
15399 case ALTIVEC_BUILTIN_VMINUB:
15400 case ALTIVEC_BUILTIN_VMINUH:
15401 case ALTIVEC_BUILTIN_VMINUW:
15402 case ALTIVEC_BUILTIN_VMINFP:
15403 arg0 = gimple_call_arg (stmt, 0);
15404 arg1 = gimple_call_arg (stmt, 1);
15405 lhs = gimple_call_lhs (stmt);
15406 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15407 gimple_set_location (g, gimple_location (stmt));
15408 gsi_replace (gsi, g, true);
15409 return true;
15410 /* Flavors of vec_max. */
15411 case VSX_BUILTIN_XVMAXDP:
15412 case P8V_BUILTIN_VMAXSD:
15413 case P8V_BUILTIN_VMAXUD:
15414 case ALTIVEC_BUILTIN_VMAXSB:
15415 case ALTIVEC_BUILTIN_VMAXSH:
15416 case ALTIVEC_BUILTIN_VMAXSW:
15417 case ALTIVEC_BUILTIN_VMAXUB:
15418 case ALTIVEC_BUILTIN_VMAXUH:
15419 case ALTIVEC_BUILTIN_VMAXUW:
15420 case ALTIVEC_BUILTIN_VMAXFP:
15421 arg0 = gimple_call_arg (stmt, 0);
15422 arg1 = gimple_call_arg (stmt, 1);
15423 lhs = gimple_call_lhs (stmt);
15424 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15425 gimple_set_location (g, gimple_location (stmt));
15426 gsi_replace (gsi, g, true);
15427 return true;
15428 /* Flavors of vec_eqv. */
15429 case P8V_BUILTIN_EQV_V16QI:
15430 case P8V_BUILTIN_EQV_V8HI:
15431 case P8V_BUILTIN_EQV_V4SI:
15432 case P8V_BUILTIN_EQV_V4SF:
15433 case P8V_BUILTIN_EQV_V2DF:
15434 case P8V_BUILTIN_EQV_V2DI:
15435 arg0 = gimple_call_arg (stmt, 0);
15436 arg1 = gimple_call_arg (stmt, 1);
15437 lhs = gimple_call_lhs (stmt);
15438 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15439 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15440 gimple_set_location (g, gimple_location (stmt));
15441 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15442 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15443 gimple_set_location (g, gimple_location (stmt));
15444 gsi_replace (gsi, g, true);
15445 return true;
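      /* A sketch of the vec_eqv fold above:
	   r = vec_eqv (a, b);
	 becomes
	   t = a ^ b;
	   r = ~t;  */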
15446 /* Flavors of vec_rotate_left. */
15447 case ALTIVEC_BUILTIN_VRLB:
15448 case ALTIVEC_BUILTIN_VRLH:
15449 case ALTIVEC_BUILTIN_VRLW:
15450 case P8V_BUILTIN_VRLD:
15451 arg0 = gimple_call_arg (stmt, 0);
15452 arg1 = gimple_call_arg (stmt, 1);
15453 lhs = gimple_call_lhs (stmt);
15454 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15455 gimple_set_location (g, gimple_location (stmt));
15456 gsi_replace (gsi, g, true);
15457 return true;
15458 /* Flavors of vector shift right algebraic.
15459 vec_sra{b,h,w} -> vsra{b,h,w}. */
15460 case ALTIVEC_BUILTIN_VSRAB:
15461 case ALTIVEC_BUILTIN_VSRAH:
15462 case ALTIVEC_BUILTIN_VSRAW:
15463 case P8V_BUILTIN_VSRAD:
15464 arg0 = gimple_call_arg (stmt, 0);
15465 arg1 = gimple_call_arg (stmt, 1);
15466 lhs = gimple_call_lhs (stmt);
15467 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
15468 gimple_set_location (g, gimple_location (stmt));
15469 gsi_replace (gsi, g, true);
15470 return true;
15471 /* Flavors of vector shift left.
15472 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15473 case ALTIVEC_BUILTIN_VSLB:
15474 case ALTIVEC_BUILTIN_VSLH:
15475 case ALTIVEC_BUILTIN_VSLW:
15476 case P8V_BUILTIN_VSLD:
15477 {
15478 location_t loc;
15479 gimple_seq stmts = NULL;
15480 arg0 = gimple_call_arg (stmt, 0);
15481 tree arg0_type = TREE_TYPE (arg0);
15482 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15483 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15484 return false;
15485 arg1 = gimple_call_arg (stmt, 1);
15486 tree arg1_type = TREE_TYPE (arg1);
15487 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15488 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15489 loc = gimple_location (stmt);
15490 lhs = gimple_call_lhs (stmt);
15491 /* Force arg1 into the valid shift range for the arg0 element type. */
15492 /* Build a vector whose elements all hold the element size in bits. */
15493 int n_elts = VECTOR_CST_NELTS (arg1);
15494 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15495 * BITS_PER_UNIT;
15496 tree element_size = build_int_cst (unsigned_element_type,
15497 tree_size_in_bits / n_elts);
15498 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15499 for (int i = 0; i < n_elts; i++)
15500 elts.safe_push (element_size);
15501 tree modulo_tree = elts.build ();
15502 /* Modulo the provided shift value against that vector. */
15503 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15504 unsigned_arg1_type, arg1);
15505 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15506 unsigned_arg1_type, unsigned_arg1,
15507 modulo_tree);
15508 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15509 /* And finally, do the shift. */
15510 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15511 gimple_set_location (g, gimple_location (stmt));
15512 gsi_replace (gsi, g, true);
15513 return true;
15514 }
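      /* A sketch of the vec_sl fold above, assuming vector unsigned int
	 operands:
	   r = vec_sl (a, b);
	 becomes the GIMPLE equivalent of
	   b' = b % { 32, 32, 32, 32 };
	   r = a << b';
	 reducing each shift count modulo the element width in bits keeps
	 the GIMPLE left shift well defined for out-of-range counts.  */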
15515 /* Flavors of vector shift right. */
15516 case ALTIVEC_BUILTIN_VSRB:
15517 case ALTIVEC_BUILTIN_VSRH:
15518 case ALTIVEC_BUILTIN_VSRW:
15519 case P8V_BUILTIN_VSRD:
15520 {
15521 arg0 = gimple_call_arg (stmt, 0);
15522 arg1 = gimple_call_arg (stmt, 1);
15523 lhs = gimple_call_lhs (stmt);
15524 gimple_seq stmts = NULL;
15525 /* Convert arg0 to unsigned. */
15526 tree arg0_unsigned
15527 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15528 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15529 tree res
15530 = gimple_build (&stmts, RSHIFT_EXPR,
15531 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
15532 /* Convert result back to the lhs type. */
15533 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15534 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15535 update_call_from_tree (gsi, res);
15536 return true;
15537 }
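      /* A sketch of the vec_sr fold above, assuming vector signed int
	 operands:
	   r = vec_sr (a, b);
	 becomes
	   ua = VIEW_CONVERT_EXPR <vector unsigned int> (a);
	   ur = ua >> b;
	   r  = VIEW_CONVERT_EXPR <vector signed int> (ur);
	 going through the unsigned type makes the GIMPLE shift logical
	 (zero-filling), matching the vsr* instructions.  */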
15538 /* Vector loads. */
15539 case ALTIVEC_BUILTIN_LVX_V16QI:
15540 case ALTIVEC_BUILTIN_LVX_V8HI:
15541 case ALTIVEC_BUILTIN_LVX_V4SI:
15542 case ALTIVEC_BUILTIN_LVX_V4SF:
15543 case ALTIVEC_BUILTIN_LVX_V2DI:
15544 case ALTIVEC_BUILTIN_LVX_V2DF:
15545 case ALTIVEC_BUILTIN_LVX_V1TI:
15546 {
15547 arg0 = gimple_call_arg (stmt, 0); // offset
15548 arg1 = gimple_call_arg (stmt, 1); // address
15549 lhs = gimple_call_lhs (stmt);
15550 location_t loc = gimple_location (stmt);
15551 /* Since arg1 may be cast to a different type, just use ptr_type_node
15552 here instead of trying to enforce TBAA on pointer types. */
15553 tree arg1_type = ptr_type_node;
15554 tree lhs_type = TREE_TYPE (lhs);
15555 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15556 the tree using the value from arg0. The resulting type will match
15557 the type of arg1. */
15558 gimple_seq stmts = NULL;
15559 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15560 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15561 arg1_type, arg1, temp_offset);
15562 /* Mask off any lower bits from the address. */
15563 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15564 arg1_type, temp_addr,
15565 build_int_cst (arg1_type, -16));
15566 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15567 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15568 take an offset, but since we've already incorporated the offset
15569 above, here we just pass in a zero. */
15570 gimple *g
15571 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15572 build_int_cst (arg1_type, 0)));
15573 gimple_set_location (g, loc);
15574 gsi_replace (gsi, g, true);
15575 return true;
15576 }
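      /* A sketch of the result for vec_ld, assuming a vector signed int
	 load:
	   r = vec_ld (off, p);
	 becomes the GIMPLE equivalent of
	   addr = (char *) p + off;
	   r = *(vector signed int *) ((uintptr_t) addr & -16);
	 masking off the low four address bits mirrors what the lvx
	 hardware instruction does.  */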
15577 /* Vector stores. */
15578 case ALTIVEC_BUILTIN_STVX_V16QI:
15579 case ALTIVEC_BUILTIN_STVX_V8HI:
15580 case ALTIVEC_BUILTIN_STVX_V4SI:
15581 case ALTIVEC_BUILTIN_STVX_V4SF:
15582 case ALTIVEC_BUILTIN_STVX_V2DI:
15583 case ALTIVEC_BUILTIN_STVX_V2DF:
15584 {
15585 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15586 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15587 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15588 location_t loc = gimple_location (stmt);
15589 tree arg0_type = TREE_TYPE (arg0);
15590 /* Use ptr_type_node (no TBAA) for the arg2_type.
15591 FIXME: (Richard) "A proper fix would be to transition this type as
15592 seen from the frontend to GIMPLE, for example in a similar way we
15593 do for MEM_REFs by piggy-backing that on an extra argument, a
15594 constant zero pointer of the alias pointer type to use (which would
15595 also serve as a type indicator of the store itself). I'd use a
15596 target specific internal function for this (not sure if we can have
15597 those target specific, but I guess if it's folded away then that's
15598 fine) and get away with the overload set." */
15599 tree arg2_type = ptr_type_node;
15600 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15601 the tree using the value from arg1. The resulting type will match
15602 the type of arg2. */
15603 gimple_seq stmts = NULL;
15604 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15605 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15606 arg2_type, arg2, temp_offset);
15607 /* Mask off any lower bits from the address. */
15608 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15609 arg2_type, temp_addr,
15610 build_int_cst (arg2_type, -16));
15611 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15612 /* The desired gimple result should be similar to:
15613 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15614 gimple *g
15615 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15616 build_int_cst (arg2_type, 0)), arg0);
15617 gimple_set_location (g, loc);
15618 gsi_replace (gsi, g, true);
15619 return true;
15620 }
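      /* A sketch of the symmetric vec_st fold above:
	   vec_st (v, off, p);
	 becomes
	   addr = (char *) p + off;
	   *(vector_type *) ((uintptr_t) addr & -16) = v;
	 where vector_type stands for the type of v.  */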
15621
15622 /* Unaligned vector loads. */
15623 case VSX_BUILTIN_LXVW4X_V16QI:
15624 case VSX_BUILTIN_LXVW4X_V8HI:
15625 case VSX_BUILTIN_LXVW4X_V4SF:
15626 case VSX_BUILTIN_LXVW4X_V4SI:
15627 case VSX_BUILTIN_LXVD2X_V2DF:
15628 case VSX_BUILTIN_LXVD2X_V2DI:
15629 {
15630 arg0 = gimple_call_arg (stmt, 0); // offset
15631 arg1 = gimple_call_arg (stmt, 1); // address
15632 lhs = gimple_call_lhs (stmt);
15633 location_t loc = gimple_location (stmt);
15634 /* Since arg1 may be cast to a different type, just use ptr_type_node
15635 here instead of trying to enforce TBAA on pointer types. */
15636 tree arg1_type = ptr_type_node;
15637 tree lhs_type = TREE_TYPE (lhs);
15638 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15639 required alignment on POWER is 4 bytes regardless of the data type. */
15640 tree align_ltype = build_aligned_type (lhs_type, 4);
15641 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15642 the tree using the value from arg0. The resulting type will match
15643 the type of arg1. */
15644 gimple_seq stmts = NULL;
15645 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15646 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15647 arg1_type, arg1, temp_offset);
15648 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15649 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15650 take an offset, but since we've already incorporated the offset
15651 above, here we just pass in a zero. */
15652 gimple *g;
15653 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15654 build_int_cst (arg1_type, 0)));
15655 gimple_set_location (g, loc);
15656 gsi_replace (gsi, g, true);
15657 return true;
15658 }
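      /* A sketch of the result for the unaligned variants:
	   r = vec_vsx_ld (off, p);
	 becomes
	   r = *(vector_type_aligned_4 *) ((char *) p + off);
	 where vector_type_aligned_4 is the vector type rebuilt with
	 4-byte alignment.  Unlike the lvx case above, no address bits are
	 masked off, and the reduced alignment on the MEM_REF tells later
	 passes not to assume the natural 16-byte alignment.  The store
	 case below is handled symmetrically.  */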
15659
15660 /* Unaligned vector stores. */
15661 case VSX_BUILTIN_STXVW4X_V16QI:
15662 case VSX_BUILTIN_STXVW4X_V8HI:
15663 case VSX_BUILTIN_STXVW4X_V4SF:
15664 case VSX_BUILTIN_STXVW4X_V4SI:
15665 case VSX_BUILTIN_STXVD2X_V2DF:
15666 case VSX_BUILTIN_STXVD2X_V2DI:
15667 {
15668 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15669 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15670 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15671 location_t loc = gimple_location (stmt);
15672 tree arg0_type = TREE_TYPE (arg0);
15673 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15674 tree arg2_type = ptr_type_node;
15675 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15676 required alignment on POWER is 4 bytes regardless of the data type. */
15677 tree align_stype = build_aligned_type (arg0_type, 4);
15678 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15679 the tree using the value from arg1. */
15680 gimple_seq stmts = NULL;
15681 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15682 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15683 arg2_type, arg2, temp_offset);
15684 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15685 gimple *g;
15686 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15687 build_int_cst (arg2_type, 0)), arg0);
15688 gimple_set_location (g, loc);
15689 gsi_replace (gsi, g, true);
15690 return true;
15691 }
15692
15693 /* Vector fused multiply-add (fma). */
15694 case ALTIVEC_BUILTIN_VMADDFP:
15695 case VSX_BUILTIN_XVMADDDP:
15696 case ALTIVEC_BUILTIN_VMLADDUHM:
15697 {
15698 arg0 = gimple_call_arg (stmt, 0);
15699 arg1 = gimple_call_arg (stmt, 1);
15700 tree arg2 = gimple_call_arg (stmt, 2);
15701 lhs = gimple_call_lhs (stmt);
15702 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15703 gimple_call_set_lhs (g, lhs);
15704 gimple_call_set_nothrow (g, true);
15705 gimple_set_location (g, gimple_location (stmt));
15706 gsi_replace (gsi, g, true);
15707 return true;
15708 }
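      /* A sketch of the fold above, assuming vector float operands:
	   r = vec_madd (a, b, c);
	 is replaced with the internal-function call
	   r = .FMA (a, b, c);
	 which later expands to the target's fused multiply-add
	 pattern.  */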
15709
15710 /* Vector compares; EQ, NE, GE, GT, LE. */
15711 case ALTIVEC_BUILTIN_VCMPEQUB:
15712 case ALTIVEC_BUILTIN_VCMPEQUH:
15713 case ALTIVEC_BUILTIN_VCMPEQUW:
15714 case P8V_BUILTIN_VCMPEQUD:
15715 fold_compare_helper (gsi, EQ_EXPR, stmt);
15716 return true;
15717
15718 case P9V_BUILTIN_CMPNEB:
15719 case P9V_BUILTIN_CMPNEH:
15720 case P9V_BUILTIN_CMPNEW:
15721 fold_compare_helper (gsi, NE_EXPR, stmt);
15722 return true;
15723
15724 case VSX_BUILTIN_CMPGE_16QI:
15725 case VSX_BUILTIN_CMPGE_U16QI:
15726 case VSX_BUILTIN_CMPGE_8HI:
15727 case VSX_BUILTIN_CMPGE_U8HI:
15728 case VSX_BUILTIN_CMPGE_4SI:
15729 case VSX_BUILTIN_CMPGE_U4SI:
15730 case VSX_BUILTIN_CMPGE_2DI:
15731 case VSX_BUILTIN_CMPGE_U2DI:
15732 fold_compare_helper (gsi, GE_EXPR, stmt);
15733 return true;
15734
15735 case ALTIVEC_BUILTIN_VCMPGTSB:
15736 case ALTIVEC_BUILTIN_VCMPGTUB:
15737 case ALTIVEC_BUILTIN_VCMPGTSH:
15738 case ALTIVEC_BUILTIN_VCMPGTUH:
15739 case ALTIVEC_BUILTIN_VCMPGTSW:
15740 case ALTIVEC_BUILTIN_VCMPGTUW:
15741 case P8V_BUILTIN_VCMPGTUD:
15742 case P8V_BUILTIN_VCMPGTSD:
15743 fold_compare_helper (gsi, GT_EXPR, stmt);
15744 return true;
15745
15746 case VSX_BUILTIN_CMPLE_16QI:
15747 case VSX_BUILTIN_CMPLE_U16QI:
15748 case VSX_BUILTIN_CMPLE_8HI:
15749 case VSX_BUILTIN_CMPLE_U8HI:
15750 case VSX_BUILTIN_CMPLE_4SI:
15751 case VSX_BUILTIN_CMPLE_U4SI:
15752 case VSX_BUILTIN_CMPLE_2DI:
15753 case VSX_BUILTIN_CMPLE_U2DI:
15754 fold_compare_helper (gsi, LE_EXPR, stmt);
15755 return true;
15756
15757 /* Flavors of vec_splat_[us]{8,16,32}. */
15758 case ALTIVEC_BUILTIN_VSPLTISB:
15759 case ALTIVEC_BUILTIN_VSPLTISH:
15760 case ALTIVEC_BUILTIN_VSPLTISW:
15761 {
15762 int size;
15763
15764 if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
15765 size = 8;
15766 else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
15767 size = 16;
15768 else
15769 size = 32;
15770
15771 arg0 = gimple_call_arg (stmt, 0);
15772 lhs = gimple_call_lhs (stmt);
15773
15774 /* Only fold the vec_splat_*() if the lower bits of arg0 form a
15775 5-bit signed constant in the range -16 to +15. */
15776 if (TREE_CODE (arg0) != INTEGER_CST
15777 || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
15778 -16, 15))
15779 return false;
15780 gimple_seq stmts = NULL;
15781 location_t loc = gimple_location (stmt);
15782 tree splat_value = gimple_convert (&stmts, loc,
15783 TREE_TYPE (TREE_TYPE (lhs)), arg0);
15784 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15785 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
15786 g = gimple_build_assign (lhs, splat_tree);
15787 gimple_set_location (g, gimple_location (stmt));
15788 gsi_replace (gsi, g, true);
15789 return true;
15790 }
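      /* For example (a sketch): vec_splat_s32 (7) is folded to the
	 constant vector { 7, 7, 7, 7 }, while an argument whose low bits
	 do not sign-extend to a value in [-16, 15] is left for normal
	 expansion.  */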
15791
15792 /* Flavors of vec_splat. */
15793 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
15794 case ALTIVEC_BUILTIN_VSPLTB:
15795 case ALTIVEC_BUILTIN_VSPLTH:
15796 case ALTIVEC_BUILTIN_VSPLTW:
15797 case VSX_BUILTIN_XXSPLTD_V2DI:
15798 case VSX_BUILTIN_XXSPLTD_V2DF:
15799 {
15800 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
15801 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
15802 /* Only fold vec_splat () if arg1 is both a constant value and
15803 a valid index into the arg0 vector. */
15804 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
15805 if (TREE_CODE (arg1) != INTEGER_CST
15806 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
15807 return false;
15808 lhs = gimple_call_lhs (stmt);
15809 tree lhs_type = TREE_TYPE (lhs);
15810 tree arg0_type = TREE_TYPE (arg0);
15811 tree splat;
15812 if (TREE_CODE (arg0) == VECTOR_CST)
15813 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
15814 else
15815 {
15816 /* Determine (in bits) the length and start location of the
15817 splat value for a call to the tree_vec_extract helper. */
15818 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
15819 * BITS_PER_UNIT / n_elts;
15820 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
15821 tree len = build_int_cst (bitsizetype, splat_elem_size);
15822 tree start = build_int_cst (bitsizetype, splat_start_bit);
15823 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
15824 len, start);
15825 }
15826 /* And finally, build the new vector. */
15827 tree splat_tree = build_vector_from_val (lhs_type, splat);
15828 g = gimple_build_assign (lhs, splat_tree);
15829 gimple_set_location (g, gimple_location (stmt));
15830 gsi_replace (gsi, g, true);
15831 return true;
15832 }
15833
15834 /* vec_mergel (integral and float element types). */
15835 case ALTIVEC_BUILTIN_VMRGLH:
15836 case ALTIVEC_BUILTIN_VMRGLW:
15837 case VSX_BUILTIN_XXMRGLW_4SI:
15838 case ALTIVEC_BUILTIN_VMRGLB:
15839 case VSX_BUILTIN_VEC_MERGEL_V2DI:
15840 case VSX_BUILTIN_XXMRGLW_4SF:
15841 case VSX_BUILTIN_VEC_MERGEL_V2DF:
15842 fold_mergehl_helper (gsi, stmt, 1);
15843 return true;
15844 /* vec_mergeh (integral and float element types). */
15845 case ALTIVEC_BUILTIN_VMRGHH:
15846 case ALTIVEC_BUILTIN_VMRGHW:
15847 case VSX_BUILTIN_XXMRGHW_4SI:
15848 case ALTIVEC_BUILTIN_VMRGHB:
15849 case VSX_BUILTIN_VEC_MERGEH_V2DI:
15850 case VSX_BUILTIN_XXMRGHW_4SF:
15851 case VSX_BUILTIN_VEC_MERGEH_V2DF:
15852 fold_mergehl_helper (gsi, stmt, 0);
15853 return true;
15854
15855 /* d = vec_pack (a, b) */
15856 case P8V_BUILTIN_VPKUDUM:
15857 case ALTIVEC_BUILTIN_VPKUHUM:
15858 case ALTIVEC_BUILTIN_VPKUWUM:
15859 {
15860 arg0 = gimple_call_arg (stmt, 0);
15861 arg1 = gimple_call_arg (stmt, 1);
15862 lhs = gimple_call_lhs (stmt);
15863 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
15864 gimple_set_location (g, gimple_location (stmt));
15865 gsi_replace (gsi, g, true);
15866 return true;
15867 }
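      /* A sketch of the fold above, assuming two vector signed int
	 arguments:
	   vector signed short d = vec_pack (a, b);
	 becomes d = VEC_PACK_TRUNC_EXPR <a, b>, truncating each 32-bit
	 element to 16 bits.  */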
15868
15869 /* d = vec_unpackh (a) */
15870 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
15871 in this code is sensitive to endianness, and needs to be inverted to
15872 handle both LE and BE targets. */
15873 case ALTIVEC_BUILTIN_VUPKHSB:
15874 case ALTIVEC_BUILTIN_VUPKHSH:
15875 case P8V_BUILTIN_VUPKHSW:
15876 {
15877 arg0 = gimple_call_arg (stmt, 0);
15878 lhs = gimple_call_lhs (stmt);
15879 if (BYTES_BIG_ENDIAN)
15880 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15881 else
15882 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15883 gimple_set_location (g, gimple_location (stmt));
15884 gsi_replace (gsi, g, true);
15885 return true;
15886 }
15887 /* d = vec_unpackl (a) */
15888 case ALTIVEC_BUILTIN_VUPKLSB:
15889 case ALTIVEC_BUILTIN_VUPKLSH:
15890 case P8V_BUILTIN_VUPKLSW:
15891 {
15892 arg0 = gimple_call_arg (stmt, 0);
15893 lhs = gimple_call_lhs (stmt);
15894 if (BYTES_BIG_ENDIAN)
15895 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15896 else
15897 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15898 gimple_set_location (g, gimple_location (stmt));
15899 gsi_replace (gsi, g, true);
15900 return true;
15901 }
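      /* A sketch of the endianness handling above: for
	   vector signed short a;
	   vector signed int h = vec_unpackh (a);
	 the call maps to VEC_UNPACK_HI_EXPR on big-endian targets but to
	 VEC_UNPACK_LO_EXPR on little-endian ones (and vice versa for
	 vec_unpackl), because the HI/LO tree codes select opposite halves
	 of the vector on little-endian targets.  */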
15902 /* There is no gimple type corresponding to pixel, so just return. */
15903 case ALTIVEC_BUILTIN_VUPKHPX:
15904 case ALTIVEC_BUILTIN_VUPKLPX:
15905 return false;
15906
15907 /* vec_perm. */
15908 case ALTIVEC_BUILTIN_VPERM_16QI:
15909 case ALTIVEC_BUILTIN_VPERM_8HI:
15910 case ALTIVEC_BUILTIN_VPERM_4SI:
15911 case ALTIVEC_BUILTIN_VPERM_2DI:
15912 case ALTIVEC_BUILTIN_VPERM_4SF:
15913 case ALTIVEC_BUILTIN_VPERM_2DF:
15914 {
15915 arg0 = gimple_call_arg (stmt, 0);
15916 arg1 = gimple_call_arg (stmt, 1);
15917 tree permute = gimple_call_arg (stmt, 2);
15918 lhs = gimple_call_lhs (stmt);
15919 location_t loc = gimple_location (stmt);
15920 gimple_seq stmts = NULL;
15921 // Convert arg0 and arg1 to match the type of the permute vector
15922 // for the VEC_PERM_EXPR operation.
15923 tree permute_type = (TREE_TYPE (permute));
15924 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
15925 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
15926 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
15927 permute_type, arg0_ptype, arg1_ptype,
15928 permute);
15929 // Convert the result back to the desired lhs type upon completion.
15930 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
15931 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15932 g = gimple_build_assign (lhs, temp);
15933 gimple_set_location (g, loc);
15934 gsi_replace (gsi, g, true);
15935 return true;
15936 }
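      /* A sketch of the fold above:
	   r = vec_perm (a, b, pcv);
	 converts a and b to the type of the permute control vector pcv,
	 computes
	   t = VEC_PERM_EXPR <a', b', pcv>;
	 and converts t back to the type of r.  */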
15937
15938 default:
15939 if (TARGET_DEBUG_BUILTIN)
15940 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
15941 fn_code, fn_name1, fn_name2);
15942 break;
15943 }
15944
15945 return false;
15946 }
15947
15948 /* Expand an expression EXP that calls a built-in function,
15949 with result going to TARGET if that's convenient
15950 (and in mode MODE if that's convenient).
15951 SUBTARGET may be used as the target for computing one of EXP's operands.
15952 IGNORE is nonzero if the value is to be ignored. */
15953
15954 static rtx
15955 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15956 machine_mode mode ATTRIBUTE_UNUSED,
15957 int ignore ATTRIBUTE_UNUSED)
15958 {
15959 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15960 enum rs6000_builtins fcode
15961 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
15962 size_t uns_fcode = (size_t)fcode;
15963 const struct builtin_description *d;
15964 size_t i;
15965 rtx ret;
15966 bool success;
15967 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
15968 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
15969 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
15970
15971 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
15972 floating point type, depending on whether long double is the IBM extended
15973 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
15974 we only define one variant of the built-in function, and switch the code
15975 when defining it, rather than defining two built-ins and using the
15976 overload table in rs6000-c.c to switch between the two. If we don't have
15977 the proper assembler, don't do this switch because CODE_FOR_*kf* and
15978 CODE_FOR_*tf* will be CODE_FOR_nothing. */
15979 if (FLOAT128_IEEE_P (TFmode))
15980 switch (icode)
15981 {
15982 default:
15983 break;
15984
15985 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
15986 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
15987 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
15988 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
15989 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
15990 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
15991 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
15992 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
15993 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
15994 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
15995 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
15996 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
15997 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
15998 }
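  /* For example: with -mabi=ieeelongdouble, TFmode is the IEEE 128-bit
     mode, so a built-in registered with CODE_FOR_sqrtkf2_odd is remapped
     above to CODE_FOR_sqrttf2_odd; the *kf* and *tf* insn patterns
     implement the same operation on the two mode flavors.  */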
15999
16000 if (TARGET_DEBUG_BUILTIN)
16001 {
16002 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16003 const char *name2 = (icode != CODE_FOR_nothing)
16004 ? get_insn_name ((int) icode)
16005 : "nothing";
16006 const char *name3;
16007
16008 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16009 {
16010 default: name3 = "unknown"; break;
16011 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16012 case RS6000_BTC_UNARY: name3 = "unary"; break;
16013 case RS6000_BTC_BINARY: name3 = "binary"; break;
16014 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16015 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16016 case RS6000_BTC_ABS: name3 = "abs"; break;
16017 case RS6000_BTC_DST: name3 = "dst"; break;
16018 }
16019
16020
16021 fprintf (stderr,
16022 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16023 (name1) ? name1 : "---", fcode,
16024 (name2) ? name2 : "---", (int) icode,
16025 name3,
16026 func_valid_p ? "" : ", not valid");
16027 }
16028
16029 if (!func_valid_p)
16030 {
16031 rs6000_invalid_builtin (fcode);
16032
16033 /* Given it is invalid, just generate a normal call. */
16034 return expand_call (exp, target, ignore);
16035 }
16036
16037 switch (fcode)
16038 {
16039 case RS6000_BUILTIN_RECIP:
16040 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16041
16042 case RS6000_BUILTIN_RECIPF:
16043 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16044
16045 case RS6000_BUILTIN_RSQRTF:
16046 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16047
16048 case RS6000_BUILTIN_RSQRT:
16049 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16050
16051 case POWER7_BUILTIN_BPERMD:
16052 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16053 ? CODE_FOR_bpermd_di
16054 : CODE_FOR_bpermd_si), exp, target);
16055
16056 case RS6000_BUILTIN_GET_TB:
16057 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16058 target);
16059
16060 case RS6000_BUILTIN_MFTB:
16061 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16062 ? CODE_FOR_rs6000_mftb_di
16063 : CODE_FOR_rs6000_mftb_si),
16064 target);
16065
16066 case RS6000_BUILTIN_MFFS:
16067 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16068
16069 case RS6000_BUILTIN_MTFSF:
16070 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16071
16072 case RS6000_BUILTIN_CPU_INIT:
16073 case RS6000_BUILTIN_CPU_IS:
16074 case RS6000_BUILTIN_CPU_SUPPORTS:
16075 return cpu_expand_builtin (fcode, exp, target);
16076
16077 case MISC_BUILTIN_SPEC_BARRIER:
16078 {
16079 emit_insn (gen_speculation_barrier ());
16080 return NULL_RTX;
16081 }
16082
16083 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16084 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16085 {
16086 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16087 : (int) CODE_FOR_altivec_lvsl_direct);
16088 machine_mode tmode = insn_data[icode2].operand[0].mode;
16089 machine_mode mode = insn_data[icode2].operand[1].mode;
16090 tree arg;
16091 rtx op, addr, pat;
16092
16093 gcc_assert (TARGET_ALTIVEC);
16094
16095 arg = CALL_EXPR_ARG (exp, 0);
16096 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16097 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16098 addr = memory_address (mode, op);
16099 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16100 op = addr;
16101 else
16102 {
16103 /* For the load case we need to negate the address. */
16104 op = gen_reg_rtx (GET_MODE (addr));
16105 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16106 }
16107 op = gen_rtx_MEM (mode, op);
16108
16109 if (target == 0
16110 || GET_MODE (target) != tmode
16111 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16112 target = gen_reg_rtx (tmode);
16113
16114 pat = GEN_FCN (icode2) (target, op);
16115 if (!pat)
16116 return 0;
16117 emit_insn (pat);
16118
16119 return target;
16120 }
16121
16122 case ALTIVEC_BUILTIN_VCFUX:
16123 case ALTIVEC_BUILTIN_VCFSX:
16124 case ALTIVEC_BUILTIN_VCTUXS:
16125 case ALTIVEC_BUILTIN_VCTSXS:
16126 /* FIXME: There's got to be a nicer way to handle this case than
16127 constructing a new CALL_EXPR. */
16128 if (call_expr_nargs (exp) == 1)
16129 {
16130 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16131 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16132 }
16133 break;
16134
16135 /* For the pack and unpack int128 routines, fix up the builtin so it
16136 uses the correct IBM128 type. */
16137 case MISC_BUILTIN_PACK_IF:
16138 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16139 {
16140 icode = CODE_FOR_packtf;
16141 fcode = MISC_BUILTIN_PACK_TF;
16142 uns_fcode = (size_t)fcode;
16143 }
16144 break;
16145
16146 case MISC_BUILTIN_UNPACK_IF:
16147 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16148 {
16149 icode = CODE_FOR_unpacktf;
16150 fcode = MISC_BUILTIN_UNPACK_TF;
16151 uns_fcode = (size_t)fcode;
16152 }
16153 break;
16154
16155 default:
16156 break;
16157 }
16158
16159 if (TARGET_ALTIVEC)
16160 {
16161 ret = altivec_expand_builtin (exp, target, &success);
16162
16163 if (success)
16164 return ret;
16165 }
16166 if (TARGET_HTM)
16167 {
16168 ret = htm_expand_builtin (exp, target, &success);
16169
16170 if (success)
16171 return ret;
16172 }
16173
16174 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16175 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16176 gcc_assert (attr == RS6000_BTC_UNARY
16177 || attr == RS6000_BTC_BINARY
16178 || attr == RS6000_BTC_TERNARY
16179 || attr == RS6000_BTC_SPECIAL);
16180
16181 /* Handle simple unary operations. */
16182 d = bdesc_1arg;
16183 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16184 if (d->code == fcode)
16185 return rs6000_expand_unop_builtin (icode, exp, target);
16186
16187 /* Handle simple binary operations. */
16188 d = bdesc_2arg;
16189 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16190 if (d->code == fcode)
16191 return rs6000_expand_binop_builtin (icode, exp, target);
16192
16193 /* Handle simple ternary operations. */
16194 d = bdesc_3arg;
16195 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16196 if (d->code == fcode)
16197 return rs6000_expand_ternop_builtin (icode, exp, target);
16198
16199 /* Handle simple no-argument operations. */
16200 d = bdesc_0arg;
16201 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16202 if (d->code == fcode)
16203 return rs6000_expand_zeroop_builtin (icode, target);
16204
16205 gcc_unreachable ();
16206 }
16207
16208 /* Create a builtin vector type with a name, taking care not to give
16209 the canonical type a name. */
16210
16211 static tree
16212 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16213 {
16214 tree result = build_vector_type (elt_type, num_elts);
16215
16216 /* Copy so we don't give the canonical type a name. */
16217 result = build_variant_type_copy (result);
16218
16219 add_builtin_type (name, result);
16220
16221 return result;
16222 }
16223
16224 static void
16225 rs6000_init_builtins (void)
16226 {
16227 tree tdecl;
16228 tree ftype;
16229 machine_mode mode;
16230
16231 if (TARGET_DEBUG_BUILTIN)
16232 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16233 (TARGET_ALTIVEC) ? ", altivec" : "",
16234 (TARGET_VSX) ? ", vsx" : "");
16235
16236 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16237 : "__vector long long",
16238 intDI_type_node, 2);
16239 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16240 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16241 intSI_type_node, 4);
16242 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16243 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16244 intHI_type_node, 8);
16245 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16246 intQI_type_node, 16);
16247
16248 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16249 unsigned_intQI_type_node, 16);
16250 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16251 unsigned_intHI_type_node, 8);
16252 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16253 unsigned_intSI_type_node, 4);
16254 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16255 ? "__vector unsigned long"
16256 : "__vector unsigned long long",
16257 unsigned_intDI_type_node, 2);
16258
16259 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16260
16261 const_str_type_node
16262 = build_pointer_type (build_qualified_type (char_type_node,
16263 TYPE_QUAL_CONST));
16264
16265 /* We use V1TI mode as a special container to hold __int128_t items that
16266 must live in VSX registers. */
16267 if (intTI_type_node)
16268 {
16269 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16270 intTI_type_node, 1);
16271 unsigned_V1TI_type_node
16272 = rs6000_vector_type ("__vector unsigned __int128",
16273 unsigned_intTI_type_node, 1);
16274 }
16275
16276 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16277 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16278 'vector unsigned short'. */
16279
16280 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16281 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16282 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16283 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16284 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16285
16286 long_integer_type_internal_node = long_integer_type_node;
16287 long_unsigned_type_internal_node = long_unsigned_type_node;
16288 long_long_integer_type_internal_node = long_long_integer_type_node;
16289 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16290 intQI_type_internal_node = intQI_type_node;
16291 uintQI_type_internal_node = unsigned_intQI_type_node;
16292 intHI_type_internal_node = intHI_type_node;
16293 uintHI_type_internal_node = unsigned_intHI_type_node;
16294 intSI_type_internal_node = intSI_type_node;
16295 uintSI_type_internal_node = unsigned_intSI_type_node;
16296 intDI_type_internal_node = intDI_type_node;
16297 uintDI_type_internal_node = unsigned_intDI_type_node;
16298 intTI_type_internal_node = intTI_type_node;
16299 uintTI_type_internal_node = unsigned_intTI_type_node;
16300 float_type_internal_node = float_type_node;
16301 double_type_internal_node = double_type_node;
16302 long_double_type_internal_node = long_double_type_node;
16303 dfloat64_type_internal_node = dfloat64_type_node;
16304 dfloat128_type_internal_node = dfloat128_type_node;
16305 void_type_internal_node = void_type_node;
16306
16307 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16308 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16309 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16310 format that uses a pair of doubles, depending on the switches and
16311 defaults.
16312
16313 If we don't support either 128-bit IBM double double or IEEE 128-bit
16314 floating point, we need to make sure the type is non-zero, or else the
16315 self-test fails during bootstrap.
16316
16317 Always create __ibm128 as a separate type, even if the current long double
16318 format is IBM extended double.
16319
16320 For IEEE 128-bit floating point, always create the type __ieee128. If the
16321 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16322 __ieee128. */
16323 if (TARGET_FLOAT128_TYPE)
16324 {
16325 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16326 ibm128_float_type_node = long_double_type_node;
16327 else
16328 {
16329 ibm128_float_type_node = make_node (REAL_TYPE);
16330 TYPE_PRECISION (ibm128_float_type_node) = 128;
16331 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16332 layout_type (ibm128_float_type_node);
16333 }
16334
16335 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16336 "__ibm128");
16337
16338 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16339 ieee128_float_type_node = long_double_type_node;
16340 else
16341 ieee128_float_type_node = float128_type_node;
16342
16343 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16344 "__ieee128");
16345 }
16346
16347 else
16348 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16349
16350 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16351 tree type node. */
16352 builtin_mode_to_type[QImode][0] = integer_type_node;
16353 builtin_mode_to_type[HImode][0] = integer_type_node;
16354 builtin_mode_to_type[SImode][0] = intSI_type_node;
16355 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16356 builtin_mode_to_type[DImode][0] = intDI_type_node;
16357 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16358 builtin_mode_to_type[TImode][0] = intTI_type_node;
16359 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16360 builtin_mode_to_type[SFmode][0] = float_type_node;
16361 builtin_mode_to_type[DFmode][0] = double_type_node;
16362 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16363 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16364 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16365 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16366 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16367 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16368 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16369 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16370 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16371 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16372 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16373 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16374 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16375 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16376 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16377 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16378 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16379
16380 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16381 TYPE_NAME (bool_char_type_node) = tdecl;
16382
16383 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16384 TYPE_NAME (bool_short_type_node) = tdecl;
16385
16386 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16387 TYPE_NAME (bool_int_type_node) = tdecl;
16388
16389 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16390 TYPE_NAME (pixel_type_node) = tdecl;
16391
16392 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16393 bool_char_type_node, 16);
16394 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16395 bool_short_type_node, 8);
16396 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16397 bool_int_type_node, 4);
16398 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16399 ? "__vector __bool long"
16400 : "__vector __bool long long",
16401 bool_long_long_type_node, 2);
16402 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16403 pixel_type_node, 8);
16404
16405 /* Create Altivec and VSX builtins on machines with at least the
16406 general purpose extensions (970 and newer) to allow the use of
16407 the target attribute. */
16408 if (TARGET_EXTRA_BUILTINS)
16409 altivec_init_builtins ();
16410 if (TARGET_HTM)
16411 htm_init_builtins ();
16412
16413 if (TARGET_EXTRA_BUILTINS)
16414 rs6000_common_init_builtins ();
16415
16416 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16417 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16418 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16419
16420 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16421 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16422 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16423
16424 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16425 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16426 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16427
16428 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16429 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16430 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16431
16432 mode = (TARGET_64BIT) ? DImode : SImode;
16433 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16434 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16435 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16436
16437 ftype = build_function_type_list (unsigned_intDI_type_node,
16438 NULL_TREE);
16439 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16440
16441 if (TARGET_64BIT)
16442 ftype = build_function_type_list (unsigned_intDI_type_node,
16443 NULL_TREE);
16444 else
16445 ftype = build_function_type_list (unsigned_intSI_type_node,
16446 NULL_TREE);
16447 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16448
16449 ftype = build_function_type_list (double_type_node, NULL_TREE);
16450 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16451
16452 ftype = build_function_type_list (void_type_node,
16453 intSI_type_node, double_type_node,
16454 NULL_TREE);
16455 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16456
16457 ftype = build_function_type_list (void_type_node, NULL_TREE);
16458 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16459 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16460 MISC_BUILTIN_SPEC_BARRIER);
16461
16462 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16463 NULL_TREE);
16464 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16465 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16466
16467 /* AIX libm provides clog as __clog. */
16468 if (TARGET_XCOFF
16469 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16470 set_user_assembler_name (tdecl, "__clog");
16471
16472 #ifdef SUBTARGET_INIT_BUILTINS
16473 SUBTARGET_INIT_BUILTINS;
16474 #endif
16475 }
16476
16477 /* Returns the rs6000 builtin decl for CODE. */
16478
16479 static tree
16480 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16481 {
16482 HOST_WIDE_INT fnmask;
16483
16484 if (code >= RS6000_BUILTIN_COUNT)
16485 return error_mark_node;
16486
16487 fnmask = rs6000_builtin_info[code].mask;
16488 if ((fnmask & rs6000_builtin_mask) != fnmask)
16489 {
16490 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16491 return error_mark_node;
16492 }
16493
16494 return rs6000_builtin_decls[code];
16495 }
16496
16497 static void
16498 altivec_init_builtins (void)
16499 {
16500 const struct builtin_description *d;
16501 size_t i;
16502 tree ftype;
16503 tree decl;
16504 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16505
16506 tree pvoid_type_node = build_pointer_type (void_type_node);
16507
16508 tree pcvoid_type_node
16509 = build_pointer_type (build_qualified_type (void_type_node,
16510 TYPE_QUAL_CONST));
16511
16512 tree int_ftype_opaque
16513 = build_function_type_list (integer_type_node,
16514 opaque_V4SI_type_node, NULL_TREE);
16515 tree opaque_ftype_opaque
16516 = build_function_type_list (integer_type_node, NULL_TREE);
16517 tree opaque_ftype_opaque_int
16518 = build_function_type_list (opaque_V4SI_type_node,
16519 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16520 tree opaque_ftype_opaque_opaque_int
16521 = build_function_type_list (opaque_V4SI_type_node,
16522 opaque_V4SI_type_node, opaque_V4SI_type_node,
16523 integer_type_node, NULL_TREE);
16524 tree opaque_ftype_opaque_opaque_opaque
16525 = build_function_type_list (opaque_V4SI_type_node,
16526 opaque_V4SI_type_node, opaque_V4SI_type_node,
16527 opaque_V4SI_type_node, NULL_TREE);
16528 tree opaque_ftype_opaque_opaque
16529 = build_function_type_list (opaque_V4SI_type_node,
16530 opaque_V4SI_type_node, opaque_V4SI_type_node,
16531 NULL_TREE);
16532 tree int_ftype_int_opaque_opaque
16533 = build_function_type_list (integer_type_node,
16534 integer_type_node, opaque_V4SI_type_node,
16535 opaque_V4SI_type_node, NULL_TREE);
16536 tree int_ftype_int_v4si_v4si
16537 = build_function_type_list (integer_type_node,
16538 integer_type_node, V4SI_type_node,
16539 V4SI_type_node, NULL_TREE);
16540 tree int_ftype_int_v2di_v2di
16541 = build_function_type_list (integer_type_node,
16542 integer_type_node, V2DI_type_node,
16543 V2DI_type_node, NULL_TREE);
16544 tree void_ftype_v4si
16545 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16546 tree v8hi_ftype_void
16547 = build_function_type_list (V8HI_type_node, NULL_TREE);
16548 tree void_ftype_void
16549 = build_function_type_list (void_type_node, NULL_TREE);
16550 tree void_ftype_int
16551 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16552
16553 tree opaque_ftype_long_pcvoid
16554 = build_function_type_list (opaque_V4SI_type_node,
16555 long_integer_type_node, pcvoid_type_node,
16556 NULL_TREE);
16557 tree v16qi_ftype_long_pcvoid
16558 = build_function_type_list (V16QI_type_node,
16559 long_integer_type_node, pcvoid_type_node,
16560 NULL_TREE);
16561 tree v8hi_ftype_long_pcvoid
16562 = build_function_type_list (V8HI_type_node,
16563 long_integer_type_node, pcvoid_type_node,
16564 NULL_TREE);
16565 tree v4si_ftype_long_pcvoid
16566 = build_function_type_list (V4SI_type_node,
16567 long_integer_type_node, pcvoid_type_node,
16568 NULL_TREE);
16569 tree v4sf_ftype_long_pcvoid
16570 = build_function_type_list (V4SF_type_node,
16571 long_integer_type_node, pcvoid_type_node,
16572 NULL_TREE);
16573 tree v2df_ftype_long_pcvoid
16574 = build_function_type_list (V2DF_type_node,
16575 long_integer_type_node, pcvoid_type_node,
16576 NULL_TREE);
16577 tree v2di_ftype_long_pcvoid
16578 = build_function_type_list (V2DI_type_node,
16579 long_integer_type_node, pcvoid_type_node,
16580 NULL_TREE);
16581 tree v1ti_ftype_long_pcvoid
16582 = build_function_type_list (V1TI_type_node,
16583 long_integer_type_node, pcvoid_type_node,
16584 NULL_TREE);
16585
16586 tree void_ftype_opaque_long_pvoid
16587 = build_function_type_list (void_type_node,
16588 opaque_V4SI_type_node, long_integer_type_node,
16589 pvoid_type_node, NULL_TREE);
16590 tree void_ftype_v4si_long_pvoid
16591 = build_function_type_list (void_type_node,
16592 V4SI_type_node, long_integer_type_node,
16593 pvoid_type_node, NULL_TREE);
16594 tree void_ftype_v16qi_long_pvoid
16595 = build_function_type_list (void_type_node,
16596 V16QI_type_node, long_integer_type_node,
16597 pvoid_type_node, NULL_TREE);
16598
16599 tree void_ftype_v16qi_pvoid_long
16600 = build_function_type_list (void_type_node,
16601 V16QI_type_node, pvoid_type_node,
16602 long_integer_type_node, NULL_TREE);
16603
16604 tree void_ftype_v8hi_long_pvoid
16605 = build_function_type_list (void_type_node,
16606 V8HI_type_node, long_integer_type_node,
16607 pvoid_type_node, NULL_TREE);
16608 tree void_ftype_v4sf_long_pvoid
16609 = build_function_type_list (void_type_node,
16610 V4SF_type_node, long_integer_type_node,
16611 pvoid_type_node, NULL_TREE);
16612 tree void_ftype_v2df_long_pvoid
16613 = build_function_type_list (void_type_node,
16614 V2DF_type_node, long_integer_type_node,
16615 pvoid_type_node, NULL_TREE);
16616 tree void_ftype_v1ti_long_pvoid
16617 = build_function_type_list (void_type_node,
16618 V1TI_type_node, long_integer_type_node,
16619 pvoid_type_node, NULL_TREE);
16620 tree void_ftype_v2di_long_pvoid
16621 = build_function_type_list (void_type_node,
16622 V2DI_type_node, long_integer_type_node,
16623 pvoid_type_node, NULL_TREE);
16624 tree int_ftype_int_v8hi_v8hi
16625 = build_function_type_list (integer_type_node,
16626 integer_type_node, V8HI_type_node,
16627 V8HI_type_node, NULL_TREE);
16628 tree int_ftype_int_v16qi_v16qi
16629 = build_function_type_list (integer_type_node,
16630 integer_type_node, V16QI_type_node,
16631 V16QI_type_node, NULL_TREE);
16632 tree int_ftype_int_v4sf_v4sf
16633 = build_function_type_list (integer_type_node,
16634 integer_type_node, V4SF_type_node,
16635 V4SF_type_node, NULL_TREE);
16636 tree int_ftype_int_v2df_v2df
16637 = build_function_type_list (integer_type_node,
16638 integer_type_node, V2DF_type_node,
16639 V2DF_type_node, NULL_TREE);
16640 tree v2di_ftype_v2di
16641 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16642 tree v4si_ftype_v4si
16643 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16644 tree v8hi_ftype_v8hi
16645 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16646 tree v16qi_ftype_v16qi
16647 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16648 tree v4sf_ftype_v4sf
16649 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16650 tree v2df_ftype_v2df
16651 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16652 tree void_ftype_pcvoid_int_int
16653 = build_function_type_list (void_type_node,
16654 pcvoid_type_node, integer_type_node,
16655 integer_type_node, NULL_TREE);
16656
16657 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16658 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16659 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16660 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16661 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16662 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16663 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16664 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16665 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16666 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16667 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16668 ALTIVEC_BUILTIN_LVXL_V2DF);
16669 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16670 ALTIVEC_BUILTIN_LVXL_V2DI);
16671 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16672 ALTIVEC_BUILTIN_LVXL_V4SF);
16673 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16674 ALTIVEC_BUILTIN_LVXL_V4SI);
16675 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16676 ALTIVEC_BUILTIN_LVXL_V8HI);
16677 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16678 ALTIVEC_BUILTIN_LVXL_V16QI);
16679 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16680 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16681 ALTIVEC_BUILTIN_LVX_V1TI);
16682 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16683 ALTIVEC_BUILTIN_LVX_V2DF);
16684 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16685 ALTIVEC_BUILTIN_LVX_V2DI);
16686 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16687 ALTIVEC_BUILTIN_LVX_V4SF);
16688 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16689 ALTIVEC_BUILTIN_LVX_V4SI);
16690 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16691 ALTIVEC_BUILTIN_LVX_V8HI);
16692 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16693 ALTIVEC_BUILTIN_LVX_V16QI);
16694 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16695 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16696 ALTIVEC_BUILTIN_STVX_V2DF);
16697 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16698 ALTIVEC_BUILTIN_STVX_V2DI);
16699 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16700 ALTIVEC_BUILTIN_STVX_V4SF);
16701 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16702 ALTIVEC_BUILTIN_STVX_V4SI);
16703 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16704 ALTIVEC_BUILTIN_STVX_V8HI);
16705 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16706 ALTIVEC_BUILTIN_STVX_V16QI);
16707 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16708 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16709 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16710 ALTIVEC_BUILTIN_STVXL_V2DF);
16711 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16712 ALTIVEC_BUILTIN_STVXL_V2DI);
16713 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16714 ALTIVEC_BUILTIN_STVXL_V4SF);
16715 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16716 ALTIVEC_BUILTIN_STVXL_V4SI);
16717 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16718 ALTIVEC_BUILTIN_STVXL_V8HI);
16719 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16720 ALTIVEC_BUILTIN_STVXL_V16QI);
16721 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16722 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16723 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16724 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16725 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16726 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16727 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16728 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16729 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16730 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16731 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16732 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16733 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16734 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16735 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16736 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16737
16738 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16739 VSX_BUILTIN_LXVD2X_V2DF);
16740 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16741 VSX_BUILTIN_LXVD2X_V2DI);
16742 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16743 VSX_BUILTIN_LXVW4X_V4SF);
16744 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16745 VSX_BUILTIN_LXVW4X_V4SI);
16746 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16747 VSX_BUILTIN_LXVW4X_V8HI);
16748 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16749 VSX_BUILTIN_LXVW4X_V16QI);
16750 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16751 VSX_BUILTIN_STXVD2X_V2DF);
16752 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16753 VSX_BUILTIN_STXVD2X_V2DI);
16754 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16755 VSX_BUILTIN_STXVW4X_V4SF);
16756 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16757 VSX_BUILTIN_STXVW4X_V4SI);
16758 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16759 VSX_BUILTIN_STXVW4X_V8HI);
16760 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16761 VSX_BUILTIN_STXVW4X_V16QI);
16762
16763 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16764 VSX_BUILTIN_LD_ELEMREV_V2DF);
16765 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16766 VSX_BUILTIN_LD_ELEMREV_V2DI);
16767 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16768 VSX_BUILTIN_LD_ELEMREV_V4SF);
16769 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16770 VSX_BUILTIN_LD_ELEMREV_V4SI);
16771 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16772 VSX_BUILTIN_LD_ELEMREV_V8HI);
16773 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16774 VSX_BUILTIN_LD_ELEMREV_V16QI);
16775 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16776 VSX_BUILTIN_ST_ELEMREV_V2DF);
16777 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
16778 VSX_BUILTIN_ST_ELEMREV_V1TI);
16779 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16780 VSX_BUILTIN_ST_ELEMREV_V2DI);
16781 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16782 VSX_BUILTIN_ST_ELEMREV_V4SF);
16783 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16784 VSX_BUILTIN_ST_ELEMREV_V4SI);
16785 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
16786 VSX_BUILTIN_ST_ELEMREV_V8HI);
16787 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
16788 VSX_BUILTIN_ST_ELEMREV_V16QI);
16789
16790 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16791 VSX_BUILTIN_VEC_LD);
16792 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16793 VSX_BUILTIN_VEC_ST);
16794 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16795 VSX_BUILTIN_VEC_XL);
16796 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
16797 VSX_BUILTIN_VEC_XL_BE);
16798 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16799 VSX_BUILTIN_VEC_XST);
16800 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
16801 VSX_BUILTIN_VEC_XST_BE);
16802
16803 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16804 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16805 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16806
16807 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16808 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16809 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16810 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16811 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16812 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16813 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16814 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16815 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16816 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16817 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16818 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
16819
16820 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16821 ALTIVEC_BUILTIN_VEC_ADDE);
16822 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16823 ALTIVEC_BUILTIN_VEC_ADDEC);
16824 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16825 ALTIVEC_BUILTIN_VEC_CMPNE);
16826 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16827 ALTIVEC_BUILTIN_VEC_MUL);
16828 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
16829 ALTIVEC_BUILTIN_VEC_SUBE);
16830 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
16831 ALTIVEC_BUILTIN_VEC_SUBEC);
16832
16833 /* Cell builtins. */
16834 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16835 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16836 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16837 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16838
16839 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16840 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16841 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16842 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16843
16844 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16845 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16846 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16847 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16848
16849 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16850 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16851 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16852 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16853
16854 if (TARGET_P9_VECTOR)
16855 {
16856 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
16857 P9V_BUILTIN_STXVL);
16858 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
16859 P9V_BUILTIN_XST_LEN_R);
16860 }
16861
16862 /* Add the DST variants. */
16863 d = bdesc_dst;
16864 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16865 {
16866 HOST_WIDE_INT mask = d->mask;
16867
16868 /* It is expected that these dst built-in functions may have
16869 d->icode equal to CODE_FOR_nothing. */
16870 if ((mask & builtin_mask) != mask)
16871 {
16872 if (TARGET_DEBUG_BUILTIN)
16873 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
16874 d->name);
16875 continue;
16876 }
16877 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
16878 }
16879
16880 /* Initialize the predicates. */
16881 d = bdesc_altivec_preds;
16882 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16883 {
16884 machine_mode mode1;
16885 tree type;
16886 HOST_WIDE_INT mask = d->mask;
16887
16888 if ((mask & builtin_mask) != mask)
16889 {
16890 if (TARGET_DEBUG_BUILTIN)
16891 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
16892 d->name);
16893 continue;
16894 }
16895
16896 if (rs6000_overloaded_builtin_p (d->code))
16897 mode1 = VOIDmode;
16898 else
16899 {
16900 /* Cannot define builtin if the instruction is disabled. */
16901 gcc_assert (d->icode != CODE_FOR_nothing);
16902 mode1 = insn_data[d->icode].operand[1].mode;
16903 }
16904
16905 switch (mode1)
16906 {
16907 case E_VOIDmode:
16908 type = int_ftype_int_opaque_opaque;
16909 break;
16910 case E_V2DImode:
16911 type = int_ftype_int_v2di_v2di;
16912 break;
16913 case E_V4SImode:
16914 type = int_ftype_int_v4si_v4si;
16915 break;
16916 case E_V8HImode:
16917 type = int_ftype_int_v8hi_v8hi;
16918 break;
16919 case E_V16QImode:
16920 type = int_ftype_int_v16qi_v16qi;
16921 break;
16922 case E_V4SFmode:
16923 type = int_ftype_int_v4sf_v4sf;
16924 break;
16925 case E_V2DFmode:
16926 type = int_ftype_int_v2df_v2df;
16927 break;
16928 default:
16929 gcc_unreachable ();
16930 }
16931
16932 def_builtin (d->name, type, d->code);
16933 }
16934
16935 /* Initialize the abs* operators. */
16936 d = bdesc_abs;
16937 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
16938 {
16939 machine_mode mode0;
16940 tree type;
16941 HOST_WIDE_INT mask = d->mask;
16942
16943 if ((mask & builtin_mask) != mask)
16944 {
16945 if (TARGET_DEBUG_BUILTIN)
16946 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
16947 d->name);
16948 continue;
16949 }
16950
16951 /* Cannot define builtin if the instruction is disabled. */
16952 gcc_assert (d->icode != CODE_FOR_nothing);
16953 mode0 = insn_data[d->icode].operand[0].mode;
16954
16955 switch (mode0)
16956 {
16957 case E_V2DImode:
16958 type = v2di_ftype_v2di;
16959 break;
16960 case E_V4SImode:
16961 type = v4si_ftype_v4si;
16962 break;
16963 case E_V8HImode:
16964 type = v8hi_ftype_v8hi;
16965 break;
16966 case E_V16QImode:
16967 type = v16qi_ftype_v16qi;
16968 break;
16969 case E_V4SFmode:
16970 type = v4sf_ftype_v4sf;
16971 break;
16972 case E_V2DFmode:
16973 type = v2df_ftype_v2df;
16974 break;
16975 default:
16976 gcc_unreachable ();
16977 }
16978
16979 def_builtin (d->name, type, d->code);
16980 }
16981
16982 /* Initialize the target builtin that implements
16983 targetm.vectorize.builtin_mask_for_load. */
16984
16985 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
16986 v16qi_ftype_long_pcvoid,
16987 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
16988 BUILT_IN_MD, NULL, NULL_TREE);
16989 TREE_READONLY (decl) = 1;
16990 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
16991 altivec_builtin_mask_for_load = decl;
16992
16993 /* Access to the vec_init patterns. */
16994 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
16995 integer_type_node, integer_type_node,
16996 integer_type_node, NULL_TREE);
16997 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
16998
16999 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17000 short_integer_type_node,
17001 short_integer_type_node,
17002 short_integer_type_node,
17003 short_integer_type_node,
17004 short_integer_type_node,
17005 short_integer_type_node,
17006 short_integer_type_node, NULL_TREE);
17007 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17008
17009 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17010 char_type_node, char_type_node,
17011 char_type_node, char_type_node,
17012 char_type_node, char_type_node,
17013 char_type_node, char_type_node,
17014 char_type_node, char_type_node,
17015 char_type_node, char_type_node,
17016 char_type_node, char_type_node,
17017 char_type_node, NULL_TREE);
17018 def_builtin ("__builtin_vec_init_v16qi", ftype,
17019 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17020
17021 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17022 float_type_node, float_type_node,
17023 float_type_node, NULL_TREE);
17024 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17025
17026 /* VSX builtins. */
17027 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17028 double_type_node, NULL_TREE);
17029 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17030
17031 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17032 intDI_type_node, NULL_TREE);
17033 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17034
17035 /* Access to the vec_set patterns. */
17036 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17037 intSI_type_node,
17038 integer_type_node, NULL_TREE);
17039 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17040
17041 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17042 intHI_type_node,
17043 integer_type_node, NULL_TREE);
17044 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17045
17046 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17047 intQI_type_node,
17048 integer_type_node, NULL_TREE);
17049 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17050
17051 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17052 float_type_node,
17053 integer_type_node, NULL_TREE);
17054 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17055
17056 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17057 double_type_node,
17058 integer_type_node, NULL_TREE);
17059 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17060
17061 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17062 intDI_type_node,
17063 integer_type_node, NULL_TREE);
17064 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17065
17066 /* Access to the vec_extract patterns. */
17067 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17068 integer_type_node, NULL_TREE);
17069 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17070
17071 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17072 integer_type_node, NULL_TREE);
17073 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17074
17075 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17076 integer_type_node, NULL_TREE);
17077 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17078
17079 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17080 integer_type_node, NULL_TREE);
17081 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17082
17083 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17084 integer_type_node, NULL_TREE);
17085 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17086
17087 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17088 integer_type_node, NULL_TREE);
17089 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17090
17091
17092 if (V1TI_type_node)
17093 {
17094 tree v1ti_ftype_long_pcvoid
17095 = build_function_type_list (V1TI_type_node,
17096 long_integer_type_node, pcvoid_type_node,
17097 NULL_TREE);
17098 tree void_ftype_v1ti_long_pvoid
17099 = build_function_type_list (void_type_node,
17100 V1TI_type_node, long_integer_type_node,
17101 pvoid_type_node, NULL_TREE);
17102 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17103 VSX_BUILTIN_LD_ELEMREV_V1TI);
17104 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17105 VSX_BUILTIN_LXVD2X_V1TI);
17106 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17107 VSX_BUILTIN_STXVD2X_V1TI);
17108 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17109 NULL_TREE, NULL_TREE);
17110 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17111 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17112 intTI_type_node,
17113 integer_type_node, NULL_TREE);
17114 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17115 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17116 integer_type_node, NULL_TREE);
17117 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17118 }
17119
17120 }
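/* A usage sketch, not part of the original source: the "__builtin_vec_*"
   entries above are the overloaded front ends.  A call such as

       vector float vf = __builtin_vec_ld (0, float_ptr);

   (float_ptr being a hypothetical "const float *") is resolved later by
   the overload machinery in rs6000-c.c to the type-specific instance,
   here __builtin_altivec_lvx_v4sf, following the usual <altivec.h>
   conventions.  */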
17121
17122 static void
17123 htm_init_builtins (void)
17124 {
17125 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17126 const struct builtin_description *d;
17127 size_t i;
17128
17129 d = bdesc_htm;
17130 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17131 {
17132 tree op[MAX_HTM_OPERANDS], type;
17133 HOST_WIDE_INT mask = d->mask;
17134 unsigned attr = rs6000_builtin_info[d->code].attr;
17135 bool void_func = (attr & RS6000_BTC_VOID);
17136 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17137 int nopnds = 0;
17138 tree gpr_type_node;
17139 tree rettype;
17140 tree argtype;
17141
17142 /* It is expected that these htm built-in functions may have
17143 d->icode equal to CODE_FOR_nothing. */
17144
17145 if (TARGET_32BIT && TARGET_POWERPC64)
17146 gpr_type_node = long_long_unsigned_type_node;
17147 else
17148 gpr_type_node = long_unsigned_type_node;
17149
17150 if (attr & RS6000_BTC_SPR)
17151 {
17152 rettype = gpr_type_node;
17153 argtype = gpr_type_node;
17154 }
17155 else if (d->code == HTM_BUILTIN_TABORTDC
17156 || d->code == HTM_BUILTIN_TABORTDCI)
17157 {
17158 rettype = unsigned_type_node;
17159 argtype = gpr_type_node;
17160 }
17161 else
17162 {
17163 rettype = unsigned_type_node;
17164 argtype = unsigned_type_node;
17165 }
17166
17167 if ((mask & builtin_mask) != mask)
17168 {
17169 if (TARGET_DEBUG_BUILTIN)
17170 fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
17171 continue;
17172 }
17173
17174 if (d->name == 0)
17175 {
17176 if (TARGET_DEBUG_BUILTIN)
17177 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17178 (long unsigned) i);
17179 continue;
17180 }
17181
17182 op[nopnds++] = (void_func) ? void_type_node : rettype;
17183
17184 if (attr_args == RS6000_BTC_UNARY)
17185 op[nopnds++] = argtype;
17186 else if (attr_args == RS6000_BTC_BINARY)
17187 {
17188 op[nopnds++] = argtype;
17189 op[nopnds++] = argtype;
17190 }
17191 else if (attr_args == RS6000_BTC_TERNARY)
17192 {
17193 op[nopnds++] = argtype;
17194 op[nopnds++] = argtype;
17195 op[nopnds++] = argtype;
17196 }
17197
17198 switch (nopnds)
17199 {
17200 case 1:
17201 type = build_function_type_list (op[0], NULL_TREE);
17202 break;
17203 case 2:
17204 type = build_function_type_list (op[0], op[1], NULL_TREE);
17205 break;
17206 case 3:
17207 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17208 break;
17209 case 4:
17210 type = build_function_type_list (op[0], op[1], op[2], op[3],
17211 NULL_TREE);
17212 break;
17213 default:
17214 gcc_unreachable ();
17215 }
17216
17217 def_builtin (d->name, type, d->code);
17218 }
17219 }
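/* For illustration (a sketch of the table-driven result, not from the
   original source): a unary HTM builtin such as __builtin_tbegin has
   attr_args == RS6000_BTC_UNARY and hits neither the RS6000_BTC_SPR nor
   the tabortdc cases, so op[] becomes { unsigned, unsigned } and the
   resulting signature is "unsigned int __builtin_tbegin (unsigned int)".
   SPR accessors such as __builtin_get_texasr instead use gpr_type_node,
   so they traffic in unsigned long (unsigned long long for -m32
   -mpowerpc64).  */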
17220
17221 /* Hash function for builtin functions with up to 3 arguments and a return
17222 type. */
17223 hashval_t
17224 builtin_hasher::hash (builtin_hash_struct *bh)
17225 {
17226 unsigned ret = 0;
17227 int i;
17228
17229 for (i = 0; i < 4; i++)
17230 {
17231 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17232 ret = (ret * 2) + bh->uns_p[i];
17233 }
17234
17235 return ret;
17236 }
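/* Put differently: the loop above encodes the four (mode, unsignedness)
   pairs in mixed radix, multiplying by MAX_MACHINE_MODE before adding
   each mode and by 2 before adding each uns_p flag, so distinct
   signatures hash to distinct values up to overflow of the unsigned
   accumulator.  */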
17237
17238 /* Compare builtin hash entries H1 and H2 for equivalence. */
17239 bool
17240 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17241 {
17242 return ((p1->mode[0] == p2->mode[0])
17243 && (p1->mode[1] == p2->mode[1])
17244 && (p1->mode[2] == p2->mode[2])
17245 && (p1->mode[3] == p2->mode[3])
17246 && (p1->uns_p[0] == p2->uns_p[0])
17247 && (p1->uns_p[1] == p2->uns_p[1])
17248 && (p1->uns_p[2] == p2->uns_p[2])
17249 && (p1->uns_p[3] == p2->uns_p[3]));
17250 }
17251
17252 /* Map types for builtin functions with an explicit return type and up to 3
17253 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
17254 of the unused argument slots. */
17255 static tree
17256 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17257 machine_mode mode_arg1, machine_mode mode_arg2,
17258 enum rs6000_builtins builtin, const char *name)
17259 {
17260 struct builtin_hash_struct h;
17261 struct builtin_hash_struct *h2;
17262 int num_args = 3;
17263 int i;
17264 tree ret_type = NULL_TREE;
17265 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17266
17267 /* Create builtin_hash_table. */
17268 if (builtin_hash_table == NULL)
17269 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17270
17271 h.type = NULL_TREE;
17272 h.mode[0] = mode_ret;
17273 h.mode[1] = mode_arg0;
17274 h.mode[2] = mode_arg1;
17275 h.mode[3] = mode_arg2;
17276 h.uns_p[0] = 0;
17277 h.uns_p[1] = 0;
17278 h.uns_p[2] = 0;
17279 h.uns_p[3] = 0;
17280
17281 /* If the builtin produces unsigned results or takes unsigned arguments,
17282 and it is returned as a decl for the vectorizer (such as the widening
17283 multiplies or permutes), make sure the argument and return types are
17284 correct. */
17285 switch (builtin)
17286 {
17287 /* unsigned 1 argument functions. */
17288 case CRYPTO_BUILTIN_VSBOX:
17289 case P8V_BUILTIN_VGBBD:
17290 case MISC_BUILTIN_CDTBCD:
17291 case MISC_BUILTIN_CBCDTD:
17292 h.uns_p[0] = 1;
17293 h.uns_p[1] = 1;
17294 break;
17295
17296 /* unsigned 2 argument functions. */
17297 case ALTIVEC_BUILTIN_VMULEUB:
17298 case ALTIVEC_BUILTIN_VMULEUH:
17299 case P8V_BUILTIN_VMULEUW:
17300 case ALTIVEC_BUILTIN_VMULOUB:
17301 case ALTIVEC_BUILTIN_VMULOUH:
17302 case P8V_BUILTIN_VMULOUW:
17303 case CRYPTO_BUILTIN_VCIPHER:
17304 case CRYPTO_BUILTIN_VCIPHERLAST:
17305 case CRYPTO_BUILTIN_VNCIPHER:
17306 case CRYPTO_BUILTIN_VNCIPHERLAST:
17307 case CRYPTO_BUILTIN_VPMSUMB:
17308 case CRYPTO_BUILTIN_VPMSUMH:
17309 case CRYPTO_BUILTIN_VPMSUMW:
17310 case CRYPTO_BUILTIN_VPMSUMD:
17311 case CRYPTO_BUILTIN_VPMSUM:
17312 case MISC_BUILTIN_ADDG6S:
17313 case MISC_BUILTIN_DIVWEU:
17314 case MISC_BUILTIN_DIVDEU:
17315 case VSX_BUILTIN_UDIV_V2DI:
17316 case ALTIVEC_BUILTIN_VMAXUB:
17317 case ALTIVEC_BUILTIN_VMINUB:
17318 case ALTIVEC_BUILTIN_VMAXUH:
17319 case ALTIVEC_BUILTIN_VMINUH:
17320 case ALTIVEC_BUILTIN_VMAXUW:
17321 case ALTIVEC_BUILTIN_VMINUW:
17322 case P8V_BUILTIN_VMAXUD:
17323 case P8V_BUILTIN_VMINUD:
17324 h.uns_p[0] = 1;
17325 h.uns_p[1] = 1;
17326 h.uns_p[2] = 1;
17327 break;
17328
17329 /* unsigned 3 argument functions. */
17330 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17331 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17332 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17333 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17334 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17335 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17336 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17337 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17338 case VSX_BUILTIN_VPERM_16QI_UNS:
17339 case VSX_BUILTIN_VPERM_8HI_UNS:
17340 case VSX_BUILTIN_VPERM_4SI_UNS:
17341 case VSX_BUILTIN_VPERM_2DI_UNS:
17342 case VSX_BUILTIN_XXSEL_16QI_UNS:
17343 case VSX_BUILTIN_XXSEL_8HI_UNS:
17344 case VSX_BUILTIN_XXSEL_4SI_UNS:
17345 case VSX_BUILTIN_XXSEL_2DI_UNS:
17346 case CRYPTO_BUILTIN_VPERMXOR:
17347 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17348 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17349 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17350 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17351 case CRYPTO_BUILTIN_VSHASIGMAW:
17352 case CRYPTO_BUILTIN_VSHASIGMAD:
17353 case CRYPTO_BUILTIN_VSHASIGMA:
17354 h.uns_p[0] = 1;
17355 h.uns_p[1] = 1;
17356 h.uns_p[2] = 1;
17357 h.uns_p[3] = 1;
17358 break;
17359
17360 /* signed permute functions with unsigned char mask. */
17361 case ALTIVEC_BUILTIN_VPERM_16QI:
17362 case ALTIVEC_BUILTIN_VPERM_8HI:
17363 case ALTIVEC_BUILTIN_VPERM_4SI:
17364 case ALTIVEC_BUILTIN_VPERM_4SF:
17365 case ALTIVEC_BUILTIN_VPERM_2DI:
17366 case ALTIVEC_BUILTIN_VPERM_2DF:
17367 case VSX_BUILTIN_VPERM_16QI:
17368 case VSX_BUILTIN_VPERM_8HI:
17369 case VSX_BUILTIN_VPERM_4SI:
17370 case VSX_BUILTIN_VPERM_4SF:
17371 case VSX_BUILTIN_VPERM_2DI:
17372 case VSX_BUILTIN_VPERM_2DF:
17373 h.uns_p[3] = 1;
17374 break;
17375
17376 /* unsigned args, signed return. */
17377 case VSX_BUILTIN_XVCVUXDSP:
17378 case VSX_BUILTIN_XVCVUXDDP_UNS:
17379 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17380 h.uns_p[1] = 1;
17381 break;
17382
17383 /* signed args, unsigned return. */
17384 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17385 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17386 case MISC_BUILTIN_UNPACK_TD:
17387 case MISC_BUILTIN_UNPACK_V1TI:
17388 h.uns_p[0] = 1;
17389 break;
17390
17391 /* unsigned arguments, bool return (compares). */
17392 case ALTIVEC_BUILTIN_VCMPEQUB:
17393 case ALTIVEC_BUILTIN_VCMPEQUH:
17394 case ALTIVEC_BUILTIN_VCMPEQUW:
17395 case P8V_BUILTIN_VCMPEQUD:
17396 case VSX_BUILTIN_CMPGE_U16QI:
17397 case VSX_BUILTIN_CMPGE_U8HI:
17398 case VSX_BUILTIN_CMPGE_U4SI:
17399 case VSX_BUILTIN_CMPGE_U2DI:
17400 case ALTIVEC_BUILTIN_VCMPGTUB:
17401 case ALTIVEC_BUILTIN_VCMPGTUH:
17402 case ALTIVEC_BUILTIN_VCMPGTUW:
17403 case P8V_BUILTIN_VCMPGTUD:
17404 h.uns_p[1] = 1;
17405 h.uns_p[2] = 1;
17406 break;
17407
17408 /* unsigned arguments for 128-bit pack instructions. */
17409 case MISC_BUILTIN_PACK_TD:
17410 case MISC_BUILTIN_PACK_V1TI:
17411 h.uns_p[1] = 1;
17412 h.uns_p[2] = 1;
17413 break;
17414
17415 /* unsigned second arguments (vector shift right). */
17416 case ALTIVEC_BUILTIN_VSRB:
17417 case ALTIVEC_BUILTIN_VSRH:
17418 case ALTIVEC_BUILTIN_VSRW:
17419 case P8V_BUILTIN_VSRD:
17420 h.uns_p[2] = 1;
17421 break;
17422
17423 default:
17424 break;
17425 }
17426
17427 /* Figure out how many args are present. */
17428 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17429 num_args--;
17430
17431 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17432 if (!ret_type && h.uns_p[0])
17433 ret_type = builtin_mode_to_type[h.mode[0]][0];
17434
17435 if (!ret_type)
17436 fatal_error (input_location,
17437 "internal error: builtin function %qs had an unexpected "
17438 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17439
17440 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17441 arg_type[i] = NULL_TREE;
17442
17443 for (i = 0; i < num_args; i++)
17444 {
17445 int m = (int) h.mode[i+1];
17446 int uns_p = h.uns_p[i+1];
17447
17448 arg_type[i] = builtin_mode_to_type[m][uns_p];
17449 if (!arg_type[i] && uns_p)
17450 arg_type[i] = builtin_mode_to_type[m][0];
17451
17452 if (!arg_type[i])
17453 fatal_error (input_location,
17454 "internal error: builtin function %qs, argument %d "
17455 "had unexpected argument type %qs", name, i,
17456 GET_MODE_NAME (m));
17457 }
17458
17459 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17460 if (*found == NULL)
17461 {
17462 h2 = ggc_alloc<builtin_hash_struct> ();
17463 *h2 = h;
17464 *found = h2;
17465
17466 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17467 arg_type[2], NULL_TREE);
17468 }
17469
17470 return (*found)->type;
17471 }
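/* A worked example, not from the original source: vmuleub multiplies the
   even unsigned bytes, so its insn has a V8HImode result and two V16QImode
   inputs.  The call

     builtin_function_type (V8HImode, V16QImode, V16QImode, VOIDmode,
                            ALTIVEC_BUILTIN_VMULEUB, "__builtin_altivec_vmuleub")

   takes the "unsigned 2 argument functions" case above (uns_p[0..2] = 1)
   and so yields the type
   "vector unsigned short (vector unsigned char, vector unsigned char)".  */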
17472
17473 static void
17474 rs6000_common_init_builtins (void)
17475 {
17476 const struct builtin_description *d;
17477 size_t i;
17478
17479 tree opaque_ftype_opaque = NULL_TREE;
17480 tree opaque_ftype_opaque_opaque = NULL_TREE;
17481 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17482 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17483
17484 /* Create Altivec and VSX builtins on machines with at least the
17485 general purpose extensions (970 and newer) to allow the use of
17486 the target attribute. */
17487
17488 if (TARGET_EXTRA_BUILTINS)
17489 builtin_mask |= RS6000_BTM_COMMON;
17490
17491 /* Add the ternary operators. */
17492 d = bdesc_3arg;
17493 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17494 {
17495 tree type;
17496 HOST_WIDE_INT mask = d->mask;
17497
17498 if ((mask & builtin_mask) != mask)
17499 {
17500 if (TARGET_DEBUG_BUILTIN)
17501 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17502 continue;
17503 }
17504
17505 if (rs6000_overloaded_builtin_p (d->code))
17506 {
17507 if (! (type = opaque_ftype_opaque_opaque_opaque))
17508 type = opaque_ftype_opaque_opaque_opaque
17509 = build_function_type_list (opaque_V4SI_type_node,
17510 opaque_V4SI_type_node,
17511 opaque_V4SI_type_node,
17512 opaque_V4SI_type_node,
17513 NULL_TREE);
17514 }
17515 else
17516 {
17517 enum insn_code icode = d->icode;
17518 if (d->name == 0)
17519 {
17520 if (TARGET_DEBUG_BUILTIN)
17521 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17522 (long unsigned)i);
17523
17524 continue;
17525 }
17526
17527 if (icode == CODE_FOR_nothing)
17528 {
17529 if (TARGET_DEBUG_BUILTIN)
17530 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17531 d->name);
17532
17533 continue;
17534 }
17535
17536 type = builtin_function_type (insn_data[icode].operand[0].mode,
17537 insn_data[icode].operand[1].mode,
17538 insn_data[icode].operand[2].mode,
17539 insn_data[icode].operand[3].mode,
17540 d->code, d->name);
17541 }
17542
17543 def_builtin (d->name, type, d->code);
17544 }
17545
17546 /* Add the binary operators. */
17547 d = bdesc_2arg;
17548 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17549 {
17550 machine_mode mode0, mode1, mode2;
17551 tree type;
17552 HOST_WIDE_INT mask = d->mask;
17553
17554 if ((mask & builtin_mask) != mask)
17555 {
17556 if (TARGET_DEBUG_BUILTIN)
17557 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17558 continue;
17559 }
17560
17561 if (rs6000_overloaded_builtin_p (d->code))
17562 {
17563 if (! (type = opaque_ftype_opaque_opaque))
17564 type = opaque_ftype_opaque_opaque
17565 = build_function_type_list (opaque_V4SI_type_node,
17566 opaque_V4SI_type_node,
17567 opaque_V4SI_type_node,
17568 NULL_TREE);
17569 }
17570 else
17571 {
17572 enum insn_code icode = d->icode;
17573 if (d->name == 0)
17574 {
17575 if (TARGET_DEBUG_BUILTIN)
17576 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
17577 (long unsigned)i);
17578
17579 continue;
17580 }
17581
17582 if (icode == CODE_FOR_nothing)
17583 {
17584 if (TARGET_DEBUG_BUILTIN)
17585 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17586 d->name);
17587
17588 continue;
17589 }
17590
17591 mode0 = insn_data[icode].operand[0].mode;
17592 mode1 = insn_data[icode].operand[1].mode;
17593 mode2 = insn_data[icode].operand[2].mode;
17594
17595 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17596 d->code, d->name);
17597 }
17598
17599 def_builtin (d->name, type, d->code);
17600 }
17601
17602 /* Add the simple unary operators. */
17603 d = bdesc_1arg;
17604 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17605 {
17606 machine_mode mode0, mode1;
17607 tree type;
17608 HOST_WIDE_INT mask = d->mask;
17609
17610 if ((mask & builtin_mask) != mask)
17611 {
17612 if (TARGET_DEBUG_BUILTIN)
17613 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17614 continue;
17615 }
17616
17617 if (rs6000_overloaded_builtin_p (d->code))
17618 {
17619 if (! (type = opaque_ftype_opaque))
17620 type = opaque_ftype_opaque
17621 = build_function_type_list (opaque_V4SI_type_node,
17622 opaque_V4SI_type_node,
17623 NULL_TREE);
17624 }
17625 else
17626 {
17627 enum insn_code icode = d->icode;
17628 if (d->name == 0)
17629 {
17630 if (TARGET_DEBUG_BUILTIN)
17631 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
17632 (long unsigned)i);
17633
17634 continue;
17635 }
17636
17637 if (icode == CODE_FOR_nothing)
17638 {
17639 if (TARGET_DEBUG_BUILTIN)
17640 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17641 d->name);
17642
17643 continue;
17644 }
17645
17646 mode0 = insn_data[icode].operand[0].mode;
17647 mode1 = insn_data[icode].operand[1].mode;
17648
17649 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17650 d->code, d->name);
17651 }
17652
17653 def_builtin (d->name, type, d->code);
17654 }
17655
17656 /* Add the simple no-argument operators. */
17657 d = bdesc_0arg;
17658 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17659 {
17660 machine_mode mode0;
17661 tree type;
17662 HOST_WIDE_INT mask = d->mask;
17663
17664 if ((mask & builtin_mask) != mask)
17665 {
17666 if (TARGET_DEBUG_BUILTIN)
17667 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17668 continue;
17669 }
17670 if (rs6000_overloaded_builtin_p (d->code))
17671 {
17672 if (!opaque_ftype_opaque)
17673 opaque_ftype_opaque
17674 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17675 type = opaque_ftype_opaque;
17676 }
17677 else
17678 {
17679 enum insn_code icode = d->icode;
17680 if (d->name == 0)
17681 {
17682 if (TARGET_DEBUG_BUILTIN)
17683 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17684 (long unsigned) i);
17685 continue;
17686 }
17687 if (icode == CODE_FOR_nothing)
17688 {
17689 if (TARGET_DEBUG_BUILTIN)
17690 fprintf (stderr,
17691 "rs6000_builtin, skip no-argument %s (no code)\n",
17692 d->name);
17693 continue;
17694 }
17695 mode0 = insn_data[icode].operand[0].mode;
17696 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17697 d->code, d->name);
17698 }
17699 def_builtin (d->name, type, d->code);
17700 }
17701 }
17702
17703 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17704 static void
17705 init_float128_ibm (machine_mode mode)
17706 {
17707 if (!TARGET_XL_COMPAT)
17708 {
17709 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17710 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17711 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17712 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17713
17714 if (!TARGET_HARD_FLOAT)
17715 {
17716 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17717 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17718 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17719 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17720 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17721 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17722 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17723 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17724
17725 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17726 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17727 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17728 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17729 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17730 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17731 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17732 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17733 }
17734 }
17735 else
17736 {
17737 set_optab_libfunc (add_optab, mode, "_xlqadd");
17738 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17739 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17740 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17741 }
17742
17743 /* Add various conversions for IFmode to use the traditional TFmode
17744 names. */
17745 if (mode == IFmode)
17746 {
17747 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
17748 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
17749 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
17750 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
17751 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
17752 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
17753
17754 if (TARGET_POWERPC64)
17755 {
17756 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17757 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17758 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17759 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17760 }
17761 }
17762 }
17763
17764 /* Create a decl for either complex long double multiply or complex long double
17765 divide when long double is IEEE 128-bit floating point. We can't use
17766 __multc3 and __divtc3 because those names were already taken by the
17767 original IBM extended-double long double. The complex multiply/divide
17768 functions are encoded as builtin functions with a complex result and 4 scalar inputs. */
17769
17770 static void
17771 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
17772 {
17773 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
17774 name, NULL_TREE);
17775
17776 set_builtin_decl (fncode, fndecl, true);
17777
17778 if (TARGET_DEBUG_BUILTIN)
17779 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
17782 }
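/* A note on the FNCODE arithmetic, offered as illustration: the caller
   offsets into the generic complex multiply/divide ranges, e.g.
   BUILT_IN_COMPLEX_MUL_MIN + TCmode - MIN_MODE_COMPLEX_FLOAT selects the
   slot the middle end consults when expanding a TCmode complex multiply,
   so registering __mulkc3 there replaces the default __multc3 call.  */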
17783
17784 /* Set up IEEE 128-bit floating point routines. Use different names if the
17785 arguments can be passed in a vector register. The historical PowerPC
17786 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17787 continue to use that if we aren't using vector registers to pass IEEE
17788 128-bit floating point. */
17789
17790 static void
17791 init_float128_ieee (machine_mode mode)
17792 {
17793 if (FLOAT128_VECTOR_P (mode))
17794 {
17795 static bool complex_muldiv_init_p = false;
17796
17797 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
17798 we have clone or target attributes, this will be called a second
17799 time. We want to create the built-in function only once. */
17800 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
17801 {
17802 complex_muldiv_init_p = true;
17803 built_in_function fncode_mul =
17804 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
17805 - MIN_MODE_COMPLEX_FLOAT);
17806 built_in_function fncode_div =
17807 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
17808 - MIN_MODE_COMPLEX_FLOAT);
17809
17810 tree fntype = build_function_type_list (complex_long_double_type_node,
17811 long_double_type_node,
17812 long_double_type_node,
17813 long_double_type_node,
17814 long_double_type_node,
17815 NULL_TREE);
17816
17817 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
17818 create_complex_muldiv ("__divkc3", fncode_div, fntype);
17819 }
17820
17821 set_optab_libfunc (add_optab, mode, "__addkf3");
17822 set_optab_libfunc (sub_optab, mode, "__subkf3");
17823 set_optab_libfunc (neg_optab, mode, "__negkf2");
17824 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17825 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17826 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17827 set_optab_libfunc (abs_optab, mode, "__abskf2");
17828 set_optab_libfunc (powi_optab, mode, "__powikf2");
17829
17830 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17831 set_optab_libfunc (ne_optab, mode, "__nekf2");
17832 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17833 set_optab_libfunc (ge_optab, mode, "__gekf2");
17834 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17835 set_optab_libfunc (le_optab, mode, "__lekf2");
17836 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17837
17838 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17839 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17840 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17841 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17842
17843 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
17844 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17845 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
17846
17847 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
17848 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17849 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
17850
17851 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
17852 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
17853 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
17854 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
17855 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
17856 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
17857
17858 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17859 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17860 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17861 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17862
17863 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17864 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17865 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17866 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17867
17868 if (TARGET_POWERPC64)
17869 {
17870 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17871 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17872 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17873 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17874 }
17875 }
17876
17877 else
17878 {
17879 set_optab_libfunc (add_optab, mode, "_q_add");
17880 set_optab_libfunc (sub_optab, mode, "_q_sub");
17881 set_optab_libfunc (neg_optab, mode, "_q_neg");
17882 set_optab_libfunc (smul_optab, mode, "_q_mul");
17883 set_optab_libfunc (sdiv_optab, mode, "_q_div");
17884 if (TARGET_PPC_GPOPT)
17885 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
17886
17887 set_optab_libfunc (eq_optab, mode, "_q_feq");
17888 set_optab_libfunc (ne_optab, mode, "_q_fne");
17889 set_optab_libfunc (gt_optab, mode, "_q_fgt");
17890 set_optab_libfunc (ge_optab, mode, "_q_fge");
17891 set_optab_libfunc (lt_optab, mode, "_q_flt");
17892 set_optab_libfunc (le_optab, mode, "_q_fle");
17893
17894 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
17895 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
17896 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
17897 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
17898 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
17899 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
17900 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
17901 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
17902 }
17903 }
17904
17905 static void
17906 rs6000_init_libfuncs (void)
17907 {
17908 /* __float128 support. */
17909 if (TARGET_FLOAT128_TYPE)
17910 {
17911 init_float128_ibm (IFmode);
17912 init_float128_ieee (KFmode);
17913 }
17914
17915 /* AIX/Darwin/64-bit Linux quad floating point routines. */
17916 if (TARGET_LONG_DOUBLE_128)
17917 {
17918 if (!TARGET_IEEEQUAD)
17919 init_float128_ibm (TFmode);
17920
17921 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
17922 else
17923 init_float128_ieee (TFmode);
17924 }
17925 }
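/* For illustration (assuming a VSX-capable 64-bit Linux target): with the
   default IBM double-double long double, a TFmode multiply lowers to a
   call to __gcc_qmul; under -mabi=ieeelongdouble it lowers to __mulkf3
   instead; and the explicit __float128 type (KFmode) always uses the
   __*kf* routines set up by init_float128_ieee.  */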
17926
17927 /* Emit a potentially record-form instruction, setting DST from SRC.
17928 If DOT is 0, that is all; otherwise, set CCREG to the result of the
17929 signed comparison of DST with zero. If DOT is 1, the generated RTL
17930 doesn't care about the DST result; if DOT is 2, it does. If CCREG
17931 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
17932 a separate COMPARE. */
17933
17934 void
17935 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
17936 {
17937 if (dot == 0)
17938 {
17939 emit_move_insn (dst, src);
17940 return;
17941 }
17942
17943 if (cc_reg_not_cr0_operand (ccreg, CCmode))
17944 {
17945 emit_move_insn (dst, src);
17946 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
17947 return;
17948 }
17949
17950 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
17951 if (dot == 1)
17952 {
17953 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
17954 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
17955 }
17956 else
17957 {
17958 rtx set = gen_rtx_SET (dst, src);
17959 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
17960 }
17961 }
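/* For illustration, the RTL shapes emitted above when CCREG is CR0:

     dot == 1:  (parallel [(set CR0 (compare:CC src (const_int 0)))
                           (clobber dst)])
     dot == 2:  (parallel [(set CR0 (compare:CC src (const_int 0)))
                           (set dst src)])

   With any other CC register, a plain SET of DST is emitted first and
   CCREG is then set from a separate COMPARE of DST with zero.  */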
17962
17963 \f
17964 /* A validation routine: say whether CODE, a condition code, and MODE
17965 match. The other alternatives either don't make sense or should
17966 never be generated. */
17967
17968 void
17969 validate_condition_mode (enum rtx_code code, machine_mode mode)
17970 {
17971 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
17972 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
17973 && GET_MODE_CLASS (mode) == MODE_CC);
17974
17975 /* These don't make sense. */
17976 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
17977 || mode != CCUNSmode);
17978
17979 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
17980 || mode == CCUNSmode);
17981
17982 gcc_assert (mode == CCFPmode
17983 || (code != ORDERED && code != UNORDERED
17984 && code != UNEQ && code != LTGT
17985 && code != UNGT && code != UNLT
17986 && code != UNGE && code != UNLE));
17987
17988 /* These should never be generated except for
17989 flag_finite_math_only. */
17990 gcc_assert (mode != CCFPmode
17991 || flag_finite_math_only
17992 || (code != LE && code != GE
17993 && code != UNEQ && code != LTGT
17994 && code != UNGT && code != UNLT));
17995
17996 /* These are invalid; the information is not there. */
17997 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
17998 }
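/* For example, (gtu:CCUNS ...) is fine, while an unsigned comparison in
   plain CCmode or a signed GT in CCUNSmode would trip the asserts above,
   as would anything other than EQ or NE in CCEQmode.  */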
17999
18000 \f
18001 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18002 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18003 not zero, store there the bit offset (counted from the right) where
18004 the single stretch of 1 bits begins; and similarly for B, the bit
18005 offset where it ends. */
18006
18007 bool
18008 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18009 {
18010 unsigned HOST_WIDE_INT val = INTVAL (mask);
18011 unsigned HOST_WIDE_INT bit;
18012 int nb, ne;
18013 int n = GET_MODE_PRECISION (mode);
18014
18015 if (mode != DImode && mode != SImode)
18016 return false;
18017
18018 if (INTVAL (mask) >= 0)
18019 {
18020 bit = val & -val;
18021 ne = exact_log2 (bit);
18022 nb = exact_log2 (val + bit);
18023 }
18024 else if (val + 1 == 0)
18025 {
18026 nb = n;
18027 ne = 0;
18028 }
18029 else if (val & 1)
18030 {
18031 val = ~val;
18032 bit = val & -val;
18033 nb = exact_log2 (bit);
18034 ne = exact_log2 (val + bit);
18035 }
18036 else
18037 {
18038 bit = val & -val;
18039 ne = exact_log2 (bit);
18040 if (val + bit == 0)
18041 nb = n;
18042 else
18043 nb = 0;
18044 }
18045
18046 nb--;
18047
18048 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18049 return false;
18050
18051 if (b)
18052 *b = nb;
18053 if (e)
18054 *e = ne;
18055
18056 return true;
18057 }
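/* A worked example, not from the original source: for MASK = 0x00ffff00
   in SImode, bit = val & -val = 0x100 gives ne = 8, and
   exact_log2 (val + bit) = exact_log2 (0x01000000) = 24 gives nb = 23
   after the decrement, so the single stretch of ones runs from bit 8
   (*e) up through bit 23 (*b).  Negative values take the ~val path and
   describe the wrap-around masks used by rotate-and-mask insns.  */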
18058
18059 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18060 or rldicr instruction, to implement an AND with it in mode MODE. */
18061
18062 bool
18063 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18064 {
18065 int nb, ne;
18066
18067 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18068 return false;
18069
18070 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18071 does not wrap. */
18072 if (mode == DImode)
18073 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18074
18075 /* For SImode, rlwinm can do everything. */
18076 if (mode == SImode)
18077 return (nb < 32 && ne < 32);
18078
18079 return false;
18080 }
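/* E.g. in DImode, 0xffffffff (ne == 0) can be done with rldicl and
   0xffff000000000000 (nb == 63) with rldicr, while a wrap-around mask
   such as 0xf00000000000000f would need a rotate as well and is
   therefore rejected here.  */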
18081
18082 /* Return the instruction template for an AND with mask in mode MODE, with
18083 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18084
18085 const char *
18086 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18087 {
18088 int nb, ne;
18089
18090 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18091 gcc_unreachable ();
18092
18093 if (mode == DImode && ne == 0)
18094 {
18095 operands[3] = GEN_INT (63 - nb);
18096 if (dot)
18097 return "rldicl. %0,%1,0,%3";
18098 return "rldicl %0,%1,0,%3";
18099 }
18100
18101 if (mode == DImode && nb == 63)
18102 {
18103 operands[3] = GEN_INT (63 - ne);
18104 if (dot)
18105 return "rldicr. %0,%1,0,%3";
18106 return "rldicr %0,%1,0,%3";
18107 }
18108
18109 if (nb < 32 && ne < 32)
18110 {
18111 operands[3] = GEN_INT (31 - nb);
18112 operands[4] = GEN_INT (31 - ne);
18113 if (dot)
18114 return "rlwinm. %0,%1,0,%3,%4";
18115 return "rlwinm %0,%1,0,%3,%4";
18116 }
18117
18118 gcc_unreachable ();
18119 }
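/* For illustration: with DImode operands[2] == 0xffffffff we get ne == 0
   and nb == 31, so the first case fires and the template becomes
   "rldicl %0,%1,0,32", i.e. a zero extension of the low word.  */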
18120
18121 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18122 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18123 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18124
18125 bool
18126 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18127 {
18128 int nb, ne;
18129
18130 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18131 return false;
18132
18133 int n = GET_MODE_PRECISION (mode);
18134 int sh = -1;
18135
18136 if (CONST_INT_P (XEXP (shift, 1)))
18137 {
18138 sh = INTVAL (XEXP (shift, 1));
18139 if (sh < 0 || sh >= n)
18140 return false;
18141 }
18142
18143 rtx_code code = GET_CODE (shift);
18144
18145 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18146 if (sh == 0)
18147 code = ROTATE;
18148
18149 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18150 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18151 code = ASHIFT;
18152 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18153 {
18154 code = LSHIFTRT;
18155 sh = n - sh;
18156 }
18157
18158 /* DImode rotates need rld*. */
18159 if (mode == DImode && code == ROTATE)
18160 return (nb == 63 || ne == 0 || ne == sh);
18161
18162 /* SImode rotates need rlw*. */
18163 if (mode == SImode && code == ROTATE)
18164 return (nb < 32 && ne < 32 && sh < 32);
18165
18166 /* Wrap-around masks are only okay for rotates. */
18167 if (ne > nb)
18168 return false;
18169
18170 /* Variable shifts are only okay for rotates. */
18171 if (sh < 0)
18172 return false;
18173
18174 /* Don't allow ASHIFT if the mask is wrong for that. */
18175 if (code == ASHIFT && ne < sh)
18176 return false;
18177
18178 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18179 if the mask is wrong for that. */
18180 if (nb < 32 && ne < 32 && sh < 32
18181 && !(code == LSHIFTRT && nb >= 32 - sh))
18182 return true;
18183
18184 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18185 if the mask is wrong for that. */
18186 if (code == LSHIFTRT)
18187 sh = 64 - sh;
18188 if (nb == 63 || ne == 0 || ne == sh)
18189 return !(code == LSHIFTRT && nb >= sh);
18190
18191 return false;
18192 }
18193
18194 /* Return the instruction template for a shift with mask in mode MODE, with
18195 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18196
18197 const char *
18198 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18199 {
18200 int nb, ne;
18201
18202 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18203 gcc_unreachable ();
18204
18205 if (mode == DImode && ne == 0)
18206 {
18207 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18208 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18209 operands[3] = GEN_INT (63 - nb);
18210 if (dot)
18211 return "rld%I2cl. %0,%1,%2,%3";
18212 return "rld%I2cl %0,%1,%2,%3";
18213 }
18214
18215 if (mode == DImode && nb == 63)
18216 {
18217 operands[3] = GEN_INT (63 - ne);
18218 if (dot)
18219 return "rld%I2cr. %0,%1,%2,%3";
18220 return "rld%I2cr %0,%1,%2,%3";
18221 }
18222
18223 if (mode == DImode
18224 && GET_CODE (operands[4]) != LSHIFTRT
18225 && CONST_INT_P (operands[2])
18226 && ne == INTVAL (operands[2]))
18227 {
18228 operands[3] = GEN_INT (63 - nb);
18229 if (dot)
18230 return "rld%I2c. %0,%1,%2,%3";
18231 return "rld%I2c %0,%1,%2,%3";
18232 }
18233
18234 if (nb < 32 && ne < 32)
18235 {
18236 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18237 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18238 operands[3] = GEN_INT (31 - nb);
18239 operands[4] = GEN_INT (31 - ne);
18240 /* This insn can also be a 64-bit rotate with mask that really makes
18241 it just a shift right (with mask); the %h modifiers below adjust for
18242 that situation (the shift count is >= 32 in that case). */
18243 if (dot)
18244 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18245 return "rlw%I2nm %0,%1,%h2,%3,%4";
18246 }
18247
18248 gcc_unreachable ();
18249 }
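/* A worked example, not from the original source: for a DImode
   (lshiftrt x 8) under the mask 0x00ffffffffffffff (nb == 55, ne == 0),
   the first case rewrites the shift count to 64 - 8 = 56 and emits
   "rldicl %0,%1,56,8", which is exactly srdi %0,%1,8.  */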
18250
18251 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18252 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18253 ASHIFT, or LSHIFTRT) in mode MODE. */
18254
18255 bool
18256 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18257 {
18258 int nb, ne;
18259
18260 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18261 return false;
18262
18263 int n = GET_MODE_PRECISION (mode);
18264
18265 int sh = INTVAL (XEXP (shift, 1));
18266 if (sh < 0 || sh >= n)
18267 return false;
18268
18269 rtx_code code = GET_CODE (shift);
18270
18271 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18272 if (sh == 0)
18273 code = ROTATE;
18274
18275 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18276 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18277 code = ASHIFT;
18278 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18279 {
18280 code = LSHIFTRT;
18281 sh = n - sh;
18282 }
18283
18284 /* DImode rotates need rldimi. */
18285 if (mode == DImode && code == ROTATE)
18286 return (ne == sh);
18287
18288 /* SImode rotates need rlwimi. */
18289 if (mode == SImode && code == ROTATE)
18290 return (nb < 32 && ne < 32 && sh < 32);
18291
18292 /* Wrap-around masks are only okay for rotates. */
18293 if (ne > nb)
18294 return false;
18295
18296 /* Don't allow ASHIFT if the mask is wrong for that. */
18297 if (code == ASHIFT && ne < sh)
18298 return false;
18299
18300 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18301 if the mask is wrong for that. */
18302 if (nb < 32 && ne < 32 && sh < 32
18303 && !(code == LSHIFTRT && nb >= 32 - sh))
18304 return true;
18305
18306 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18307 if the mask is wrong for that. */
18308 if (code == LSHIFTRT)
18309 sh = 64 - sh;
18310 if (ne == sh)
18311 return !(code == LSHIFTRT && nb >= sh);
18312
18313 return false;
18314 }
18315
18316 /* Return the instruction template for an insert with mask in mode MODE, with
18317 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18318
18319 const char *
18320 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18321 {
18322 int nb, ne;
18323
18324 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18325 gcc_unreachable ();
18326
18327 /* Prefer rldimi because rlwimi is cracked. */
18328 if (TARGET_POWERPC64
18329 && (!dot || mode == DImode)
18330 && GET_CODE (operands[4]) != LSHIFTRT
18331 && ne == INTVAL (operands[2]))
18332 {
18333 operands[3] = GEN_INT (63 - nb);
18334 if (dot)
18335 return "rldimi. %0,%1,%2,%3";
18336 return "rldimi %0,%1,%2,%3";
18337 }
18338
18339 if (nb < 32 && ne < 32)
18340 {
18341 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18342 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18343 operands[3] = GEN_INT (31 - nb);
18344 operands[4] = GEN_INT (31 - ne);
18345 if (dot)
18346 return "rlwimi. %0,%1,%2,%3,%4";
18347 return "rlwimi %0,%1,%2,%3,%4";
18348 }
18349
18350 gcc_unreachable ();
18351 }
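/* For illustration: inserting (ashift y 32) under the DImode mask
   0xffffffff00000000 gives nb == 63 and ne == 32 == INTVAL (operands[2]),
   so on a 64-bit target this emits "rldimi %0,%1,32,0", placing the low
   word of %1 into the high word of %0.  */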
18352
18353 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18354 using two machine instructions. */
18355
18356 bool
18357 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18358 {
18359 /* There are two kinds of AND we can handle with two insns:
18360 1) those we can do with two rl* insns;
18361 2) ori[s];xori[s].
18362
18363 We do not handle the second case yet. */
18364
18365 /* If there is just one stretch of ones, we can do it. */
18366 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18367 return true;
18368
18369 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18370 one insn, we can do the whole thing with two. */
18371 unsigned HOST_WIDE_INT val = INTVAL (c);
18372 unsigned HOST_WIDE_INT bit1 = val & -val;
18373 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18374 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18375 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18376 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18377 }
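
/* A worked example of the hole-filling arithmetic above, for the
   illustrative mask val = 0xff0ff (one hole at bits 8..11):
     bit1 = val & -val          = 0x00001  (lowest set bit)
     bit2 = (val + bit1) & ~val = 0x00100  (lowest zero above the low run)
     val1 = (val + bit1) & val  = 0xff000  (val with the low run cleared)
     bit3 = val1 & -val1        = 0x01000  (first bit above the hole)
   val + bit3 - bit2 = 0xfffff is a single stretch of ones, a valid
   rl* mask, so this AND can be done in two insns. */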
18378
18379 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18380 If EXPAND is true, split rotate-and-mask instructions we generate to
18381 their constituent parts as well (this is used during expand); if DOT
18382 is 1, make the last insn a record-form instruction clobbering the
18383 destination GPR and setting the CC reg (from operands[3]); if 2, set
18384 that GPR as well as the CC reg. */
18385
18386 void
18387 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18388 {
18389 gcc_assert (!(expand && dot));
18390
18391 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18392
18393 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18394 shift right. This generates better code than doing the masks without
18395 shifts, or shifting first right and then left. */
18396 int nb, ne;
18397 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18398 {
18399 gcc_assert (mode == DImode);
18400
18401 int shift = 63 - nb;
18402 if (expand)
18403 {
18404 rtx tmp1 = gen_reg_rtx (DImode);
18405 rtx tmp2 = gen_reg_rtx (DImode);
18406 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18407 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18408 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18409 }
18410 else
18411 {
18412 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18413 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18414 emit_move_insn (operands[0], tmp);
18415 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18416 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18417 }
18418 return;
18419 }
18420
18421 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18422 that does the rest. */
18423 unsigned HOST_WIDE_INT bit1 = val & -val;
18424 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18425 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18426 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18427
18428 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18429 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18430
18431 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18432
18433 /* Two "no-rotate"-and-mask instructions, for SImode. */
18434 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18435 {
18436 gcc_assert (mode == SImode);
18437
18438 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18439 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18440 emit_move_insn (reg, tmp);
18441 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18442 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18443 return;
18444 }
18445
18446 gcc_assert (mode == DImode);
18447
18448 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18449 insns; we have to do the first in SImode, because it wraps. */
18450 if (mask2 <= 0xffffffff
18451 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18452 {
18453 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18454 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18455 GEN_INT (mask1));
18456 rtx reg_low = gen_lowpart (SImode, reg);
18457 emit_move_insn (reg_low, tmp);
18458 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18459 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18460 return;
18461 }
18462
18463 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18464 at the top end), rotate back and clear the other hole. */
18465 int right = exact_log2 (bit3);
18466 int left = 64 - right;
18467
18468 /* Rotate the mask too. */
18469 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18470
18471 if (expand)
18472 {
18473 rtx tmp1 = gen_reg_rtx (DImode);
18474 rtx tmp2 = gen_reg_rtx (DImode);
18475 rtx tmp3 = gen_reg_rtx (DImode);
18476 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18477 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18478 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18479 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18480 }
18481 else
18482 {
18483 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18484 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18485 emit_move_insn (operands[0], tmp);
18486 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18487 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18488 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18489 }
18490 }
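
/* Continuing the 0xff0ff example in DImode via the two-rld* path above:
   mask1 = ~0xf00 (everything except the hole), mask2 = 0xfffff,
   right = 12 and left = 52. Rotating the source left by 52 moves the
   hole to bits 60..63, the rotated mask1 works out to
   0x0fffffffffffffff, and rotating back by 12 and applying mask2
   finishes the AND, giving two rotate-and-mask insns in total. */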
18491 \f
18492 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
18493 for lfq and stfq insns iff the registers are hard registers. */
18494
18495 int
18496 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18497 {
18498 /* We might have been passed a SUBREG. */
18499 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
18500 return 0;
18501
18502 /* We might have been passed non-floating-point registers. */
18503 if (!FP_REGNO_P (REGNO (reg1))
18504 || !FP_REGNO_P (REGNO (reg2)))
18505 return 0;
18506
18507 return (REGNO (reg1) == REGNO (reg2) - 1);
18508 }
18509
18510 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18511 addr1 and addr2 must be in consecutive memory locations
18512 (addr2 == addr1 + 8). */
18513
18514 int
18515 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18516 {
18517 rtx addr1, addr2;
18518 unsigned int reg1, reg2;
18519 int offset1, offset2;
18520
18521 /* The mems cannot be volatile. */
18522 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18523 return 0;
18524
18525 addr1 = XEXP (mem1, 0);
18526 addr2 = XEXP (mem2, 0);
18527
18528 /* Extract an offset (if used) from the first addr. */
18529 if (GET_CODE (addr1) == PLUS)
18530 {
18531 /* If not a REG, return zero. */
18532 if (GET_CODE (XEXP (addr1, 0)) != REG)
18533 return 0;
18534 else
18535 {
18536 reg1 = REGNO (XEXP (addr1, 0));
18537 /* The offset must be constant! */
18538 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
18539 return 0;
18540 offset1 = INTVAL (XEXP (addr1, 1));
18541 }
18542 }
18543 else if (GET_CODE (addr1) != REG)
18544 return 0;
18545 else
18546 {
18547 reg1 = REGNO (addr1);
18548 /* This was a simple (mem (reg)) expression. Offset is 0. */
18549 offset1 = 0;
18550 }
18551
18552 /* And now for the second addr. */
18553 if (GET_CODE (addr2) == PLUS)
18554 {
18555 /* If not a REG, return zero. */
18556 if (GET_CODE (XEXP (addr2, 0)) != REG)
18557 return 0;
18558 else
18559 {
18560 reg2 = REGNO (XEXP (addr2, 0));
18561 /* The offset must be constant. */
18562 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
18563 return 0;
18564 offset2 = INTVAL (XEXP (addr2, 1));
18565 }
18566 }
18567 else if (GET_CODE (addr2) != REG)
18568 return 0;
18569 else
18570 {
18571 reg2 = REGNO (addr2);
18572 /* This was a simple (mem (reg)) expression. Offset is 0. */
18573 offset2 = 0;
18574 }
18575
18576 /* Both of these must have the same base register. */
18577 if (reg1 != reg2)
18578 return 0;
18579
18580 /* The offset for the second addr must be 8 more than the first addr. */
18581 if (offset2 != offset1 + 8)
18582 return 0;
18583
18584 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18585 instructions. */
18586 return 1;
18587 }
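
/* For example, (mem (plus (reg 3) (const_int 16))) followed by
   (mem (plus (reg 3) (const_int 24))) passes: the base registers match
   and the second offset is exactly 8 more than the first (the register
   number here is illustrative). Different base registers, or offsets in
   the wrong order, return 0. */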
18588 \f
18589 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18590 need to use DDmode, in all other cases we can use the same mode. */
18591 static machine_mode
18592 rs6000_secondary_memory_needed_mode (machine_mode mode)
18593 {
18594 if (lra_in_progress && mode == SDmode)
18595 return DDmode;
18596 return mode;
18597 }
18598
18599 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18600 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18601 only work on the traditional altivec registers, note if an altivec register
18602 was chosen. */
18603
18604 static enum rs6000_reg_type
18605 register_to_reg_type (rtx reg, bool *is_altivec)
18606 {
18607 HOST_WIDE_INT regno;
18608 enum reg_class rclass;
18609
18610 if (GET_CODE (reg) == SUBREG)
18611 reg = SUBREG_REG (reg);
18612
18613 if (!REG_P (reg))
18614 return NO_REG_TYPE;
18615
18616 regno = REGNO (reg);
18617 if (regno >= FIRST_PSEUDO_REGISTER)
18618 {
18619 if (!lra_in_progress && !reload_completed)
18620 return PSEUDO_REG_TYPE;
18621
18622 regno = true_regnum (reg);
18623 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
18624 return PSEUDO_REG_TYPE;
18625 }
18626
18627 gcc_assert (regno >= 0);
18628
18629 if (is_altivec && ALTIVEC_REGNO_P (regno))
18630 *is_altivec = true;
18631
18632 rclass = rs6000_regno_regclass[regno];
18633 return reg_class_to_reg_type[(int)rclass];
18634 }
18635
18636 /* Helper function to return the cost of adding a TOC entry address. */
18637
18638 static inline int
18639 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18640 {
18641 int ret;
18642
18643 if (TARGET_CMODEL != CMODEL_SMALL)
18644 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18645
18646 else
18647 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18648
18649 return ret;
18650 }
18651
18652 /* Helper function for rs6000_secondary_reload to determine whether the memory
18653 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18654 needs reloading. Return negative if the memory is not handled by the memory
18655 helper functions (so a different reload method should be tried), 0 if no
18656 additional instructions are needed, and positive to give the extra cost for
18657 the memory. */
18658
18659 static int
18660 rs6000_secondary_reload_memory (rtx addr,
18661 enum reg_class rclass,
18662 machine_mode mode)
18663 {
18664 int extra_cost = 0;
18665 rtx reg, and_arg, plus_arg0, plus_arg1;
18666 addr_mask_type addr_mask;
18667 const char *type = NULL;
18668 const char *fail_msg = NULL;
18669
18670 if (GPR_REG_CLASS_P (rclass))
18671 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18672
18673 else if (rclass == FLOAT_REGS)
18674 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18675
18676 else if (rclass == ALTIVEC_REGS)
18677 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18678
18679 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18680 else if (rclass == VSX_REGS)
18681 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18682 & ~RELOAD_REG_AND_M16);
18683
18684 /* If the register allocator hasn't made up its mind yet on the register
18685 class to use, settle on defaults to use. */
18686 else if (rclass == NO_REGS)
18687 {
18688 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18689 & ~RELOAD_REG_AND_M16);
18690
18691 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18692 addr_mask &= ~(RELOAD_REG_INDEXED
18693 | RELOAD_REG_PRE_INCDEC
18694 | RELOAD_REG_PRE_MODIFY);
18695 }
18696
18697 else
18698 addr_mask = 0;
18699
18700 /* If the register isn't valid in this register class, just return now. */
18701 if ((addr_mask & RELOAD_REG_VALID) == 0)
18702 {
18703 if (TARGET_DEBUG_ADDR)
18704 {
18705 fprintf (stderr,
18706 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18707 "not valid in class\n",
18708 GET_MODE_NAME (mode), reg_class_names[rclass]);
18709 debug_rtx (addr);
18710 }
18711
18712 return -1;
18713 }
18714
18715 switch (GET_CODE (addr))
18716 {
18717 /* Does the register class support auto update forms for this mode? We
18718 don't need a scratch register, since the powerpc only supports
18719 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18720 case PRE_INC:
18721 case PRE_DEC:
18722 reg = XEXP (addr, 0);
18723 if (!base_reg_operand (addr, GET_MODE (reg)))
18724 {
18725 fail_msg = "no base register #1";
18726 extra_cost = -1;
18727 }
18728
18729 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18730 {
18731 extra_cost = 1;
18732 type = "update";
18733 }
18734 break;
18735
18736 case PRE_MODIFY:
18737 reg = XEXP (addr, 0);
18738 plus_arg1 = XEXP (addr, 1);
18739 if (!base_reg_operand (reg, GET_MODE (reg))
18740 || GET_CODE (plus_arg1) != PLUS
18741 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18742 {
18743 fail_msg = "bad PRE_MODIFY";
18744 extra_cost = -1;
18745 }
18746
18747 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18748 {
18749 extra_cost = 1;
18750 type = "update";
18751 }
18752 break;
18753
18754 /* Do we need to simulate AND -16 to clear the bottom address bits used
18755 in VMX load/stores? Only allow the AND for vector sizes. */
18756 case AND:
18757 and_arg = XEXP (addr, 0);
18758 if (GET_MODE_SIZE (mode) != 16
18759 || GET_CODE (XEXP (addr, 1)) != CONST_INT
18760 || INTVAL (XEXP (addr, 1)) != -16)
18761 {
18762 fail_msg = "bad Altivec AND #1";
18763 extra_cost = -1;
18764 }
18765
18766 if (rclass != ALTIVEC_REGS)
18767 {
18768 if (legitimate_indirect_address_p (and_arg, false))
18769 extra_cost = 1;
18770
18771 else if (legitimate_indexed_address_p (and_arg, false))
18772 extra_cost = 2;
18773
18774 else
18775 {
18776 fail_msg = "bad Altivec AND #2";
18777 extra_cost = -1;
18778 }
18779
18780 type = "and";
18781 }
18782 break;
18783
18784 /* If this is an indirect address, make sure it is a base register. */
18785 case REG:
18786 case SUBREG:
18787 if (!legitimate_indirect_address_p (addr, false))
18788 {
18789 extra_cost = 1;
18790 type = "move";
18791 }
18792 break;
18793
18794 /* If this is an indexed address, make sure the register class can handle
18795 indexed addresses for this mode. */
18796 case PLUS:
18797 plus_arg0 = XEXP (addr, 0);
18798 plus_arg1 = XEXP (addr, 1);
18799
18800 /* (plus (plus (reg) (constant)) (constant)) is generated during
18801 push_reload processing, so handle it now. */
18802 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
18803 {
18804 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18805 {
18806 extra_cost = 1;
18807 type = "offset";
18808 }
18809 }
18810
18811 /* (plus (plus (reg) (constant)) (reg)) is also generated during
18812 push_reload processing, so handle it now. */
18813 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
18814 {
18815 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
18816 {
18817 extra_cost = 1;
18818 type = "indexed #2";
18819 }
18820 }
18821
18822 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
18823 {
18824 fail_msg = "no base register #2";
18825 extra_cost = -1;
18826 }
18827
18828 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
18829 {
18830 if ((addr_mask & RELOAD_REG_INDEXED) == 0
18831 || !legitimate_indexed_address_p (addr, false))
18832 {
18833 extra_cost = 1;
18834 type = "indexed";
18835 }
18836 }
18837
18838 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
18839 && CONST_INT_P (plus_arg1))
18840 {
18841 if (!quad_address_offset_p (INTVAL (plus_arg1)))
18842 {
18843 extra_cost = 1;
18844 type = "vector d-form offset";
18845 }
18846 }
18847
18848 /* Make sure the register class can handle offset addresses. */
18849 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
18850 {
18851 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18852 {
18853 extra_cost = 1;
18854 type = "offset #2";
18855 }
18856 }
18857
18858 else
18859 {
18860 fail_msg = "bad PLUS";
18861 extra_cost = -1;
18862 }
18863
18864 break;
18865
18866 case LO_SUM:
18867 /* Quad offsets are restricted and can't handle normal addresses. */
18868 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18869 {
18870 extra_cost = -1;
18871 type = "vector d-form lo_sum";
18872 }
18873
18874 else if (!legitimate_lo_sum_address_p (mode, addr, false))
18875 {
18876 fail_msg = "bad LO_SUM";
18877 extra_cost = -1;
18878 }
18879
18880 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18881 {
18882 extra_cost = 1;
18883 type = "lo_sum";
18884 }
18885 break;
18886
18887 /* Static addresses need to create a TOC entry. */
18888 case CONST:
18889 case SYMBOL_REF:
18890 case LABEL_REF:
18891 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18892 {
18893 extra_cost = -1;
18894 type = "vector d-form lo_sum #2";
18895 }
18896
18897 else
18898 {
18899 type = "address";
18900 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
18901 }
18902 break;
18903
18904 /* TOC references look like offsetable memory. */
18905 case UNSPEC:
18906 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
18907 {
18908 fail_msg = "bad UNSPEC";
18909 extra_cost = -1;
18910 }
18911
18912 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18913 {
18914 extra_cost = -1;
18915 type = "vector d-form lo_sum #3";
18916 }
18917
18918 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18919 {
18920 extra_cost = 1;
18921 type = "toc reference";
18922 }
18923 break;
18924
18925 default:
18926 {
18927 fail_msg = "bad address";
18928 extra_cost = -1;
18929 }
18930 }
18931
18932 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
18933 {
18934 if (extra_cost < 0)
18935 fprintf (stderr,
18936 "rs6000_secondary_reload_memory error: mode = %s, "
18937 "class = %s, addr_mask = '%s', %s\n",
18938 GET_MODE_NAME (mode),
18939 reg_class_names[rclass],
18940 rs6000_debug_addr_mask (addr_mask, false),
18941 (fail_msg != NULL) ? fail_msg : "<bad address>");
18942
18943 else
18944 fprintf (stderr,
18945 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18946 "addr_mask = '%s', extra cost = %d, %s\n",
18947 GET_MODE_NAME (mode),
18948 reg_class_names[rclass],
18949 rs6000_debug_addr_mask (addr_mask, false),
18950 extra_cost,
18951 (type) ? type : "<none>");
18952
18953 debug_rtx (addr);
18954 }
18955
18956 return extra_cost;
18957 }
18958
18959 /* Helper function for rs6000_secondary_reload to return true if a move to a
18960 different register class is really a simple move. */
18961
18962 static bool
18963 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
18964 enum rs6000_reg_type from_type,
18965 machine_mode mode)
18966 {
18967 int size = GET_MODE_SIZE (mode);
18968
18969 /* Add support for various direct moves available. In this function, we only
18970 look at cases where we don't need any extra registers, and one or more
18971 simple move insns are issued. Originally small integers are not allowed
18972 in FPR/VSX registers. Single precision binary floating is not a simple
18973 move because we need to convert to the single precision memory layout.
18974 The 4-byte SDmode can be moved. TDmode values are disallowed since they
18975 need special direct move handling, which we do not support yet. */
18976 if (TARGET_DIRECT_MOVE
18977 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
18978 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
18979 {
18980 if (TARGET_POWERPC64)
18981 {
18982 /* ISA 2.07: MTVSRD or MFVSRD. */
18983 if (size == 8)
18984 return true;
18985
18986 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
18987 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
18988 return true;
18989 }
18990
18991 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
18992 if (TARGET_P8_VECTOR)
18993 {
18994 if (mode == SImode)
18995 return true;
18996
18997 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
18998 return true;
18999 }
19000
19001 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19002 if (mode == SDmode)
19003 return true;
19004 }
19005
19006 /* Power6+: MFTGPR or MFFGPR. */
19007 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19008 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19009 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19010 return true;
19011
19012 /* Move to/from SPR. */
19013 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19014 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19015 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19016 return true;
19017
19018 return false;
19019 }
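
/* For example, on a 64-bit ISA 2.07 target a DImode move between a GPR
   and a VSX register is a simple move (one mtvsrd or mfvsrd), so this
   returns true; an SFmode move between the same classes is not, because
   of the single precision memory-layout conversion noted above. */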
19020
19021 /* Direct move helper function for rs6000_secondary_reload. Handle all of
19022 the special direct moves that involve allocating an extra register.
19023 Return true if there is such a move, storing the insn code of the helper
19024 function and the extra cost in SRI; return false if not. */
19025
19026 static bool
19027 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19028 enum rs6000_reg_type from_type,
19029 machine_mode mode,
19030 secondary_reload_info *sri,
19031 bool altivec_p)
19032 {
19033 bool ret = false;
19034 enum insn_code icode = CODE_FOR_nothing;
19035 int cost = 0;
19036 int size = GET_MODE_SIZE (mode);
19037
19038 if (TARGET_POWERPC64 && size == 16)
19039 {
19040 /* Handle moving 128-bit values from GPRs to VSX registers on
19041 ISA 2.07 (power8, power9) when running in 64-bit mode using
19042 XXPERMDI to glue the two 64-bit values back together. */
19043 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19044 {
19045 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19046 icode = reg_addr[mode].reload_vsx_gpr;
19047 }
19048
19049 /* Handle moving 128-bit values from VSX registers to GPRs on
19050 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19051 bottom 64-bit value. */
19052 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19053 {
19054 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19055 icode = reg_addr[mode].reload_gpr_vsx;
19056 }
19057 }
19058
19059 else if (TARGET_POWERPC64 && mode == SFmode)
19060 {
19061 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19062 {
19063 cost = 3; /* xscvdpspn, mfvsrd, and. */
19064 icode = reg_addr[mode].reload_gpr_vsx;
19065 }
19066
19067 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19068 {
19069 cost = 2; /* mtvsrwz, xscvspdpn. */
19070 icode = reg_addr[mode].reload_vsx_gpr;
19071 }
19072 }
19073
19074 else if (!TARGET_POWERPC64 && size == 8)
19075 {
19076 /* Handle moving 64-bit values from GPRs to floating point registers on
19077 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19078 32-bit values back together. Altivec register classes must be handled
19079 specially since a different instruction is used, and the secondary
19080 reload support requires a single instruction class in the scratch
19081 register constraint. However, right now TFmode is not allowed in
19082 Altivec registers, so the pattern will never match. */
19083 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19084 {
19085 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19086 icode = reg_addr[mode].reload_fpr_gpr;
19087 }
19088 }
19089
19090 if (icode != CODE_FOR_nothing)
19091 {
19092 ret = true;
19093 if (sri)
19094 {
19095 sri->icode = icode;
19096 sri->extra_cost = cost;
19097 }
19098 }
19099
19100 return ret;
19101 }
19102
19103 /* Return whether a move between two register classes can be done either
19104 directly (simple move) or via a pattern that uses a single extra temporary
19105 (using ISA 2.07's direct move in this case). */
19106
19107 static bool
19108 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19109 enum rs6000_reg_type from_type,
19110 machine_mode mode,
19111 secondary_reload_info *sri,
19112 bool altivec_p)
19113 {
19114 /* Fall back to load/store reloads if either type is not a register. */
19115 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19116 return false;
19117
19118 /* If we haven't allocated registers yet, assume the move can be done for the
19119 standard register types. */
19120 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19121 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19122 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19123 return true;
19124
19125 /* A move within the same set of registers is a simple move for
19126 non-specialized registers. */
19127 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19128 return true;
19129
19130 /* Check whether a simple move can be done directly. */
19131 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19132 {
19133 if (sri)
19134 {
19135 sri->icode = CODE_FOR_nothing;
19136 sri->extra_cost = 0;
19137 }
19138 return true;
19139 }
19140
19141 /* Now check if we can do it in a few steps. */
19142 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19143 altivec_p);
19144 }
19145
19146 /* Inform reload about cases where moving X with a mode MODE to a register in
19147 RCLASS requires an extra scratch or immediate register. Return the class
19148 needed for the immediate register.
19149
19150 For VSX and Altivec, we may need a register to convert sp+offset into
19151 reg+sp.
19152
19153 For misaligned 64-bit gpr loads and stores we need a register to
19154 convert an offset address to indirect. */
19155
19156 static reg_class_t
19157 rs6000_secondary_reload (bool in_p,
19158 rtx x,
19159 reg_class_t rclass_i,
19160 machine_mode mode,
19161 secondary_reload_info *sri)
19162 {
19163 enum reg_class rclass = (enum reg_class) rclass_i;
19164 reg_class_t ret = ALL_REGS;
19165 enum insn_code icode;
19166 bool default_p = false;
19167 bool done_p = false;
19168
19169 /* Allow subreg of memory before/during reload. */
19170 bool memory_p = (MEM_P (x)
19171 || (!reload_completed && GET_CODE (x) == SUBREG
19172 && MEM_P (SUBREG_REG (x))));
19173
19174 sri->icode = CODE_FOR_nothing;
19175 sri->t_icode = CODE_FOR_nothing;
19176 sri->extra_cost = 0;
19177 icode = ((in_p)
19178 ? reg_addr[mode].reload_load
19179 : reg_addr[mode].reload_store);
19180
19181 if (REG_P (x) || register_operand (x, mode))
19182 {
19183 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19184 bool altivec_p = (rclass == ALTIVEC_REGS);
19185 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19186
19187 if (!in_p)
19188 std::swap (to_type, from_type);
19189
19190 /* Can we do a direct move of some sort? */
19191 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19192 altivec_p))
19193 {
19194 icode = (enum insn_code)sri->icode;
19195 default_p = false;
19196 done_p = true;
19197 ret = NO_REGS;
19198 }
19199 }
19200
19201 /* Make sure 0.0 is not reloaded or forced into memory. */
19202 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19203 {
19204 ret = NO_REGS;
19205 default_p = false;
19206 done_p = true;
19207 }
19208
19209 /* If this is a scalar floating point value and we want to load it into the
19210 traditional Altivec registers, do it via a traditional floating point
19211 register first, unless we have D-form addressing. Also make sure that
19212 non-zero constants use an FPR. */
19213 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19214 && !mode_supports_vmx_dform (mode)
19215 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19216 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19217 {
19218 ret = FLOAT_REGS;
19219 default_p = false;
19220 done_p = true;
19221 }
19222
19223 /* Handle reload of load/stores if we have reload helper functions. */
19224 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19225 {
19226 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19227 mode);
19228
19229 if (extra_cost >= 0)
19230 {
19231 done_p = true;
19232 ret = NO_REGS;
19233 if (extra_cost > 0)
19234 {
19235 sri->extra_cost = extra_cost;
19236 sri->icode = icode;
19237 }
19238 }
19239 }
19240
19241 /* Handle unaligned loads and stores of integer registers. */
19242 if (!done_p && TARGET_POWERPC64
19243 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19244 && memory_p
19245 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19246 {
19247 rtx addr = XEXP (x, 0);
19248 rtx off = address_offset (addr);
19249
19250 if (off != NULL_RTX)
19251 {
19252 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19253 unsigned HOST_WIDE_INT offset = INTVAL (off);
19254
19255 /* We need a secondary reload when our legitimate_address_p
19256 says the address is good (as otherwise the entire address
19257 will be reloaded), and the offset is not a multiple of
19258 four or we have an address wrap. Address wrap will only
19259 occur for LO_SUMs since legitimate_offset_address_p
19260 rejects addresses for 16-byte mems that will wrap. */
19261 if (GET_CODE (addr) == LO_SUM
19262 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19263 && ((offset & 3) != 0
19264 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19265 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19266 && (offset & 3) != 0))
19267 {
19268 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19269 if (in_p)
19270 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19271 : CODE_FOR_reload_di_load);
19272 else
19273 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19274 : CODE_FOR_reload_di_store);
19275 sri->extra_cost = 2;
19276 ret = NO_REGS;
19277 done_p = true;
19278 }
19279 else
19280 default_p = true;
19281 }
19282 else
19283 default_p = true;
19284 }
19285
19286 if (!done_p && !TARGET_POWERPC64
19287 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19288 && memory_p
19289 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19290 {
19291 rtx addr = XEXP (x, 0);
19292 rtx off = address_offset (addr);
19293
19294 if (off != NULL_RTX)
19295 {
19296 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19297 unsigned HOST_WIDE_INT offset = INTVAL (off);
19298
19299 /* We need a secondary reload when our legitimate_address_p
19300 says the address is good (as otherwise the entire address
19301 will be reloaded), and we have a wrap.
19302
19303 legitimate_lo_sum_address_p allows LO_SUM addresses to
19304 have any offset so test for wrap in the low 16 bits.
19305
19306 legitimate_offset_address_p checks for the range
19307 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19308 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19309 [0x7ff4,0x7fff] respectively, so test for the
19310 intersection of these ranges, [0x7ffc,0x7fff] and
19311 [0x7ff4,0x7ff7] respectively.
19312
19313 Note that the address we see here may have been
19314 manipulated by legitimize_reload_address. */
19315 if (GET_CODE (addr) == LO_SUM
19316 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19317 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19318 {
19319 if (in_p)
19320 sri->icode = CODE_FOR_reload_si_load;
19321 else
19322 sri->icode = CODE_FOR_reload_si_store;
19323 sri->extra_cost = 2;
19324 ret = NO_REGS;
19325 done_p = true;
19326 }
19327 else
19328 default_p = true;
19329 }
19330 else
19331 default_p = true;
19332 }
19333
19334 if (!done_p)
19335 default_p = true;
19336
19337 if (default_p)
19338 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19339
19340 gcc_assert (ret != ALL_REGS);
19341
19342 if (TARGET_DEBUG_ADDR)
19343 {
19344 fprintf (stderr,
19345 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19346 "mode = %s",
19347 reg_class_names[ret],
19348 in_p ? "true" : "false",
19349 reg_class_names[rclass],
19350 GET_MODE_NAME (mode));
19351
19352 if (reload_completed)
19353 fputs (", after reload", stderr);
19354
19355 if (!done_p)
19356 fputs (", done_p not set", stderr);
19357
19358 if (default_p)
19359 fputs (", default secondary reload", stderr);
19360
19361 if (sri->icode != CODE_FOR_nothing)
19362 fprintf (stderr, ", reload func = %s, extra cost = %d",
19363 insn_data[sri->icode].name, sri->extra_cost);
19364
19365 else if (sri->extra_cost > 0)
19366 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19367
19368 fputs ("\n", stderr);
19369 debug_rtx (x);
19370 }
19371
19372 return ret;
19373 }
19374
19375 /* Better tracing for rs6000_secondary_reload_inner. */
19376
19377 static void
19378 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19379 bool store_p)
19380 {
19381 rtx set, clobber;
19382
19383 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19384
19385 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19386 store_p ? "store" : "load");
19387
19388 if (store_p)
19389 set = gen_rtx_SET (mem, reg);
19390 else
19391 set = gen_rtx_SET (reg, mem);
19392
19393 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19394 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19395 }
19396
19397 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19398 ATTRIBUTE_NORETURN;
19399
19400 static void
19401 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19402 bool store_p)
19403 {
19404 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19405 gcc_unreachable ();
19406 }
19407
19408 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19409 reload helper functions. These were identified in
19410 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19411 reload, it calls the insns:
19412 reload_<RELOAD:mode>_<P:mptrsize>_store
19413 reload_<RELOAD:mode>_<P:mptrsize>_load
19414
19415 which in turn calls this function, to do whatever is necessary to create
19416 valid addresses. */
19417
19418 void
19419 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19420 {
19421 int regno = true_regnum (reg);
19422 machine_mode mode = GET_MODE (reg);
19423 addr_mask_type addr_mask;
19424 rtx addr;
19425 rtx new_addr;
19426 rtx op_reg, op0, op1;
19427 rtx and_op;
19428 rtx cc_clobber;
19429 rtvec rv;
19430
19431 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19432 || !base_reg_operand (scratch, GET_MODE (scratch)))
19433 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19434
19435 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19436 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19437
19438 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19439 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19440
19441 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19442 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19443
19444 else
19445 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19446
19447 /* Make sure the mode is valid in this register class. */
19448 if ((addr_mask & RELOAD_REG_VALID) == 0)
19449 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19450
19451 if (TARGET_DEBUG_ADDR)
19452 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19453
19454 new_addr = addr = XEXP (mem, 0);
19455 switch (GET_CODE (addr))
19456 {
19457 /* Does the register class support auto update forms for this mode? If
19458 not, do the update now. We don't need a scratch register, since the
19459 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19460 case PRE_INC:
19461 case PRE_DEC:
19462 op_reg = XEXP (addr, 0);
19463 if (!base_reg_operand (op_reg, Pmode))
19464 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19465
19466 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19467 {
19468 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
19469 new_addr = op_reg;
19470 }
19471 break;
19472
19473 case PRE_MODIFY:
19474 op0 = XEXP (addr, 0);
19475 op1 = XEXP (addr, 1);
19476 if (!base_reg_operand (op0, Pmode)
19477 || GET_CODE (op1) != PLUS
19478 || !rtx_equal_p (op0, XEXP (op1, 0)))
19479 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19480
19481 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19482 {
19483 emit_insn (gen_rtx_SET (op0, op1));
19484 new_addr = reg;
19485 }
19486 break;
19487
19488 /* Do we need to simulate AND -16 to clear the bottom address bits used
19489 in VMX load/stores? */
19490 case AND:
19491 op0 = XEXP (addr, 0);
19492 op1 = XEXP (addr, 1);
19493 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19494 {
19495 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
19496 op_reg = op0;
19497
19498 else if (GET_CODE (op1) == PLUS)
19499 {
19500 emit_insn (gen_rtx_SET (scratch, op1));
19501 op_reg = scratch;
19502 }
19503
19504 else
19505 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19506
19507 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19508 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19509 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19510 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19511 new_addr = scratch;
19512 }
19513 break;
19514
19515 /* If this is an indirect address, make sure it is a base register. */
19516 case REG:
19517 case SUBREG:
19518 if (!base_reg_operand (addr, GET_MODE (addr)))
19519 {
19520 emit_insn (gen_rtx_SET (scratch, addr));
19521 new_addr = scratch;
19522 }
19523 break;
19524
19525 /* If this is an indexed address, make sure the register class can handle
19526 indexed addresses for this mode. */
19527 case PLUS:
19528 op0 = XEXP (addr, 0);
19529 op1 = XEXP (addr, 1);
19530 if (!base_reg_operand (op0, Pmode))
19531 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19532
19533 else if (int_reg_operand (op1, Pmode))
19534 {
19535 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19536 {
19537 emit_insn (gen_rtx_SET (scratch, addr));
19538 new_addr = scratch;
19539 }
19540 }
19541
19542 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19543 {
19544 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19545 || !quad_address_p (addr, mode, false))
19546 {
19547 emit_insn (gen_rtx_SET (scratch, addr));
19548 new_addr = scratch;
19549 }
19550 }
19551
19552 /* Make sure the register class can handle offset addresses. */
19553 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19554 {
19555 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19556 {
19557 emit_insn (gen_rtx_SET (scratch, addr));
19558 new_addr = scratch;
19559 }
19560 }
19561
19562 else
19563 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19564
19565 break;
19566
19567 case LO_SUM:
19568 op0 = XEXP (addr, 0);
19569 op1 = XEXP (addr, 1);
19570 if (!base_reg_operand (op0, Pmode))
19571 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19572
19573 else if (int_reg_operand (op1, Pmode))
19574 {
19575 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19576 {
19577 emit_insn (gen_rtx_SET (scratch, addr));
19578 new_addr = scratch;
19579 }
19580 }
19581
19582 /* Quad offsets are restricted and can't handle normal addresses. */
19583 else if (mode_supports_dq_form (mode))
19584 {
19585 emit_insn (gen_rtx_SET (scratch, addr));
19586 new_addr = scratch;
19587 }
19588
19589 /* Make sure the register class can handle offset addresses. */
19590 else if (legitimate_lo_sum_address_p (mode, addr, false))
19591 {
19592 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19593 {
19594 emit_insn (gen_rtx_SET (scratch, addr));
19595 new_addr = scratch;
19596 }
19597 }
19598
19599 else
19600 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19601
19602 break;
19603
19604 case SYMBOL_REF:
19605 case CONST:
19606 case LABEL_REF:
19607 rs6000_emit_move (scratch, addr, Pmode);
19608 new_addr = scratch;
19609 break;
19610
19611 default:
19612 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19613 }
19614
19615 /* Adjust the address if it changed. */
19616 if (addr != new_addr)
19617 {
19618 mem = replace_equiv_address_nv (mem, new_addr);
19619 if (TARGET_DEBUG_ADDR)
19620 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19621 }
19622
19623 /* Now create the move. */
19624 if (store_p)
19625 emit_insn (gen_rtx_SET (mem, reg));
19626 else
19627 emit_insn (gen_rtx_SET (reg, mem));
19628
19629 return;
19630 }
19631
19632 /* Convert reloads involving 64-bit gprs and misaligned offset
19633 addressing, or multiple 32-bit gprs and offsets that are too large,
19634 to use indirect addressing. */
19635
19636 void
19637 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19638 {
19639 int regno = true_regnum (reg);
19640 enum reg_class rclass;
19641 rtx addr;
19642 rtx scratch_or_premodify = scratch;
19643
19644 if (TARGET_DEBUG_ADDR)
19645 {
19646 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19647 store_p ? "store" : "load");
19648 fprintf (stderr, "reg:\n");
19649 debug_rtx (reg);
19650 fprintf (stderr, "mem:\n");
19651 debug_rtx (mem);
19652 fprintf (stderr, "scratch:\n");
19653 debug_rtx (scratch);
19654 }
19655
19656 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
19657 gcc_assert (GET_CODE (mem) == MEM);
19658 rclass = REGNO_REG_CLASS (regno);
19659 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19660 addr = XEXP (mem, 0);
19661
19662 if (GET_CODE (addr) == PRE_MODIFY)
19663 {
19664 gcc_assert (REG_P (XEXP (addr, 0))
19665 && GET_CODE (XEXP (addr, 1)) == PLUS
19666 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19667 scratch_or_premodify = XEXP (addr, 0);
19668 if (!HARD_REGISTER_P (scratch_or_premodify))
19669 /* If we have a pseudo here then reload will have arranged
19670 to have it replaced, but only in the original insn.
19671 Use the replacement here too. */
19672 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
19673
19674 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
19675 expressions from the original insn, without unsharing them.
19676 Any RTL that points into the original insn will of course
19677 have register replacements applied. That is why we don't
19678 need to look for replacements under the PLUS. */
19679 addr = XEXP (addr, 1);
19680 }
19681 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19682
19683 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19684
19685 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19686
19687 /* Now create the move. */
19688 if (store_p)
19689 emit_insn (gen_rtx_SET (mem, reg));
19690 else
19691 emit_insn (gen_rtx_SET (reg, mem));
19692
19693 return;
19694 }
19695
19696 /* Given an rtx X being reloaded into a reg required to be
19697 in class CLASS, return the class of reg to actually use.
19698 In general this is just CLASS; but on some machines
19699 in some cases it is preferable to use a more restrictive class.
19700
19701 On the RS/6000, we have to return NO_REGS when we want to reload a
19702 floating-point CONST_DOUBLE to force it to be copied to memory.
19703
19704 We also don't want to reload integer values into floating-point
19705 registers if we can at all help it. In fact, this can
19706 cause reload to die, if it tries to generate a reload of CTR
19707 into a FP register and discovers it doesn't have the memory location
19708 required.
19709
19710 ??? Would it be a good idea to have reload do the converse, that is
19711 try to reload floating modes into FP registers if possible?
19712 */
19713
19714 static enum reg_class
19715 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19716 {
19717 machine_mode mode = GET_MODE (x);
19718 bool is_constant = CONSTANT_P (x);
19719
19720 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19721 reload class for it. */
19722 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19723 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19724 return NO_REGS;
19725
19726 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19727 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19728 return NO_REGS;
19729
19730 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19731 the reloading of address expressions using PLUS into floating point
19732 registers. */
19733 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19734 {
19735 if (is_constant)
19736 {
19737 /* Zero is always allowed in all VSX registers. */
19738 if (x == CONST0_RTX (mode))
19739 return rclass;
19740
19741 /* If this is a vector constant that can be formed with a few Altivec
19742 instructions, we want altivec registers. */
19743 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19744 return ALTIVEC_REGS;
19745
19746 /* If this is an integer constant that can easily be loaded into
19747 vector registers, allow it. */
19748 if (CONST_INT_P (x))
19749 {
19750 HOST_WIDE_INT value = INTVAL (x);
19751
19752 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
19753 2.06 can generate it in the Altivec registers with
19754 VSPLTI<x>. */
19755 if (value == -1)
19756 {
19757 if (TARGET_P8_VECTOR)
19758 return rclass;
19759 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19760 return ALTIVEC_REGS;
19761 else
19762 return NO_REGS;
19763 }
19764
19765 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
19766 a sign extend in the Altivec registers. */
19767 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
19768 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
19769 return ALTIVEC_REGS;
19770 }
19771
19772 /* Force constant to memory. */
19773 return NO_REGS;
19774 }
19775
19776 /* D-form addressing can easily reload the value. */
19777 if (mode_supports_vmx_dform (mode)
19778 || mode_supports_dq_form (mode))
19779 return rclass;
19780
19781 /* If this is a scalar floating point value and we don't have D-form
19782 addressing, prefer the traditional floating point registers so that we
19783 can use D-form (register+offset) addressing. */
19784 if (rclass == VSX_REGS
19785 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
19786 return FLOAT_REGS;
19787
19788 /* Prefer the Altivec registers if Altivec is handling the vector
19789 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
19790 loads. */
19791 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
19792 || mode == V1TImode)
19793 return ALTIVEC_REGS;
19794
19795 return rclass;
19796 }
19797
19798 if (is_constant || GET_CODE (x) == PLUS)
19799 {
19800 if (reg_class_subset_p (GENERAL_REGS, rclass))
19801 return GENERAL_REGS;
19802 if (reg_class_subset_p (BASE_REGS, rclass))
19803 return BASE_REGS;
19804 return NO_REGS;
19805 }
19806
19807 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
19808 return GENERAL_REGS;
19809
19810 return rclass;
19811 }
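
/* A few examples of the preferences above when RCLASS is VSX_REGS:
   zero in V4SImode stays in VSX_REGS (zero is valid everywhere); an
   easy vector constant such as all ones prefers ALTIVEC_REGS; a DFmode
   value without D-form addressing prefers FLOAT_REGS so that reg+offset
   loads and stores can be used. */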
19812
19813 /* Debug version of rs6000_preferred_reload_class. */
19814 static enum reg_class
19815 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
19816 {
19817 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
19818
19819 fprintf (stderr,
19820 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
19821 "mode = %s, x:\n",
19822 reg_class_names[ret], reg_class_names[rclass],
19823 GET_MODE_NAME (GET_MODE (x)));
19824 debug_rtx (x);
19825
19826 return ret;
19827 }
19828
19829 /* If we are copying between FP or AltiVec registers and anything else, we need
19830 a memory location. The exception is when we are targeting ppc64 and the
19831 move to/from fpr to gpr instructions are available. Also, under VSX, you
19832 can copy vector registers from the FP register set to the Altivec register
19833 set and vice versa. */
19834
19835 static bool
19836 rs6000_secondary_memory_needed (machine_mode mode,
19837 reg_class_t from_class,
19838 reg_class_t to_class)
19839 {
19840 enum rs6000_reg_type from_type, to_type;
19841 bool altivec_p = ((from_class == ALTIVEC_REGS)
19842 || (to_class == ALTIVEC_REGS));
19843
19844 /* If a simple/direct move is available, we don't need secondary memory. */
19845 from_type = reg_class_to_reg_type[(int)from_class];
19846 to_type = reg_class_to_reg_type[(int)to_class];
19847
19848 if (rs6000_secondary_reload_move (to_type, from_type, mode,
19849 (secondary_reload_info *)0, altivec_p))
19850 return false;
19851
19852 /* If we have a floating point or vector register class, we need to use
19853 memory to transfer the data. */
19854 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
19855 return true;
19856
19857 return false;
19858 }
19859
19860 /* Debug version of rs6000_secondary_memory_needed. */
19861 static bool
19862 rs6000_debug_secondary_memory_needed (machine_mode mode,
19863 reg_class_t from_class,
19864 reg_class_t to_class)
19865 {
19866 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
19867
19868 fprintf (stderr,
19869 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
19870 "to_class = %s, mode = %s\n",
19871 ret ? "true" : "false",
19872 reg_class_names[from_class],
19873 reg_class_names[to_class],
19874 GET_MODE_NAME (mode));
19875
19876 return ret;
19877 }
19878
19879 /* Return the register class of a scratch register needed to copy IN into
19880 or out of a register in RCLASS in MODE. If it can be done directly,
19881 NO_REGS is returned. */
19882
19883 static enum reg_class
19884 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
19885 rtx in)
19886 {
19887 int regno;
19888
19889 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
19890 #if TARGET_MACHO
19891 && MACHOPIC_INDIRECT
19892 #endif
19893 ))
19894 {
19895 /* We cannot copy a symbolic operand directly into anything
19896 other than BASE_REGS for TARGET_ELF. So indicate that a
19897 register from BASE_REGS is needed as an intermediate
19898 register.
19899
19900 On Darwin, pic addresses require a load from memory, which
19901 needs a base register. */
19902 if (rclass != BASE_REGS
19903 && (GET_CODE (in) == SYMBOL_REF
19904 || GET_CODE (in) == HIGH
19905 || GET_CODE (in) == LABEL_REF
19906 || GET_CODE (in) == CONST))
19907 return BASE_REGS;
19908 }
19909
19910 if (GET_CODE (in) == REG)
19911 {
19912 regno = REGNO (in);
19913 if (regno >= FIRST_PSEUDO_REGISTER)
19914 {
19915 regno = true_regnum (in);
19916 if (regno >= FIRST_PSEUDO_REGISTER)
19917 regno = -1;
19918 }
19919 }
19920 else if (GET_CODE (in) == SUBREG)
19921 {
19922 regno = true_regnum (in);
19923 if (regno >= FIRST_PSEUDO_REGISTER)
19924 regno = -1;
19925 }
19926 else
19927 regno = -1;
19928
19929 /* If we have VSX register moves, prefer moving scalar values between
19930 Altivec registers and GPR by going via an FPR (and then via memory)
19931 instead of reloading the secondary memory address for Altivec moves. */
19932 if (TARGET_VSX
19933 && GET_MODE_SIZE (mode) < 16
19934 && !mode_supports_vmx_dform (mode)
19935 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
19936 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
19937 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19938 && (regno >= 0 && INT_REGNO_P (regno)))))
19939 return FLOAT_REGS;
19940
19941 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
19942 into anything. */
19943 if (rclass == GENERAL_REGS || rclass == BASE_REGS
19944 || (regno >= 0 && INT_REGNO_P (regno)))
19945 return NO_REGS;
19946
19947 /* Constants, memory, and VSX registers can go into VSX registers (both the
19948 traditional floating point and the altivec registers). */
19949 if (rclass == VSX_REGS
19950 && (regno == -1 || VSX_REGNO_P (regno)))
19951 return NO_REGS;
19952
19953 /* Constants, memory, and FP registers can go into FP registers. */
19954 if ((regno == -1 || FP_REGNO_P (regno))
19955 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
19956 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
19957
19958 /* Memory, and AltiVec registers can go into AltiVec registers. */
19959 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
19960 && rclass == ALTIVEC_REGS)
19961 return NO_REGS;
19962
19963 /* We can copy among the CR registers. */
19964 if ((rclass == CR_REGS || rclass == CR0_REGS)
19965 && regno >= 0 && CR_REGNO_P (regno))
19966 return NO_REGS;
19967
19968 /* Otherwise, we need GENERAL_REGS. */
19969 return GENERAL_REGS;
19970 }
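
/* For example, on ELF targets copying a SYMBOL_REF into FLOAT_REGS
   needs a BASE_REGS scratch; a copy between two GPRs, or a load of
   memory into VSX_REGS, needs no scratch and returns NO_REGS. */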
19971
19972 /* Debug version of rs6000_secondary_reload_class. */
19973 static enum reg_class
19974 rs6000_debug_secondary_reload_class (enum reg_class rclass,
19975 machine_mode mode, rtx in)
19976 {
19977 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
19978 fprintf (stderr,
19979 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
19980 "mode = %s, input rtx:\n",
19981 reg_class_names[ret], reg_class_names[rclass],
19982 GET_MODE_NAME (mode));
19983 debug_rtx (in);
19984
19985 return ret;
19986 }
19987
19988 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
19989
19990 static bool
19991 rs6000_can_change_mode_class (machine_mode from,
19992 machine_mode to,
19993 reg_class_t rclass)
19994 {
19995 unsigned from_size = GET_MODE_SIZE (from);
19996 unsigned to_size = GET_MODE_SIZE (to);
19997
19998 if (from_size != to_size)
19999 {
20000 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20001
20002 if (reg_classes_intersect_p (xclass, rclass))
20003 {
20004 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20005 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20006 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20007 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20008
20009 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20010 single register under VSX because the scalar part of the register
20011 is in the upper 64-bits, and not the lower 64-bits. Types like
20012 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20013 IEEE floating point can't overlap, and neither can small
20014 values. */
20015
20016 if (to_float128_vector_p && from_float128_vector_p)
20017 return true;
20018
20019 else if (to_float128_vector_p || from_float128_vector_p)
20020 return false;
20021
20022 /* TDmode in floating-mode registers must always go into a register
20023 pair with the most significant word in the even-numbered register
20024 to match ISA requirements. In little-endian mode, this does not
20025 match subreg numbering, so we cannot allow subregs. */
20026 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20027 return false;
20028
20029 if (from_size < 8 || to_size < 8)
20030 return false;
20031
20032 if (from_size == 8 && (8 * to_nregs) != to_size)
20033 return false;
20034
20035 if (to_size == 8 && (8 * from_nregs) != from_size)
20036 return false;
20037
20038 return true;
20039 }
20040 else
20041 return true;
20042 }
20043
20044 /* Since the VSX register set includes traditional floating point registers
20045 and altivec registers, just check for the size being different instead of
20046 trying to check whether the modes are vector modes. Otherwise it won't
20047 allow say DF and DI to change classes. For types like TFmode and TDmode
20048 that take 2 64-bit registers, rather than a single 128-bit register, don't
20049 allow subregs of those types to other 128 bit types. */
20050 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20051 {
20052 unsigned num_regs = (from_size + 15) / 16;
20053 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20054 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20055 return false;
20056
20057 return (from_size == 8 || from_size == 16);
20058 }
20059
20060 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20061 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20062 return false;
20063
20064 return true;
20065 }
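
/* For example, under VSX a DFmode/DImode subreg is allowed (both modes
   occupy 8 bytes of a single register), while a DImode subreg of a
   TImode value in a VSX register is rejected: the sizes differ and
   TImode occupies a single 128-bit register, so it fails the
   register-count check above. */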
20066
20067 /* Debug version of rs6000_can_change_mode_class. */
20068 static bool
20069 rs6000_debug_can_change_mode_class (machine_mode from,
20070 machine_mode to,
20071 reg_class_t rclass)
20072 {
20073 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20074
20075 fprintf (stderr,
20076 "rs6000_can_change_mode_class, return %s, from = %s, "
20077 "to = %s, rclass = %s\n",
20078 ret ? "true" : "false",
20079 GET_MODE_NAME (from), GET_MODE_NAME (to),
20080 reg_class_names[rclass]);
20081
20082 return ret;
20083 }
20084 \f
20085 /* Return a string to do a move operation of 128 bits of data. */
20086
20087 const char *
20088 rs6000_output_move_128bit (rtx operands[])
20089 {
20090 rtx dest = operands[0];
20091 rtx src = operands[1];
20092 machine_mode mode = GET_MODE (dest);
20093 int dest_regno;
20094 int src_regno;
20095 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20096 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20097
20098 if (REG_P (dest))
20099 {
20100 dest_regno = REGNO (dest);
20101 dest_gpr_p = INT_REGNO_P (dest_regno);
20102 dest_fp_p = FP_REGNO_P (dest_regno);
20103 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20104 dest_vsx_p = dest_fp_p | dest_vmx_p;
20105 }
20106 else
20107 {
20108 dest_regno = -1;
20109 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20110 }
20111
20112 if (REG_P (src))
20113 {
20114 src_regno = REGNO (src);
20115 src_gpr_p = INT_REGNO_P (src_regno);
20116 src_fp_p = FP_REGNO_P (src_regno);
20117 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20118 src_vsx_p = src_fp_p | src_vmx_p;
20119 }
20120 else
20121 {
20122 src_regno = -1;
20123 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20124 }
20125
20126 /* Register moves. */
20127 if (dest_regno >= 0 && src_regno >= 0)
20128 {
20129 if (dest_gpr_p)
20130 {
20131 if (src_gpr_p)
20132 return "#";
20133
20134 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20135 return (WORDS_BIG_ENDIAN
20136 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20137 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20138
20139 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20140 return "#";
20141 }
20142
20143 else if (TARGET_VSX && dest_vsx_p)
20144 {
20145 if (src_vsx_p)
20146 return "xxlor %x0,%x1,%x1";
20147
20148 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20149 return (WORDS_BIG_ENDIAN
20150 ? "mtvsrdd %x0,%1,%L1"
20151 : "mtvsrdd %x0,%L1,%1");
20152
20153 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20154 return "#";
20155 }
20156
20157 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20158 return "vor %0,%1,%1";
20159
20160 else if (dest_fp_p && src_fp_p)
20161 return "#";
20162 }
20163
20164 /* Loads. */
20165 else if (dest_regno >= 0 && MEM_P (src))
20166 {
20167 if (dest_gpr_p)
20168 {
20169 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20170 return "lq %0,%1";
20171 else
20172 return "#";
20173 }
20174
20175 else if (TARGET_ALTIVEC && dest_vmx_p
20176 && altivec_indexed_or_indirect_operand (src, mode))
20177 return "lvx %0,%y1";
20178
20179 else if (TARGET_VSX && dest_vsx_p)
20180 {
20181 if (mode_supports_dq_form (mode)
20182 && quad_address_p (XEXP (src, 0), mode, true))
20183 return "lxv %x0,%1";
20184
20185 else if (TARGET_P9_VECTOR)
20186 return "lxvx %x0,%y1";
20187
20188 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20189 return "lxvw4x %x0,%y1";
20190
20191 else
20192 return "lxvd2x %x0,%y1";
20193 }
20194
20195 else if (TARGET_ALTIVEC && dest_vmx_p)
20196 return "lvx %0,%y1";
20197
20198 else if (dest_fp_p)
20199 return "#";
20200 }
20201
20202 /* Stores. */
20203 else if (src_regno >= 0 && MEM_P (dest))
20204 {
20205 if (src_gpr_p)
20206 {
20207 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20208 return "stq %1,%0";
20209 else
20210 return "#";
20211 }
20212
20213 else if (TARGET_ALTIVEC && src_vmx_p
20214 && altivec_indexed_or_indirect_operand (dest, mode))
20215 return "stvx %1,%y0";
20216
20217 else if (TARGET_VSX && src_vsx_p)
20218 {
20219 if (mode_supports_dq_form (mode)
20220 && quad_address_p (XEXP (dest, 0), mode, true))
20221 return "stxv %x1,%0";
20222
20223 else if (TARGET_P9_VECTOR)
20224 return "stxvx %x1,%y0";
20225
20226 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20227 return "stxvw4x %x1,%y0";
20228
20229 else
20230 return "stxvd2x %x1,%y0";
20231 }
20232
20233 else if (TARGET_ALTIVEC && src_vmx_p)
20234 return "stvx %1,%y0";
20235
20236 else if (src_fp_p)
20237 return "#";
20238 }
20239
20240 /* Constants. */
20241 else if (dest_regno >= 0
20242 && (GET_CODE (src) == CONST_INT
20243 || GET_CODE (src) == CONST_WIDE_INT
20244 || GET_CODE (src) == CONST_DOUBLE
20245 || GET_CODE (src) == CONST_VECTOR))
20246 {
20247 if (dest_gpr_p)
20248 return "#";
20249
20250 else if ((dest_vmx_p && TARGET_ALTIVEC)
20251 || (dest_vsx_p && TARGET_VSX))
20252 return output_vec_const_move (operands);
20253 }
20254
20255 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20256 }
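
/* Illustrative examples of the returned templates (a sketch, assuming
   suitable operands): a V2DImode copy between two VSX registers yields
   "xxlor %x0,%x1,%x1"; a GPR <- VSX direct move with
   TARGET_DIRECT_MOVE_128 on a big-endian target yields
   "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"; and a GPR <-> GPR copy yields "#",
   so that the move is split into word-sized pieces later.  */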
20257
20258 /* Validate a 128-bit move. */
20259 bool
20260 rs6000_move_128bit_ok_p (rtx operands[])
20261 {
20262 machine_mode mode = GET_MODE (operands[0]);
20263 return (gpc_reg_operand (operands[0], mode)
20264 || gpc_reg_operand (operands[1], mode));
20265 }
20266
20267 /* Return true if a 128-bit move needs to be split. */
20268 bool
20269 rs6000_split_128bit_ok_p (rtx operands[])
20270 {
20271 if (!reload_completed)
20272 return false;
20273
20274 if (!gpr_or_gpr_p (operands[0], operands[1]))
20275 return false;
20276
20277 if (quad_load_store_p (operands[0], operands[1]))
20278 return false;
20279
20280 return true;
20281 }
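
/* Example (illustrative): after reload, a TImode copy between two GPR
   pairs (printed as "#" by rs6000_output_move_128bit) must be split into
   doubleword moves, while an access that qualifies for lq/stq via
   quad_load_store_p is left whole.  */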
20282
20283 \f
20284 /* Given a comparison operation, return the bit number in CCR to test.
20285 OP is normally a valid comparison against a CR register.
20286
20287 SCC_P is 1 if this is for an scc. That means that %D will have been
20288 used instead of %C, so the bits will be in different places.
20289
20290 Return -1 if OP is not a comparison RTX at all. */
20291
20292 int
20293 ccr_bit (rtx op, int scc_p)
20294 {
20295 enum rtx_code code = GET_CODE (op);
20296 machine_mode cc_mode;
20297 int cc_regnum;
20298 int base_bit;
20299 rtx reg;
20300
20301 if (!COMPARISON_P (op))
20302 return -1;
20303
20304 reg = XEXP (op, 0);
20305
20306 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20307
20308 cc_mode = GET_MODE (reg);
20309 cc_regnum = REGNO (reg);
20310 base_bit = 4 * (cc_regnum - CR0_REGNO);
20311
20312 validate_condition_mode (code, cc_mode);
20313
20314 /* When generating a sCOND operation, only positive conditions are
20315 allowed. */
20316 gcc_assert (!scc_p
20317 || code == EQ || code == GT || code == LT || code == UNORDERED
20318 || code == GTU || code == LTU);
20319
20320 switch (code)
20321 {
20322 case NE:
20323 return scc_p ? base_bit + 3 : base_bit + 2;
20324 case EQ:
20325 return base_bit + 2;
20326 case GT: case GTU: case UNLE:
20327 return base_bit + 1;
20328 case LT: case LTU: case UNGE:
20329 return base_bit;
20330 case ORDERED: case UNORDERED:
20331 return base_bit + 3;
20332
20333 case GE: case GEU:
20334 /* If scc, we will have done a cror to put the bit in the
20335 unordered position. So test that bit. For integer, this is ! LT
20336 unless this is an scc insn. */
20337 return scc_p ? base_bit + 3 : base_bit;
20338
20339 case LE: case LEU:
20340 return scc_p ? base_bit + 3 : base_bit + 1;
20341
20342 default:
20343 gcc_unreachable ();
20344 }
20345 }
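
/* Worked example of the mapping above (illustration only): if the
   comparison result lives in CR field 2, base_bit is 8, so LT tests bit
   8, GT tests bit 9, and EQ tests bit 10.  For an scc GE, a cror has
   already copied the answer into the "unordered" slot, so bit 11
   (base_bit + 3) is tested instead.  */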
20346 \f
20347 /* Return the GOT register. */
20348
20349 rtx
20350 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20351 {
20352 /* The second flow pass currently (June 1999) can't update
20353 regs_ever_live without disturbing other parts of the compiler, so
20354 update it here to make the prolog/epilogue code happy. */
20355 if (!can_create_pseudo_p ()
20356 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20357 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20358
20359 crtl->uses_pic_offset_table = 1;
20360
20361 return pic_offset_table_rtx;
20362 }
20363 \f
20364 static rs6000_stack_t stack_info;
20365
20366 /* Function to init struct machine_function.
20367 This will be called, via a pointer variable,
20368 from push_function_context. */
20369
20370 static struct machine_function *
20371 rs6000_init_machine_status (void)
20372 {
20373 stack_info.reload_completed = 0;
20374 return ggc_cleared_alloc<machine_function> ();
20375 }
20376 \f
20377 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20378
20379 /* Write out a function code label. */
20380
20381 void
20382 rs6000_output_function_entry (FILE *file, const char *fname)
20383 {
20384 if (fname[0] != '.')
20385 {
20386 switch (DEFAULT_ABI)
20387 {
20388 default:
20389 gcc_unreachable ();
20390
20391 case ABI_AIX:
20392 if (DOT_SYMBOLS)
20393 putc ('.', file);
20394 else
20395 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20396 break;
20397
20398 case ABI_ELFv2:
20399 case ABI_V4:
20400 case ABI_DARWIN:
20401 break;
20402 }
20403 }
20404
20405 RS6000_OUTPUT_BASENAME (file, fname);
20406 }
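
/* For example (illustrative): under ABI_AIX with DOT_SYMBOLS, "foo" is
   emitted as ".foo", the traditional entry point symbol distinct from the
   "foo" descriptor; under ABI_ELFv2, ABI_V4 and ABI_DARWIN the name is
   emitted unchanged.  */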
20407
20408 /* Print an operand. Recognize special options, documented below. */
20409
20410 #if TARGET_ELF
20411 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20412 only introduced by the linker, when applying the sda21
20413 relocation. */
20414 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20415 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20416 #else
20417 #define SMALL_DATA_RELOC "sda21"
20418 #define SMALL_DATA_REG 0
20419 #endif
20420
20421 void
20422 print_operand (FILE *file, rtx x, int code)
20423 {
20424 int i;
20425 unsigned HOST_WIDE_INT uval;
20426
20427 switch (code)
20428 {
20429 /* %a is output_address. */
20430
20431 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20432 output_operand. */
20433
20434 case 'D':
20435 /* Like 'J' but get to the GT bit only. */
20436 gcc_assert (REG_P (x));
20437
20438 /* Bit 1 is GT bit. */
20439 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20440
20441 /* Add one for shift count in rlinm for scc. */
20442 fprintf (file, "%d", i + 1);
20443 return;
20444
20445 case 'e':
20446 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20447 if (! INT_P (x))
20448 {
20449 output_operand_lossage ("invalid %%e value");
20450 return;
20451 }
20452
20453 uval = INTVAL (x);
20454 if ((uval & 0xffff) == 0 && uval != 0)
20455 putc ('s', file);
20456 return;
20457
20458 case 'E':
20459 /* X is a CR register. Print the number of the EQ bit of the CR. */
20460 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20461 output_operand_lossage ("invalid %%E value");
20462 else
20463 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20464 return;
20465
20466 case 'f':
20467 /* X is a CR register. Print the shift count needed to move it
20468 to the high-order four bits. */
20469 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20470 output_operand_lossage ("invalid %%f value");
20471 else
20472 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20473 return;
20474
20475 case 'F':
20476 /* Similar, but print the count for the rotate in the opposite
20477 direction. */
20478 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20479 output_operand_lossage ("invalid %%F value");
20480 else
20481 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20482 return;
20483
20484 case 'G':
20485 /* X is a constant integer. If it is negative, print "m",
20486 otherwise print "z". This is to make an aze or ame insn. */
20487 if (GET_CODE (x) != CONST_INT)
20488 output_operand_lossage ("invalid %%G value");
20489 else if (INTVAL (x) >= 0)
20490 putc ('z', file);
20491 else
20492 putc ('m', file);
20493 return;
20494
20495 case 'h':
20496 /* If constant, output low-order five bits. Otherwise, write
20497 normally. */
20498 if (INT_P (x))
20499 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20500 else
20501 print_operand (file, x, 0);
20502 return;
20503
20504 case 'H':
20505 /* If constant, output low-order six bits. Otherwise, write
20506 normally. */
20507 if (INT_P (x))
20508 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20509 else
20510 print_operand (file, x, 0);
20511 return;
20512
20513 case 'I':
20514 /* Print `i' if this is a constant, else nothing. */
20515 if (INT_P (x))
20516 putc ('i', file);
20517 return;
20518
20519 case 'j':
20520 /* Write the bit number in CCR for jump. */
20521 i = ccr_bit (x, 0);
20522 if (i == -1)
20523 output_operand_lossage ("invalid %%j code");
20524 else
20525 fprintf (file, "%d", i);
20526 return;
20527
20528 case 'J':
20529 /* Similar, but add one for shift count in rlinm for scc and pass
20530 scc flag to `ccr_bit'. */
20531 i = ccr_bit (x, 1);
20532 if (i == -1)
20533 output_operand_lossage ("invalid %%J code");
20534 else
20535 /* If we want bit 31, write a shift count of zero, not 32. */
20536 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20537 return;
20538
20539 case 'k':
20540 /* X must be a constant. Write the 1's complement of the
20541 constant. */
20542 if (! INT_P (x))
20543 output_operand_lossage ("invalid %%k value");
20544 else
20545 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20546 return;
20547
20548 case 'K':
20549 /* X must be a symbolic constant on ELF. Write an
20550 expression suitable for an 'addi' that adds in the low 16
20551 bits of the MEM. */
20552 if (GET_CODE (x) == CONST)
20553 {
20554 if (GET_CODE (XEXP (x, 0)) != PLUS
20555 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
20556 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20557 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
20558 output_operand_lossage ("invalid %%K value");
20559 }
20560 print_operand_address (file, x);
20561 fputs ("@l", file);
20562 return;
20563
20564 /* %l is output_asm_label. */
20565
20566 case 'L':
20567 /* Write second word of DImode or DFmode reference. Works on register
20568 or non-indexed memory only. */
20569 if (REG_P (x))
20570 fputs (reg_names[REGNO (x) + 1], file);
20571 else if (MEM_P (x))
20572 {
20573 machine_mode mode = GET_MODE (x);
20574 /* Handle possible auto-increment. Since the increment has already
20575 been applied, we can just use an offset of one word. */
20576 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20577 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20578 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20579 UNITS_PER_WORD));
20580 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20581 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20582 UNITS_PER_WORD));
20583 else
20584 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20585 UNITS_PER_WORD),
20586 0));
20587
20588 if (small_data_operand (x, GET_MODE (x)))
20589 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20590 reg_names[SMALL_DATA_REG]);
20591 }
20592 return;
20593
20594 case 'N': /* Unused */
20595 /* Write the number of elements in the vector times 4. */
20596 if (GET_CODE (x) != PARALLEL)
20597 output_operand_lossage ("invalid %%N value");
20598 else
20599 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20600 return;
20601
20602 case 'O': /* Unused */
20603 /* Similar, but subtract 1 first. */
20604 if (GET_CODE (x) != PARALLEL)
20605 output_operand_lossage ("invalid %%O value");
20606 else
20607 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20608 return;
20609
20610 case 'p':
20611 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20612 if (! INT_P (x)
20613 || INTVAL (x) < 0
20614 || (i = exact_log2 (INTVAL (x))) < 0)
20615 output_operand_lossage ("invalid %%p value");
20616 else
20617 fprintf (file, "%d", i);
20618 return;
20619
20620 case 'P':
20621 /* The operand must be an indirect memory reference. The result
20622 is the register name. */
20623 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
20624 || REGNO (XEXP (x, 0)) >= 32)
20625 output_operand_lossage ("invalid %%P value");
20626 else
20627 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20628 return;
20629
20630 case 'q':
20631 /* This outputs the logical code corresponding to a boolean
20632 expression. The expression may have one or both operands
20633 negated (if one, only the first one). For condition register
20634 logical operations, it will also treat the negated
20635 CR codes as NOTs, but not handle NOTs of them. */
20636 {
20637 const char *const *t = 0;
20638 const char *s;
20639 enum rtx_code code = GET_CODE (x);
20640 static const char * const tbl[3][3] = {
20641 { "and", "andc", "nor" },
20642 { "or", "orc", "nand" },
20643 { "xor", "eqv", "xor" } };
20644
20645 if (code == AND)
20646 t = tbl[0];
20647 else if (code == IOR)
20648 t = tbl[1];
20649 else if (code == XOR)
20650 t = tbl[2];
20651 else
20652 output_operand_lossage ("invalid %%q value");
20653
20654 if (GET_CODE (XEXP (x, 0)) != NOT)
20655 s = t[0];
20656 else
20657 {
20658 if (GET_CODE (XEXP (x, 1)) == NOT)
20659 s = t[2];
20660 else
20661 s = t[1];
20662 }
20663
20664 fputs (s, file);
20665 }
20666 return;
20667
20668 case 'Q':
20669 if (! TARGET_MFCRF)
20670 return;
20671 fputc (',', file);
20672 /* FALLTHRU */
20673
20674 case 'R':
20675 /* X is a CR register. Print the mask for `mtcrf'. */
20676 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20677 output_operand_lossage ("invalid %%R value");
20678 else
20679 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20680 return;
20681
20682 case 's':
20683 /* Low 5 bits of 32 - value. */
20684 if (! INT_P (x))
20685 output_operand_lossage ("invalid %%s value");
20686 else
20687 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20688 return;
20689
20690 case 't':
20691 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20692 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
20693
20694 /* Bit 3 is OV bit. */
20695 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20696
20697 /* If we want bit 31, write a shift count of zero, not 32. */
20698 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20699 return;
20700
20701 case 'T':
20702 /* Print the symbolic name of a branch target register. */
20703 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
20704 && REGNO (x) != CTR_REGNO))
20705 output_operand_lossage ("invalid %%T value");
20706 else if (REGNO (x) == LR_REGNO)
20707 fputs ("lr", file);
20708 else
20709 fputs ("ctr", file);
20710 return;
20711
20712 case 'u':
20713 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20714 for use in unsigned operand. */
20715 if (! INT_P (x))
20716 {
20717 output_operand_lossage ("invalid %%u value");
20718 return;
20719 }
20720
20721 uval = INTVAL (x);
20722 if ((uval & 0xffff) == 0)
20723 uval >>= 16;
20724
20725 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20726 return;
20727
20728 case 'v':
20729 /* High-order 16 bits of constant for use in signed operand. */
20730 if (! INT_P (x))
20731 output_operand_lossage ("invalid %%v value");
20732 else
20733 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20734 (INTVAL (x) >> 16) & 0xffff);
20735 return;
20736
20737 case 'U':
20738 /* Print `u' if this has an auto-increment or auto-decrement. */
20739 if (MEM_P (x)
20740 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20741 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20742 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
20743 putc ('u', file);
20744 return;
20745
20746 case 'V':
20747 /* Print the trap code for this operand. */
20748 switch (GET_CODE (x))
20749 {
20750 case EQ:
20751 fputs ("eq", file); /* 4 */
20752 break;
20753 case NE:
20754 fputs ("ne", file); /* 24 */
20755 break;
20756 case LT:
20757 fputs ("lt", file); /* 16 */
20758 break;
20759 case LE:
20760 fputs ("le", file); /* 20 */
20761 break;
20762 case GT:
20763 fputs ("gt", file); /* 8 */
20764 break;
20765 case GE:
20766 fputs ("ge", file); /* 12 */
20767 break;
20768 case LTU:
20769 fputs ("llt", file); /* 2 */
20770 break;
20771 case LEU:
20772 fputs ("lle", file); /* 6 */
20773 break;
20774 case GTU:
20775 fputs ("lgt", file); /* 1 */
20776 break;
20777 case GEU:
20778 fputs ("lge", file); /* 5 */
20779 break;
20780 default:
20781 gcc_unreachable ();
20782 }
20783 break;
20784
20785 case 'w':
20786 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
20787 normally. */
20788 if (INT_P (x))
20789 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
20790 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
20791 else
20792 print_operand (file, x, 0);
20793 return;
20794
20795 case 'x':
20796 /* X is a FPR or Altivec register used in a VSX context. */
20797 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
20798 output_operand_lossage ("invalid %%x value");
20799 else
20800 {
20801 int reg = REGNO (x);
20802 int vsx_reg = (FP_REGNO_P (reg)
20803 ? reg - 32
20804 : reg - FIRST_ALTIVEC_REGNO + 32);
20805
20806 #ifdef TARGET_REGNAMES
20807 if (TARGET_REGNAMES)
20808 fprintf (file, "%%vs%d", vsx_reg);
20809 else
20810 #endif
20811 fprintf (file, "%d", vsx_reg);
20812 }
20813 return;
20814
20815 case 'X':
20816 if (MEM_P (x)
20817 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
20818 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
20819 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
20820 putc ('x', file);
20821 return;
20822
20823 case 'Y':
20824 /* Like 'L', for third word of TImode/PTImode */
20825 if (REG_P (x))
20826 fputs (reg_names[REGNO (x) + 2], file);
20827 else if (MEM_P (x))
20828 {
20829 machine_mode mode = GET_MODE (x);
20830 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20831 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20832 output_address (mode, plus_constant (Pmode,
20833 XEXP (XEXP (x, 0), 0), 8));
20834 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20835 output_address (mode, plus_constant (Pmode,
20836 XEXP (XEXP (x, 0), 0), 8));
20837 else
20838 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
20839 if (small_data_operand (x, GET_MODE (x)))
20840 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20841 reg_names[SMALL_DATA_REG]);
20842 }
20843 return;
20844
20845 case 'z':
20846 /* X is a SYMBOL_REF. Write out the name preceded by a
20847 period and without any trailing data in brackets. Used for function
20848 names. If we are configured for System V (or the embedded ABI) on
20849 the PowerPC, do not emit the period, since those systems do not use
20850 TOCs and the like. */
20851 gcc_assert (GET_CODE (x) == SYMBOL_REF);
20852
20853 /* For macho, check to see if we need a stub. */
20854 if (TARGET_MACHO)
20855 {
20856 const char *name = XSTR (x, 0);
20857 #if TARGET_MACHO
20858 if (darwin_emit_branch_islands
20859 && MACHOPIC_INDIRECT
20860 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
20861 name = machopic_indirection_name (x, /*stub_p=*/true);
20862 #endif
20863 assemble_name (file, name);
20864 }
20865 else if (!DOT_SYMBOLS)
20866 assemble_name (file, XSTR (x, 0));
20867 else
20868 rs6000_output_function_entry (file, XSTR (x, 0));
20869 return;
20870
20871 case 'Z':
20872 /* Like 'L', for last word of TImode/PTImode. */
20873 if (REG_P (x))
20874 fputs (reg_names[REGNO (x) + 3], file);
20875 else if (MEM_P (x))
20876 {
20877 machine_mode mode = GET_MODE (x);
20878 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20879 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20880 output_address (mode, plus_constant (Pmode,
20881 XEXP (XEXP (x, 0), 0), 12));
20882 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20883 output_address (mode, plus_constant (Pmode,
20884 XEXP (XEXP (x, 0), 0), 12));
20885 else
20886 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
20887 if (small_data_operand (x, GET_MODE (x)))
20888 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20889 reg_names[SMALL_DATA_REG]);
20890 }
20891 return;
20892
20893 /* Print AltiVec memory operand. */
20894 case 'y':
20895 {
20896 rtx tmp;
20897
20898 gcc_assert (MEM_P (x));
20899
20900 tmp = XEXP (x, 0);
20901
20902 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
20903 && GET_CODE (tmp) == AND
20904 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
20905 && INTVAL (XEXP (tmp, 1)) == -16)
20906 tmp = XEXP (tmp, 0);
20907 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
20908 && GET_CODE (tmp) == PRE_MODIFY)
20909 tmp = XEXP (tmp, 1);
20910 if (REG_P (tmp))
20911 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
20912 else
20913 {
20914 if (GET_CODE (tmp) != PLUS
20915 || !REG_P (XEXP (tmp, 0))
20916 || !REG_P (XEXP (tmp, 1)))
20917 {
20918 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
20919 break;
20920 }
20921
20922 if (REGNO (XEXP (tmp, 0)) == 0)
20923 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
20924 reg_names[ REGNO (XEXP (tmp, 0)) ]);
20925 else
20926 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
20927 reg_names[ REGNO (XEXP (tmp, 1)) ]);
20928 }
20929 break;
20930 }
20931
20932 case 0:
20933 if (REG_P (x))
20934 fprintf (file, "%s", reg_names[REGNO (x)]);
20935 else if (MEM_P (x))
20936 {
20937 /* We need to handle PRE_INC and PRE_DEC here, since we need to
20938 know the width from the mode. */
20939 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
20940 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
20941 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
20942 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
20943 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
20944 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
20945 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20946 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
20947 else
20948 output_address (GET_MODE (x), XEXP (x, 0));
20949 }
20950 else
20951 {
20952 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
20953 /* This hack along with a corresponding hack in
20954 rs6000_output_addr_const_extra arranges to output addends
20955 where the assembler expects to find them. E.g.
20956 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
20957 without this hack would be output as "x@toc+4". We
20958 want "x+4@toc". */
20959 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
20960 else
20961 output_addr_const (file, x);
20962 }
20963 return;
20964
20965 case '&':
20966 if (const char *name = get_some_local_dynamic_name ())
20967 assemble_name (file, name);
20968 else
20969 output_operand_lossage ("'%%&' used without any "
20970 "local dynamic TLS references");
20971 return;
20972
20973 default:
20974 output_operand_lossage ("invalid %%xn code");
20975 }
20976 }
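
/* A few illustrative uses of the codes above (assuming the default bare
   register names): for a DImode value in r30, "%0" prints "30" and "%L0"
   prints "31" (the second word); for a condition register operand in cr1,
   "%E0" prints "6" (the EQ bit) and "%R0" prints "64" (the mtcrf
   field mask).  */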
20977 \f
20978 /* Print the address of an operand. */
20979
20980 void
20981 print_operand_address (FILE *file, rtx x)
20982 {
20983 if (REG_P (x))
20984 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
20985 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
20986 || GET_CODE (x) == LABEL_REF)
20987 {
20988 output_addr_const (file, x);
20989 if (small_data_operand (x, GET_MODE (x)))
20990 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20991 reg_names[SMALL_DATA_REG]);
20992 else
20993 gcc_assert (!TARGET_TOC);
20994 }
20995 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
20996 && REG_P (XEXP (x, 1)))
20997 {
20998 if (REGNO (XEXP (x, 0)) == 0)
20999 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21000 reg_names[ REGNO (XEXP (x, 0)) ]);
21001 else
21002 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21003 reg_names[ REGNO (XEXP (x, 1)) ]);
21004 }
21005 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21006 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21007 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21008 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21009 #if TARGET_MACHO
21010 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21011 && CONSTANT_P (XEXP (x, 1)))
21012 {
21013 fprintf (file, "lo16(");
21014 output_addr_const (file, XEXP (x, 1));
21015 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21016 }
21017 #endif
21018 #if TARGET_ELF
21019 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21020 && CONSTANT_P (XEXP (x, 1)))
21021 {
21022 output_addr_const (file, XEXP (x, 1));
21023 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21024 }
21025 #endif
21026 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21027 {
21028 /* This hack along with a corresponding hack in
21029 rs6000_output_addr_const_extra arranges to output addends
21030 where the assembler expects to find them. E.g.
21031 (lo_sum (reg 9)
21032 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21033 without this hack would be output as "x@toc+8@l(9)". We
21034 want "x+8@toc@l(9)". */
21035 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21036 if (GET_CODE (x) == LO_SUM)
21037 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21038 else
21039 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21040 }
21041 else
21042 gcc_unreachable ();
21043 }
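
/* Examples (illustrative, default register names): (reg 3) prints as
   "0(3)"; (plus (reg 3) (const_int 8)) prints as "8(3)"; (plus (reg 3)
   (reg 4)) prints as "3,4"; and on ELF targets, (lo_sum (reg 9)
   (symbol_ref "x")) prints as "x@l(9)".  */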
21044 \f
21045 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21046
21047 static bool
21048 rs6000_output_addr_const_extra (FILE *file, rtx x)
21049 {
21050 if (GET_CODE (x) == UNSPEC)
21051 switch (XINT (x, 1))
21052 {
21053 case UNSPEC_TOCREL:
21054 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21055 && REG_P (XVECEXP (x, 0, 1))
21056 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21057 output_addr_const (file, XVECEXP (x, 0, 0));
21058 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21059 {
21060 if (INTVAL (tocrel_offset_oac) >= 0)
21061 fprintf (file, "+");
21062 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21063 }
21064 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21065 {
21066 putc ('-', file);
21067 assemble_name (file, toc_label_name);
21068 need_toc_init = 1;
21069 }
21070 else if (TARGET_ELF)
21071 fputs ("@toc", file);
21072 return true;
21073
21074 #if TARGET_MACHO
21075 case UNSPEC_MACHOPIC_OFFSET:
21076 output_addr_const (file, XVECEXP (x, 0, 0));
21077 putc ('-', file);
21078 machopic_output_function_base_name (file);
21079 return true;
21080 #endif
21081 }
21082 return false;
21083 }
21084 \f
21085 /* Target hook for assembling integer objects. The PowerPC version has
21086 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21087 is defined. It also needs to handle DI-mode objects on 64-bit
21088 targets. */
21089
21090 static bool
21091 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21092 {
21093 #ifdef RELOCATABLE_NEEDS_FIXUP
21094 /* Special handling for SI values. */
21095 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21096 {
21097 static int recurse = 0;
21098
21099 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21100 the .fixup section. Since the TOC section is already relocated, we
21101 don't need to mark it here. We used to skip the text section, but it
21102 should never be valid for relocated addresses to be placed in the text
21103 section. */
21104 if (DEFAULT_ABI == ABI_V4
21105 && (TARGET_RELOCATABLE || flag_pic > 1)
21106 && in_section != toc_section
21107 && !recurse
21108 && !CONST_SCALAR_INT_P (x)
21109 && CONSTANT_P (x))
21110 {
21111 char buf[256];
21112
21113 recurse = 1;
21114 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21115 fixuplabelno++;
21116 ASM_OUTPUT_LABEL (asm_out_file, buf);
21117 fprintf (asm_out_file, "\t.long\t(");
21118 output_addr_const (asm_out_file, x);
21119 fprintf (asm_out_file, ")@fixup\n");
21120 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21121 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21122 fprintf (asm_out_file, "\t.long\t");
21123 assemble_name (asm_out_file, buf);
21124 fprintf (asm_out_file, "\n\t.previous\n");
21125 recurse = 0;
21126 return true;
21127 }
21128 /* Remove initial .'s to turn a -mcall-aixdesc function
21129 address into the address of the descriptor, not the function
21130 itself. */
21131 else if (GET_CODE (x) == SYMBOL_REF
21132 && XSTR (x, 0)[0] == '.'
21133 && DEFAULT_ABI == ABI_AIX)
21134 {
21135 const char *name = XSTR (x, 0);
21136 while (*name == '.')
21137 name++;
21138
21139 fprintf (asm_out_file, "\t.long\t%s\n", name);
21140 return true;
21141 }
21142 }
21143 #endif /* RELOCATABLE_NEEDS_FIXUP */
21144 return default_assemble_integer (x, size, aligned_p);
21145 }
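
/* For -mrelocatable, the fixup path above emits assembly along these
   lines for a relocatable address X (a sketch; the label name is
   generated):

	.LCP1:
		.long (X)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCP1
		.previous  */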
21146
21147 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21148 /* Emit an assembler directive to set symbol visibility for DECL to
21149 VISIBILITY_TYPE. */
21150
21151 static void
21152 rs6000_assemble_visibility (tree decl, int vis)
21153 {
21154 if (TARGET_XCOFF)
21155 return;
21156
21157 /* Functions need to have their entry point symbol visibility set as
21158 well as their descriptor symbol visibility. */
21159 if (DEFAULT_ABI == ABI_AIX
21160 && DOT_SYMBOLS
21161 && TREE_CODE (decl) == FUNCTION_DECL)
21162 {
21163 static const char * const visibility_types[] = {
21164 NULL, "protected", "hidden", "internal"
21165 };
21166
21167 const char *name, *type;
21168
21169 name = ((* targetm.strip_name_encoding)
21170 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21171 type = visibility_types[vis];
21172
21173 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21174 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21175 }
21176 else
21177 default_assemble_visibility (decl, vis);
21178 }
21179 #endif
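
/* Example (illustrative): making function "foo" hidden under ABI_AIX with
   dot-symbols emits both ".hidden foo" (the descriptor) and
   ".hidden .foo" (the code entry point); all other cases defer to
   default_assemble_visibility.  */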
21180 \f
21181 enum rtx_code
21182 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21183 {
21184 /* Reversal of FP compares needs care -- an ordered compare
21185 becomes an unordered compare and vice versa. */
21186 if (mode == CCFPmode
21187 && (!flag_finite_math_only
21188 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21189 || code == UNEQ || code == LTGT))
21190 return reverse_condition_maybe_unordered (code);
21191 else
21192 return reverse_condition (code);
21193 }
21194
21195 /* Generate a compare for CODE. Return a brand-new rtx that
21196 represents the result of the compare. */
21197
21198 static rtx
21199 rs6000_generate_compare (rtx cmp, machine_mode mode)
21200 {
21201 machine_mode comp_mode;
21202 rtx compare_result;
21203 enum rtx_code code = GET_CODE (cmp);
21204 rtx op0 = XEXP (cmp, 0);
21205 rtx op1 = XEXP (cmp, 1);
21206
21207 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21208 comp_mode = CCmode;
21209 else if (FLOAT_MODE_P (mode))
21210 comp_mode = CCFPmode;
21211 else if (code == GTU || code == LTU
21212 || code == GEU || code == LEU)
21213 comp_mode = CCUNSmode;
21214 else if ((code == EQ || code == NE)
21215 && unsigned_reg_p (op0)
21216 && (unsigned_reg_p (op1)
21217 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21218 /* These are unsigned values; perhaps there will be a later
21219 ordering compare that can be shared with this one. */
21220 comp_mode = CCUNSmode;
21221 else
21222 comp_mode = CCmode;
21223
21224 /* If we have an unsigned compare, make sure we don't have a signed value as
21225 an immediate. */
21226 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21227 && INTVAL (op1) < 0)
21228 {
21229 op0 = copy_rtx_if_shared (op0);
21230 op1 = force_reg (GET_MODE (op0), op1);
21231 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21232 }
21233
21234 /* First, the compare. */
21235 compare_result = gen_reg_rtx (comp_mode);
21236
21237 /* IEEE 128-bit support in VSX registers when we do not have hardware
21238 support. */
21239 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21240 {
21241 rtx libfunc = NULL_RTX;
21242 bool check_nan = false;
21243 rtx dest;
21244
21245 switch (code)
21246 {
21247 case EQ:
21248 case NE:
21249 libfunc = optab_libfunc (eq_optab, mode);
21250 break;
21251
21252 case GT:
21253 case GE:
21254 libfunc = optab_libfunc (ge_optab, mode);
21255 break;
21256
21257 case LT:
21258 case LE:
21259 libfunc = optab_libfunc (le_optab, mode);
21260 break;
21261
21262 case UNORDERED:
21263 case ORDERED:
21264 libfunc = optab_libfunc (unord_optab, mode);
21265 code = (code == UNORDERED) ? NE : EQ;
21266 break;
21267
21268 case UNGE:
21269 case UNGT:
21270 check_nan = true;
21271 libfunc = optab_libfunc (ge_optab, mode);
21272 code = (code == UNGE) ? GE : GT;
21273 break;
21274
21275 case UNLE:
21276 case UNLT:
21277 check_nan = true;
21278 libfunc = optab_libfunc (le_optab, mode);
21279 code = (code == UNLE) ? LE : LT;
21280 break;
21281
21282 case UNEQ:
21283 case LTGT:
21284 check_nan = true;
21285 libfunc = optab_libfunc (eq_optab, mode);
21286 code = (code == UNEQ) ? EQ : NE;
21287 break;
21288
21289 default:
21290 gcc_unreachable ();
21291 }
21292
21293 gcc_assert (libfunc);
21294
21295 if (!check_nan)
21296 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21297 SImode, op0, mode, op1, mode);
21298
21299 /* The library signals an exception for signalling NaNs, so we need to
21300 handle isgreater, etc. by first checking isordered. */
21301 else
21302 {
21303 rtx ne_rtx, normal_dest, unord_dest;
21304 rtx unord_func = optab_libfunc (unord_optab, mode);
21305 rtx join_label = gen_label_rtx ();
21306 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21307 rtx unord_cmp = gen_reg_rtx (comp_mode);
21308
21309
21310 /* Test for either value being a NaN. */
21311 gcc_assert (unord_func);
21312 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21313 SImode, op0, mode, op1, mode);
21314
21315 /* Set the result to 1 if either value is a NaN, and jump to the
21316 join label. */
21317 dest = gen_reg_rtx (SImode);
21318 emit_move_insn (dest, const1_rtx);
21319 emit_insn (gen_rtx_SET (unord_cmp,
21320 gen_rtx_COMPARE (comp_mode, unord_dest,
21321 const0_rtx)));
21322
21323 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21324 emit_jump_insn (gen_rtx_SET (pc_rtx,
21325 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21326 join_ref,
21327 pc_rtx)));
21328
21329 /* Do the normal comparison, knowing that the values are not
21330 NaNs. */
21331 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21332 SImode, op0, mode, op1, mode);
21333
21334 emit_insn (gen_cstoresi4 (dest,
21335 gen_rtx_fmt_ee (code, SImode, normal_dest,
21336 const0_rtx),
21337 normal_dest, const0_rtx));
21338
21339 /* Join the NaN and non-NaN paths. Compare dest against 0. */
21340 emit_label (join_label);
21341 code = NE;
21342 }
21343
21344 emit_insn (gen_rtx_SET (compare_result,
21345 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21346 }
21347
21348 else
21349 {
21350 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21351 CLOBBERs to match cmptf_internal2 pattern. */
21352 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21353 && FLOAT128_IBM_P (GET_MODE (op0))
21354 && TARGET_HARD_FLOAT)
21355 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21356 gen_rtvec (10,
21357 gen_rtx_SET (compare_result,
21358 gen_rtx_COMPARE (comp_mode, op0, op1)),
21359 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21360 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21361 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21362 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21363 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21364 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21365 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21366 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21367 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21368 else if (GET_CODE (op1) == UNSPEC
21369 && XINT (op1, 1) == UNSPEC_SP_TEST)
21370 {
21371 rtx op1b = XVECEXP (op1, 0, 0);
21372 comp_mode = CCEQmode;
21373 compare_result = gen_reg_rtx (CCEQmode);
21374 if (TARGET_64BIT)
21375 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21376 else
21377 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21378 }
21379 else
21380 emit_insn (gen_rtx_SET (compare_result,
21381 gen_rtx_COMPARE (comp_mode, op0, op1)));
21382 }
21383
21384 /* Some kinds of FP comparisons need an OR operation;
21385 under flag_finite_math_only we don't bother. */
21386 if (FLOAT_MODE_P (mode)
21387 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21388 && !flag_finite_math_only
21389 && (code == LE || code == GE
21390 || code == UNEQ || code == LTGT
21391 || code == UNGT || code == UNLT))
21392 {
21393 enum rtx_code or1, or2;
21394 rtx or1_rtx, or2_rtx, compare2_rtx;
21395 rtx or_result = gen_reg_rtx (CCEQmode);
21396
21397 switch (code)
21398 {
21399 case LE: or1 = LT; or2 = EQ; break;
21400 case GE: or1 = GT; or2 = EQ; break;
21401 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21402 case LTGT: or1 = LT; or2 = GT; break;
21403 case UNGT: or1 = UNORDERED; or2 = GT; break;
21404 case UNLT: or1 = UNORDERED; or2 = LT; break;
21405 default: gcc_unreachable ();
21406 }
21407 validate_condition_mode (or1, comp_mode);
21408 validate_condition_mode (or2, comp_mode);
21409 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21410 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21411 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21412 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21413 const_true_rtx);
21414 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21415
21416 compare_result = or_result;
21417 code = EQ;
21418 }
21419
21420 validate_condition_mode (code, GET_MODE (compare_result));
21421
21422 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
21423 }
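
/* Sketch of the soft-float IEEE 128-bit path above: comparing two KFmode
   values with UNGT and no TARGET_FLOAT128_HW first calls the unord_optab
   libfunc to test for NaNs, takes the join label with a result of 1 if
   either input is a NaN, and otherwise calls the ge_optab libfunc and
   tests its result with GT; the caller then receives the comparison
   "dest != 0".  */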
21424
21425 \f
21426 /* Return the diagnostic message string if the binary operation OP is
21427 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21428
21429 static const char*
21430 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21431 const_tree type1,
21432 const_tree type2)
21433 {
21434 machine_mode mode1 = TYPE_MODE (type1);
21435 machine_mode mode2 = TYPE_MODE (type2);
21436
21437 /* For complex modes, use the inner type. */
21438 if (COMPLEX_MODE_P (mode1))
21439 mode1 = GET_MODE_INNER (mode1);
21440
21441 if (COMPLEX_MODE_P (mode2))
21442 mode2 = GET_MODE_INNER (mode2);
21443
21444 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21445 double to intermix unless -mfloat128-convert. */
21446 if (mode1 == mode2)
21447 return NULL;
21448
21449 if (!TARGET_FLOAT128_CVT)
21450 {
21451 if ((mode1 == KFmode && mode2 == IFmode)
21452 || (mode1 == IFmode && mode2 == KFmode))
21453 return N_("__float128 and __ibm128 cannot be used in the same "
21454 "expression");
21455
21456 if (TARGET_IEEEQUAD
21457 && ((mode1 == IFmode && mode2 == TFmode)
21458 || (mode1 == TFmode && mode2 == IFmode)))
21459 return N_("__ibm128 and long double cannot be used in the same "
21460 "expression");
21461
21462 if (!TARGET_IEEEQUAD
21463 && ((mode1 == KFmode && mode2 == TFmode)
21464 || (mode1 == TFmode && mode2 == KFmode)))
21465 return N_("__float128 and long double cannot be used in the same "
21466 "expression");
21467 }
21468
21469 return NULL;
21470 }
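
/* For example (illustrative), without -mfloat128-convert the front ends
   reject mixing the two 128-bit formats:

	__float128 a;
	__ibm128 b;
	... a + b ...	error: __float128 and __ibm128 cannot be used
			in the same expression  */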
21471
21472 \f
21473 /* Expand floating point conversion to/from __float128 and __ibm128. */
21474
21475 void
21476 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21477 {
21478 machine_mode dest_mode = GET_MODE (dest);
21479 machine_mode src_mode = GET_MODE (src);
21480 convert_optab cvt = unknown_optab;
21481 bool do_move = false;
21482 rtx libfunc = NULL_RTX;
21483 rtx dest2;
21484 typedef rtx (*rtx_2func_t) (rtx, rtx);
21485 rtx_2func_t hw_convert = (rtx_2func_t)0;
21486 size_t kf_or_tf;
21487
21488 struct hw_conv_t {
21489 rtx_2func_t from_df;
21490 rtx_2func_t from_sf;
21491 rtx_2func_t from_si_sign;
21492 rtx_2func_t from_si_uns;
21493 rtx_2func_t from_di_sign;
21494 rtx_2func_t from_di_uns;
21495 rtx_2func_t to_df;
21496 rtx_2func_t to_sf;
21497 rtx_2func_t to_si_sign;
21498 rtx_2func_t to_si_uns;
21499 rtx_2func_t to_di_sign;
21500 rtx_2func_t to_di_uns;
21501 } hw_conversions[2] = {
21502 /* Conversions to/from KFmode. */
21503 {
21504 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21505 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21506 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21507 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21508 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21509 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21510 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21511 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21512 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21513 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21514 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21515 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21516 },
21517
21518 /* Conversions to/from TFmode. */
21519 {
21520 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21521 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21522 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21523 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21524 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21525 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21526 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21527 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21528 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21529 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21530 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21531 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21532 },
21533 };
21534
21535 if (dest_mode == src_mode)
21536 gcc_unreachable ();
21537
21538 /* Eliminate memory operations. */
21539 if (MEM_P (src))
21540 src = force_reg (src_mode, src);
21541
21542 if (MEM_P (dest))
21543 {
21544 rtx tmp = gen_reg_rtx (dest_mode);
21545 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21546 rs6000_emit_move (dest, tmp, dest_mode);
21547 return;
21548 }
21549
21550 /* Convert to IEEE 128-bit floating point. */
21551 if (FLOAT128_IEEE_P (dest_mode))
21552 {
21553 if (dest_mode == KFmode)
21554 kf_or_tf = 0;
21555 else if (dest_mode == TFmode)
21556 kf_or_tf = 1;
21557 else
21558 gcc_unreachable ();
21559
21560 switch (src_mode)
21561 {
21562 case E_DFmode:
21563 cvt = sext_optab;
21564 hw_convert = hw_conversions[kf_or_tf].from_df;
21565 break;
21566
21567 case E_SFmode:
21568 cvt = sext_optab;
21569 hw_convert = hw_conversions[kf_or_tf].from_sf;
21570 break;
21571
21572 case E_KFmode:
21573 case E_IFmode:
21574 case E_TFmode:
21575 if (FLOAT128_IBM_P (src_mode))
21576 cvt = sext_optab;
21577 else
21578 do_move = true;
21579 break;
21580
21581 case E_SImode:
21582 if (unsigned_p)
21583 {
21584 cvt = ufloat_optab;
21585 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
21586 }
21587 else
21588 {
21589 cvt = sfloat_optab;
21590 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
21591 }
21592 break;
21593
21594 case E_DImode:
21595 if (unsigned_p)
21596 {
21597 cvt = ufloat_optab;
21598 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
21599 }
21600 else
21601 {
21602 cvt = sfloat_optab;
21603 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
21604 }
21605 break;
21606
21607 default:
21608 gcc_unreachable ();
21609 }
21610 }
21611
21612 /* Convert from IEEE 128-bit floating point. */
21613 else if (FLOAT128_IEEE_P (src_mode))
21614 {
21615 if (src_mode == KFmode)
21616 kf_or_tf = 0;
21617 else if (src_mode == TFmode)
21618 kf_or_tf = 1;
21619 else
21620 gcc_unreachable ();
21621
21622 switch (dest_mode)
21623 {
21624 case E_DFmode:
21625 cvt = trunc_optab;
21626 hw_convert = hw_conversions[kf_or_tf].to_df;
21627 break;
21628
21629 case E_SFmode:
21630 cvt = trunc_optab;
21631 hw_convert = hw_conversions[kf_or_tf].to_sf;
21632 break;
21633
21634 case E_KFmode:
21635 case E_IFmode:
21636 case E_TFmode:
21637 if (FLOAT128_IBM_P (dest_mode))
21638 cvt = trunc_optab;
21639 else
21640 do_move = true;
21641 break;
21642
21643 case E_SImode:
21644 if (unsigned_p)
21645 {
21646 cvt = ufix_optab;
21647 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
21648 }
21649 else
21650 {
21651 cvt = sfix_optab;
21652 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
21653 }
21654 break;
21655
21656 case E_DImode:
21657 if (unsigned_p)
21658 {
21659 cvt = ufix_optab;
21660 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
21661 }
21662 else
21663 {
21664 cvt = sfix_optab;
21665 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
21666 }
21667 break;
21668
21669 default:
21670 gcc_unreachable ();
21671 }
21672 }
21673
21674 /* Both IBM format. */
21675 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
21676 do_move = true;
21677
21678 else
21679 gcc_unreachable ();
21680
21681 /* Handle conversion between TFmode/KFmode/IFmode. */
21682 if (do_move)
21683 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
21684
21685 /* Handle conversion if we have hardware support. */
21686 else if (TARGET_FLOAT128_HW && hw_convert)
21687 emit_insn ((hw_convert) (dest, src));
21688
21689 /* Call an external function to do the conversion. */
21690 else if (cvt != unknown_optab)
21691 {
21692 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
21693 gcc_assert (libfunc != NULL_RTX);
21694
21695 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
21696 src, src_mode);
21697
21698 gcc_assert (dest2 != NULL_RTX);
21699 if (!rtx_equal_p (dest, dest2))
21700 emit_move_insn (dest, dest2);
21701 }
21702
21703 else
21704 gcc_unreachable ();
21705
21706 return;
21707 }
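
/* Illustration (a sketch, not exhaustive): converting a signed DImode
   value to KFmode uses gen_float_kfdi2_hw when TARGET_FLOAT128_HW is set,
   and otherwise emits a call to the sfloat_optab libfunc for
   (KFmode, DImode); converting IFmode to TFmode when long double is IBM
   format is handled as a simple move (the do_move path).  */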
21708
21709 \f
21710 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
21711 can be used as that dest register. Return the dest register. */
21712
21713 rtx
21714 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
21715 {
21716 if (op2 == const0_rtx)
21717 return op1;
21718
21719 if (GET_CODE (scratch) == SCRATCH)
21720 scratch = gen_reg_rtx (mode);
21721
21722 if (logical_operand (op2, mode))
21723 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
21724 else
21725 emit_insn (gen_rtx_SET (scratch,
21726 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
21727
21728 return scratch;
21729 }
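
/* Example (illustrative): with OP1 in r3 and OP2 a logical constant such
   as 0x12, this emits scratch = r3 ^ 0x12 (an xori), which is zero
   exactly when r3 == 0x12; for other OP2 it emits scratch = r3 + (-OP2)
   instead.  */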
21730
21731 void
21732 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
21733 {
21734 rtx condition_rtx;
21735 machine_mode op_mode;
21736 enum rtx_code cond_code;
21737 rtx result = operands[0];
21738
21739 condition_rtx = rs6000_generate_compare (operands[1], mode);
21740 cond_code = GET_CODE (condition_rtx);
21741
21742 if (cond_code == NE
21743 || cond_code == GE || cond_code == LE
21744 || cond_code == GEU || cond_code == LEU
21745 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
21746 {
21747 rtx not_result = gen_reg_rtx (CCEQmode);
21748 rtx not_op, rev_cond_rtx;
21749 machine_mode cc_mode;
21750
21751 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
21752
21753 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
21754 SImode, XEXP (condition_rtx, 0), const0_rtx);
21755 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
21756 emit_insn (gen_rtx_SET (not_result, not_op));
21757 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
21758 }
21759
21760 op_mode = GET_MODE (XEXP (operands[1], 0));
21761 if (op_mode == VOIDmode)
21762 op_mode = GET_MODE (XEXP (operands[1], 1));
21763
21764 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
21765 {
21766 PUT_MODE (condition_rtx, DImode);
21767 convert_move (result, condition_rtx, 0);
21768 }
21769 else
21770 {
21771 PUT_MODE (condition_rtx, SImode);
21772 emit_insn (gen_rtx_SET (result, condition_rtx));
21773 }
21774 }
21775
21776 /* Emit a conditional branch to the label OPERANDS[3], testing the comparison OPERANDS[0]. */
21777
21778 void
21779 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
21780 {
21781 rtx condition_rtx, loc_ref;
21782
21783 condition_rtx = rs6000_generate_compare (operands[0], mode);
21784 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
21785 emit_jump_insn (gen_rtx_SET (pc_rtx,
21786 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
21787 loc_ref, pc_rtx)));
21788 }
21789
21790 /* Return the string to output a conditional branch to LABEL, which is
21791 the operand template of the label, or NULL if the branch is really a
21792 conditional return.
21793
21794 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
21795 condition code register and its mode specifies what kind of
21796 comparison we made.
21797
21798 REVERSED is nonzero if we should reverse the sense of the comparison.
21799
21800 INSN is the insn. */
21801
21802 char *
21803 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
21804 {
21805 static char string[64];
21806 enum rtx_code code = GET_CODE (op);
21807 rtx cc_reg = XEXP (op, 0);
21808 machine_mode mode = GET_MODE (cc_reg);
21809 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
21810 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
21811 int really_reversed = reversed ^ need_longbranch;
21812 char *s = string;
21813 const char *ccode;
21814 const char *pred;
21815 rtx note;
21816
21817 validate_condition_mode (code, mode);
21818
21819 /* Work out which way this really branches. We could use
21820 reverse_condition_maybe_unordered here always but this
21821 makes the resulting assembler clearer. */
21822 if (really_reversed)
21823 {
21824 /* Reversal of FP compares needs care -- an ordered compare
21825 becomes an unordered compare and vice versa. */
21826 if (mode == CCFPmode)
21827 code = reverse_condition_maybe_unordered (code);
21828 else
21829 code = reverse_condition (code);
21830 }
21831
21832 switch (code)
21833 {
21834 /* Not all of these are actually distinct opcodes, but
21835 we distinguish them for clarity of the resulting assembler. */
21836 case NE: case LTGT:
21837 ccode = "ne"; break;
21838 case EQ: case UNEQ:
21839 ccode = "eq"; break;
21840 case GE: case GEU:
21841 ccode = "ge"; break;
21842 case GT: case GTU: case UNGT:
21843 ccode = "gt"; break;
21844 case LE: case LEU:
21845 ccode = "le"; break;
21846 case LT: case LTU: case UNLT:
21847 ccode = "lt"; break;
21848 case UNORDERED: ccode = "un"; break;
21849 case ORDERED: ccode = "nu"; break;
21850 case UNGE: ccode = "nl"; break;
21851 case UNLE: ccode = "ng"; break;
21852 default:
21853 gcc_unreachable ();
21854 }
21855
21856 /* Maybe we have a guess as to how likely the branch is. */
21857 pred = "";
21858 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
21859 if (note != NULL_RTX)
21860 {
21861 /* PROB is the difference from 50%. */
21862 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
21863 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
21864
21865 /* Only hint for highly probable/improbable branches on newer cpus when
21866 we have real profile data, as static prediction overrides processor
21867 dynamic prediction. For older cpus we may as well always hint, but
21868 assume not taken for branches that are very close to 50% as a
21869 mispredicted taken branch is more expensive than a
21870 mispredicted not-taken branch. */
21871 if (rs6000_always_hint
21872 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
21873 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
21874 && br_prob_note_reliable_p (note)))
21875 {
21876 if (abs (prob) > REG_BR_PROB_BASE / 20
21877 && ((prob > 0) ^ need_longbranch))
21878 pred = "+";
21879 else
21880 pred = "-";
21881 }
21882 }
21883
21884 if (label == NULL)
21885 s += sprintf (s, "b%slr%s ", ccode, pred);
21886 else
21887 s += sprintf (s, "b%s%s ", ccode, pred);
21888
21889 /* We need to escape any '%' characters in the reg_names string.
21890 Assume they'd only be the first character.... */
21891 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
21892 *s++ = '%';
21893 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
21894
21895 if (label != NULL)
21896 {
21897 /* If the branch distance was too far, we may have to use an
21898 unconditional branch to go the distance. */
21899 if (need_longbranch)
21900 s += sprintf (s, ",$+8\n\tb %s", label);
21901 else
21902 s += sprintf (s, ",%s", label);
21903 }
21904
21905 return string;
21906 }
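
/* Sample outputs (illustrative, default register names): an EQ test on
   cr0 against label ".L5" yields "beq 0,.L5"; a reliably predicted taken
   branch yields "beq+ 0,.L5"; when the target is out of conditional-branch
   range, the sense is reversed and we get "bne 0,$+8\n\tb .L5"; and with
   LABEL == NULL an EQ conditional return yields "beqlr 0".  */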
21907
21908 /* Return insn for VSX or Altivec comparisons. */
21909
21910 static rtx
21911 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
21912 {
21913 rtx mask;
21914 machine_mode mode = GET_MODE (op0);
21915
21916 switch (code)
21917 {
21918 default:
21919 break;
21920
21921 case GE:
21922 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
21923 return NULL_RTX;
21924 /* FALLTHRU */
21925
21926 case EQ:
21927 case GT:
21928 case GTU:
21929 case ORDERED:
21930 case UNORDERED:
21931 case UNEQ:
21932 case LTGT:
21933 mask = gen_reg_rtx (mode);
21934 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
21935 return mask;
21936 }
21937
21938 return NULL_RTX;
21939 }
21940
21941 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
21942 DMODE is expected destination mode. This is a recursive function. */
21943
21944 static rtx
21945 rs6000_emit_vector_compare (enum rtx_code rcode,
21946 rtx op0, rtx op1,
21947 machine_mode dmode)
21948 {
21949 rtx mask;
21950 bool swap_operands = false;
21951 bool try_again = false;
21952
21953 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
21954 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
21955
21956 /* See if the comparison works as is. */
21957 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
21958 if (mask)
21959 return mask;
21960
21961 switch (rcode)
21962 {
21963 case LT:
21964 rcode = GT;
21965 swap_operands = true;
21966 try_again = true;
21967 break;
21968 case LTU:
21969 rcode = GTU;
21970 swap_operands = true;
21971 try_again = true;
21972 break;
21973 case NE:
21974 case UNLE:
21975 case UNLT:
21976 case UNGE:
21977 case UNGT:
21978 /* Invert condition and try again.
21979 e.g., A != B becomes ~(A==B). */
21980 {
21981 enum rtx_code rev_code;
21982 enum insn_code nor_code;
21983 rtx mask2;
21984
21985 rev_code = reverse_condition_maybe_unordered (rcode);
21986 if (rev_code == UNKNOWN)
21987 return NULL_RTX;
21988
21989 nor_code = optab_handler (one_cmpl_optab, dmode);
21990 if (nor_code == CODE_FOR_nothing)
21991 return NULL_RTX;
21992
21993 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
21994 if (!mask2)
21995 return NULL_RTX;
21996
21997 mask = gen_reg_rtx (dmode);
21998 emit_insn (GEN_FCN (nor_code) (mask, mask2));
21999 return mask;
22000 }
22001 break;
22002 case GE:
22003 case GEU:
22004 case LE:
22005 case LEU:
22006 /* Try GT/GTU/LT/LTU OR EQ */
22007 {
22008 rtx c_rtx, eq_rtx;
22009 enum insn_code ior_code;
22010 enum rtx_code new_code;
22011
22012 switch (rcode)
22013 {
22014 case GE:
22015 new_code = GT;
22016 break;
22017
22018 case GEU:
22019 new_code = GTU;
22020 break;
22021
22022 case LE:
22023 new_code = LT;
22024 break;
22025
22026 case LEU:
22027 new_code = LTU;
22028 break;
22029
22030 default:
22031 gcc_unreachable ();
22032 }
22033
22034 ior_code = optab_handler (ior_optab, dmode);
22035 if (ior_code == CODE_FOR_nothing)
22036 return NULL_RTX;
22037
22038 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22039 if (!c_rtx)
22040 return NULL_RTX;
22041
22042 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22043 if (!eq_rtx)
22044 return NULL_RTX;
22045
22046 mask = gen_reg_rtx (dmode);
22047 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22048 return mask;
22049 }
22050 break;
22051 default:
22052 return NULL_RTX;
22053 }
22054
22055 if (try_again)
22056 {
22057 if (swap_operands)
22058 std::swap (op0, op1);
22059
22060 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22061 if (mask)
22062 return mask;
22063 }
22064
22065 /* You only get two chances. */
22066 return NULL_RTX;
22067 }
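
/* As a concrete example of the recursion above (illustrative only): a
   V4SI LE comparison has no direct form, so it is split as

     t1 = rs6000_emit_vector_compare (LT, ...)   -> GT, operands swapped
     t2 = rs6000_emit_vector_compare (EQ, ...)
     mask = t1 | t2                              -> through ior_optab

   and NE becomes ~(EQ) through one_cmpl_optab.  */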
22068
22069 /* Emit vector conditional expression. DEST is the destination. OP_TRUE
22070 and OP_FALSE are the two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are
22071 the two operands for the relation operation COND. */
22072
22073 int
22074 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22075 rtx cond, rtx cc_op0, rtx cc_op1)
22076 {
22077 machine_mode dest_mode = GET_MODE (dest);
22078 machine_mode mask_mode = GET_MODE (cc_op0);
22079 enum rtx_code rcode = GET_CODE (cond);
22080 machine_mode cc_mode = CCmode;
22081 rtx mask;
22082 rtx cond2;
22083 bool invert_move = false;
22084
22085 if (VECTOR_UNIT_NONE_P (dest_mode))
22086 return 0;
22087
22088 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22089 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22090
22091 switch (rcode)
22092 {
22093 /* Swap operands if we can, and fall back to doing the operation as
22094 specified, and doing a NOR to invert the test. */
22095 case NE:
22096 case UNLE:
22097 case UNLT:
22098 case UNGE:
22099 case UNGT:
22100 /* Invert condition and try again.
22101 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22102 invert_move = true;
22103 rcode = reverse_condition_maybe_unordered (rcode);
22104 if (rcode == UNKNOWN)
22105 return 0;
22106 break;
22107
22108 case GE:
22109 case LE:
22110 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22111 {
22112 /* Invert condition to avoid compound test. */
22113 invert_move = true;
22114 rcode = reverse_condition (rcode);
22115 }
22116 break;
22117
22118 case GTU:
22119 case GEU:
22120 case LTU:
22121 case LEU:
22122 /* Mark unsigned tests with CCUNSmode. */
22123 cc_mode = CCUNSmode;
22124
22125 /* Invert condition to avoid compound test if necessary. */
22126 if (rcode == GEU || rcode == LEU)
22127 {
22128 invert_move = true;
22129 rcode = reverse_condition (rcode);
22130 }
22131 break;
22132
22133 default:
22134 break;
22135 }
22136
22137 /* Get the vector mask for the given relational operations. */
22138 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22139
22140 if (!mask)
22141 return 0;
22142
22143 if (invert_move)
22144 std::swap (op_true, op_false);
22145
22146 /* Optimize vec1 == vec2, exploiting the fact that the mask is -1/0 per element. */
22147 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22148 && (GET_CODE (op_true) == CONST_VECTOR
22149 || GET_CODE (op_false) == CONST_VECTOR))
22150 {
22151 rtx constant_0 = CONST0_RTX (dest_mode);
22152 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22153
22154 if (op_true == constant_m1 && op_false == constant_0)
22155 {
22156 emit_move_insn (dest, mask);
22157 return 1;
22158 }
22159
22160 else if (op_true == constant_0 && op_false == constant_m1)
22161 {
22162 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22163 return 1;
22164 }
22165
22166 /* If we can't use the vector comparison directly, perhaps we can use
22167 the mask for the true or false fields, instead of loading up a
22168 constant. */
22169 if (op_true == constant_m1)
22170 op_true = mask;
22171
22172 if (op_false == constant_0)
22173 op_false = mask;
22174 }
22175
22176 if (!REG_P (op_true) && !SUBREG_P (op_true))
22177 op_true = force_reg (dest_mode, op_true);
22178
22179 if (!REG_P (op_false) && !SUBREG_P (op_false))
22180 op_false = force_reg (dest_mode, op_false);
22181
22182 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22183 CONST0_RTX (dest_mode));
22184 emit_insn (gen_rtx_SET (dest,
22185 gen_rtx_IF_THEN_ELSE (dest_mode,
22186 cond2,
22187 op_true,
22188 op_false)));
22189 return 1;
22190 }
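
/* The IF_THEN_ELSE emitted above is ultimately a masked select;
   conceptually (a sketch, assuming the usual vsel/xxsel semantics)

     dest = (op_false & ~mask) | (op_true & mask)

   which is why the constant -1/0 special cases above can forward the
   mask itself instead of loading a constant.  */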
22191
22192 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22193 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
22194 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22195 hardware has no such operation. */
22196
22197 static int
22198 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22199 {
22200 enum rtx_code code = GET_CODE (op);
22201 rtx op0 = XEXP (op, 0);
22202 rtx op1 = XEXP (op, 1);
22203 machine_mode compare_mode = GET_MODE (op0);
22204 machine_mode result_mode = GET_MODE (dest);
22205 bool max_p = false;
22206
22207 if (result_mode != compare_mode)
22208 return 0;
22209
22210 if (code == GE || code == GT)
22211 max_p = true;
22212 else if (code == LE || code == LT)
22213 max_p = false;
22214 else
22215 return 0;
22216
22217 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22218 ;
22219
22220 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22221 max_p = !max_p;
22222
22223 else
22224 return 0;
22225
22226 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22227 return 1;
22228 }
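
/* Example of what this catches (illustrative): for

     double f (double a, double b) { return a > b ? a : b; }

   OP is (gt a b) with TRUE_COND == a and FALSE_COND == b, so we reduce
   the conditional move to rs6000_emit_minmax with SMAX, which can use
   the XSMAXCDP instruction named above.  */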
22229
22230 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22231 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
22232 operands of the last comparison is nonzero/true, FALSE_COND if it is
22233 zero/false. Return 0 if the hardware has no such operation. */
22234
22235 static int
22236 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22237 {
22238 enum rtx_code code = GET_CODE (op);
22239 rtx op0 = XEXP (op, 0);
22240 rtx op1 = XEXP (op, 1);
22241 machine_mode result_mode = GET_MODE (dest);
22242 rtx compare_rtx;
22243 rtx cmove_rtx;
22244 rtx clobber_rtx;
22245
22246 if (!can_create_pseudo_p ())
22247 return 0;
22248
22249 switch (code)
22250 {
22251 case EQ:
22252 case GE:
22253 case GT:
22254 break;
22255
22256 case NE:
22257 case LT:
22258 case LE:
22259 code = swap_condition (code);
22260 std::swap (op0, op1);
22261 break;
22262
22263 default:
22264 return 0;
22265 }
22266
22267 /* Generate: [(parallel [(set (dest)
22268 (if_then_else (op (cmp1) (cmp2))
22269 (true)
22270 (false)))
22271 (clobber (scratch))])]. */
22272
22273 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22274 cmove_rtx = gen_rtx_SET (dest,
22275 gen_rtx_IF_THEN_ELSE (result_mode,
22276 compare_rtx,
22277 true_cond,
22278 false_cond));
22279
22280 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22281 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22282 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22283
22284 return 1;
22285 }
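
/* A sketch of the expected final code for "r = (a == b) ? c : d" in
   DFmode (illustrative; register choices are up to the allocator):

     xscmpeqdp mask,a,b     # mask = (a == b) ? all-1s : all-0s
     xxsel     r,d,c,mask   # r = (d & ~mask) | (c & mask)

   The V2DImode scratch clobbered above holds the intermediate mask.  */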
22286
22287 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
22288 operands of the last comparison is nonzero/true, FALSE_COND if it
22289 is zero/false. Return 0 if the hardware has no such operation. */
22290
22291 int
22292 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22293 {
22294 enum rtx_code code = GET_CODE (op);
22295 rtx op0 = XEXP (op, 0);
22296 rtx op1 = XEXP (op, 1);
22297 machine_mode compare_mode = GET_MODE (op0);
22298 machine_mode result_mode = GET_MODE (dest);
22299 rtx temp;
22300 bool is_against_zero;
22301
22302 /* These modes should always match. */
22303 if (GET_MODE (op1) != compare_mode
22304 /* In the isel case however, we can use a compare immediate, so
22305 op1 may be a small constant. */
22306 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22307 return 0;
22308 if (GET_MODE (true_cond) != result_mode)
22309 return 0;
22310 if (GET_MODE (false_cond) != result_mode)
22311 return 0;
22312
22313 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22314 if (TARGET_P9_MINMAX
22315 && (compare_mode == SFmode || compare_mode == DFmode)
22316 && (result_mode == SFmode || result_mode == DFmode))
22317 {
22318 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22319 return 1;
22320
22321 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22322 return 1;
22323 }
22324
22325 /* Don't allow using floating point comparisons for integer results for
22326 now. */
22327 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22328 return 0;
22329
22330 /* First, work out if the hardware can do this at all, or
22331 if it's too slow.... */
22332 if (!FLOAT_MODE_P (compare_mode))
22333 {
22334 if (TARGET_ISEL)
22335 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22336 return 0;
22337 }
22338
22339 is_against_zero = op1 == CONST0_RTX (compare_mode);
22340
22341 /* A floating-point subtract might overflow, underflow, or produce
22342 an inexact result, thus changing the floating-point flags, so it
22343 can't be generated if we care about that. It's safe if one side
22344 of the construct is zero, since then no subtract will be
22345 generated. */
22346 if (SCALAR_FLOAT_MODE_P (compare_mode)
22347 && flag_trapping_math && ! is_against_zero)
22348 return 0;
22349
22350 /* Eliminate half of the comparisons by switching operands; this
22351 makes the remaining code simpler. */
22352 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22353 || code == LTGT || code == LT || code == UNLE)
22354 {
22355 code = reverse_condition_maybe_unordered (code);
22356 temp = true_cond;
22357 true_cond = false_cond;
22358 false_cond = temp;
22359 }
22360
22361 /* UNEQ and LTGT take four instructions for a comparison with zero,
22362 so it'll probably be faster to use a branch here too. */
22363 if (code == UNEQ && HONOR_NANS (compare_mode))
22364 return 0;
22365
22366 /* We're going to try to implement comparisons by performing
22367 a subtract, then comparing against zero. Unfortunately,
22368 Inf - Inf is NaN which is not zero, and so if we don't
22369 know that the operand is finite and the comparison
22370 would treat EQ differently from UNORDERED, we can't do it. */
22371 if (HONOR_INFINITIES (compare_mode)
22372 && code != GT && code != UNGE
22373 && (GET_CODE (op1) != CONST_DOUBLE
22374 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22375 /* Constructs of the form (a OP b ? a : b) are safe. */
22376 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22377 || (! rtx_equal_p (op0, true_cond)
22378 && ! rtx_equal_p (op1, true_cond))))
22379 return 0;
22380
22381 /* At this point we know we can use fsel. */
22382
22383 /* Reduce the comparison to a comparison against zero. */
22384 if (! is_against_zero)
22385 {
22386 temp = gen_reg_rtx (compare_mode);
22387 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22388 op0 = temp;
22389 op1 = CONST0_RTX (compare_mode);
22390 }
22391
22392 /* If we don't care about NaNs we can reduce some of the comparisons
22393 down to faster ones. */
22394 if (! HONOR_NANS (compare_mode))
22395 switch (code)
22396 {
22397 case GT:
22398 code = LE;
22399 temp = true_cond;
22400 true_cond = false_cond;
22401 false_cond = temp;
22402 break;
22403 case UNGE:
22404 code = GE;
22405 break;
22406 case UNEQ:
22407 code = EQ;
22408 break;
22409 default:
22410 break;
22411 }
22412
22413 /* Now, reduce everything down to a GE. */
22414 switch (code)
22415 {
22416 case GE:
22417 break;
22418
22419 case LE:
22420 temp = gen_reg_rtx (compare_mode);
22421 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22422 op0 = temp;
22423 break;
22424
22425 case ORDERED:
22426 temp = gen_reg_rtx (compare_mode);
22427 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22428 op0 = temp;
22429 break;
22430
22431 case EQ:
22432 temp = gen_reg_rtx (compare_mode);
22433 emit_insn (gen_rtx_SET (temp,
22434 gen_rtx_NEG (compare_mode,
22435 gen_rtx_ABS (compare_mode, op0))));
22436 op0 = temp;
22437 break;
22438
22439 case UNGE:
22440 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22441 temp = gen_reg_rtx (result_mode);
22442 emit_insn (gen_rtx_SET (temp,
22443 gen_rtx_IF_THEN_ELSE (result_mode,
22444 gen_rtx_GE (VOIDmode,
22445 op0, op1),
22446 true_cond, false_cond)));
22447 false_cond = true_cond;
22448 true_cond = temp;
22449
22450 temp = gen_reg_rtx (compare_mode);
22451 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22452 op0 = temp;
22453 break;
22454
22455 case GT:
22456 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22457 temp = gen_reg_rtx (result_mode);
22458 emit_insn (gen_rtx_SET (temp,
22459 gen_rtx_IF_THEN_ELSE (result_mode,
22460 gen_rtx_GE (VOIDmode,
22461 op0, op1),
22462 true_cond, false_cond)));
22463 true_cond = false_cond;
22464 false_cond = temp;
22465
22466 temp = gen_reg_rtx (compare_mode);
22467 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22468 op0 = temp;
22469 break;
22470
22471 default:
22472 gcc_unreachable ();
22473 }
22474
22475 emit_insn (gen_rtx_SET (dest,
22476 gen_rtx_IF_THEN_ELSE (result_mode,
22477 gen_rtx_GE (VOIDmode,
22478 op0, op1),
22479 true_cond, false_cond)));
22480 return 1;
22481 }
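
/* For reference, the reductions above rely on these sign identities
   (valid given the NaN/Inf checks already performed):

     a LE 0       <->  (-a)   GE 0
     a ORDERED 0  <->  |a|    GE 0   (false only for NaN)
     a EQ 0       <->  (-|a|) GE 0

   while UNGE and GT are built from two nested selects, leaving the
   final SET as the canonical "dest = (op0 GE 0) ? t : f" form.  */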
22482
22483 /* Same as above, but for ints (isel). */
22484
22485 int
22486 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22487 {
22488 rtx condition_rtx, cr;
22489 machine_mode mode = GET_MODE (dest);
22490 enum rtx_code cond_code;
22491 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
22492 bool signedp;
22493
22494 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
22495 return 0;
22496
22497 /* We still have to do the compare, because isel doesn't do a
22498 compare; it just looks at the CRx bits set by a previous compare
22499 instruction. */
22500 condition_rtx = rs6000_generate_compare (op, mode);
22501 cond_code = GET_CODE (condition_rtx);
22502 cr = XEXP (condition_rtx, 0);
22503 signedp = GET_MODE (cr) == CCmode;
22504
22505 isel_func = (mode == SImode
22506 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
22507 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
22508
22509 switch (cond_code)
22510 {
22511 case LT: case GT: case LTU: case GTU: case EQ:
22512 /* isel handles these directly. */
22513 break;
22514
22515 default:
22516 /* We need to swap the sense of the comparison. */
22517 {
22518 std::swap (false_cond, true_cond);
22519 PUT_CODE (condition_rtx, reverse_condition (cond_code));
22520 }
22521 break;
22522 }
22523
22524 false_cond = force_reg (mode, false_cond);
22525 if (true_cond != const0_rtx)
22526 true_cond = force_reg (mode, true_cond);
22527
22528 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
22529
22530 return 1;
22531 }
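
/* Illustrative result for "x = (a < b) ? c : d" in SImode with isel
   available (a sketch; isel is RT,RA,RB,BC -- RT = CR bit BC ? RA : RB):

     cmpw   cr0,ra,rb
     isel   rx,rc,rd,0      # CR bit 0 is cr0.lt
*/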
22532
22533 void
22534 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
22535 {
22536 machine_mode mode = GET_MODE (op0);
22537 enum rtx_code c;
22538 rtx target;
22539
22540 /* VSX/altivec have direct min/max insns. */
22541 if ((code == SMAX || code == SMIN)
22542 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
22543 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
22544 {
22545 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
22546 return;
22547 }
22548
22549 if (code == SMAX || code == SMIN)
22550 c = GE;
22551 else
22552 c = GEU;
22553
22554 if (code == SMAX || code == UMAX)
22555 target = emit_conditional_move (dest, c, op0, op1, mode,
22556 op0, op1, mode, 0);
22557 else
22558 target = emit_conditional_move (dest, c, op0, op1, mode,
22559 op1, op0, mode, 0);
22560 gcc_assert (target);
22561 if (target != dest)
22562 emit_move_insn (dest, target);
22563 }
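
/* Without a direct min/max instruction, the fallback above is just a
   conditional move: SMAX expands conceptually as
   "dest = (op0 GE op1) ? op0 : op1", and UMAX the same with GEU.  */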
22564
22565 /* A subroutine of the atomic operation splitters. Jump to LABEL if
22566 COND is true. Mark the jump as unlikely to be taken. */
22567
22568 static void
22569 emit_unlikely_jump (rtx cond, rtx label)
22570 {
22571 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
22572 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
22573 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
22574 }
22575
22576 /* A subroutine of the atomic operation splitters. Emit a load-locked
22577 instruction in MODE. For QI/HImode, possibly use a pattern that includes
22578 the zero_extend operation. */
22579
22580 static void
22581 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
22582 {
22583 rtx (*fn) (rtx, rtx) = NULL;
22584
22585 switch (mode)
22586 {
22587 case E_QImode:
22588 fn = gen_load_lockedqi;
22589 break;
22590 case E_HImode:
22591 fn = gen_load_lockedhi;
22592 break;
22593 case E_SImode:
22594 if (GET_MODE (mem) == QImode)
22595 fn = gen_load_lockedqi_si;
22596 else if (GET_MODE (mem) == HImode)
22597 fn = gen_load_lockedhi_si;
22598 else
22599 fn = gen_load_lockedsi;
22600 break;
22601 case E_DImode:
22602 fn = gen_load_lockeddi;
22603 break;
22604 case E_TImode:
22605 fn = gen_load_lockedti;
22606 break;
22607 default:
22608 gcc_unreachable ();
22609 }
22610 emit_insn (fn (reg, mem));
22611 }
22612
22613 /* A subroutine of the atomic operation splitters. Emit a store-conditional
22614 instruction in MODE. */
22615
22616 static void
22617 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
22618 {
22619 rtx (*fn) (rtx, rtx, rtx) = NULL;
22620
22621 switch (mode)
22622 {
22623 case E_QImode:
22624 fn = gen_store_conditionalqi;
22625 break;
22626 case E_HImode:
22627 fn = gen_store_conditionalhi;
22628 break;
22629 case E_SImode:
22630 fn = gen_store_conditionalsi;
22631 break;
22632 case E_DImode:
22633 fn = gen_store_conditionaldi;
22634 break;
22635 case E_TImode:
22636 fn = gen_store_conditionalti;
22637 break;
22638 default:
22639 gcc_unreachable ();
22640 }
22641
22642 /* Emit sync before stwcx. to address PPC405 Erratum. */
22643 if (PPC405_ERRATUM77)
22644 emit_insn (gen_hwsync ());
22645
22646 emit_insn (fn (res, mem, val));
22647 }
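
/* Together, emit_load_locked and emit_store_conditional implement the
   usual larx/stcx. retry loop.  For SImode the splitters build code of
   roughly this shape (illustrative):

   retry:
     lwarx  r9,0,r3         # load and reserve
     ...operate on r9...
     stwcx. r10,0,r3        # store iff the reservation still holds
     bne-   retry           # CR0.EQ clear: reservation lost, retry
*/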
22648
22649 /* Expand barriers before and after a load_locked/store_cond sequence. */
22650
22651 static rtx
22652 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
22653 {
22654 rtx addr = XEXP (mem, 0);
22655
22656 if (!legitimate_indirect_address_p (addr, reload_completed)
22657 && !legitimate_indexed_address_p (addr, reload_completed))
22658 {
22659 addr = force_reg (Pmode, addr);
22660 mem = replace_equiv_address_nv (mem, addr);
22661 }
22662
22663 switch (model)
22664 {
22665 case MEMMODEL_RELAXED:
22666 case MEMMODEL_CONSUME:
22667 case MEMMODEL_ACQUIRE:
22668 break;
22669 case MEMMODEL_RELEASE:
22670 case MEMMODEL_ACQ_REL:
22671 emit_insn (gen_lwsync ());
22672 break;
22673 case MEMMODEL_SEQ_CST:
22674 emit_insn (gen_hwsync ());
22675 break;
22676 default:
22677 gcc_unreachable ();
22678 }
22679 return mem;
22680 }
22681
22682 static void
22683 rs6000_post_atomic_barrier (enum memmodel model)
22684 {
22685 switch (model)
22686 {
22687 case MEMMODEL_RELAXED:
22688 case MEMMODEL_CONSUME:
22689 case MEMMODEL_RELEASE:
22690 break;
22691 case MEMMODEL_ACQUIRE:
22692 case MEMMODEL_ACQ_REL:
22693 case MEMMODEL_SEQ_CST:
22694 emit_insn (gen_isync ());
22695 break;
22696 default:
22697 gcc_unreachable ();
22698 }
22699 }
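
/* Summary of the barrier placement implemented by the previous two
   functions (the usual PowerPC mapping of the C11 memory models):

     model      before the larx loop   after the stcx.
     relaxed    nothing                nothing
     acquire    nothing                isync
     release    lwsync                 nothing
     acq_rel    lwsync                 isync
     seq_cst    hwsync (sync)          isync
*/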
22700
22701 /* A subroutine of the various atomic expanders. For sub-word operations,
22702 we must adjust things to operate on SImode. Given the original MEM,
22703 return a new aligned memory. Also build and return the quantities by
22704 which to shift and mask. */
22705
22706 static rtx
22707 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
22708 {
22709 rtx addr, align, shift, mask, mem;
22710 HOST_WIDE_INT shift_mask;
22711 machine_mode mode = GET_MODE (orig_mem);
22712
22713 /* For smaller modes, we have to implement this via SImode. */
22714 shift_mask = (mode == QImode ? 0x18 : 0x10);
22715
22716 addr = XEXP (orig_mem, 0);
22717 addr = force_reg (GET_MODE (addr), addr);
22718
22719 /* Generate a new memory reference for the aligned word containing the
22720 subword. We do not want any of the existing MEM_ATTR data, as we're
22721 now accessing memory outside the original object. */
22722 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
22723 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22724 mem = gen_rtx_MEM (SImode, align);
22725 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
22726 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
22727 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
22728
22729 /* Shift amount for subword relative to aligned word. */
22730 shift = gen_reg_rtx (SImode);
22731 addr = gen_lowpart (SImode, addr);
22732 rtx tmp = gen_reg_rtx (SImode);
22733 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
22734 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
22735 if (BYTES_BIG_ENDIAN)
22736 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
22737 shift, 1, OPTAB_LIB_WIDEN);
22738 *pshift = shift;
22739
22740 /* Mask for insertion. */
22741 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
22742 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
22743 *pmask = mask;
22744
22745 return mem;
22746 }
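
/* Worked example (little-endian, HImode at address A == 0x1002):

     align = A & -4            == 0x1000
     shift = (A << 3) & 0x10   == 0x10  bits
     mask  = 0xffff << shift   == 0xffff0000

   On big-endian the shift is additionally XORed with shift_mask, since
   the subwords sit in the opposite order within the aligned word.  */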
22747
22748 /* A subroutine of the various atomic expanders. For sub-word operands,
22749 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
22750
22751 static rtx
22752 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
22753 {
22754 rtx x;
22755
22756 x = gen_reg_rtx (SImode);
22757 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
22758 gen_rtx_NOT (SImode, mask),
22759 oldval)));
22760
22761 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
22762
22763 return x;
22764 }
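
/* In other words the returned pseudo computes, in C terms,

     x = (oldval & ~mask) | newval;

   where NEWVAL is assumed already shifted into position (its nonzero
   bits lie within MASK), as arranged by the callers below.  */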
22765
22766 /* A subroutine of the various atomic expanders. For sub-word operands,
22767 extract WIDE to NARROW via SHIFT. */
22768
22769 static void
22770 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
22771 {
22772 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
22773 wide, 1, OPTAB_LIB_WIDEN);
22774 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
22775 }
22776
22777 /* Expand an atomic compare and swap operation. */
22778
22779 void
22780 rs6000_expand_atomic_compare_and_swap (rtx operands[])
22781 {
22782 rtx boolval, retval, mem, oldval, newval, cond;
22783 rtx label1, label2, x, mask, shift;
22784 machine_mode mode, orig_mode;
22785 enum memmodel mod_s, mod_f;
22786 bool is_weak;
22787
22788 boolval = operands[0];
22789 retval = operands[1];
22790 mem = operands[2];
22791 oldval = operands[3];
22792 newval = operands[4];
22793 is_weak = (INTVAL (operands[5]) != 0);
22794 mod_s = memmodel_base (INTVAL (operands[6]));
22795 mod_f = memmodel_base (INTVAL (operands[7]));
22796 orig_mode = mode = GET_MODE (mem);
22797
22798 mask = shift = NULL_RTX;
22799 if (mode == QImode || mode == HImode)
22800 {
22801 /* Before power8, we didn't have access to lbarx/lharx, so we generate a
22802 lwarx and use shift/mask operations. With power8, we need to do the
22803 comparison in SImode, but the store is still done in QI/HImode. */
22804 oldval = convert_modes (SImode, mode, oldval, 1);
22805
22806 if (!TARGET_SYNC_HI_QI)
22807 {
22808 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
22809
22810 /* Shift and mask OLDVAL into position within the word. */
22811 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
22812 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22813
22814 /* Shift and mask NEWVAL into position within the word. */
22815 newval = convert_modes (SImode, mode, newval, 1);
22816 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
22817 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22818 }
22819
22820 /* Prepare to adjust the return value. */
22821 retval = gen_reg_rtx (SImode);
22822 mode = SImode;
22823 }
22824 else if (reg_overlap_mentioned_p (retval, oldval))
22825 oldval = copy_to_reg (oldval);
22826
22827 if (mode != TImode && !reg_or_short_operand (oldval, mode))
22828 oldval = copy_to_mode_reg (mode, oldval);
22829
22830 if (reg_overlap_mentioned_p (retval, newval))
22831 newval = copy_to_reg (newval);
22832
22833 mem = rs6000_pre_atomic_barrier (mem, mod_s);
22834
22835 label1 = NULL_RTX;
22836 if (!is_weak)
22837 {
22838 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
22839 emit_label (XEXP (label1, 0));
22840 }
22841 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
22842
22843 emit_load_locked (mode, retval, mem);
22844
22845 x = retval;
22846 if (mask)
22847 x = expand_simple_binop (SImode, AND, retval, mask,
22848 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22849
22850 cond = gen_reg_rtx (CCmode);
22851 /* If we have TImode, synthesize a comparison. */
22852 if (mode != TImode)
22853 x = gen_rtx_COMPARE (CCmode, x, oldval);
22854 else
22855 {
22856 rtx xor1_result = gen_reg_rtx (DImode);
22857 rtx xor2_result = gen_reg_rtx (DImode);
22858 rtx or_result = gen_reg_rtx (DImode);
22859 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
22860 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
22861 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
22862 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
22863
22864 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
22865 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
22866 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
22867 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
22868 }
22869
22870 emit_insn (gen_rtx_SET (cond, x));
22871
22872 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
22873 emit_unlikely_jump (x, label2);
22874
22875 x = newval;
22876 if (mask)
22877 x = rs6000_mask_atomic_subword (retval, newval, mask);
22878
22879 emit_store_conditional (orig_mode, cond, mem, x);
22880
22881 if (!is_weak)
22882 {
22883 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
22884 emit_unlikely_jump (x, label1);
22885 }
22886
22887 if (!is_mm_relaxed (mod_f))
22888 emit_label (XEXP (label2, 0));
22889
22890 rs6000_post_atomic_barrier (mod_s);
22891
22892 if (is_mm_relaxed (mod_f))
22893 emit_label (XEXP (label2, 0));
22894
22895 if (shift)
22896 rs6000_finish_atomic_subword (operands[1], retval, shift);
22897 else if (mode != GET_MODE (operands[1]))
22898 convert_move (operands[1], retval, 1);
22899
22900 /* In all cases, CR0 contains EQ on success, and NE on failure. */
22901 x = gen_rtx_EQ (SImode, cond, const0_rtx);
22902 emit_insn (gen_rtx_SET (boolval, x));
22903 }
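
/* Shape of the generated sequence for a strong SImode compare-and-swap
   with seq_cst ordering (a sketch; labels and registers illustrative):

     hwsync
   1: lwarx   ret,0,mem       # load and reserve
     cmpw    cr0,ret,oldval
     bne-    cr0,2f           # mismatch: fail
     stwcx.  newval,0,mem
     bne-    cr0,1b           # reservation lost: retry
   2: isync
     # boolval = cr0.eq
*/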
22904
22905 /* Expand an atomic exchange operation. */
22906
22907 void
22908 rs6000_expand_atomic_exchange (rtx operands[])
22909 {
22910 rtx retval, mem, val, cond;
22911 machine_mode mode;
22912 enum memmodel model;
22913 rtx label, x, mask, shift;
22914
22915 retval = operands[0];
22916 mem = operands[1];
22917 val = operands[2];
22918 model = memmodel_base (INTVAL (operands[3]));
22919 mode = GET_MODE (mem);
22920
22921 mask = shift = NULL_RTX;
22922 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
22923 {
22924 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
22925
22926 /* Shift and mask VAL into position within the word. */
22927 val = convert_modes (SImode, mode, val, 1);
22928 val = expand_simple_binop (SImode, ASHIFT, val, shift,
22929 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22930
22931 /* Prepare to adjust the return value. */
22932 retval = gen_reg_rtx (SImode);
22933 mode = SImode;
22934 }
22935
22936 mem = rs6000_pre_atomic_barrier (mem, model);
22937
22938 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
22939 emit_label (XEXP (label, 0));
22940
22941 emit_load_locked (mode, retval, mem);
22942
22943 x = val;
22944 if (mask)
22945 x = rs6000_mask_atomic_subword (retval, val, mask);
22946
22947 cond = gen_reg_rtx (CCmode);
22948 emit_store_conditional (mode, cond, mem, x);
22949
22950 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
22951 emit_unlikely_jump (x, label);
22952
22953 rs6000_post_atomic_barrier (model);
22954
22955 if (shift)
22956 rs6000_finish_atomic_subword (operands[0], retval, shift);
22957 }
22958
22959 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
22960 to perform. MEM is the memory on which to operate. VAL is the second
22961 operand of the binary operator. BEFORE and AFTER are optional locations to
22962 return the value of MEM either before of after the operation. MODEL_RTX
22963 is a CONST_INT containing the memory model to use. */
22964
22965 void
22966 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
22967 rtx orig_before, rtx orig_after, rtx model_rtx)
22968 {
22969 enum memmodel model = memmodel_base (INTVAL (model_rtx));
22970 machine_mode mode = GET_MODE (mem);
22971 machine_mode store_mode = mode;
22972 rtx label, x, cond, mask, shift;
22973 rtx before = orig_before, after = orig_after;
22974
22975 mask = shift = NULL_RTX;
22976 /* On power8, we want to do the operation in SImode. On previous systems,
22977 do the operation on the containing word and use shift/mask to get the
22978 proper byte or halfword. */
22979 if (mode == QImode || mode == HImode)
22980 {
22981 if (TARGET_SYNC_HI_QI)
22982 {
22983 val = convert_modes (SImode, mode, val, 1);
22984
22985 /* Prepare to adjust the return value. */
22986 before = gen_reg_rtx (SImode);
22987 if (after)
22988 after = gen_reg_rtx (SImode);
22989 mode = SImode;
22990 }
22991 else
22992 {
22993 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
22994
22995 /* Shift and mask VAL into position within the word. */
22996 val = convert_modes (SImode, mode, val, 1);
22997 val = expand_simple_binop (SImode, ASHIFT, val, shift,
22998 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22999
23000 switch (code)
23001 {
23002 case IOR:
23003 case XOR:
23004 /* We've already zero-extended VAL. That is sufficient to
23005 make certain that it does not affect other bits. */
23006 mask = NULL;
23007 break;
23008
23009 case AND:
23010 /* If we make certain that all of the other bits in VAL are
23011 set, that will be sufficient to not affect other bits. */
23012 x = gen_rtx_NOT (SImode, mask);
23013 x = gen_rtx_IOR (SImode, x, val);
23014 emit_insn (gen_rtx_SET (val, x));
23015 mask = NULL;
23016 break;
23017
23018 case NOT:
23019 case PLUS:
23020 case MINUS:
23021 /* These will all affect bits outside the field and need
23022 adjustment via MASK within the loop. */
23023 break;
23024
23025 default:
23026 gcc_unreachable ();
23027 }
23028
23029 /* Prepare to adjust the return value. */
23030 before = gen_reg_rtx (SImode);
23031 if (after)
23032 after = gen_reg_rtx (SImode);
23033 store_mode = mode = SImode;
23034 }
23035 }
23036
23037 mem = rs6000_pre_atomic_barrier (mem, model);
23038
23039 label = gen_label_rtx ();
23040 emit_label (label);
23041 label = gen_rtx_LABEL_REF (VOIDmode, label);
23042
23043 if (before == NULL_RTX)
23044 before = gen_reg_rtx (mode);
23045
23046 emit_load_locked (mode, before, mem);
23047
23048 if (code == NOT)
23049 {
23050 x = expand_simple_binop (mode, AND, before, val,
23051 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23052 after = expand_simple_unop (mode, NOT, x, after, 1);
23053 }
23054 else
23055 {
23056 after = expand_simple_binop (mode, code, before, val,
23057 after, 1, OPTAB_LIB_WIDEN);
23058 }
23059
23060 x = after;
23061 if (mask)
23062 {
23063 x = expand_simple_binop (SImode, AND, after, mask,
23064 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23065 x = rs6000_mask_atomic_subword (before, x, mask);
23066 }
23067 else if (store_mode != mode)
23068 x = convert_modes (store_mode, mode, x, 1);
23069
23070 cond = gen_reg_rtx (CCmode);
23071 emit_store_conditional (store_mode, cond, mem, x);
23072
23073 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23074 emit_unlikely_jump (x, label);
23075
23076 rs6000_post_atomic_barrier (model);
23077
23078 if (shift)
23079 {
23080 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23081 then do the calculations in a SImode register. */
23082 if (orig_before)
23083 rs6000_finish_atomic_subword (orig_before, before, shift);
23084 if (orig_after)
23085 rs6000_finish_atomic_subword (orig_after, after, shift);
23086 }
23087 else if (store_mode != mode)
23088 {
23089 /* QImode/HImode on machines with lbarx/lharx where we do the native
23090 operation and then do the calculations in a SImode register. */
23091 if (orig_before)
23092 convert_move (orig_before, before, 1);
23093 if (orig_after)
23094 convert_move (orig_after, after, 1);
23095 }
23096 else if (orig_after && after != orig_after)
23097 emit_move_insn (orig_after, after);
23098 }
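
/* E.g. a word-sized __atomic_fetch_add with relaxed ordering comes out
   as roughly (illustrative):

   1: lwarx   before,0,mem
     add     after,before,val
     stwcx.  after,0,mem
     bne-    1b
*/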
23099
23100 /* Emit instructions to move SRC to DST. Called by splitters for
23101 multi-register moves. It will emit at most one instruction for
23102 each register that is accessed; that is, it won't emit li/lis pairs
23103 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23104 register. */
23105
23106 void
23107 rs6000_split_multireg_move (rtx dst, rtx src)
23108 {
23109 /* The register number of the first register being moved. */
23110 int reg;
23111 /* The mode that is to be moved. */
23112 machine_mode mode;
23113 /* The mode that the move is being done in, and its size. */
23114 machine_mode reg_mode;
23115 int reg_mode_size;
23116 /* The number of registers that will be moved. */
23117 int nregs;
23118
23119 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23120 mode = GET_MODE (dst);
23121 nregs = hard_regno_nregs (reg, mode);
23122 if (FP_REGNO_P (reg))
23123 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23124 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23125 else if (ALTIVEC_REGNO_P (reg))
23126 reg_mode = V16QImode;
23127 else
23128 reg_mode = word_mode;
23129 reg_mode_size = GET_MODE_SIZE (reg_mode);
23130
23131 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23132
23133 /* TDmode residing in FP registers is special, since the ISA requires that
23134 the lower-numbered word of a register pair is always the most significant
23135 word, even in little-endian mode. This does not match the usual subreg
23136 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23137 the appropriate constituent registers "by hand" in little-endian mode.
23138
23139 Note we do not need to check for destructive overlap here since TDmode
23140 can only reside in even/odd register pairs. */
23141 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23142 {
23143 rtx p_src, p_dst;
23144 int i;
23145
23146 for (i = 0; i < nregs; i++)
23147 {
23148 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23149 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23150 else
23151 p_src = simplify_gen_subreg (reg_mode, src, mode,
23152 i * reg_mode_size);
23153
23154 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23155 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23156 else
23157 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23158 i * reg_mode_size);
23159
23160 emit_insn (gen_rtx_SET (p_dst, p_src));
23161 }
23162
23163 return;
23164 }
23165
23166 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23167 {
23168 /* Move register range backwards, if we might have destructive
23169 overlap. */
23170 int i;
23171 for (i = nregs - 1; i >= 0; i--)
23172 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23173 i * reg_mode_size),
23174 simplify_gen_subreg (reg_mode, src, mode,
23175 i * reg_mode_size)));
23176 }
23177 else
23178 {
23179 int i;
23180 int j = -1;
23181 bool used_update = false;
23182 rtx restore_basereg = NULL_RTX;
23183
23184 if (MEM_P (src) && INT_REGNO_P (reg))
23185 {
23186 rtx breg;
23187
23188 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23189 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23190 {
23191 rtx delta_rtx;
23192 breg = XEXP (XEXP (src, 0), 0);
23193 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23194 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23195 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23196 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23197 src = replace_equiv_address (src, breg);
23198 }
23199 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23200 {
23201 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23202 {
23203 rtx basereg = XEXP (XEXP (src, 0), 0);
23204 if (TARGET_UPDATE)
23205 {
23206 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23207 emit_insn (gen_rtx_SET (ndst,
23208 gen_rtx_MEM (reg_mode,
23209 XEXP (src, 0))));
23210 used_update = true;
23211 }
23212 else
23213 emit_insn (gen_rtx_SET (basereg,
23214 XEXP (XEXP (src, 0), 1)));
23215 src = replace_equiv_address (src, basereg);
23216 }
23217 else
23218 {
23219 rtx basereg = gen_rtx_REG (Pmode, reg);
23220 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23221 src = replace_equiv_address (src, basereg);
23222 }
23223 }
23224
23225 breg = XEXP (src, 0);
23226 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23227 breg = XEXP (breg, 0);
23228
23229 /* If the base register we are using to address memory is
23230 also a destination reg, then change that register last. */
23231 if (REG_P (breg)
23232 && REGNO (breg) >= REGNO (dst)
23233 && REGNO (breg) < REGNO (dst) + nregs)
23234 j = REGNO (breg) - REGNO (dst);
23235 }
23236 else if (MEM_P (dst) && INT_REGNO_P (reg))
23237 {
23238 rtx breg;
23239
23240 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23241 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23242 {
23243 rtx delta_rtx;
23244 breg = XEXP (XEXP (dst, 0), 0);
23245 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23246 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23247 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23248
23249 /* We have to update the breg before doing the store.
23250 Use store with update, if available. */
23251
23252 if (TARGET_UPDATE)
23253 {
23254 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23255 emit_insn (TARGET_32BIT
23256 ? (TARGET_POWERPC64
23257 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23258 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23259 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23260 used_update = true;
23261 }
23262 else
23263 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23264 dst = replace_equiv_address (dst, breg);
23265 }
23266 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23267 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23268 {
23269 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23270 {
23271 rtx basereg = XEXP (XEXP (dst, 0), 0);
23272 if (TARGET_UPDATE)
23273 {
23274 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23275 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23276 XEXP (dst, 0)),
23277 nsrc));
23278 used_update = true;
23279 }
23280 else
23281 emit_insn (gen_rtx_SET (basereg,
23282 XEXP (XEXP (dst, 0), 1)));
23283 dst = replace_equiv_address (dst, basereg);
23284 }
23285 else
23286 {
23287 rtx basereg = XEXP (XEXP (dst, 0), 0);
23288 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23289 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23290 && REG_P (basereg)
23291 && REG_P (offsetreg)
23292 && REGNO (basereg) != REGNO (offsetreg));
23293 if (REGNO (basereg) == 0)
23294 {
23295 rtx tmp = offsetreg;
23296 offsetreg = basereg;
23297 basereg = tmp;
23298 }
23299 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23300 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23301 dst = replace_equiv_address (dst, basereg);
23302 }
23303 }
23304 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23305 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23306 }
23307
23308 for (i = 0; i < nregs; i++)
23309 {
23310 /* Calculate index to next subword. */
23311 ++j;
23312 if (j == nregs)
23313 j = 0;
23314
23315 /* If the compiler already emitted the move of the first word by a
23316 store with update, there is no need to do anything. */
23317 if (j == 0 && used_update)
23318 continue;
23319
23320 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23321 j * reg_mode_size),
23322 simplify_gen_subreg (reg_mode, src, mode,
23323 j * reg_mode_size)));
23324 }
23325 if (restore_basereg != NULL_RTX)
23326 emit_insn (restore_basereg);
23327 }
23328 }
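
/* For instance (illustrative), a TImode move from the pair {r3,r4} to
   the overlapping pair {r4,r5} on a 64-bit target is split backwards so
   that r4 is read before it is clobbered:

     (set (reg:DI 5) (reg:DI 4))   ; high word first
     (set (reg:DI 4) (reg:DI 3))
*/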
23329
23330 \f
23331 /* This page contains routines that are used to determine what the
23332 function prologue and epilogue code will do and write them out. */
23333
23334 /* Determine whether register REG really needs to be saved. */
23335
23336 static bool
23337 save_reg_p (int reg)
23338 {
23339 /* We need to mark the PIC offset register live for the same conditions
23340 as it is set up, or otherwise it won't be saved before we clobber it. */
23341
23342 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23343 {
23344 /* When calling eh_return, we must return true for all the cases
23345 where conditional_register_usage marks the PIC offset reg
23346 call used. */
23347 if (TARGET_TOC && TARGET_MINIMAL_TOC
23348 && (crtl->calls_eh_return
23349 || df_regs_ever_live_p (reg)
23350 || !constant_pool_empty_p ()))
23351 return true;
23352
23353 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
23354 && flag_pic)
23355 return true;
23356 }
23357
23358 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23359 }
23360
23361 /* Return the first fixed-point register that is required to be
23362 saved. 32 if none. */
23363
23364 int
23365 first_reg_to_save (void)
23366 {
23367 int first_reg;
23368
23369 /* Find lowest numbered live register. */
23370 for (first_reg = 13; first_reg <= 31; first_reg++)
23371 if (save_reg_p (first_reg))
23372 break;
23373
23374 #if TARGET_MACHO
23375 if (flag_pic
23376 && crtl->uses_pic_offset_table
23377 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
23378 return RS6000_PIC_OFFSET_TABLE_REGNUM;
23379 #endif
23380
23381 return first_reg;
23382 }
23383
23384 /* Similar, for FP regs. */
23385
23386 int
23387 first_fp_reg_to_save (void)
23388 {
23389 int first_reg;
23390
23391 /* Find lowest numbered live register. */
23392 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
23393 if (save_reg_p (first_reg))
23394 break;
23395
23396 return first_reg;
23397 }
23398
23399 /* Similar, for AltiVec regs. */
23400
23401 static int
23402 first_altivec_reg_to_save (void)
23403 {
23404 int i;
23405
23406 /* Stack frame remains as is unless we are in AltiVec ABI. */
23407 if (! TARGET_ALTIVEC_ABI)
23408 return LAST_ALTIVEC_REGNO + 1;
23409
23410 /* On Darwin, the unwind routines are compiled without
23411 TARGET_ALTIVEC, and use save_world to save/restore the
23412 altivec registers when necessary. */
23413 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23414 && ! TARGET_ALTIVEC)
23415 return FIRST_ALTIVEC_REGNO + 20;
23416
23417 /* Find lowest numbered live register. */
23418 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
23419 if (save_reg_p (i))
23420 break;
23421
23422 return i;
23423 }
23424
23425 /* Return a 32-bit mask of the AltiVec registers we need to set in
23426 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
23427 the 32-bit word is 0. */
23428
23429 static unsigned int
23430 compute_vrsave_mask (void)
23431 {
23432 unsigned int i, mask = 0;
23433
23434 /* On Darwin, the unwind routines are compiled without
23435 TARGET_ALTIVEC, and use save_world to save/restore the
23436 call-saved altivec registers when necessary. */
23437 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23438 && ! TARGET_ALTIVEC)
23439 mask |= 0xFFF;
23440
23441 /* First, find out if we use _any_ altivec registers. */
23442 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
23443 if (df_regs_ever_live_p (i))
23444 mask |= ALTIVEC_REG_BIT (i);
23445
23446 if (mask == 0)
23447 return mask;
23448
23449 /* Next, remove the argument registers from the set. These must
23450 be in the VRSAVE mask set by the caller, so we don't need to add
23451 them in again. More importantly, the mask we compute here is
23452 used to generate CLOBBERs in the set_vrsave insn, and we do not
23453 wish the argument registers to die. */
23454 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
23455 mask &= ~ALTIVEC_REG_BIT (i);
23456
23457 /* Similarly, remove the return value from the set. */
23458 {
23459 bool yes = false;
23460 diddle_return_value (is_altivec_return_reg, &yes);
23461 if (yes)
23462 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
23463 }
23464
23465 return mask;
23466 }
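
/* Example (a sketch, assuming ALTIVEC_REG_BIT maps Vn to bit 31 - n as
   described in the comment above): a function using only V20 and V21,
   with no vector arguments or return value, yields

     mask = ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 20)
          | ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 21)
          = (1u << 11) | (1u << 10) == 0x00000c00
*/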
23467
23468 /* For a very restricted set of circumstances, we can cut down the
23469 size of prologues/epilogues by calling our own save/restore-the-world
23470 routines. */
23471
23472 static void
23473 compute_save_world_info (rs6000_stack_t *info)
23474 {
23475 info->world_save_p = 1;
23476 info->world_save_p
23477 = (WORLD_SAVE_P (info)
23478 && DEFAULT_ABI == ABI_DARWIN
23479 && !cfun->has_nonlocal_label
23480 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
23481 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
23482 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
23483 && info->cr_save_p);
23484
23485 /* This will not work in conjunction with sibcalls. Make sure there
23486 are none. (This check is expensive, but seldom executed.) */
23487 if (WORLD_SAVE_P (info))
23488 {
23489 rtx_insn *insn;
23490 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
23491 if (CALL_P (insn) && SIBLING_CALL_P (insn))
23492 {
23493 info->world_save_p = 0;
23494 break;
23495 }
23496 }
23497
23498 if (WORLD_SAVE_P (info))
23499 {
23500 /* Even if we're not touching VRsave, make sure there's room on the
23501 stack for it, if it looks like we're calling SAVE_WORLD, which
23502 will attempt to save it. */
23503 info->vrsave_size = 4;
23504
23505 /* If we are going to save the world, we need to save the link register too. */
23506 info->lr_save_p = 1;
23507
23508 /* "Save" the VRsave register too if we're saving the world. */
23509 if (info->vrsave_mask == 0)
23510 info->vrsave_mask = compute_vrsave_mask ();
23511
23512 /* Because the Darwin register save/restore routines only handle
23513 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
23514 check. */
23515 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
23516 && (info->first_altivec_reg_save
23517 >= FIRST_SAVED_ALTIVEC_REGNO));
23518 }
23519
23520 return;
23521 }
23522
23523
23524 static void
23525 is_altivec_return_reg (rtx reg, void *xyes)
23526 {
23527 bool *yes = (bool *) xyes;
23528 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
23529 *yes = true;
23530 }
23531
23532 \f
23533 /* Return whether REG is a global user reg or has been specified by
23534 -ffixed-REG. We should not restore these, and so cannot use
23535 lmw or out-of-line restore functions if there are any. We also
23536 can't save them (well, emit frame notes for them), because frame
23537 unwinding during exception handling will restore saved registers. */
23538
23539 static bool
23540 fixed_reg_p (int reg)
23541 {
23542 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
23543 backend sets it, overriding anything the user might have given. */
23544 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
23545 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23546 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23547 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23548 return false;
23549
23550 return fixed_regs[reg];
23551 }
23552
23553 /* Determine the strategy for saving/restoring registers. */
23554
23555 enum {
23556 SAVE_MULTIPLE = 0x1,
23557 SAVE_INLINE_GPRS = 0x2,
23558 SAVE_INLINE_FPRS = 0x4,
23559 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
23560 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
23561 SAVE_INLINE_VRS = 0x20,
23562 REST_MULTIPLE = 0x100,
23563 REST_INLINE_GPRS = 0x200,
23564 REST_INLINE_FPRS = 0x400,
23565 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
23566 REST_INLINE_VRS = 0x1000
23567 };
23568
23569 static int
23570 rs6000_savres_strategy (rs6000_stack_t *info,
23571 bool using_static_chain_p)
23572 {
23573 int strategy = 0;
23574
23575 /* Select between in-line and out-of-line save and restore of regs.
23576 First, all the obvious cases where we don't use out-of-line. */
23577 if (crtl->calls_eh_return
23578 || cfun->machine->ra_need_lr)
23579 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
23580 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
23581 | SAVE_INLINE_VRS | REST_INLINE_VRS);
23582
23583 if (info->first_gp_reg_save == 32)
23584 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23585
23586 if (info->first_fp_reg_save == 64)
23587 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23588
23589 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
23590 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23591
23592 /* Define cutoff for using out-of-line functions to save registers. */
23593 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
23594 {
23595 if (!optimize_size)
23596 {
23597 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23598 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23599 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23600 }
23601 else
23602 {
23603 /* Prefer out-of-line restore if it will exit. */
23604 if (info->first_fp_reg_save > 61)
23605 strategy |= SAVE_INLINE_FPRS;
23606 if (info->first_gp_reg_save > 29)
23607 {
23608 if (info->first_fp_reg_save == 64)
23609 strategy |= SAVE_INLINE_GPRS;
23610 else
23611 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23612 }
23613 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
23614 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23615 }
23616 }
23617 else if (DEFAULT_ABI == ABI_DARWIN)
23618 {
23619 if (info->first_fp_reg_save > 60)
23620 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23621 if (info->first_gp_reg_save > 29)
23622 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23623 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23624 }
23625 else
23626 {
23627 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
23628 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
23629 || info->first_fp_reg_save > 61)
23630 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23631 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23632 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23633 }
23634
23635 /* Don't bother to try to save things out-of-line if r11 is occupied
23636 by the static chain. It would require too much fiddling and the
23637 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
23638 pointer on Darwin, and AIX uses r1 or r12. */
23639 if (using_static_chain_p
23640 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
23641 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
23642 | SAVE_INLINE_GPRS
23643 | SAVE_INLINE_VRS);
23644
23645 /* Don't ever restore fixed regs. That means we can't use the
23646 out-of-line register restore functions if a fixed reg is in the
23647 range of regs restored. */
23648 if (!(strategy & REST_INLINE_FPRS))
23649 for (int i = info->first_fp_reg_save; i < 64; i++)
23650 if (fixed_regs[i])
23651 {
23652 strategy |= REST_INLINE_FPRS;
23653 break;
23654 }
23655
23656 /* We can only use the out-of-line routines to restore fprs if we've
23657 saved all the registers from first_fp_reg_save in the prologue.
23658 Otherwise, we risk loading garbage. Of course, if we have saved
23659 out-of-line then we know we haven't skipped any fprs. */
23660 if ((strategy & SAVE_INLINE_FPRS)
23661 && !(strategy & REST_INLINE_FPRS))
23662 for (int i = info->first_fp_reg_save; i < 64; i++)
23663 if (!save_reg_p (i))
23664 {
23665 strategy |= REST_INLINE_FPRS;
23666 break;
23667 }
23668
23669 /* Similarly, for altivec regs. */
23670 if (!(strategy & REST_INLINE_VRS))
23671 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
23672 if (fixed_regs[i])
23673 {
23674 strategy |= REST_INLINE_VRS;
23675 break;
23676 }
23677
23678 if ((strategy & SAVE_INLINE_VRS)
23679 && !(strategy & REST_INLINE_VRS))
23680 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
23681 if (!save_reg_p (i))
23682 {
23683 strategy |= REST_INLINE_VRS;
23684 break;
23685 }
23686
23687 /* info->lr_save_p isn't yet set if the only reason lr needs to be
23688 saved is an out-of-line save or restore. Set up the value for
23689 the next test (excluding out-of-line gprs). */
23690 bool lr_save_p = (info->lr_save_p
23691 || !(strategy & SAVE_INLINE_FPRS)
23692 || !(strategy & SAVE_INLINE_VRS)
23693 || !(strategy & REST_INLINE_FPRS)
23694 || !(strategy & REST_INLINE_VRS));
23695
23696 if (TARGET_MULTIPLE
23697 && !TARGET_POWERPC64
23698 && info->first_gp_reg_save < 31
23699 && !(flag_shrink_wrap
23700 && flag_shrink_wrap_separate
23701 && optimize_function_for_speed_p (cfun)))
23702 {
23703 int count = 0;
23704 for (int i = info->first_gp_reg_save; i < 32; i++)
23705 if (save_reg_p (i))
23706 count++;
23707
23708 if (count <= 1)
23709 /* Don't use store multiple if only one reg needs to be
23710 saved. This can occur for example when the ABI_V4 pic reg
23711 (r30) needs to be saved to make calls, but r31 is not
23712 used. */
23713 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23714 else
23715 {
23716 /* Prefer store multiple for saves over out-of-line
23717 routines, since the store-multiple instruction will
23718 always be smaller. */
23719 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
23720
23721 /* The situation is more complicated with load multiple.
23722 We'd prefer to use the out-of-line routines for restores,
23723 since the "exit" out-of-line routines can handle the
23724 restore of LR and the frame teardown. However, it doesn't
23725 make sense to use the out-of-line routine if that is the
23726 only reason we'd need to save LR, and we can't use the
23727 "exit" out-of-line gpr restore if we have saved some
23728 fprs. In those cases it is advantageous to use load
23729 multiple when available. */
23730 if (info->first_fp_reg_save != 64 || !lr_save_p)
23731 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
23732 }
23733 }
23734
23735 /* Using the "exit" out-of-line routine does not improve code size
23736 if using it would require lr to be saved and if only saving one
23737 or two gprs. */
23738 else if (!lr_save_p && info->first_gp_reg_save > 29)
23739 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23740
23741 /* Don't ever restore fixed regs. */
23742 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
23743 for (int i = info->first_gp_reg_save; i < 32; i++)
23744 if (fixed_reg_p (i))
23745 {
23746 strategy |= REST_INLINE_GPRS;
23747 strategy &= ~REST_MULTIPLE;
23748 break;
23749 }
23750
23751 /* We can only use load multiple or the out-of-line routines to
23752 restore gprs if we've saved all the registers from
23753 first_gp_reg_save. Otherwise, we risk loading garbage.
23754 Of course, if we have saved out-of-line or used stmw then we know
23755 we haven't skipped any gprs. */
23756 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
23757 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
23758 for (int i = info->first_gp_reg_save; i < 32; i++)
23759 if (!save_reg_p (i))
23760 {
23761 strategy |= REST_INLINE_GPRS;
23762 strategy &= ~REST_MULTIPLE;
23763 break;
23764 }
23765
23766 if (TARGET_ELF && TARGET_64BIT)
23767 {
23768 if (!(strategy & SAVE_INLINE_FPRS))
23769 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
23770 else if (!(strategy & SAVE_INLINE_GPRS)
23771 && info->first_fp_reg_save == 64)
23772 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
23773 }
23774 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
23775 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
23776
23777 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
23778 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
23779
23780 return strategy;
23781 }
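
/* Worked example (illustrative): a 32-bit SVR4 function compiled with
   optimization for speed that saves only r28..r31 takes the
   !optimize_size branch, so all three classes start as inline; with
   TARGET_MULTIPLE and more than one GPR to save it then also gets
   SAVE_MULTIPLE (stmw), and REST_MULTIPLE (lmw) when no FPRs are saved
   and LR need not be.  */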
23782
23783 /* Calculate the stack information for the current function. This is
23784 complicated by having two separate calling sequences, the AIX calling
23785 sequence and the V.4 calling sequence.
23786
23787 AIX (and Darwin/Mac OS X) stack frames look like:
23788 32-bit 64-bit
23789 SP----> +---------------------------------------+
23790 | back chain to caller | 0 0
23791 +---------------------------------------+
23792 | saved CR | 4 8 (8-11)
23793 +---------------------------------------+
23794 | saved LR | 8 16
23795 +---------------------------------------+
23796 | reserved for compilers | 12 24
23797 +---------------------------------------+
23798 | reserved for binders | 16 32
23799 +---------------------------------------+
23800 | saved TOC pointer | 20 40
23801 +---------------------------------------+
23802 | Parameter save area (+padding*) (P) | 24 48
23803 +---------------------------------------+
23804 | Alloca space (A) | 24+P etc.
23805 +---------------------------------------+
23806 | Local variable space (L) | 24+P+A
23807 +---------------------------------------+
23808 | Float/int conversion temporary (X) | 24+P+A+L
23809 +---------------------------------------+
23810 | Save area for AltiVec registers (W) | 24+P+A+L+X
23811 +---------------------------------------+
23812 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
23813 +---------------------------------------+
23814 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
23815 +---------------------------------------+
23816 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
23817 +---------------------------------------+
23818 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
23819 +---------------------------------------+
23820 old SP->| back chain to caller's caller |
23821 +---------------------------------------+
23822
23823 * If the alloca area is present, the parameter save area is
23824 padded so that the former starts 16-byte aligned.
23825
23826 The required alignment for AIX configurations is two words (i.e., 8
23827 or 16 bytes).
23828
23829 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
23830
23831 SP----> +---------------------------------------+
23832 | Back chain to caller | 0
23833 +---------------------------------------+
23834 | Save area for CR | 8
23835 +---------------------------------------+
23836 | Saved LR | 16
23837 +---------------------------------------+
23838 | Saved TOC pointer | 24
23839 +---------------------------------------+
23840 | Parameter save area (+padding*) (P) | 32
23841 +---------------------------------------+
23842 | Alloca space (A) | 32+P
23843 +---------------------------------------+
23844 | Local variable space (L) | 32+P+A
23845 +---------------------------------------+
23846 | Save area for AltiVec registers (W) | 32+P+A+L
23847 +---------------------------------------+
23848 | AltiVec alignment padding (Y) | 32+P+A+L+W
23849 +---------------------------------------+
23850 | Save area for GP registers (G) | 32+P+A+L+W+Y
23851 +---------------------------------------+
23852 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
23853 +---------------------------------------+
23854 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
23855 +---------------------------------------+
23856
23857 * If the alloca area is present, the parameter save area is
23858 padded so that the former starts 16-byte aligned.
23859
23860 V.4 stack frames look like:
23861
23862 SP----> +---------------------------------------+
23863 | back chain to caller | 0
23864 +---------------------------------------+
23865 | caller's saved LR | 4
23866 +---------------------------------------+
23867 | Parameter save area (+padding*) (P) | 8
23868 +---------------------------------------+
23869 | Alloca space (A) | 8+P
23870 +---------------------------------------+
23871 | Varargs save area (V) | 8+P+A
23872 +---------------------------------------+
23873 | Local variable space (L) | 8+P+A+V
23874 +---------------------------------------+
23875 | Float/int conversion temporary (X) | 8+P+A+V+L
23876 +---------------------------------------+
23877 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
23878 +---------------------------------------+
23879 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
23880 +---------------------------------------+
23881 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
23882 +---------------------------------------+
23883 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
23884 +---------------------------------------+
23885 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
23886 +---------------------------------------+
23887 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
23888 +---------------------------------------+
23889 old SP->| back chain to caller's caller |
23890 +---------------------------------------+
23891
23892 * If the alloca area is present and the required alignment is
23893 16 bytes, the parameter save area is padded so that the
23894 alloca area starts 16-byte aligned.
23895
23896 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
23897 given. (But note below and in sysv4.h that we require only 8 and
23898 may round up the size of our stack frame anyway. The historical
23899 reason is early versions of powerpc-linux which didn't properly
23900 align the stack at program startup. A happy side-effect is that
23901 -mno-eabi libraries can be used with -meabi programs.)
23902
23903 The EABI configuration defaults to the V.4 layout. However,
23904 the stack alignment requirements may differ. If -mno-eabi is not
23905 given, the required stack alignment is 8 bytes; if -mno-eabi is
23906 given, the required alignment is 16 bytes. (But see V.4 comment
23907 above.) */
23908
23909 #ifndef ABI_STACK_BOUNDARY
23910 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
23911 #endif
23912
23913 static rs6000_stack_t *
23914 rs6000_stack_info (void)
23915 {
23916 /* We should never be called for thunks; we are not set up for that. */
23917 gcc_assert (!cfun->is_thunk);
23918
23919 rs6000_stack_t *info = &stack_info;
23920 int reg_size = TARGET_32BIT ? 4 : 8;
23921 int ehrd_size;
23922 int ehcr_size;
23923 int save_align;
23924 int first_gp;
23925 HOST_WIDE_INT non_fixed_size;
23926 bool using_static_chain_p;
23927
23928 if (reload_completed && info->reload_completed)
23929 return info;
23930
23931 memset (info, 0, sizeof (*info));
23932 info->reload_completed = reload_completed;
23933
23934 /* Select which calling sequence. */
23935 info->abi = DEFAULT_ABI;
23936
23937 /* Calculate which registers need to be saved & save area size. */
23938 info->first_gp_reg_save = first_reg_to_save ();
23939 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
23940 even if it currently looks like we won't. Reload may need it to
23941 get at a constant; if so, it will have already created a constant
23942 pool entry for it. */
23943 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
23944 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
23945 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
23946 && crtl->uses_const_pool
23947 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
23948 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
23949 else
23950 first_gp = info->first_gp_reg_save;
23951
23952 info->gp_size = reg_size * (32 - first_gp);
23953
23954 info->first_fp_reg_save = first_fp_reg_to_save ();
23955 info->fp_size = 8 * (64 - info->first_fp_reg_save);
23956
23957 info->first_altivec_reg_save = first_altivec_reg_to_save ();
23958 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
23959 - info->first_altivec_reg_save);
23960
23961 /* Does this function call anything? */
23962 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
23963
23964 /* Determine if we need to save the condition code registers. */
23965 if (save_reg_p (CR2_REGNO)
23966 || save_reg_p (CR3_REGNO)
23967 || save_reg_p (CR4_REGNO))
23968 {
23969 info->cr_save_p = 1;
23970 if (DEFAULT_ABI == ABI_V4)
23971 info->cr_size = reg_size;
23972 }
23973
23974 /* If the current function calls __builtin_eh_return, then we need
23975 to allocate stack space for registers that will hold data for
23976 the exception handler. */
23977 if (crtl->calls_eh_return)
23978 {
23979 unsigned int i;
23980 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
23981 continue;
23982
23983 ehrd_size = i * UNITS_PER_WORD;
23984 }
23985 else
23986 ehrd_size = 0;
23987
23988 /* In the ELFv2 ABI, we also need to allocate space for separate
23989 CR field save areas if the function calls __builtin_eh_return. */
23990 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
23991 {
23992 /* This hard-codes that we have three call-saved CR fields. */
23993 ehcr_size = 3 * reg_size;
23994 /* We do *not* use the regular CR save mechanism. */
23995 info->cr_save_p = 0;
23996 }
23997 else
23998 ehcr_size = 0;
23999
24000 /* Determine various sizes. */
24001 info->reg_size = reg_size;
24002 info->fixed_size = RS6000_SAVE_AREA;
24003 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24004 if (cfun->calls_alloca)
24005 info->parm_size =
24006 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24007 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24008 else
24009 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24010 TARGET_ALTIVEC ? 16 : 8);
24011 if (FRAME_GROWS_DOWNWARD)
24012 info->vars_size
24013 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24014 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24015 - (info->fixed_size + info->vars_size + info->parm_size);
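
  /* For example (illustrative numbers): with an ABI_STACK_BOUNDARY of
     128 bits (16 bytes), fixed_size 48, vars_size 20 and parm_size 64,
     the sum 132 rounds up to 144, so 12 bytes of padding are added to
     vars_size here.  */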
24016
24017 if (TARGET_ALTIVEC_ABI)
24018 info->vrsave_mask = compute_vrsave_mask ();
24019
24020 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24021 info->vrsave_size = 4;
24022
24023 compute_save_world_info (info);
24024
24025 /* Calculate the offsets. */
24026 switch (DEFAULT_ABI)
24027 {
24028 case ABI_NONE:
24029 default:
24030 gcc_unreachable ();
24031
24032 case ABI_AIX:
24033 case ABI_ELFv2:
24034 case ABI_DARWIN:
24035 info->fp_save_offset = -info->fp_size;
24036 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24037
24038 if (TARGET_ALTIVEC_ABI)
24039 {
24040 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24041
24042 /* Align stack so vector save area is on a quadword boundary.
24043 The padding goes above the vectors. */
24044 if (info->altivec_size != 0)
24045 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24046
24047 info->altivec_save_offset = info->vrsave_save_offset
24048 - info->altivec_padding_size
24049 - info->altivec_size;
24050 gcc_assert (info->altivec_size == 0
24051 || info->altivec_save_offset % 16 == 0);
24052
24053 /* Adjust for AltiVec case. */
24054 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24055 }
24056 else
24057 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24058
24059 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24060 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24061 info->lr_save_offset = 2*reg_size;
24062 break;
24063
24064 case ABI_V4:
24065 info->fp_save_offset = -info->fp_size;
24066 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24067 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24068
24069 if (TARGET_ALTIVEC_ABI)
24070 {
24071 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24072
24073 /* Align stack so vector save area is on a quadword boundary. */
24074 if (info->altivec_size != 0)
24075 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24076
24077 info->altivec_save_offset = info->vrsave_save_offset
24078 - info->altivec_padding_size
24079 - info->altivec_size;
24080
24081 /* Adjust for AltiVec case. */
24082 info->ehrd_offset = info->altivec_save_offset;
24083 }
24084 else
24085 info->ehrd_offset = info->cr_save_offset;
24086
24087 info->ehrd_offset -= ehrd_size;
24088 info->lr_save_offset = reg_size;
24089 }
24090
24091 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24092 info->save_size = RS6000_ALIGN (info->fp_size
24093 + info->gp_size
24094 + info->altivec_size
24095 + info->altivec_padding_size
24096 + ehrd_size
24097 + ehcr_size
24098 + info->cr_size
24099 + info->vrsave_size,
24100 save_align);
24101
24102 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24103
24104 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24105 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24106
24107 /* Determine if we need to save the link register. */
24108 if (info->calls_p
24109 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24110 && crtl->profile
24111 && !TARGET_PROFILE_KERNEL)
24112 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24113 #ifdef TARGET_RELOCATABLE
24114 || (DEFAULT_ABI == ABI_V4
24115 && (TARGET_RELOCATABLE || flag_pic > 1)
24116 && !constant_pool_empty_p ())
24117 #endif
24118 || rs6000_ra_ever_killed ())
24119 info->lr_save_p = 1;
24120
24121 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24122 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24123 && call_used_regs[STATIC_CHAIN_REGNUM]);
24124 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24125
24126 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24127 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24128 || !(info->savres_strategy & SAVE_INLINE_VRS)
24129 || !(info->savres_strategy & REST_INLINE_GPRS)
24130 || !(info->savres_strategy & REST_INLINE_FPRS)
24131 || !(info->savres_strategy & REST_INLINE_VRS))
24132 info->lr_save_p = 1;
24133
24134 if (info->lr_save_p)
24135 df_set_regs_ever_live (LR_REGNO, true);
24136
24137 /* Determine if we need to allocate any stack frame:
24138
24139 For AIX we need to push the stack if a frame pointer is needed
24140 (because the stack might be dynamically adjusted), if we are
24141 debugging, if we make calls, or if the sum of fp_save, gp_save,
24142 and local variables is more than the space needed to save all
24143 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24144 + 18*8 = 288 (GPR13 reserved).
24145
24146 For V.4 we don't have the stack cushion that AIX uses, but assume
24147 that the debugger can handle stackless frames. */
24148
24149 if (info->calls_p)
24150 info->push_p = 1;
24151
24152 else if (DEFAULT_ABI == ABI_V4)
24153 info->push_p = non_fixed_size != 0;
24154
24155 else if (frame_pointer_needed)
24156 info->push_p = 1;
24157
24158 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24159 info->push_p = 1;
24160
24161 else
24162 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24163
24164 return info;
24165 }
24166
24167 static void
24168 debug_stack_info (rs6000_stack_t *info)
24169 {
24170 const char *abi_string;
24171
24172 if (! info)
24173 info = rs6000_stack_info ();
24174
24175 fprintf (stderr, "\nStack information for function %s:\n",
24176 ((current_function_decl && DECL_NAME (current_function_decl))
24177 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24178 : "<unknown>"));
24179
24180 switch (info->abi)
24181 {
24182 default: abi_string = "Unknown"; break;
24183 case ABI_NONE: abi_string = "NONE"; break;
24184 case ABI_AIX: abi_string = "AIX"; break;
24185 case ABI_ELFv2: abi_string = "ELFv2"; break;
24186 case ABI_DARWIN: abi_string = "Darwin"; break;
24187 case ABI_V4: abi_string = "V.4"; break;
24188 }
24189
24190 fprintf (stderr, "\tABI = %5s\n", abi_string);
24191
24192 if (TARGET_ALTIVEC_ABI)
24193 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24194
24195 if (info->first_gp_reg_save != 32)
24196 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24197
24198 if (info->first_fp_reg_save != 64)
24199 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24200
24201 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24202 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24203 info->first_altivec_reg_save);
24204
24205 if (info->lr_save_p)
24206 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24207
24208 if (info->cr_save_p)
24209 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24210
24211 if (info->vrsave_mask)
24212 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24213
24214 if (info->push_p)
24215 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24216
24217 if (info->calls_p)
24218 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24219
24220 if (info->gp_size)
24221 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24222
24223 if (info->fp_size)
24224 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24225
24226 if (info->altivec_size)
24227 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24228 info->altivec_save_offset);
24229
24230 if (info->vrsave_size)
24231 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24232 info->vrsave_save_offset);
24233
24234 if (info->lr_save_p)
24235 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24236
24237 if (info->cr_save_p)
24238 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24239
24240 if (info->varargs_save_offset)
24241 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24242
24243 if (info->total_size)
24244 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24245 info->total_size);
24246
24247 if (info->vars_size)
24248 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24249 info->vars_size);
24250
24251 if (info->parm_size)
24252 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24253
24254 if (info->fixed_size)
24255 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24256
24257 if (info->gp_size)
24258 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24259
24260 if (info->fp_size)
24261 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24262
24263 if (info->altivec_size)
24264 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24265
24266 if (info->vrsave_size)
24267 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24268
24269 if (info->altivec_padding_size)
24270 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24271 info->altivec_padding_size);
24272
24273 if (info->cr_size)
24274 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24275
24276 if (info->save_size)
24277 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24278
24279 if (info->reg_size != 4)
24280 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24281
24282 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24283
24284 fprintf (stderr, "\n");
24285 }
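
/* With the format strings above, a dump for a small 64-bit ELFv2
   function might begin like this (values are made up for illustration):

	Stack information for function foo:
		ABI = ELFv2
		first_gp_reg_save =    31
		lr_save_p =     1
		push_p =     1
		total_size = 112
   */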
24286
24287 rtx
24288 rs6000_return_addr (int count, rtx frame)
24289 {
24290 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24291 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24292 if (count != 0
24293 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24294 {
24295 cfun->machine->ra_needs_full_frame = 1;
24296
24297 if (count == 0)
24298 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24299 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24300 frame = stack_pointer_rtx;
24301 rtx prev_frame_addr = memory_address (Pmode, frame);
24302 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24303 rtx lr_save_off = plus_constant (Pmode,
24304 prev_frame, RETURN_ADDRESS_OFFSET);
24305 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24306 return gen_rtx_MEM (Pmode, lr_save_addr);
24307 }
24308
24309 cfun->machine->ra_need_lr = 1;
24310 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24311 }
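
/* E.g. __builtin_return_address (0) with PIC on ABI_V4 goes through
   the full-frame path above: load the caller's back chain from 0(r1),
   then form a MEM at that address plus RETURN_ADDRESS_OFFSET to pick
   up this function's return address saved in the caller's frame.
   (A restatement of the RTL flow above, purely for illustration.)  */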
24312
24313 /* Say whether a function is a candidate for sibcall handling or not. */
24314
24315 static bool
24316 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24317 {
24318 tree fntype;
24319
24320 /* The sibcall epilogue may clobber the static chain register.
24321 ??? We could work harder and avoid that, but it's probably
24322 not worth the hassle in practice. */
24323 if (CALL_EXPR_STATIC_CHAIN (exp))
24324 return false;
24325
24326 if (decl)
24327 fntype = TREE_TYPE (decl);
24328 else
24329 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24330
24331 /* We can't do it if the called function has more vector parameters
24332 than the current function; there's nowhere to put the VRsave code. */
24333 if (TARGET_ALTIVEC_ABI
24334 && TARGET_ALTIVEC_VRSAVE
24335 && !(decl && decl == current_function_decl))
24336 {
24337 function_args_iterator args_iter;
24338 tree type;
24339 int nvreg = 0;
24340
24341 /* Functions with vector parameters are required to have a
24342 prototype, so the argument type info must be available
24343 here. */
24344 FOREACH_FUNCTION_ARGS (fntype, type, args_iter)
24345 if (TREE_CODE (type) == VECTOR_TYPE
24346 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24347 nvreg++;
24348
24349 FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
24350 if (TREE_CODE (type) == VECTOR_TYPE
24351 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24352 nvreg--;
24353
24354 if (nvreg > 0)
24355 return false;
24356 }
24357
24358 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24359 functions, because the callee may have a different TOC pointer from
24360 the caller and there's no way to ensure we restore the TOC when
24361 we return. With the secure-plt SYSV ABI we can't make non-local
24362 calls when compiling -fpic/-fPIC, because the PLT call stubs use r30.
24363 if (DEFAULT_ABI == ABI_DARWIN
24364 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24365 && decl
24366 && !DECL_EXTERNAL (decl)
24367 && !DECL_WEAK (decl)
24368 && (*targetm.binds_local_p) (decl))
24369 || (DEFAULT_ABI == ABI_V4
24370 && (!TARGET_SECURE_PLT
24371 || !flag_pic
24372 || (decl
24373 && (*targetm.binds_local_p) (decl)))))
24374 {
24375 tree attr_list = TYPE_ATTRIBUTES (fntype);
24376
24377 if (!lookup_attribute ("longcall", attr_list)
24378 || lookup_attribute ("shortcall", attr_list))
24379 return true;
24380 }
24381
24382 return false;
24383 }
24384
24385 static int
24386 rs6000_ra_ever_killed (void)
24387 {
24388 rtx_insn *top;
24389 rtx reg;
24390 rtx_insn *insn;
24391
24392 if (cfun->is_thunk)
24393 return 0;
24394
24395 if (cfun->machine->lr_save_state)
24396 return cfun->machine->lr_save_state - 1;
24397
24398 /* regs_ever_live has LR marked as used if any sibcalls are present,
24399 but this should not force saving and restoring in the
24400 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
24401 clobbers LR, so that is inappropriate. */
24402
24403 /* Also, the prologue can generate a store into LR that
24404 doesn't really count, like this:
24405
24406 move LR->R0
24407 bcl to set PIC register
24408 move LR->R31
24409 move R0->LR
24410
24411 When we're called from the epilogue, we need to avoid counting
24412 this as a store. */
24413
24414 push_topmost_sequence ();
24415 top = get_insns ();
24416 pop_topmost_sequence ();
24417 reg = gen_rtx_REG (Pmode, LR_REGNO);
24418
24419 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
24420 {
24421 if (INSN_P (insn))
24422 {
24423 if (CALL_P (insn))
24424 {
24425 if (!SIBLING_CALL_P (insn))
24426 return 1;
24427 }
24428 else if (find_regno_note (insn, REG_INC, LR_REGNO))
24429 return 1;
24430 else if (set_of (reg, insn) != NULL_RTX
24431 && !prologue_epilogue_contains (insn))
24432 return 1;
24433 }
24434 }
24435 return 0;
24436 }
24437 \f
24438 /* Emit instructions needed to load the TOC register.
24439 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
24440 and there is a constant pool, or for SVR4 -fpic. */
24441
24442 void
24443 rs6000_emit_load_toc_table (int fromprolog)
24444 {
24445 rtx dest;
24446 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
24447
24448 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
24449 {
24450 char buf[30];
24451 rtx lab, tmp1, tmp2, got;
24452
24453 lab = gen_label_rtx ();
24454 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
24455 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24456 if (flag_pic == 2)
24457 {
24458 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24459 need_toc_init = 1;
24460 }
24461 else
24462 got = rs6000_got_sym ();
24463 tmp1 = tmp2 = dest;
24464 if (!fromprolog)
24465 {
24466 tmp1 = gen_reg_rtx (Pmode);
24467 tmp2 = gen_reg_rtx (Pmode);
24468 }
24469 emit_insn (gen_load_toc_v4_PIC_1 (lab));
24470 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
24471 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
24472 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
24473 }
24474 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
24475 {
24476 emit_insn (gen_load_toc_v4_pic_si ());
24477 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24478 }
24479 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
24480 {
24481 char buf[30];
24482 rtx temp0 = (fromprolog
24483 ? gen_rtx_REG (Pmode, 0)
24484 : gen_reg_rtx (Pmode));
24485
24486 if (fromprolog)
24487 {
24488 rtx symF, symL;
24489
24490 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
24491 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24492
24493 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
24494 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24495
24496 emit_insn (gen_load_toc_v4_PIC_1 (symF));
24497 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24498 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
24499 }
24500 else
24501 {
24502 rtx tocsym, lab;
24503
24504 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24505 need_toc_init = 1;
24506 lab = gen_label_rtx ();
24507 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
24508 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24509 if (TARGET_LINK_STACK)
24510 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
24511 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
24512 }
24513 emit_insn (gen_addsi3 (dest, temp0, dest));
24514 }
24515 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
24516 {
24517 /* This is for AIX code running in non-PIC ELF32. */
24518 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24519
24520 need_toc_init = 1;
24521 emit_insn (gen_elf_high (dest, realsym));
24522 emit_insn (gen_elf_low (dest, dest, realsym));
24523 }
24524 else
24525 {
24526 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24527
24528 if (TARGET_32BIT)
24529 emit_insn (gen_load_toc_aix_si (dest));
24530 else
24531 emit_insn (gen_load_toc_aix_di (dest));
24532 }
24533 }
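
/* For the TARGET_SECURE_PLT case above with flag_pic == 1, the emitted
   insns assemble to roughly the classic GOT setup (label invented):

	bcl 20,31,.L2
   .L2:
	mflr 30
	addis 30,30,_GLOBAL_OFFSET_TABLE_-.L2@ha
	addi 30,30,_GLOBAL_OFFSET_TABLE_-.L2@l
   */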
24534
24535 /* Emit instructions to restore the link register after determining where
24536 its value has been stored. */
24537
24538 void
24539 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
24540 {
24541 rs6000_stack_t *info = rs6000_stack_info ();
24542 rtx operands[2];
24543
24544 operands[0] = source;
24545 operands[1] = scratch;
24546
24547 if (info->lr_save_p)
24548 {
24549 rtx frame_rtx = stack_pointer_rtx;
24550 HOST_WIDE_INT sp_offset = 0;
24551 rtx tmp;
24552
24553 if (frame_pointer_needed
24554 || cfun->calls_alloca
24555 || info->total_size > 32767)
24556 {
24557 tmp = gen_frame_mem (Pmode, frame_rtx);
24558 emit_move_insn (operands[1], tmp);
24559 frame_rtx = operands[1];
24560 }
24561 else if (info->push_p)
24562 sp_offset = info->total_size;
24563
24564 tmp = plus_constant (Pmode, frame_rtx,
24565 info->lr_save_offset + sp_offset);
24566 tmp = gen_frame_mem (Pmode, tmp);
24567 emit_move_insn (tmp, operands[0]);
24568 }
24569 else
24570 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
24571
24572 /* Freeze lr_save_p. We've just emitted rtl that depends on the
24573 state of lr_save_p so any change from here on would be a bug. In
24574 particular, stop rs6000_ra_ever_killed from considering the SET
24575 of lr we may have added just above. */
24576 cfun->machine->lr_save_state = info->lr_save_p + 1;
24577 }
24578
24579 static GTY(()) alias_set_type set = -1;
24580
24581 alias_set_type
24582 get_TOC_alias_set (void)
24583 {
24584 if (set == -1)
24585 set = new_alias_set ();
24586 return set;
24587 }
24588
24589 /* This returns nonzero if the current function uses the TOC. This is
24590 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
24591 is generated by the ABI_V4 load_toc_* patterns.
24592 Return 2 instead of 1 if the load_toc_* pattern is in the function
24593 partition that doesn't start the function. */
24594 #if TARGET_ELF
24595 static int
24596 uses_TOC (void)
24597 {
24598 rtx_insn *insn;
24599 int ret = 1;
24600
24601 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
24602 {
24603 if (INSN_P (insn))
24604 {
24605 rtx pat = PATTERN (insn);
24606 int i;
24607
24608 if (GET_CODE (pat) == PARALLEL)
24609 for (i = 0; i < XVECLEN (pat, 0); i++)
24610 {
24611 rtx sub = XVECEXP (pat, 0, i);
24612 if (GET_CODE (sub) == USE)
24613 {
24614 sub = XEXP (sub, 0);
24615 if (GET_CODE (sub) == UNSPEC
24616 && XINT (sub, 1) == UNSPEC_TOC)
24617 return ret;
24618 }
24619 }
24620 }
24621 else if (crtl->has_bb_partition
24622 && NOTE_P (insn)
24623 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
24624 ret = 2;
24625 }
24626 return 0;
24627 }
24628 #endif
24629
24630 rtx
24631 create_TOC_reference (rtx symbol, rtx largetoc_reg)
24632 {
24633 rtx tocrel, tocreg, hi;
24634
24635 if (TARGET_DEBUG_ADDR)
24636 {
24637 if (GET_CODE (symbol) == SYMBOL_REF)
24638 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
24639 XSTR (symbol, 0));
24640 else
24641 {
24642 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
24643 GET_RTX_NAME (GET_CODE (symbol)));
24644 debug_rtx (symbol);
24645 }
24646 }
24647
24648 if (!can_create_pseudo_p ())
24649 df_set_regs_ever_live (TOC_REGISTER, true);
24650
24651 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
24652 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
24653 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
24654 return tocrel;
24655
24656 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
24657 if (largetoc_reg != NULL)
24658 {
24659 emit_move_insn (largetoc_reg, hi);
24660 hi = largetoc_reg;
24661 }
24662 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
24663 }
24664
24665 /* Issue assembly directives that create a reference to the given DWARF
24666 FRAME_TABLE_LABEL from the current function section. */
24667 void
24668 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
24669 {
24670 fprintf (asm_out_file, "\t.ref %s\n",
24671 (* targetm.strip_name_encoding) (frame_table_label));
24672 }
24673 \f
24674 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
24675 and the change to the stack pointer. */
24676
24677 static void
24678 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
24679 {
24680 rtvec p;
24681 int i;
24682 rtx regs[3];
24683
24684 i = 0;
24685 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
24686 if (hard_frame_needed)
24687 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
24688 if (!(REGNO (fp) == STACK_POINTER_REGNUM
24689 || (hard_frame_needed
24690 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
24691 regs[i++] = fp;
24692
24693 p = rtvec_alloc (i);
24694 while (--i >= 0)
24695 {
24696 rtx mem = gen_frame_mem (BLKmode, regs[i]);
24697 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
24698 }
24699
24700 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
24701 }
24702
24703 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
24704 and set the appropriate attributes for the generated insn. Return the
24705 first insn which adjusts the stack pointer or the last insn before
24706 the stack adjustment loop.
24707
24708 SIZE_INT is used to create the CFI note for the allocation.
24709
24710 SIZE_RTX is an rtx containing the size of the adjustment. Note that
24711 since the stack grows toward lower addresses, its runtime value is -SIZE_INT.
24712
24713 ORIG_SP contains the backchain value that must be stored at *sp. */
24714
24715 static rtx_insn *
24716 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
24717 {
24718 rtx_insn *insn;
24719
24720 rtx size_rtx = GEN_INT (-size_int);
24721 if (size_int > 32767)
24722 {
24723 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
24724 /* Need a note here so that try_split doesn't get confused. */
24725 if (get_last_insn () == NULL_RTX)
24726 emit_note (NOTE_INSN_DELETED);
24727 insn = emit_move_insn (tmp_reg, size_rtx);
24728 try_split (PATTERN (insn), insn, 0);
24729 size_rtx = tmp_reg;
24730 }
24731
24732 if (Pmode == SImode)
24733 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
24734 stack_pointer_rtx,
24735 size_rtx,
24736 orig_sp));
24737 else
24738 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
24739 stack_pointer_rtx,
24740 size_rtx,
24741 orig_sp));
24742 rtx par = PATTERN (insn);
24743 gcc_assert (GET_CODE (par) == PARALLEL);
24744 rtx set = XVECEXP (par, 0, 0);
24745 gcc_assert (GET_CODE (set) == SET);
24746 rtx mem = SET_DEST (set);
24747 gcc_assert (MEM_P (mem));
24748 MEM_NOTRAP_P (mem) = 1;
24749 set_mem_alias_set (mem, get_frame_alias_set ());
24750
24751 RTX_FRAME_RELATED_P (insn) = 1;
24752 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
24753 gen_rtx_SET (stack_pointer_rtx,
24754 gen_rtx_PLUS (Pmode,
24755 stack_pointer_rtx,
24756 GEN_INT (-size_int))));
24757
24758 /* Emit a blockage to ensure the allocation/probing insns are
24759 not optimized, combined, removed, etc. Add REG_STACK_CHECK
24760 note for similar reasons. */
24761 if (flag_stack_clash_protection)
24762 {
24763 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
24764 emit_insn (gen_blockage ());
24765 }
24766
24767 return insn;
24768 }
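
/* E.g. (illustrative): a 40000 byte allocation does not fit the signed
   16-bit displacement of the store-with-update form, so the code above
   first splits a move of -40000 into r0 and then allocates with the
   indexed form, roughly "stdux r1,r1,r0" on 64-bit targets.  */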
24769
24770 static HOST_WIDE_INT
24771 get_stack_clash_protection_probe_interval (void)
24772 {
24773 return (HOST_WIDE_INT_1U
24774 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
24775 }
24776
24777 static HOST_WIDE_INT
24778 get_stack_clash_protection_guard_size (void)
24779 {
24780 return (HOST_WIDE_INT_1U
24781 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
24782 }
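
/* With the usual default of 12 for both --param values, each of the
   two sizes above is 1 << 12 == 4096 bytes, i.e. one 4kB page.  */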
24783
24784 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
24785 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
24786
24787 COPY_REG, if non-null, should contain a copy of the original
24788 stack pointer at exit from this function.
24789
24790 This is subtly different than the Ada probing in that it tries hard to
24791 prevent attacks that jump the stack guard. Thus it is never allowed to
24792 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
24793 space without a suitable probe. */
24794 static rtx_insn *
24795 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
24796 rtx copy_reg)
24797 {
24798 rtx orig_sp = copy_reg;
24799
24800 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
24801
24802 /* Round the size down to a multiple of PROBE_INTERVAL. */
24803 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
24804
24805 /* If a copy was explicitly requested,
24806 or the rounded size is not the same as the original size,
24807 or the rounded size is greater than a page,
24808 then we will need a copy of the original stack pointer. */
24809 if (rounded_size != orig_size
24810 || rounded_size > probe_interval
24811 || copy_reg)
24812 {
24813 /* If the caller did not request a copy of the incoming stack
24814 pointer, then we use r0 to hold the copy. */
24815 if (!copy_reg)
24816 orig_sp = gen_rtx_REG (Pmode, 0);
24817 emit_move_insn (orig_sp, stack_pointer_rtx);
24818 }
24819
24820 /* There are three cases here.
24821
24822 One is a single probe which is the most common and most efficiently
24823 implemented as it does not have to have a copy of the original
24824 stack pointer if there are no residuals.
24825
24826 Second is unrolled allocation/probes, which we use if there are just
24827 a few of them. It needs to save the original stack pointer into a
24828 temporary for use as a source register in the allocation/probe.
24829
24830 Last is a loop. This is the most uncommon case and least efficient. */
24831 rtx_insn *retval = NULL;
24832 if (rounded_size == probe_interval)
24833 {
24834 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
24835
24836 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
24837 }
24838 else if (rounded_size <= 8 * probe_interval)
24839 {
24840 /* The ABI requires using the store with update insns to allocate
24841 space and store the backchain into the stack
24842
24843 So we save the current stack pointer into a temporary, then
24844 emit the store-with-update insns to store the saved stack pointer
24845 into the right location in each new page. */
24846 for (int i = 0; i < rounded_size; i += probe_interval)
24847 {
24848 rtx_insn *insn
24849 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
24850
24851 /* Save the first stack adjustment in RETVAL. */
24852 if (i == 0)
24853 retval = insn;
24854 }
24855
24856 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
24857 }
24858 else
24859 {
24860 /* Compute the ending address. */
24861 rtx end_addr
24862 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
24863 rtx rs = GEN_INT (-rounded_size);
24864 rtx_insn *insn;
24865 if (add_operand (rs, Pmode))
24866 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
24867 else
24868 {
24869 emit_move_insn (end_addr, GEN_INT (-rounded_size));
24870 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
24871 stack_pointer_rtx));
24872 /* Describe the effect of INSN to the CFI engine. */
24873 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
24874 gen_rtx_SET (end_addr,
24875 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
24876 rs)));
24877 }
24878 RTX_FRAME_RELATED_P (insn) = 1;
24879
24880 /* Emit the loop. */
24881 if (TARGET_64BIT)
24882 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
24883 stack_pointer_rtx, orig_sp,
24884 end_addr));
24885 else
24886 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
24887 stack_pointer_rtx, orig_sp,
24888 end_addr));
24889 RTX_FRAME_RELATED_P (retval) = 1;
24890 /* Describe the effect of INSN to the CFI engine. */
24891 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
24892 gen_rtx_SET (stack_pointer_rtx, end_addr));
24893
24894 /* Emit a blockage to ensure the allocation/probing insns are
24895 not optimized, combined, removed, etc. Other cases handle this
24896 within their call to rs6000_emit_allocate_stack_1. */
24897 emit_insn (gen_blockage ());
24898
24899 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
24900 }
24901
24902 if (orig_size != rounded_size)
24903 {
24904 /* Allocate (and implicitly probe) any residual space. */
24905 HOST_WIDE_INT residual = orig_size - rounded_size;
24906
24907 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
24908
24909 /* If the residual was the only allocation, then we can return the
24910 allocating insn. */
24911 if (!retval)
24912 retval = insn;
24913 }
24914
24915 return retval;
24916 }
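
/* A worked example of the above (illustrative numbers): with a 4096
   byte probe interval and orig_size == 9000, rounded_size is 8192, so
   the unrolled case emits two store-with-update allocations of 4096
   bytes each, then a final 808 byte residual allocation whose
   backchain store acts as the probe for the last partial page.  */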
24917
24918 /* Emit the correct code for allocating stack space, as insns.
24919 If COPY_REG, make sure a copy of the old frame is left there.
24920 The generated code may use hard register 0 as a temporary. */
24921
24922 static rtx_insn *
24923 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
24924 {
24925 rtx_insn *insn;
24926 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
24927 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
24928 rtx todec = gen_int_mode (-size, Pmode);
24929
24930 if (INTVAL (todec) != -size)
24931 {
24932 warning (0, "stack frame too large");
24933 emit_insn (gen_trap ());
24934 return 0;
24935 }
24936
24937 if (crtl->limit_stack)
24938 {
24939 if (REG_P (stack_limit_rtx)
24940 && REGNO (stack_limit_rtx) > 1
24941 && REGNO (stack_limit_rtx) <= 31)
24942 {
24943 rtx_insn *insn
24944 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
24945 gcc_assert (insn);
24946 emit_insn (insn);
24947 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
24948 }
24949 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
24950 && TARGET_32BIT
24951 && DEFAULT_ABI == ABI_V4
24952 && !flag_pic)
24953 {
24954 rtx toload = gen_rtx_CONST (VOIDmode,
24955 gen_rtx_PLUS (Pmode,
24956 stack_limit_rtx,
24957 GEN_INT (size)));
24958
24959 emit_insn (gen_elf_high (tmp_reg, toload));
24960 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
24961 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
24962 const0_rtx));
24963 }
24964 else
24965 warning (0, "stack limit expression is not supported");
24966 }
24967
24968 if (flag_stack_clash_protection)
24969 {
24970 if (size < get_stack_clash_protection_guard_size ())
24971 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
24972 else
24973 {
24974 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
24975 copy_reg);
24976
24977 /* If we asked for a copy with an offset, then we still need to add
24978 in the offset. */
24979 if (copy_reg && copy_off)
24980 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
24981 return insn;
24982 }
24983 }
24984
24985 if (copy_reg)
24986 {
24987 if (copy_off != 0)
24988 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
24989 else
24990 emit_move_insn (copy_reg, stack_reg);
24991 }
24992
24993 /* Emit the allocation itself. rs6000_emit_allocate_stack_1 generates
24994 a PARALLEL with the MEM set as its first operation, and sets the
24995 alias set/attributes on that MEM there, since it was not created
24996 with gen_frame_mem. */
24997 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
24998 return insn;
24999 }
25000
25001 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25002
25003 #if PROBE_INTERVAL > 32768
25004 #error Cannot use indexed addressing mode for stack probing
25005 #endif
25006
25007 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25008 inclusive. These are offsets from the current stack pointer. */
25009
25010 static void
25011 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25012 {
25013 /* See if we have a constant small number of probes to generate. If so,
25014 that's the easy case. */
25015 if (first + size <= 32768)
25016 {
25017 HOST_WIDE_INT i;
25018
25019 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25020 it exceeds SIZE. If only one probe is needed, this will not
25021 generate any code. Then probe at FIRST + SIZE. */
25022 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25023 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25024 -(first + i)));
25025
25026 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25027 -(first + size)));
25028 }
25029
25030 /* Otherwise, do the same as above, but in a loop. Note that we must be
25031 extra careful with variables wrapping around because we might be at
25032 the very top (or the very bottom) of the address space and we have
25033 to be able to handle this case properly; in particular, we use an
25034 equality test for the loop condition. */
25035 else
25036 {
25037 HOST_WIDE_INT rounded_size;
25038 rtx r12 = gen_rtx_REG (Pmode, 12);
25039 rtx r0 = gen_rtx_REG (Pmode, 0);
25040
25041 /* Sanity check for the addressing mode we're going to use. */
25042 gcc_assert (first <= 32768);
25043
25044 /* Step 1: round SIZE to the previous multiple of the interval. */
25045
25046 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25047
25048
25049 /* Step 2: compute initial and final value of the loop counter. */
25050
25051 /* TEST_ADDR = SP + FIRST. */
25052 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25053 -first)));
25054
25055 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25056 if (rounded_size > 32768)
25057 {
25058 emit_move_insn (r0, GEN_INT (-rounded_size));
25059 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25060 }
25061 else
25062 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25063 -rounded_size)));
25064
25065
25066 /* Step 3: the loop
25067
25068 do
25069 {
25070 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25071 probe at TEST_ADDR
25072 }
25073 while (TEST_ADDR != LAST_ADDR)
25074
25075 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25076 until it is equal to ROUNDED_SIZE. */
25077
25078 if (TARGET_64BIT)
25079 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25080 else
25081 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25082
25083
25084 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25085 that SIZE is equal to ROUNDED_SIZE. */
25086
25087 if (size != rounded_size)
25088 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25089 }
25090 }
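
/* For example (illustrative, assuming the default 4kB probe interval):
   first == 16384 and size == 12000 takes the small-constant path
   above, probing at offsets -(16384+4096) and -(16384+8192) and then
   at -(16384+12000) == -28384, all within the signed 16-bit
   displacement limit checked by the #error above.  */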
25091
25092 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25093 addresses, not offsets. */
25094
25095 static const char *
25096 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25097 {
25098 static int labelno = 0;
25099 char loop_lab[32];
25100 rtx xops[2];
25101
25102 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25103
25104 /* Loop. */
25105 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25106
25107 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25108 xops[0] = reg1;
25109 xops[1] = GEN_INT (-PROBE_INTERVAL);
25110 output_asm_insn ("addi %0,%0,%1", xops);
25111
25112 /* Probe at TEST_ADDR. */
25113 xops[1] = gen_rtx_REG (Pmode, 0);
25114 output_asm_insn ("stw %1,0(%0)", xops);
25115
25116 /* Test if TEST_ADDR == LAST_ADDR. */
25117 xops[1] = reg2;
25118 if (TARGET_64BIT)
25119 output_asm_insn ("cmpd 0,%0,%1", xops);
25120 else
25121 output_asm_insn ("cmpw 0,%0,%1", xops);
25122
25123 /* Branch. */
25124 fputs ("\tbne 0,", asm_out_file);
25125 assemble_name_raw (asm_out_file, loop_lab);
25126 fputc ('\n', asm_out_file);
25127
25128 return "";
25129 }
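
/* With r12 as the probe address and r0 as the limit, as set up by
   rs6000_emit_probe_stack_range above, the 32-bit loop comes out
   roughly as (assuming the default 4kB probe interval):

   .LPSRL0:
	addi 12,12,-4096
	stw 0,0(12)
	cmpw 0,12,0
	bne 0,.LPSRL0
   */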
25130
25131 /* This function is called when rs6000_frame_related is processing
25132 SETs within a PARALLEL, and returns whether the REGNO save ought to
25133 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25134 for out-of-line register save functions, store multiple, and the
25135 Darwin world_save. They may contain registers that don't really
25136 need saving. */
25137
25138 static bool
25139 interesting_frame_related_regno (unsigned int regno)
25140 {
25141 /* Apparent saves of r0 are actually saving LR. It doesn't make
25142 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25143 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25144 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25145 as frame related. */
25146 if (regno == 0)
25147 return true;
25148 /* If we see CR2 then we are here on a Darwin world save. Saves of
25149 CR2 signify the whole CR is being saved. This is a long-standing
25150 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25151 that CR needs to be saved. */
25152 if (regno == CR2_REGNO)
25153 return true;
25154 /* Omit frame info for any user-defined global regs. If frame info
25155 is supplied for them, frame unwinding will restore a user reg.
25156 Also omit frame info for any reg we don't need to save, as that
25157 bloats frame info and can cause problems with shrink wrapping.
25158 Since global regs won't be seen as needing to be saved, both of
25159 these conditions are covered by save_reg_p. */
25160 return save_reg_p (regno);
25161 }
25162
25163 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25164 addresses, not offsets.
25165
25166 REG2 contains the backchain that must be stored into *sp at each allocation.
25167
25168 This is subtly different than the Ada probing above in that it tries hard
25169 to prevent attacks that jump the stack guard. Thus, it is never allowed
25170 to allocate more than PROBE_INTERVAL bytes of stack space without a
25171 suitable probe. */
25172
25173 static const char *
25174 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25175 {
25176 static int labelno = 0;
25177 char loop_lab[32];
25178 rtx xops[3];
25179
25180 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25181
25182 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25183
25184 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25185
25186 /* This allocates and probes. */
25187 xops[0] = reg1;
25188 xops[1] = reg2;
25189 xops[2] = GEN_INT (-probe_interval);
25190 if (TARGET_64BIT)
25191 output_asm_insn ("stdu %1,%2(%0)", xops);
25192 else
25193 output_asm_insn ("stwu %1,%2(%0)", xops);
25194
25195 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25196 xops[0] = reg1;
25197 xops[1] = reg3;
25198 if (TARGET_64BIT)
25199 output_asm_insn ("cmpd 0,%0,%1", xops);
25200 else
25201 output_asm_insn ("cmpw 0,%0,%1", xops);
25202
25203 fputs ("\tbne 0,", asm_out_file);
25204 assemble_name_raw (asm_out_file, loop_lab);
25205 fputc ('\n', asm_out_file);
25206
25207 return "";
25208 }
25209
25210 /* Wrapper around the output_probe_stack_range routines. */
25211 const char *
25212 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25213 {
25214 if (flag_stack_clash_protection)
25215 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25216 else
25217 return output_probe_stack_range_1 (reg1, reg3);
25218 }
25219
25220 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25221 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25222 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25223 deduce these equivalences by itself so it wasn't necessary to hold
25224 its hand so much. Don't be tempted to always supply d2_f_d_e with
25225 the actual cfa register, i.e. r31 when we are using a hard frame
25226 pointer. That fails when saving regs off r1, and sched moves the
25227 r31 setup past the reg saves. */
25228
25229 static rtx_insn *
25230 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25231 rtx reg2, rtx repl2)
25232 {
25233 rtx repl;
25234
25235 if (REGNO (reg) == STACK_POINTER_REGNUM)
25236 {
25237 gcc_checking_assert (val == 0);
25238 repl = NULL_RTX;
25239 }
25240 else
25241 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25242 GEN_INT (val));
25243
25244 rtx pat = PATTERN (insn);
25245 if (!repl && !reg2)
25246 {
25247 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25248 if (GET_CODE (pat) == PARALLEL)
25249 for (int i = 0; i < XVECLEN (pat, 0); i++)
25250 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25251 {
25252 rtx set = XVECEXP (pat, 0, i);
25253
25254 if (!REG_P (SET_SRC (set))
25255 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25256 RTX_FRAME_RELATED_P (set) = 1;
25257 }
25258 RTX_FRAME_RELATED_P (insn) = 1;
25259 return insn;
25260 }
25261
25262 /* We expect that 'pat' is either a SET or a PARALLEL containing
25263 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25264 are important so they all have to be marked RTX_FRAME_RELATED_P.
25265 Call simplify_replace_rtx on the SETs rather than the whole insn
25266 so as to leave the other stuff alone (for example USE of r12). */
25267
25268 set_used_flags (pat);
25269 if (GET_CODE (pat) == SET)
25270 {
25271 if (repl)
25272 pat = simplify_replace_rtx (pat, reg, repl);
25273 if (reg2)
25274 pat = simplify_replace_rtx (pat, reg2, repl2);
25275 }
25276 else if (GET_CODE (pat) == PARALLEL)
25277 {
25278 pat = shallow_copy_rtx (pat);
25279 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25280
25281 for (int i = 0; i < XVECLEN (pat, 0); i++)
25282 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25283 {
25284 rtx set = XVECEXP (pat, 0, i);
25285
25286 if (repl)
25287 set = simplify_replace_rtx (set, reg, repl);
25288 if (reg2)
25289 set = simplify_replace_rtx (set, reg2, repl2);
25290 XVECEXP (pat, 0, i) = set;
25291
25292 if (!REG_P (SET_SRC (set))
25293 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25294 RTX_FRAME_RELATED_P (set) = 1;
25295 }
25296 }
25297 else
25298 gcc_unreachable ();
25299
25300 RTX_FRAME_RELATED_P (insn) = 1;
25301 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25302
25303 return insn;
25304 }
25305
25306 /* Returns an insn that has a vrsave set operation with the
25307 appropriate CLOBBERs. */
25308
25309 static rtx
25310 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25311 {
25312 int nclobs, i;
25313 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25314 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25315
25316 clobs[0]
25317 = gen_rtx_SET (vrsave,
25318 gen_rtx_UNSPEC_VOLATILE (SImode,
25319 gen_rtvec (2, reg, vrsave),
25320 UNSPECV_SET_VRSAVE));
25321
25322 nclobs = 1;
25323
25324 /* We need to clobber the registers in the mask so the scheduler
25325 does not move sets to VRSAVE before sets of AltiVec registers.
25326
25327 However, if the function receives nonlocal gotos, reload will set
25328 all call saved registers live. We will end up with:
25329
25330 (set (reg 999) (mem))
25331 (parallel [ (set (reg vrsave) (unspec blah))
25332 (clobber (reg 999))])
25333
25334 The clobber will cause the store into reg 999 to be dead, and
25335 flow will attempt to delete an epilogue insn. In this case, we
25336 need an unspec use/set of the register. */
25337
25338 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25339 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25340 {
25341 if (!epiloguep || call_used_regs [i])
25342 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25343 gen_rtx_REG (V4SImode, i));
25344 else
25345 {
25346 rtx reg = gen_rtx_REG (V4SImode, i);
25347
25348 clobs[nclobs++]
25349 = gen_rtx_SET (reg,
25350 gen_rtx_UNSPEC (V4SImode,
25351 gen_rtvec (1, reg), 27));
25352 }
25353 }
25354
25355 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25356
25357 for (i = 0; i < nclobs; ++i)
25358 XVECEXP (insn, 0, i) = clobs[i];
25359
25360 return insn;
25361 }
25362
25363 static rtx
25364 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25365 {
25366 rtx addr, mem;
25367
25368 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25369 mem = gen_frame_mem (GET_MODE (reg), addr);
25370 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25371 }
25372
25373 static rtx
25374 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25375 {
25376 return gen_frame_set (reg, frame_reg, offset, false);
25377 }
25378
25379 static rtx
25380 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25381 {
25382 return gen_frame_set (reg, frame_reg, offset, true);
25383 }
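
/* E.g. gen_frame_store (gen_rtx_REG (DImode, 31), sp_reg, -8), where
   sp_reg is (reg 1), builds
   (set (mem:DI (plus:DI (reg 1) (const_int -8))) (reg:DI 31)),
   a frame store of r31 just below the stack pointer (offset chosen
   purely for illustration).  */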
25384
25385 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25386 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25387
25388 static rtx_insn *
25389 emit_frame_save (rtx frame_reg, machine_mode mode,
25390 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25391 {
25392 rtx reg;
25393
25394 /* Some cases that need register indexed addressing. */
25395 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25396 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25397
25398 reg = gen_rtx_REG (mode, regno);
25399 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25400 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25401 NULL_RTX, NULL_RTX);
25402 }
25403
25404 /* Emit an offset memory reference suitable for a frame store, while
25405 converting to a valid addressing mode. */
25406
25407 static rtx
25408 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25409 {
25410 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25411 }
25412
25413 #ifndef TARGET_FIX_AND_CONTINUE
25414 #define TARGET_FIX_AND_CONTINUE 0
25415 #endif
25416
25417 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
25418 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25419 #define LAST_SAVRES_REGISTER 31
25420 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25421
25422 enum {
25423 SAVRES_LR = 0x1,
25424 SAVRES_SAVE = 0x2,
25425 SAVRES_REG = 0x0c,
25426 SAVRES_GPR = 0,
25427 SAVRES_FPR = 4,
25428 SAVRES_VR = 8
25429 };
25430
25431 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25432
25433 /* Temporary holding space for an out-of-line register save/restore
25434 routine name. */
25435 static char savres_routine_name[30];
25436
25437 /* Return the name for an out-of-line register save/restore routine.
25438 SEL encodes the register class, save vs. restore, and the LR variant. */
25439
25440 static char *
25441 rs6000_savres_routine_name (int regno, int sel)
25442 {
25443 const char *prefix = "";
25444 const char *suffix = "";
25445
25446 /* Different targets are supposed to define
25447 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25448 routine name could be defined with:
25449
25450 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25451
25452 This is a nice idea in theory, but in reality things are
25453 complicated in several ways:
25454
25455 - ELF targets have save/restore routines for GPRs.
25456
25457 - PPC64 ELF targets have routines for save/restore of GPRs that
25458 differ in what they do with the link register, so having a set
25459 prefix doesn't work. (We only use one of the save routines at
25460 the moment, though.)
25461
25462 - PPC32 ELF targets have "exit" versions of the restore routines
25463 that restore the link register and can save some extra space.
25464 These require an extra suffix. (There are also "tail" versions
25465 of the restore routines and "GOT" versions of the save routines,
25466 but we don't generate those at present. Same problems apply,
25467 though.)
25468
25469 We deal with all this by synthesizing our own prefix/suffix and
25470 using that for the simple sprintf call shown above. */
25471 if (DEFAULT_ABI == ABI_V4)
25472 {
25473 if (TARGET_64BIT)
25474 goto aix_names;
25475
25476 if ((sel & SAVRES_REG) == SAVRES_GPR)
25477 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25478 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25479 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25480 else if ((sel & SAVRES_REG) == SAVRES_VR)
25481 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25482 else
25483 abort ();
25484
25485 if ((sel & SAVRES_LR))
25486 suffix = "_x";
25487 }
25488 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25489 {
25490 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25491 /* No out-of-line save/restore routines for GPRs on AIX. */
25492 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25493 #endif
25494
25495 aix_names:
25496 if ((sel & SAVRES_REG) == SAVRES_GPR)
25497 prefix = ((sel & SAVRES_SAVE)
25498 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25499 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25500 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25501 {
25502 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25503 if ((sel & SAVRES_LR))
25504 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25505 else
25506 #endif
25507 {
25508 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25509 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25510 }
25511 }
25512 else if ((sel & SAVRES_REG) == SAVRES_VR)
25513 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25514 else
25515 abort ();
25516 }
25517
25518 if (DEFAULT_ABI == ABI_DARWIN)
25519 {
25520 /* The Darwin approach is (slightly) different, in order to be
25521 compatible with code generated by the system toolchain. There is a
25522 single symbol for the start of the save sequence, and the code here
25523 embeds an offset into that code on the basis of the first register
25524 to be saved. */
25525 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25526 if ((sel & SAVRES_REG) == SAVRES_GPR)
25527 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25528 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25529 (regno - 13) * 4, prefix, regno);
25530 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25531 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25532 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25533 else if ((sel & SAVRES_REG) == SAVRES_VR)
25534 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25535 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25536 else
25537 abort ();
25538 }
25539 else
25540 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25541
25542 return savres_routine_name;
25543 }
25544
25545 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore
25546 routine; SEL selects the register class and save vs. restore. */
25547
25548 static rtx
25549 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25550 {
25551 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25552 ? info->first_gp_reg_save
25553 : (sel & SAVRES_REG) == SAVRES_FPR
25554 ? info->first_fp_reg_save - 32
25555 : (sel & SAVRES_REG) == SAVRES_VR
25556 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25557 : -1);
25558 rtx sym;
25559 int select = sel;
25560
25561 /* Don't generate bogus routine names. */
25562 gcc_assert (FIRST_SAVRES_REGISTER <= regno
25563 && regno <= LAST_SAVRES_REGISTER
25564 && select >= 0 && select < 12);
25565
25566 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
25567
25568 if (sym == NULL)
25569 {
25570 char *name;
25571
25572 name = rs6000_savres_routine_name (regno, sel);
25573
25574 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
25575 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
25576 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
25577 }
25578
25579 return sym;
25580 }
25581
25582 /* Emit a sequence of insns, including a stack tie if needed, for
25583 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
25584 reset the stack pointer, but move the base of the frame into
25585 reg UPDT_REGNO for use by out-of-line register restore routines. */
25586
25587 static rtx
25588 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
25589 unsigned updt_regno)
25590 {
25591 /* If there is nothing to do, don't do anything. */
25592 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
25593 return NULL_RTX;
25594
25595 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
25596
25597 /* This blockage is needed so that sched doesn't decide to move
25598 the sp change before the register restores. */
25599 if (DEFAULT_ABI == ABI_V4)
25600 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
25601 GEN_INT (frame_off)));
25602
25603 /* If we are restoring registers out-of-line, we will be using the
25604 "exit" variants of the restore routines, which will reset the
25605 stack for us. But we do need to point updt_reg into the
25606 right place for those routines. */
25607 if (frame_off != 0)
25608 return emit_insn (gen_add3_insn (updt_reg_rtx,
25609 frame_reg_rtx, GEN_INT (frame_off)));
25610 else
25611 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
25614 }
25615
25616 /* Return the register number used as a pointer by out-of-line
25617 save/restore functions. */
25618
25619 static inline unsigned
25620 ptr_regno_for_savres (int sel)
25621 {
25622 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25623 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
25624 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
25625 }
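/* For instance: an AIX/ELFv2 GPR save that does not handle LR gets
   r12; any AIX/ELFv2 FPR routine, or one that handles LR, gets r1;
   Darwin FPR routines get r1; everything else gets r11.  This merely
   restates the cases above.  */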
25626
25627 /* Construct a parallel rtx describing the effect of a call to an
25628 out-of-line register save/restore routine, and emit the insn
25629 or jump_insn as appropriate. */
25630
25631 static rtx_insn *
25632 rs6000_emit_savres_rtx (rs6000_stack_t *info,
25633 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
25634 machine_mode reg_mode, int sel)
25635 {
25636 int i;
25637 int offset, start_reg, end_reg, n_regs, use_reg;
25638 int reg_size = GET_MODE_SIZE (reg_mode);
25639 rtx sym;
25640 rtvec p;
25641 rtx par;
25642 rtx_insn *insn;
25643
25644 offset = 0;
25645 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
25646 ? info->first_gp_reg_save
25647 : (sel & SAVRES_REG) == SAVRES_FPR
25648 ? info->first_fp_reg_save
25649 : (sel & SAVRES_REG) == SAVRES_VR
25650 ? info->first_altivec_reg_save
25651 : -1);
25652 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
25653 ? 32
25654 : (sel & SAVRES_REG) == SAVRES_FPR
25655 ? 64
25656 : (sel & SAVRES_REG) == SAVRES_VR
25657 ? LAST_ALTIVEC_REGNO + 1
25658 : -1);
25659 n_regs = end_reg - start_reg;
25660 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
25661 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
25662 + n_regs);
25663
25664 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25665 RTVEC_ELT (p, offset++) = ret_rtx;
25666
25667 RTVEC_ELT (p, offset++)
25668 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
25669
25670 sym = rs6000_savres_routine_sym (info, sel);
25671 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
25672
25673 use_reg = ptr_regno_for_savres (sel);
25674 if ((sel & SAVRES_REG) == SAVRES_VR)
25675 {
25676 /* Vector regs are saved/restored using [reg+reg] addressing. */
25677 RTVEC_ELT (p, offset++)
25678 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
25679 RTVEC_ELT (p, offset++)
25680 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
25681 }
25682 else
25683 RTVEC_ELT (p, offset++)
25684 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
25685
25686 for (i = 0; i < end_reg - start_reg; i++)
25687 RTVEC_ELT (p, i + offset)
25688 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
25689 frame_reg_rtx, save_area_offset + reg_size * i,
25690 (sel & SAVRES_SAVE) != 0);
25691
25692 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25693 RTVEC_ELT (p, i + offset)
25694 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
25695
25696 par = gen_rtx_PARALLEL (VOIDmode, p);
25697
25698 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25699 {
25700 insn = emit_jump_insn (par);
25701 JUMP_LABEL (insn) = ret_rtx;
25702 }
25703 else
25704 insn = emit_insn (par);
25705 return insn;
25706 }
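/* A rough sketch (not exact RTL) of the PARALLEL built above, for an
   out-of-line 64-bit GPR restore from r29 that also restores LR:

     (parallel [(return)
                (clobber (reg:DI LR_REGNO))
                (use (symbol_ref "_restgpr0_29"))
                (use (reg:DI 1))
                (set (reg:DI 29) (mem:DI ...))
                ...
                (set (reg:DI 31) (mem:DI ...))])  */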
25707
25708 /* Emit prologue code to store CR fields that need to be saved into REG. This
25709 function should only be called when moving the non-volatile CRs to REG;
25710 it is not a general-purpose routine to move the entire set of CRs to REG.
25711 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
25712 volatile CRs. */
25713
25714 static void
25715 rs6000_emit_prologue_move_from_cr (rtx reg)
25716 {
25717 /* Only the ELFv2 ABI allows storing only selected fields. */
25718 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
25719 {
25720 int i, cr_reg[8], count = 0;
25721
25722 /* Collect CR fields that must be saved. */
25723 for (i = 0; i < 8; i++)
25724 if (save_reg_p (CR0_REGNO + i))
25725 cr_reg[count++] = i;
25726
25727 /* If it's just a single one, use mfcrf. */
25728 if (count == 1)
25729 {
25730 rtvec p = rtvec_alloc (1);
25731 rtvec r = rtvec_alloc (2);
25732 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
25733 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
25734 RTVEC_ELT (p, 0)
25735 = gen_rtx_SET (reg,
25736 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
25737
25738 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
25739 return;
25740 }
25741
25742 /* ??? It might be better to handle the count == 2 and count == 3 cases
25743 here as well, using logical operations to combine the values. */
25744 }
25745
25746 emit_insn (gen_prologue_movesi_from_cr (reg));
25747 }
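/* For instance, if CR2 is the only field to be saved, cr_reg[0] is 2
   and the field mask is 1 << (7 - 2) == 0x20, so the code above emits
   a single-field move (mfcr with a field mask, which assembles as
   mfocrf) rather than a full CR move.  */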
25748
25749 /* Return whether the split-stack arg pointer (r12) is used. */
25750
25751 static bool
25752 split_stack_arg_pointer_used_p (void)
25753 {
25754 /* If the pseudo holding the arg pointer is no longer a pseudo,
25755 then the arg pointer is used. */
25756 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
25757 && (!REG_P (cfun->machine->split_stack_arg_pointer)
25758 || (REGNO (cfun->machine->split_stack_arg_pointer)
25759 < FIRST_PSEUDO_REGISTER)))
25760 return true;
25761
25762 /* Unfortunately we also need to do some code scanning, since
25763 r12 may have been substituted for the pseudo. */
25764 rtx_insn *insn;
25765 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
25766 FOR_BB_INSNS (bb, insn)
25767 if (NONDEBUG_INSN_P (insn))
25768 {
25769 /* A call destroys r12. */
25770 if (CALL_P (insn))
25771 return false;
25772
25773 df_ref use;
25774 FOR_EACH_INSN_USE (use, insn)
25775 {
25776 rtx x = DF_REF_REG (use);
25777 if (REG_P (x) && REGNO (x) == 12)
25778 return true;
25779 }
25780 df_ref def;
25781 FOR_EACH_INSN_DEF (def, insn)
25782 {
25783 rtx x = DF_REF_REG (def);
25784 if (REG_P (x) && REGNO (x) == 12)
25785 return false;
25786 }
25787 }
25788 return bitmap_bit_p (DF_LR_OUT (bb), 12);
25789 }
25790
25791 /* Return whether we need to emit an ELFv2 global entry point prologue. */
25792
25793 static bool
25794 rs6000_global_entry_point_needed_p (void)
25795 {
25796 /* Only needed for the ELFv2 ABI. */
25797 if (DEFAULT_ABI != ABI_ELFv2)
25798 return false;
25799
25800 /* With -msingle-pic-base, we assume the whole program shares the same
25801 TOC, so no global entry point prologues are needed anywhere. */
25802 if (TARGET_SINGLE_PIC_BASE)
25803 return false;
25804
25805 /* Ensure we have a global entry point for thunks. ??? We could
25806 avoid that if the target routine doesn't need a global entry point,
25807 but we do not know whether this is the case at this point. */
25808 if (cfun->is_thunk)
25809 return true;
25810
25811 /* For regular functions, rs6000_emit_prologue sets this flag if the
25812 routine ever uses the TOC pointer. */
25813 return cfun->machine->r2_setup_needed;
25814 }
25815
25816 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
25817 static sbitmap
25818 rs6000_get_separate_components (void)
25819 {
25820 rs6000_stack_t *info = rs6000_stack_info ();
25821
25822 if (WORLD_SAVE_P (info))
25823 return NULL;
25824
25825 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
25826 && !(info->savres_strategy & REST_MULTIPLE));
25827
25828 /* Component 0 is the save/restore of LR (done via GPR0).
25829 Component 2 is the save of the TOC (GPR2).
25830 Components 13..31 are the save/restore of GPR13..GPR31.
25831 Components 46..63 are the save/restore of FPR14..FPR31. */
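/* For example, FPR14 is hard register 46 (FPRs occupy hard regs
   32..63), so GPR and FPR component numbers simply equal the hard
   register numbers.  */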
25832
25833 cfun->machine->n_components = 64;
25834
25835 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
25836 bitmap_clear (components);
25837
25838 int reg_size = TARGET_32BIT ? 4 : 8;
25839 int fp_reg_size = 8;
25840
25841 /* The GPRs we need saved to the frame. */
25842 if ((info->savres_strategy & SAVE_INLINE_GPRS)
25843 && (info->savres_strategy & REST_INLINE_GPRS))
25844 {
25845 int offset = info->gp_save_offset;
25846 if (info->push_p)
25847 offset += info->total_size;
25848
25849 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
25850 {
25851 if (IN_RANGE (offset, -0x8000, 0x7fff)
25852 && save_reg_p (regno))
25853 bitmap_set_bit (components, regno);
25854
25855 offset += reg_size;
25856 }
25857 }
25858
25859 /* Don't mess with the hard frame pointer. */
25860 if (frame_pointer_needed)
25861 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
25862
25863 /* Don't mess with the fixed TOC register. */
25864 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
25865 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
25866 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
25867 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
25868
25869 /* The FPRs we need saved to the frame. */
25870 if ((info->savres_strategy & SAVE_INLINE_FPRS)
25871 && (info->savres_strategy & REST_INLINE_FPRS))
25872 {
25873 int offset = info->fp_save_offset;
25874 if (info->push_p)
25875 offset += info->total_size;
25876
25877 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
25878 {
25879 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
25880 bitmap_set_bit (components, regno);
25881
25882 offset += fp_reg_size;
25883 }
25884 }
25885
25886 /* Optimize LR save and restore if we can. This is component 0. Any
25887 out-of-line register save/restore routines need LR. */
25888 if (info->lr_save_p
25889 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
25890 && (info->savres_strategy & SAVE_INLINE_GPRS)
25891 && (info->savres_strategy & REST_INLINE_GPRS)
25892 && (info->savres_strategy & SAVE_INLINE_FPRS)
25893 && (info->savres_strategy & REST_INLINE_FPRS)
25894 && (info->savres_strategy & SAVE_INLINE_VRS)
25895 && (info->savres_strategy & REST_INLINE_VRS))
25896 {
25897 int offset = info->lr_save_offset;
25898 if (info->push_p)
25899 offset += info->total_size;
25900 if (IN_RANGE (offset, -0x8000, 0x7fff))
25901 bitmap_set_bit (components, 0);
25902 }
25903
25904 /* Optimize saving the TOC. This is component 2. */
25905 if (cfun->machine->save_toc_in_prologue)
25906 bitmap_set_bit (components, 2);
25907
25908 return components;
25909 }
25910
25911 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
25912 static sbitmap
25913 rs6000_components_for_bb (basic_block bb)
25914 {
25915 rs6000_stack_t *info = rs6000_stack_info ();
25916
25917 bitmap in = DF_LIVE_IN (bb);
25918 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
25919 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
25920
25921 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
25922 bitmap_clear (components);
25923
25924 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
25925
25926 /* GPRs. */
25927 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
25928 if (bitmap_bit_p (in, regno)
25929 || bitmap_bit_p (gen, regno)
25930 || bitmap_bit_p (kill, regno))
25931 bitmap_set_bit (components, regno);
25932
25933 /* FPRs. */
25934 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
25935 if (bitmap_bit_p (in, regno)
25936 || bitmap_bit_p (gen, regno)
25937 || bitmap_bit_p (kill, regno))
25938 bitmap_set_bit (components, regno);
25939
25940 /* The link register. */
25941 if (bitmap_bit_p (in, LR_REGNO)
25942 || bitmap_bit_p (gen, LR_REGNO)
25943 || bitmap_bit_p (kill, LR_REGNO))
25944 bitmap_set_bit (components, 0);
25945
25946 /* The TOC save. */
25947 if (bitmap_bit_p (in, TOC_REGNUM)
25948 || bitmap_bit_p (gen, TOC_REGNUM)
25949 || bitmap_bit_p (kill, TOC_REGNUM))
25950 bitmap_set_bit (components, 2);
25951
25952 return components;
25953 }
25954
25955 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
25956 static void
25957 rs6000_disqualify_components (sbitmap components, edge e,
25958 sbitmap edge_components, bool /*is_prologue*/)
25959 {
25960 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
25961 live where we want to place that code. */
25962 if (bitmap_bit_p (edge_components, 0)
25963 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
25964 {
25965 if (dump_file)
25966 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
25967 "on entry to bb %d\n", e->dest->index);
25968 bitmap_clear_bit (components, 0);
25969 }
25970 }
25971
25972 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
25973 static void
25974 rs6000_emit_prologue_components (sbitmap components)
25975 {
25976 rs6000_stack_t *info = rs6000_stack_info ();
25977 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
25978 ? HARD_FRAME_POINTER_REGNUM
25979 : STACK_POINTER_REGNUM);
25980
25981 machine_mode reg_mode = Pmode;
25982 int reg_size = TARGET_32BIT ? 4 : 8;
25983 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
25984 int fp_reg_size = 8;
25985
25986 /* Prologue for LR. */
25987 if (bitmap_bit_p (components, 0))
25988 {
25989 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
25990 rtx reg = gen_rtx_REG (reg_mode, 0);
25991 rtx_insn *insn = emit_move_insn (reg, lr);
25992 RTX_FRAME_RELATED_P (insn) = 1;
25993 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
25994
25995 int offset = info->lr_save_offset;
25996 if (info->push_p)
25997 offset += info->total_size;
25998
25999 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26000 RTX_FRAME_RELATED_P (insn) = 1;
26001 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26002 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26003 }
26004
26005 /* Prologue for TOC. */
26006 if (bitmap_bit_p (components, 2))
26007 {
26008 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26009 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26010 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26011 }
26012
26013 /* Prologue for the GPRs. */
26014 int offset = info->gp_save_offset;
26015 if (info->push_p)
26016 offset += info->total_size;
26017
26018 for (int i = info->first_gp_reg_save; i < 32; i++)
26019 {
26020 if (bitmap_bit_p (components, i))
26021 {
26022 rtx reg = gen_rtx_REG (reg_mode, i);
26023 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26024 RTX_FRAME_RELATED_P (insn) = 1;
26025 rtx set = copy_rtx (single_set (insn));
26026 add_reg_note (insn, REG_CFA_OFFSET, set);
26027 }
26028
26029 offset += reg_size;
26030 }
26031
26032 /* Prologue for the FPRs. */
26033 offset = info->fp_save_offset;
26034 if (info->push_p)
26035 offset += info->total_size;
26036
26037 for (int i = info->first_fp_reg_save; i < 64; i++)
26038 {
26039 if (bitmap_bit_p (components, i))
26040 {
26041 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26042 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26043 RTX_FRAME_RELATED_P (insn) = 1;
26044 rtx set = copy_rtx (single_set (insn));
26045 add_reg_note (insn, REG_CFA_OFFSET, set);
26046 }
26047
26048 offset += fp_reg_size;
26049 }
26050 }
26051
26052 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26053 static void
26054 rs6000_emit_epilogue_components (sbitmap components)
26055 {
26056 rs6000_stack_t *info = rs6000_stack_info ();
26057 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26058 ? HARD_FRAME_POINTER_REGNUM
26059 : STACK_POINTER_REGNUM);
26060
26061 machine_mode reg_mode = Pmode;
26062 int reg_size = TARGET_32BIT ? 4 : 8;
26063
26064 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26065 int fp_reg_size = 8;
26066
26067 /* Epilogue for the FPRs. */
26068 int offset = info->fp_save_offset;
26069 if (info->push_p)
26070 offset += info->total_size;
26071
26072 for (int i = info->first_fp_reg_save; i < 64; i++)
26073 {
26074 if (bitmap_bit_p (components, i))
26075 {
26076 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26077 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26078 RTX_FRAME_RELATED_P (insn) = 1;
26079 add_reg_note (insn, REG_CFA_RESTORE, reg);
26080 }
26081
26082 offset += fp_reg_size;
26083 }
26084
26085 /* Epilogue for the GPRs. */
26086 offset = info->gp_save_offset;
26087 if (info->push_p)
26088 offset += info->total_size;
26089
26090 for (int i = info->first_gp_reg_save; i < 32; i++)
26091 {
26092 if (bitmap_bit_p (components, i))
26093 {
26094 rtx reg = gen_rtx_REG (reg_mode, i);
26095 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26096 RTX_FRAME_RELATED_P (insn) = 1;
26097 add_reg_note (insn, REG_CFA_RESTORE, reg);
26098 }
26099
26100 offset += reg_size;
26101 }
26102
26103 /* Epilogue for LR. */
26104 if (bitmap_bit_p (components, 0))
26105 {
26106 int offset = info->lr_save_offset;
26107 if (info->push_p)
26108 offset += info->total_size;
26109
26110 rtx reg = gen_rtx_REG (reg_mode, 0);
26111 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26112
26113 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26114 insn = emit_move_insn (lr, reg);
26115 RTX_FRAME_RELATED_P (insn) = 1;
26116 add_reg_note (insn, REG_CFA_RESTORE, lr);
26117 }
26118 }
26119
26120 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26121 static void
26122 rs6000_set_handled_components (sbitmap components)
26123 {
26124 rs6000_stack_t *info = rs6000_stack_info ();
26125
26126 for (int i = info->first_gp_reg_save; i < 32; i++)
26127 if (bitmap_bit_p (components, i))
26128 cfun->machine->gpr_is_wrapped_separately[i] = true;
26129
26130 for (int i = info->first_fp_reg_save; i < 64; i++)
26131 if (bitmap_bit_p (components, i))
26132 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26133
26134 if (bitmap_bit_p (components, 0))
26135 cfun->machine->lr_is_wrapped_separately = true;
26136
26137 if (bitmap_bit_p (components, 2))
26138 cfun->machine->toc_is_wrapped_separately = true;
26139 }
26140
26141 /* VRSAVE is a bit vector representing which AltiVec registers
26142 are used. The OS uses this to determine which vector
26143 registers to save on a context switch. We need to save
26144 VRSAVE on the stack frame, add whatever AltiVec registers we
26145 used in this function, and do the corresponding magic in the
26146 epilogue. */
26147 static void
26148 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26149 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26150 {
26151 /* Get VRSAVE into a GPR. */
26152 rtx reg = gen_rtx_REG (SImode, save_regno);
26153 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26154 if (TARGET_MACHO)
26155 emit_insn (gen_get_vrsave_internal (reg));
26156 else
26157 emit_insn (gen_rtx_SET (reg, vrsave));
26158
26159 /* Save VRSAVE. */
26160 int offset = info->vrsave_save_offset + frame_off;
26161 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26162
26163 /* Include the registers in the mask. */
26164 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26165
26166 emit_insn (generate_set_vrsave (reg, info, 0));
26167 }
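/* The sequence above amounts to something like this on a typical ELF
   target (an illustrative sketch with r12 as SAVE_REGNO; the exact
   mnemonics depend on the patterns used):
       mfspr 12,256        # VRSAVE is SPR 256
       stw 12,off(1)       # save the caller's value in the frame
       oris 12,12,mask     # merge in the AltiVec regs we use
       mtspr 256,12        # install the combined mask  */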
26168
26169 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26170 called, it left the arg pointer to the old stack in r29. Otherwise, the
26171 arg pointer is the top of the current frame. */
26172 static void
26173 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26174 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26175 {
26176 cfun->machine->split_stack_argp_used = true;
26177
26178 if (sp_adjust)
26179 {
26180 rtx r12 = gen_rtx_REG (Pmode, 12);
26181 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26182 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26183 emit_insn_before (set_r12, sp_adjust);
26184 }
26185 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26186 {
26187 rtx r12 = gen_rtx_REG (Pmode, 12);
26188 if (frame_off == 0)
26189 emit_move_insn (r12, frame_reg_rtx);
26190 else
26191 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26192 }
26193
26194 if (info->push_p)
26195 {
26196 rtx r12 = gen_rtx_REG (Pmode, 12);
26197 rtx r29 = gen_rtx_REG (Pmode, 29);
26198 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26199 rtx not_more = gen_label_rtx ();
26200 rtx jump;
26201
26202 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26203 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26204 gen_rtx_LABEL_REF (VOIDmode, not_more),
26205 pc_rtx);
26206 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26207 JUMP_LABEL (jump) = not_more;
26208 LABEL_NUSES (not_more) += 1;
26209 emit_move_insn (r12, r29);
26210 emit_label (not_more);
26211 }
26212 }
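/* For the push_p case this boils down to roughly (a sketch; cr7 is
   assumed to still hold the earlier split-stack limit comparison):
       bge 7,0f            # taken: __morestack was not called
       mr 12,29            # old-stack arg pointer left in r29
     0:                                                          */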
26213
26214 /* Emit function prologue as insns. */
26215
26216 void
26217 rs6000_emit_prologue (void)
26218 {
26219 rs6000_stack_t *info = rs6000_stack_info ();
26220 machine_mode reg_mode = Pmode;
26221 int reg_size = TARGET_32BIT ? 4 : 8;
26222 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26223 int fp_reg_size = 8;
26224 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26225 rtx frame_reg_rtx = sp_reg_rtx;
26226 unsigned int cr_save_regno;
26227 rtx cr_save_rtx = NULL_RTX;
26228 rtx_insn *insn;
26229 int strategy;
26230 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26231 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26232 && call_used_regs[STATIC_CHAIN_REGNUM]);
26233 int using_split_stack = (flag_split_stack
26234 && (lookup_attribute ("no_split_stack",
26235 DECL_ATTRIBUTES (cfun->decl))
26236 == NULL));
26237
26238 /* Offset to top of frame for frame_reg and sp respectively. */
26239 HOST_WIDE_INT frame_off = 0;
26240 HOST_WIDE_INT sp_off = 0;
26241 /* sp_adjust is the stack adjusting instruction, tracked so that the
26242 insn setting up the split-stack arg pointer can be emitted just
26243 prior to it, when r12 is not used here for other purposes. */
26244 rtx_insn *sp_adjust = 0;
26245
26246 #if CHECKING_P
26247 /* Track and check usage of r0, r11, r12. */
26248 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26249 #define START_USE(R) do \
26250 { \
26251 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26252 reg_inuse |= 1 << (R); \
26253 } while (0)
26254 #define END_USE(R) do \
26255 { \
26256 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26257 reg_inuse &= ~(1 << (R)); \
26258 } while (0)
26259 #define NOT_INUSE(R) do \
26260 { \
26261 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26262 } while (0)
26263 #else
26264 #define START_USE(R) do {} while (0)
26265 #define END_USE(R) do {} while (0)
26266 #define NOT_INUSE(R) do {} while (0)
26267 #endif
26268
26269 if (DEFAULT_ABI == ABI_ELFv2
26270 && !TARGET_SINGLE_PIC_BASE)
26271 {
26272 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26273
26274 /* With -mminimal-toc we may generate an extra use of r2 below. */
26275 if (TARGET_TOC && TARGET_MINIMAL_TOC
26276 && !constant_pool_empty_p ())
26277 cfun->machine->r2_setup_needed = true;
26278 }
26279
26280
26281 if (flag_stack_usage_info)
26282 current_function_static_stack_size = info->total_size;
26283
26284 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26285 {
26286 HOST_WIDE_INT size = info->total_size;
26287
26288 if (crtl->is_leaf && !cfun->calls_alloca)
26289 {
26290 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26291 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26292 size - get_stack_check_protect ());
26293 }
26294 else if (size > 0)
26295 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26296 }
26297
26298 if (TARGET_FIX_AND_CONTINUE)
26299 {
26300 /* GDB on Darwin arranges to forward a function from the old
26301 address by modifying the first 5 instructions of the function
26302 to branch to the overriding function. This is necessary to
26303 permit function pointers that point to the old function to
26304 actually forward to the new function. */
26305 emit_insn (gen_nop ());
26306 emit_insn (gen_nop ());
26307 emit_insn (gen_nop ());
26308 emit_insn (gen_nop ());
26309 emit_insn (gen_nop ());
26310 }
26311
26312 /* Handle world saves specially here. */
26313 if (WORLD_SAVE_P (info))
26314 {
26315 int i, j, sz;
26316 rtx treg;
26317 rtvec p;
26318 rtx reg0;
26319
26320 /* save_world expects lr in r0. */
26321 reg0 = gen_rtx_REG (Pmode, 0);
26322 if (info->lr_save_p)
26323 {
26324 insn = emit_move_insn (reg0,
26325 gen_rtx_REG (Pmode, LR_REGNO));
26326 RTX_FRAME_RELATED_P (insn) = 1;
26327 }
26328
26329 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26330 assumptions about the offsets of various bits of the stack
26331 frame. */
26332 gcc_assert (info->gp_save_offset == -220
26333 && info->fp_save_offset == -144
26334 && info->lr_save_offset == 8
26335 && info->cr_save_offset == 4
26336 && info->push_p
26337 && info->lr_save_p
26338 && (!crtl->calls_eh_return
26339 || info->ehrd_offset == -432)
26340 && info->vrsave_save_offset == -224
26341 && info->altivec_save_offset == -416);
26342
26343 treg = gen_rtx_REG (SImode, 11);
26344 emit_move_insn (treg, GEN_INT (-info->total_size));
26345
26346 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26347 in R11. It also clobbers R12, so beware! */
26348
26349 /* Preserve CR2 for save_world prologues. */
26350 sz = 5;
26351 sz += 32 - info->first_gp_reg_save;
26352 sz += 64 - info->first_fp_reg_save;
26353 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26354 p = rtvec_alloc (sz);
26355 j = 0;
26356 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26357 gen_rtx_REG (SImode,
26358 LR_REGNO));
26359 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26360 gen_rtx_SYMBOL_REF (Pmode,
26361 "*save_world"));
26362 /* We do floats first so that the instruction pattern matches
26363 properly. */
26364 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26365 RTVEC_ELT (p, j++)
26366 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26367 info->first_fp_reg_save + i),
26368 frame_reg_rtx,
26369 info->fp_save_offset + frame_off + 8 * i);
26370 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26371 RTVEC_ELT (p, j++)
26372 = gen_frame_store (gen_rtx_REG (V4SImode,
26373 info->first_altivec_reg_save + i),
26374 frame_reg_rtx,
26375 info->altivec_save_offset + frame_off + 16 * i);
26376 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26377 RTVEC_ELT (p, j++)
26378 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26379 frame_reg_rtx,
26380 info->gp_save_offset + frame_off + reg_size * i);
26381
26382 /* CR register traditionally saved as CR2. */
26383 RTVEC_ELT (p, j++)
26384 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26385 frame_reg_rtx, info->cr_save_offset + frame_off);
26386 /* Explain about use of R0. */
26387 if (info->lr_save_p)
26388 RTVEC_ELT (p, j++)
26389 = gen_frame_store (reg0,
26390 frame_reg_rtx, info->lr_save_offset + frame_off);
26391 /* Explain what happens to the stack pointer. */
26392 {
26393 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26394 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26395 }
26396
26397 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26398 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26399 treg, GEN_INT (-info->total_size));
26400 sp_off = frame_off = info->total_size;
26401 }
26402
26403 strategy = info->savres_strategy;
26404
26405 /* For V.4, update stack before we do any saving and set back pointer. */
26406 if (! WORLD_SAVE_P (info)
26407 && info->push_p
26408 && (DEFAULT_ABI == ABI_V4
26409 || crtl->calls_eh_return))
26410 {
26411 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26412 || !(strategy & SAVE_INLINE_GPRS)
26413 || !(strategy & SAVE_INLINE_VRS));
26414 int ptr_regno = -1;
26415 rtx ptr_reg = NULL_RTX;
26416 int ptr_off = 0;
26417
26418 if (info->total_size < 32767)
26419 frame_off = info->total_size;
26420 else if (need_r11)
26421 ptr_regno = 11;
26422 else if (info->cr_save_p
26423 || info->lr_save_p
26424 || info->first_fp_reg_save < 64
26425 || info->first_gp_reg_save < 32
26426 || info->altivec_size != 0
26427 || info->vrsave_size != 0
26428 || crtl->calls_eh_return)
26429 ptr_regno = 12;
26430 else
26431 {
26432 /* The prologue won't be saving any regs so there is no need
26433 to set up a frame register to access any frame save area.
26434 We also won't be using frame_off anywhere below, but set
26435 the correct value anyway to protect against future
26436 changes to this function. */
26437 frame_off = info->total_size;
26438 }
26439 if (ptr_regno != -1)
26440 {
26441 /* Set up the frame offset to that needed by the first
26442 out-of-line save function. */
26443 START_USE (ptr_regno);
26444 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26445 frame_reg_rtx = ptr_reg;
26446 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26447 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26448 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26449 ptr_off = info->gp_save_offset + info->gp_size;
26450 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26451 ptr_off = info->altivec_save_offset + info->altivec_size;
26452 frame_off = -ptr_off;
26453 }
26454 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26455 ptr_reg, ptr_off);
26456 if (REGNO (frame_reg_rtx) == 12)
26457 sp_adjust = 0;
26458 sp_off = info->total_size;
26459 if (frame_reg_rtx != sp_reg_rtx)
26460 rs6000_emit_stack_tie (frame_reg_rtx, false);
26461 }
26462
26463 /* If we use the link register, get it into r0. */
26464 if (!WORLD_SAVE_P (info) && info->lr_save_p
26465 && !cfun->machine->lr_is_wrapped_separately)
26466 {
26467 rtx addr, reg, mem;
26468
26469 reg = gen_rtx_REG (Pmode, 0);
26470 START_USE (0);
26471 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26472 RTX_FRAME_RELATED_P (insn) = 1;
26473
26474 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26475 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26476 {
26477 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26478 GEN_INT (info->lr_save_offset + frame_off));
26479 mem = gen_rtx_MEM (Pmode, addr);
26480 /* This should not be of rs6000_sr_alias_set, because of
26481 __builtin_return_address. */
26482
26483 insn = emit_move_insn (mem, reg);
26484 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26485 NULL_RTX, NULL_RTX);
26486 END_USE (0);
26487 }
26488 }
26489
26490 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26491 r12 will be needed by the out-of-line GPR save. */
26492 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26493 && !(strategy & (SAVE_INLINE_GPRS
26494 | SAVE_NOINLINE_GPRS_SAVES_LR))
26495 ? 11 : 12);
26496 if (!WORLD_SAVE_P (info)
26497 && info->cr_save_p
26498 && REGNO (frame_reg_rtx) != cr_save_regno
26499 && !(using_static_chain_p && cr_save_regno == 11)
26500 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26501 {
26502 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26503 START_USE (cr_save_regno);
26504 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26505 }
26506
26507 /* Do any required saving of fpr's, either inline or by calling an
26508 out-of-line routine, as chosen by the save strategy. */
26509 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26510 {
26511 int offset = info->fp_save_offset + frame_off;
26512 for (int i = info->first_fp_reg_save; i < 64; i++)
26513 {
26514 if (save_reg_p (i)
26515 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26516 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26517 sp_off - frame_off);
26518
26519 offset += fp_reg_size;
26520 }
26521 }
26522 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26523 {
26524 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26525 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26526 unsigned ptr_regno = ptr_regno_for_savres (sel);
26527 rtx ptr_reg = frame_reg_rtx;
26528
26529 if (REGNO (frame_reg_rtx) == ptr_regno)
26530 gcc_checking_assert (frame_off == 0);
26531 else
26532 {
26533 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26534 NOT_INUSE (ptr_regno);
26535 emit_insn (gen_add3_insn (ptr_reg,
26536 frame_reg_rtx, GEN_INT (frame_off)));
26537 }
26538 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26539 info->fp_save_offset,
26540 info->lr_save_offset,
26541 DFmode, sel);
26542 rs6000_frame_related (insn, ptr_reg, sp_off,
26543 NULL_RTX, NULL_RTX);
26544 if (lr)
26545 END_USE (0);
26546 }
26547
26548 /* Save GPRs. This is done as a PARALLEL if we are using
26549 the store-multiple instructions. */
26550 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26551 {
26552 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26553 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26554 unsigned ptr_regno = ptr_regno_for_savres (sel);
26555 rtx ptr_reg = frame_reg_rtx;
26556 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26557 int end_save = info->gp_save_offset + info->gp_size;
26558 int ptr_off;
26559
26560 if (ptr_regno == 12)
26561 sp_adjust = 0;
26562 if (!ptr_set_up)
26563 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26564
26565 /* Need to adjust r11 (r12) if we saved any FPRs. */
26566 if (end_save + frame_off != 0)
26567 {
26568 rtx offset = GEN_INT (end_save + frame_off);
26569
26570 if (ptr_set_up)
26571 frame_off = -end_save;
26572 else
26573 NOT_INUSE (ptr_regno);
26574 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26575 }
26576 else if (!ptr_set_up)
26577 {
26578 NOT_INUSE (ptr_regno);
26579 emit_move_insn (ptr_reg, frame_reg_rtx);
26580 }
26581 ptr_off = -end_save;
26582 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26583 info->gp_save_offset + ptr_off,
26584 info->lr_save_offset + ptr_off,
26585 reg_mode, sel);
26586 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26587 NULL_RTX, NULL_RTX);
26588 if (lr)
26589 END_USE (0);
26590 }
26591 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26592 {
26593 rtvec p;
26594 int i;
26595 p = rtvec_alloc (32 - info->first_gp_reg_save);
26596 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26597 RTVEC_ELT (p, i)
26598 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26599 frame_reg_rtx,
26600 info->gp_save_offset + frame_off + reg_size * i);
26601 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26602 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26603 NULL_RTX, NULL_RTX);
26604 }
26605 else if (!WORLD_SAVE_P (info))
26606 {
26607 int offset = info->gp_save_offset + frame_off;
26608 for (int i = info->first_gp_reg_save; i < 32; i++)
26609 {
26610 if (save_reg_p (i)
26611 && !cfun->machine->gpr_is_wrapped_separately[i])
26612 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
26613 sp_off - frame_off);
26614
26615 offset += reg_size;
26616 }
26617 }
26618
26619 if (crtl->calls_eh_return)
26620 {
26621 unsigned int i;
26622 rtvec p;
26623
26624 for (i = 0; ; ++i)
26625 {
26626 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26627 if (regno == INVALID_REGNUM)
26628 break;
26629 }
26630
26631 p = rtvec_alloc (i);
26632
26633 for (i = 0; ; ++i)
26634 {
26635 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26636 if (regno == INVALID_REGNUM)
26637 break;
26638
26639 rtx set
26640 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
26641 sp_reg_rtx,
26642 info->ehrd_offset + sp_off + reg_size * (int) i);
26643 RTVEC_ELT (p, i) = set;
26644 RTX_FRAME_RELATED_P (set) = 1;
26645 }
26646
26647 insn = emit_insn (gen_blockage ());
26648 RTX_FRAME_RELATED_P (insn) = 1;
26649 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
26650 }
26651
26652 /* In the AIX ABI we need to make sure r2 is really saved. */
26653 if (TARGET_AIX && crtl->calls_eh_return)
26654 {
26655 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
26656 rtx join_insn, note;
26657 rtx_insn *save_insn;
26658 long toc_restore_insn;
26659
26660 tmp_reg = gen_rtx_REG (Pmode, 11);
26661 tmp_reg_si = gen_rtx_REG (SImode, 11);
26662 if (using_static_chain_p)
26663 {
26664 START_USE (0);
26665 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
26666 }
26667 else
26668 START_USE (11);
26669 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
26670 /* Peek at the instruction to which this function returns. If it's
26671 restoring r2, then we know we've already saved r2. We can't
26672 unconditionally save r2 because the value we have will already
26673 be updated if we arrived at this function via a plt call or
26674 toc adjusting stub. */
26675 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
26676 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
26677 + RS6000_TOC_SAVE_SLOT);
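/* Illustrative values: if the 64-bit TOC save slot is 40 (AIX/ELFv1)
   this constant is 0xE8410028, i.e. "ld 2,40(1)"; a slot of 24
   (ELFv2) gives 0xE8410018, "ld 2,24(1)".  */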
26678 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
26679 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
26680 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
26681 validate_condition_mode (EQ, CCUNSmode);
26682 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
26683 emit_insn (gen_rtx_SET (compare_result,
26684 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
26685 toc_save_done = gen_label_rtx ();
26686 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26687 gen_rtx_EQ (VOIDmode, compare_result,
26688 const0_rtx),
26689 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
26690 pc_rtx);
26691 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26692 JUMP_LABEL (jump) = toc_save_done;
26693 LABEL_NUSES (toc_save_done) += 1;
26694
26695 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
26696 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
26697 sp_off - frame_off);
26698
26699 emit_label (toc_save_done);
26700
26701 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
26702 have a CFG that has different saves along different paths.
26703 Move the note to a dummy blockage insn, which describes that
26704 R2 is unconditionally saved after the label. */
26705 /* ??? An alternate representation might be a special insn pattern
26706 containing both the branch and the store. That might give the
26707 code that minimizes the number of DW_CFA_advance opcodes more
26708 freedom in placing the annotations. */
26709 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
26710 if (note)
26711 remove_note (save_insn, note);
26712 else
26713 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
26714 copy_rtx (PATTERN (save_insn)), NULL_RTX);
26715 RTX_FRAME_RELATED_P (save_insn) = 0;
26716
26717 join_insn = emit_insn (gen_blockage ());
26718 REG_NOTES (join_insn) = note;
26719 RTX_FRAME_RELATED_P (join_insn) = 1;
26720
26721 if (using_static_chain_p)
26722 {
26723 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
26724 END_USE (0);
26725 }
26726 else
26727 END_USE (11);
26728 }
26729
26730 /* Save CR if we use any that must be preserved. */
26731 if (!WORLD_SAVE_P (info) && info->cr_save_p)
26732 {
26733 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26734 GEN_INT (info->cr_save_offset + frame_off));
26735 rtx mem = gen_frame_mem (SImode, addr);
26736
26737 /* If we didn't copy cr before, do so now using r0. */
26738 if (cr_save_rtx == NULL_RTX)
26739 {
26740 START_USE (0);
26741 cr_save_rtx = gen_rtx_REG (SImode, 0);
26742 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26743 }
26744
26745 /* Saving CR requires a two-instruction sequence: one instruction
26746 to move the CR to a general-purpose register, and a second
26747 instruction that stores the GPR to memory.
26748
26749 We do not emit any DWARF CFI records for the first of these,
26750 because we cannot properly represent the fact that CR is saved in
26751 a register. One reason is that we cannot express that multiple
26752 CR fields are saved; another reason is that on 64-bit, the size
26753 of the CR register in DWARF (4 bytes) differs from the size of
26754 a general-purpose register.
26755
26756 This means if any intervening instruction were to clobber one of
26757 the call-saved CR fields, we'd have incorrect CFI. To prevent
26758 this from happening, we mark the store to memory as a use of
26759 those CR fields, which prevents any such instruction from being
26760 scheduled in between the two instructions. */
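/* Concretely, the PARALLEL built below wraps the store half of the
   sequence, e.g. "stw 12,off(1)", together with USEs of the saved CR
   fields; those USEs keep the scheduler from moving a CR-clobbering
   insn between the CR move above and this store (an illustrative
   sketch).  */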
26761 rtx crsave_v[9];
26762 int n_crsave = 0;
26763 int i;
26764
26765 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
26766 for (i = 0; i < 8; i++)
26767 if (save_reg_p (CR0_REGNO + i))
26768 crsave_v[n_crsave++]
26769 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
26770
26771 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
26772 gen_rtvec_v (n_crsave, crsave_v)));
26773 END_USE (REGNO (cr_save_rtx));
26774
26775 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
26776 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
26777 so we need to construct a frame expression manually. */
26778 RTX_FRAME_RELATED_P (insn) = 1;
26779
26780 /* Update address to be stack-pointer relative, like
26781 rs6000_frame_related would do. */
26782 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26783 GEN_INT (info->cr_save_offset + sp_off));
26784 mem = gen_frame_mem (SImode, addr);
26785
26786 if (DEFAULT_ABI == ABI_ELFv2)
26787 {
26788 /* In the ELFv2 ABI we generate separate CFI records for each
26789 CR field that was actually saved. They all point to the
26790 same 32-bit stack slot. */
26791 rtx crframe[8];
26792 int n_crframe = 0;
26793
26794 for (i = 0; i < 8; i++)
26795 if (save_reg_p (CR0_REGNO + i))
26796 {
26797 crframe[n_crframe]
26798 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
26799
26800 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
26801 n_crframe++;
26802 }
26803
26804 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
26805 gen_rtx_PARALLEL (VOIDmode,
26806 gen_rtvec_v (n_crframe, crframe)));
26807 }
26808 else
26809 {
26810 /* In other ABIs, by convention, we use a single CR regnum to
26811 represent the fact that all call-saved CR fields are saved.
26812 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
26813 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
26814 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
26815 }
26816 }
26817
26818 /* In the ELFv2 ABI we need to save all call-saved CR fields into
26819 *separate* slots if the routine calls __builtin_eh_return, so
26820 that they can be independently restored by the unwinder. */
26821 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
26822 {
26823 int i, cr_off = info->ehcr_offset;
26824 rtx crsave;
26825
26826 /* ??? We might get better performance by using multiple mfocrf
26827 instructions. */
26828 crsave = gen_rtx_REG (SImode, 0);
26829 emit_insn (gen_prologue_movesi_from_cr (crsave));
26830
26831 for (i = 0; i < 8; i++)
26832 if (!call_used_regs[CR0_REGNO + i])
26833 {
26834 rtvec p = rtvec_alloc (2);
26835 RTVEC_ELT (p, 0)
26836 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
26837 RTVEC_ELT (p, 1)
26838 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
26839
26840 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26841
26842 RTX_FRAME_RELATED_P (insn) = 1;
26843 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
26844 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
26845 sp_reg_rtx, cr_off + sp_off));
26846
26847 cr_off += reg_size;
26848 }
26849 }
26850
26851 /* If we are emitting stack probes but allocating no stack, just
26852 note that in the dump file. */
26853 if (flag_stack_clash_protection
26854 && dump_file
26855 && !info->push_p)
26856 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
26857
26858 /* Update stack and set back pointer unless this is V.4,
26859 for which it was done previously. */
26860 if (!WORLD_SAVE_P (info) && info->push_p
26861 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
26862 {
26863 rtx ptr_reg = NULL;
26864 int ptr_off = 0;
26865
26866 /* If saving altivec regs we need to be able to address all save
26867 locations using a 16-bit offset. */
26868 if ((strategy & SAVE_INLINE_VRS) == 0
26869 || (info->altivec_size != 0
26870 && (info->altivec_save_offset + info->altivec_size - 16
26871 + info->total_size - frame_off) > 32767)
26872 || (info->vrsave_size != 0
26873 && (info->vrsave_save_offset
26874 + info->total_size - frame_off) > 32767))
26875 {
26876 int sel = SAVRES_SAVE | SAVRES_VR;
26877 unsigned ptr_regno = ptr_regno_for_savres (sel);
26878
26879 if (using_static_chain_p
26880 && ptr_regno == STATIC_CHAIN_REGNUM)
26881 ptr_regno = 12;
26882 if (REGNO (frame_reg_rtx) != ptr_regno)
26883 START_USE (ptr_regno);
26884 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26885 frame_reg_rtx = ptr_reg;
26886 ptr_off = info->altivec_save_offset + info->altivec_size;
26887 frame_off = -ptr_off;
26888 }
26889 else if (REGNO (frame_reg_rtx) == 1)
26890 frame_off = info->total_size;
26891 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26892 ptr_reg, ptr_off);
26893 if (REGNO (frame_reg_rtx) == 12)
26894 sp_adjust = 0;
26895 sp_off = info->total_size;
26896 if (frame_reg_rtx != sp_reg_rtx)
26897 rs6000_emit_stack_tie (frame_reg_rtx, false);
26898 }
26899
26900 /* Set frame pointer, if needed. */
26901 if (frame_pointer_needed)
26902 {
26903 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
26904 sp_reg_rtx);
26905 RTX_FRAME_RELATED_P (insn) = 1;
26906 }
26907
26908 /* Save AltiVec registers if needed. Save here because the red zone does
26909 not always include AltiVec registers. */
26910 if (!WORLD_SAVE_P (info)
26911 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
26912 {
26913 int end_save = info->altivec_save_offset + info->altivec_size;
26914 int ptr_off;
26915 /* Oddly, the vector save/restore functions point r0 at the end
26916 of the save area, then use r11 or r12 to load offsets for
26917 [reg+reg] addressing. */
26918 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
26919 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
26920 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
26921
26922 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
26923 NOT_INUSE (0);
26924 if (scratch_regno == 12)
26925 sp_adjust = 0;
26926 if (end_save + frame_off != 0)
26927 {
26928 rtx offset = GEN_INT (end_save + frame_off);
26929
26930 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26931 }
26932 else
26933 emit_move_insn (ptr_reg, frame_reg_rtx);
26934
26935 ptr_off = -end_save;
26936 insn = rs6000_emit_savres_rtx (info, scratch_reg,
26937 info->altivec_save_offset + ptr_off,
26938 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
26939 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
26940 NULL_RTX, NULL_RTX);
26941 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
26942 {
26943 /* The oddity mentioned above clobbered our frame reg. */
26944 emit_move_insn (frame_reg_rtx, ptr_reg);
26945 frame_off = ptr_off;
26946 }
26947 }
26948 else if (!WORLD_SAVE_P (info)
26949 && info->altivec_size != 0)
26950 {
26951 int i;
26952
26953 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
26954 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
26955 {
26956 rtx areg, savereg, mem;
26957 HOST_WIDE_INT offset;
26958
26959 offset = (info->altivec_save_offset + frame_off
26960 + 16 * (i - info->first_altivec_reg_save));
26961
26962 savereg = gen_rtx_REG (V4SImode, i);
26963
26964 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
26965 {
26966 mem = gen_frame_mem (V4SImode,
26967 gen_rtx_PLUS (Pmode, frame_reg_rtx,
26968 GEN_INT (offset)));
26969 insn = emit_insn (gen_rtx_SET (mem, savereg));
26970 areg = NULL_RTX;
26971 }
26972 else
26973 {
26974 NOT_INUSE (0);
26975 areg = gen_rtx_REG (Pmode, 0);
26976 emit_move_insn (areg, GEN_INT (offset));
26977
26978 /* AltiVec addressing mode is [reg+reg]. */
26979 mem = gen_frame_mem (V4SImode,
26980 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
26981
26982 /* Rather than emitting a generic move, force use of the stvx
26983 instruction, which we always want on ISA 2.07 (power8) systems.
26984 In particular we don't want xxpermdi/stxvd2x for little
26985 endian. */
26986 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
26987 }
26988
26989 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26990 areg, GEN_INT (offset));
26991 }
26992 }
26993
26994 /* Save VRSAVE if needed: get it into a GPR, save the old value, and
26995 merge in the AltiVec regs we use; see emit_vrsave_prologue above. */
27000
27001 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27002 {
27003 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27004 be using r12 as frame_reg_rtx and r11 as the static chain
27005 pointer for nested functions. */
27006 int save_regno = 12;
27007 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27008 && !using_static_chain_p)
27009 save_regno = 11;
27010 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27011 {
27012 save_regno = 11;
27013 if (using_static_chain_p)
27014 save_regno = 0;
27015 }
27016 NOT_INUSE (save_regno);
27017
27018 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27019 }
27020
27021 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27022 if (!TARGET_SINGLE_PIC_BASE
27023 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27024 && !constant_pool_empty_p ())
27025 || (DEFAULT_ABI == ABI_V4
27026 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27027 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27028 {
27029 /* If emit_load_toc_table will use the link register, we need to save
27030 it. We use R12 for this purpose because emit_load_toc_table
27031 can use register 0. This allows us to use a plain 'blr' to return
27032 from the procedure more often. */
27033 int save_LR_around_toc_setup = (TARGET_ELF
27034 && DEFAULT_ABI == ABI_V4
27035 && flag_pic
27036 && ! info->lr_save_p
27037 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27038 if (save_LR_around_toc_setup)
27039 {
27040 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27041 rtx tmp = gen_rtx_REG (Pmode, 12);
27042
27043 sp_adjust = 0;
27044 insn = emit_move_insn (tmp, lr);
27045 RTX_FRAME_RELATED_P (insn) = 1;
27046
27047 rs6000_emit_load_toc_table (TRUE);
27048
27049 insn = emit_move_insn (lr, tmp);
27050 add_reg_note (insn, REG_CFA_RESTORE, lr);
27051 RTX_FRAME_RELATED_P (insn) = 1;
27052 }
27053 else
27054 rs6000_emit_load_toc_table (TRUE);
27055 }
27056
27057 #if TARGET_MACHO
27058 if (!TARGET_SINGLE_PIC_BASE
27059 && DEFAULT_ABI == ABI_DARWIN
27060 && flag_pic && crtl->uses_pic_offset_table)
27061 {
27062 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27063 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27064
27065 /* Save and restore LR locally around this call (in R0). */
27066 if (!info->lr_save_p)
27067 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27068
27069 emit_insn (gen_load_macho_picbase (src));
27070
27071 emit_move_insn (gen_rtx_REG (Pmode,
27072 RS6000_PIC_OFFSET_TABLE_REGNUM),
27073 lr);
27074
27075 if (!info->lr_save_p)
27076 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27077 }
27078 #endif
27079
27080 /* If we need to, save the TOC register after doing the stack setup.
27081 Do not emit eh frame info for this save. The unwinder wants info,
27082 conceptually attached to instructions in this function, about
27083 register values in the caller of this function. This R2 may have
27084 already been changed from the value in the caller.
27085 We don't attempt to write accurate DWARF EH frame info for R2
27086 because code emitted by gcc for a (non-pointer) function call
27087 doesn't save and restore R2. Instead, R2 is managed out-of-line
27088 by a linker generated plt call stub when the function resides in
27089 a shared library. This behavior is costly to describe in DWARF,
27090 both in terms of the size of DWARF info and the time taken in the
27091 unwinder to interpret it. R2 changes, apart from the
27092 calls_eh_return case earlier in this function, are handled by
27093 linux-unwind.h frob_update_context. */
27094 if (rs6000_save_toc_in_prologue_p ()
27095 && !cfun->machine->toc_is_wrapped_separately)
27096 {
27097 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27098 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27099 }
27100
27101 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27102 if (using_split_stack && split_stack_arg_pointer_used_p ())
27103 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27104 }
27105
27106 /* Output .extern statements for the save/restore routines we use. */
27107
27108 static void
27109 rs6000_output_savres_externs (FILE *file)
27110 {
27111 rs6000_stack_t *info = rs6000_stack_info ();
27112
27113 if (TARGET_DEBUG_STACK)
27114 debug_stack_info (info);
27115
27116 /* Write .extern for any function we will call to save and restore
27117 fp values. */
27118 if (info->first_fp_reg_save < 64
27119 && !TARGET_MACHO
27120 && !TARGET_ELF)
27121 {
27122 char *name;
27123 int regno = info->first_fp_reg_save - 32;
27124
27125 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27126 {
27127 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27128 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27129 name = rs6000_savres_routine_name (regno, sel);
27130 fprintf (file, "\t.extern %s\n", name);
27131 }
27132 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27133 {
27134 bool lr = (info->savres_strategy
27135 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27136 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27137 name = rs6000_savres_routine_name (regno, sel);
27138 fprintf (file, "\t.extern %s\n", name);
27139 }
27140 }
27141 }
27142
27143 /* Write function prologue. */
27144
27145 static void
27146 rs6000_output_function_prologue (FILE *file)
27147 {
27148 if (!cfun->is_thunk)
27149 rs6000_output_savres_externs (file);
27150
27151 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27152 immediately after the global entry point label. */
27153 if (rs6000_global_entry_point_needed_p ())
27154 {
27155 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27156
27157 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27158
27159 if (TARGET_CMODEL != CMODEL_LARGE)
27160 {
27161 /* In the small and medium code models, we assume the TOC is less than
27162 2 GB away from the text section, so it can be computed via the
27163 following two-instruction sequence. */
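/* A sketch of the sequence, assuming pic label number 0:
   0:	addis 2,12,.TOC.-.LCF0@ha
	addi 2,2,.TOC.-.LCF0@l  */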
27164 char buf[256];
27165
27166 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27167 fprintf (file, "0:\taddis 2,12,.TOC.-");
27168 assemble_name (file, buf);
27169 fprintf (file, "@ha\n");
27170 fprintf (file, "\taddi 2,2,.TOC.-");
27171 assemble_name (file, buf);
27172 fprintf (file, "@l\n");
27173 }
27174 else
27175 {
27176 /* In the large code model, we allow arbitrary offsets between the
27177 TOC and the text section, so we have to load the offset from
27178 memory. The data field is emitted directly before the global
27179 entry point in rs6000_elf_declare_function_name. */
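/* A sketch of the sequence, assuming pic label number 0:
	.reloc .,R_PPC64_ENTRY	(only with HAVE_AS_ENTRY_MARKERS)
	ld 2,.LCL0-.LCF0(12)
	add 2,2,12  */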
27180 char buf[256];
27181
27182 #ifdef HAVE_AS_ENTRY_MARKERS
27183 /* If supported by the assembler, emit a marker relocation. If the
27184 total code size of the final executable or shared library
27185 happens to fit into 2 GB after all, the linker will replace
27186 this code sequence with the sequence for the small or medium
27187 code model. */
27188 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27189 #endif
27190 fprintf (file, "\tld 2,");
27191 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27192 assemble_name (file, buf);
27193 fprintf (file, "-");
27194 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27195 assemble_name (file, buf);
27196 fprintf (file, "(12)\n");
27197 fprintf (file, "\tadd 2,2,12\n");
27198 }
27199
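/* Emit ".localentry NAME,.-NAME"; the second operand gives the
   offset from the global entry point to the local entry point.  */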
27200 fputs ("\t.localentry\t", file);
27201 assemble_name (file, name);
27202 fputs (",.-", file);
27203 assemble_name (file, name);
27204 fputs ("\n", file);
27205 }
27206
27207 /* Output -mprofile-kernel code. This needs to be done here instead of
27208 in output_function_profile since it must go after the ELFv2 ABI
27209 local entry point. */
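/* A sketch of what is emitted below on 64-bit ELFv1 Linux when a
   static chain must be preserved (RS6000_MCOUNT is "_mcount" there):
	mflr 0
	std 11,24(1)
	bl _mcount
	ld 11,24(1)  */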
27210 if (TARGET_PROFILE_KERNEL && crtl->profile)
27211 {
27212 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27213 gcc_assert (!TARGET_32BIT);
27214
27215 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27216
27217 /* In the ELFv2 ABI we have no compiler stack word. It must be
27218 the responsibility of _mcount to preserve the static chain
27219 register if required. */
27220 if (DEFAULT_ABI != ABI_ELFv2
27221 && cfun->static_chain_decl != NULL)
27222 {
27223 asm_fprintf (file, "\tstd %s,24(%s)\n",
27224 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27225 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27226 asm_fprintf (file, "\tld %s,24(%s)\n",
27227 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27228 }
27229 else
27230 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27231 }
27232
27233 rs6000_pic_labelno++;
27234 }
27235
27236 /* -mprofile-kernel code calls mcount before the function prologue,
27237 so a profiled leaf function should stay a leaf function. */
27238 static bool
27239 rs6000_keep_leaf_when_profiled ()
27240 {
27241 return TARGET_PROFILE_KERNEL;
27242 }
27243
27244 /* Nonzero if VMX regs are restored before the frame pop, zero if
27245 we restore after the pop when possible. */
27246 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27247
27248 /* Restoring cr is a two-step process: loading a reg from the frame
27249 save, then moving the reg to cr. For ABI_V4 we must let the
27250 unwinder know that the stack location is no longer valid at or
27251 before the stack deallocation, but we can't emit a cfa_restore for
27252 cr at the stack deallocation like we do for other registers.
27253 The trouble is that it is possible for the move to cr to be
27254 scheduled after the stack deallocation. So say exactly where cr
27255 is located on each of the two insns. */
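/* Concretely, the load from the frame save slot gets a
   REG_CFA_REGISTER note saying where cr lives, and the final move to
   cr carries the REG_CFA_RESTORE (a sketch of the scheme used by
   load_cr_save and restore_saved_cr below).  */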
27256
27257 static rtx
27258 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27259 {
27260 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27261 rtx reg = gen_rtx_REG (SImode, regno);
27262 rtx_insn *insn = emit_move_insn (reg, mem);
27263
27264 if (!exit_func && DEFAULT_ABI == ABI_V4)
27265 {
27266 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27267 rtx set = gen_rtx_SET (reg, cr);
27268
27269 add_reg_note (insn, REG_CFA_REGISTER, set);
27270 RTX_FRAME_RELATED_P (insn) = 1;
27271 }
27272 return reg;
27273 }
27274
27275 /* Reload CR from REG. */
27276
27277 static void
27278 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27279 {
27280 int count = 0;
27281 int i;
27282
27283 if (using_mfcr_multiple)
27284 {
27285 for (i = 0; i < 8; i++)
27286 if (save_reg_p (CR0_REGNO + i))
27287 count++;
27288 gcc_assert (count);
27289 }
27290
27291 if (using_mfcr_multiple && count > 1)
27292 {
27293 rtx_insn *insn;
27294 rtvec p;
27295 int ndx;
27296
27297 p = rtvec_alloc (count);
27298
27299 ndx = 0;
27300 for (i = 0; i < 8; i++)
27301 if (save_reg_p (CR0_REGNO + i))
27302 {
27303 rtvec r = rtvec_alloc (2);
27304 RTVEC_ELT (r, 0) = reg;
27305 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27306 RTVEC_ELT (p, ndx) =
27307 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27308 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27309 ndx++;
27310 }
27311 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27312 gcc_assert (ndx == count);
27313
27314 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27315 CR field separately. */
27316 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27317 {
27318 for (i = 0; i < 8; i++)
27319 if (save_reg_p (CR0_REGNO + i))
27320 add_reg_note (insn, REG_CFA_RESTORE,
27321 gen_rtx_REG (SImode, CR0_REGNO + i));
27322
27323 RTX_FRAME_RELATED_P (insn) = 1;
27324 }
27325 }
27326 else
27327 for (i = 0; i < 8; i++)
27328 if (save_reg_p (CR0_REGNO + i))
27329 {
27330 rtx insn = emit_insn (gen_movsi_to_cr_one
27331 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27332
27333 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27334 CR field separately, attached to the insn that in fact
27335 restores this particular CR field. */
27336 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27337 {
27338 add_reg_note (insn, REG_CFA_RESTORE,
27339 gen_rtx_REG (SImode, CR0_REGNO + i));
27340
27341 RTX_FRAME_RELATED_P (insn) = 1;
27342 }
27343 }
27344
27345 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27346 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27347 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27348 {
27349 rtx_insn *insn = get_last_insn ();
27350 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27351
27352 add_reg_note (insn, REG_CFA_RESTORE, cr);
27353 RTX_FRAME_RELATED_P (insn) = 1;
27354 }
27355 }
27356
27357 /* Like cr, the move to lr instruction can be scheduled after the
27358 stack deallocation, but unlike cr, its stack frame save is still
27359 valid. So we only need to emit the cfa_restore on the correct
27360 instruction. */
27361
27362 static void
27363 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27364 {
27365 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27366 rtx reg = gen_rtx_REG (Pmode, regno);
27367
27368 emit_move_insn (reg, mem);
27369 }
27370
27371 static void
27372 restore_saved_lr (int regno, bool exit_func)
27373 {
27374 rtx reg = gen_rtx_REG (Pmode, regno);
27375 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27376 rtx_insn *insn = emit_move_insn (lr, reg);
27377
27378 if (!exit_func && flag_shrink_wrap)
27379 {
27380 add_reg_note (insn, REG_CFA_RESTORE, lr);
27381 RTX_FRAME_RELATED_P (insn) = 1;
27382 }
27383 }
27384
27385 static rtx
27386 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27387 {
27388 if (DEFAULT_ABI == ABI_ELFv2)
27389 {
27390 int i;
27391 for (i = 0; i < 8; i++)
27392 if (save_reg_p (CR0_REGNO + i))
27393 {
27394 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27395 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27396 cfa_restores);
27397 }
27398 }
27399 else if (info->cr_save_p)
27400 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27401 gen_rtx_REG (SImode, CR2_REGNO),
27402 cfa_restores);
27403
27404 if (info->lr_save_p)
27405 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27406 gen_rtx_REG (Pmode, LR_REGNO),
27407 cfa_restores);
27408 return cfa_restores;
27409 }
27410
27411 /* Return true if OFFSET from stack pointer can be clobbered by signals.
27412 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
27413 below the stack pointer not clobbered by signals. */
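/* For example, an offset of -16 can be clobbered under V.4 but not
   under the 64-bit AIX-style ABIs with their 288-byte red zone.  */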
27414
27415 static inline bool
27416 offset_below_red_zone_p (HOST_WIDE_INT offset)
27417 {
27418 return offset < (DEFAULT_ABI == ABI_V4
27419 ? 0
27420 : TARGET_32BIT ? -220 : -288);
27421 }
27422
27423 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27424
27425 static void
27426 emit_cfa_restores (rtx cfa_restores)
27427 {
27428 rtx_insn *insn = get_last_insn ();
27429 rtx *loc = &REG_NOTES (insn);
27430
27431 while (*loc)
27432 loc = &XEXP (*loc, 1);
27433 *loc = cfa_restores;
27434 RTX_FRAME_RELATED_P (insn) = 1;
27435 }
27436
27437 /* Emit function epilogue as insns. */
27438
27439 void
27440 rs6000_emit_epilogue (int sibcall)
27441 {
27442 rs6000_stack_t *info;
27443 int restoring_GPRs_inline;
27444 int restoring_FPRs_inline;
27445 int using_load_multiple;
27446 int using_mtcr_multiple;
27447 int use_backchain_to_restore_sp;
27448 int restore_lr;
27449 int strategy;
27450 HOST_WIDE_INT frame_off = 0;
27451 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27452 rtx frame_reg_rtx = sp_reg_rtx;
27453 rtx cfa_restores = NULL_RTX;
27454 rtx insn;
27455 rtx cr_save_reg = NULL_RTX;
27456 machine_mode reg_mode = Pmode;
27457 int reg_size = TARGET_32BIT ? 4 : 8;
27458 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
27459 int fp_reg_size = 8;
27460 int i;
27461 bool exit_func;
27462 unsigned ptr_regno;
27463
27464 info = rs6000_stack_info ();
27465
27466 strategy = info->savres_strategy;
27467 using_load_multiple = strategy & REST_MULTIPLE;
27468 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27469 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27470 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
27471 || rs6000_tune == PROCESSOR_PPC603
27472 || rs6000_tune == PROCESSOR_PPC750
27473 || optimize_size);
27474 /* Restore via the backchain when we have a large frame, since this
27475 is more efficient than an addis, addi pair. The second condition
27476 here will not trigger at the moment; we don't actually need a
27477 frame pointer for alloca, but the generic parts of the compiler
27478 give us one anyway. */
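/* I.e. prefer a single load through the back chain, roughly
   "ld 11,0(1)", over materializing the frame size with an
   addis/addi pair (a sketch).  */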
27479 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27480 ? info->lr_save_offset
27481 : 0) > 32767
27482 || (cfun->calls_alloca
27483 && !frame_pointer_needed));
27484 restore_lr = (info->lr_save_p
27485 && (restoring_FPRs_inline
27486 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27487 && (restoring_GPRs_inline
27488 || info->first_fp_reg_save < 64)
27489 && !cfun->machine->lr_is_wrapped_separately);
27490
27491
27492 if (WORLD_SAVE_P (info))
27493 {
27494 int i, j;
27495 char rname[30];
27496 const char *alloc_rname;
27497 rtvec p;
27498
27499 /* eh_rest_world_r10 will return to the location saved in the LR
27500 stack slot (which is not likely to be our caller).
27501 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27502 rest_world is similar, except any R10 parameter is ignored.
27503 The exception-handling stuff that was here in 2.95 is no
27504 longer necessary. */
27505
27506 p = rtvec_alloc (9
27507 + 32 - info->first_gp_reg_save
27508 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27509 + 63 + 1 - info->first_fp_reg_save);
27510
27511 strcpy (rname, ((crtl->calls_eh_return) ?
27512 "*eh_rest_world_r10" : "*rest_world"));
27513 alloc_rname = ggc_strdup (rname);
27514
27515 j = 0;
27516 RTVEC_ELT (p, j++) = ret_rtx;
27517 RTVEC_ELT (p, j++)
27518 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27519 /* The instruction pattern requires a clobber here;
27520 it is shared with the restVEC helper. */
27521 RTVEC_ELT (p, j++)
27522 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27523
27524 {
27525 /* CR register traditionally saved as CR2. */
27526 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27527 RTVEC_ELT (p, j++)
27528 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27529 if (flag_shrink_wrap)
27530 {
27531 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27532 gen_rtx_REG (Pmode, LR_REGNO),
27533 cfa_restores);
27534 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27535 }
27536 }
27537
27538 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27539 {
27540 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27541 RTVEC_ELT (p, j++)
27542 = gen_frame_load (reg,
27543 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27544 if (flag_shrink_wrap
27545 && save_reg_p (info->first_gp_reg_save + i))
27546 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27547 }
27548 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27549 {
27550 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27551 RTVEC_ELT (p, j++)
27552 = gen_frame_load (reg,
27553 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27554 if (flag_shrink_wrap
27555 && save_reg_p (info->first_altivec_reg_save + i))
27556 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27557 }
27558 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27559 {
27560 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27561 info->first_fp_reg_save + i);
27562 RTVEC_ELT (p, j++)
27563 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27564 if (flag_shrink_wrap
27565 && save_reg_p (info->first_fp_reg_save + i))
27566 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27567 }
27568 RTVEC_ELT (p, j++)
27569 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
27570 RTVEC_ELT (p, j++)
27571 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
27572 RTVEC_ELT (p, j++)
27573 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
27574 RTVEC_ELT (p, j++)
27575 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
27576 RTVEC_ELT (p, j++)
27577 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27578 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27579
27580 if (flag_shrink_wrap)
27581 {
27582 REG_NOTES (insn) = cfa_restores;
27583 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27584 RTX_FRAME_RELATED_P (insn) = 1;
27585 }
27586 return;
27587 }
27588
27589 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27590 if (info->push_p)
27591 frame_off = info->total_size;
27592
27593 /* Restore AltiVec registers if we must do so before adjusting the
27594 stack. */
27595 if (info->altivec_size != 0
27596 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27597 || (DEFAULT_ABI != ABI_V4
27598 && offset_below_red_zone_p (info->altivec_save_offset))))
27599 {
27600 int i;
27601 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27602
27603 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27604 if (use_backchain_to_restore_sp)
27605 {
27606 int frame_regno = 11;
27607
27608 if ((strategy & REST_INLINE_VRS) == 0)
27609 {
27610 /* Of r11 and r12, select the one not clobbered by an
27611 out-of-line restore function for the frame register. */
27612 frame_regno = 11 + 12 - scratch_regno;
27613 }
27614 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
27615 emit_move_insn (frame_reg_rtx,
27616 gen_rtx_MEM (Pmode, sp_reg_rtx));
27617 frame_off = 0;
27618 }
27619 else if (frame_pointer_needed)
27620 frame_reg_rtx = hard_frame_pointer_rtx;
27621
27622 if ((strategy & REST_INLINE_VRS) == 0)
27623 {
27624 int end_save = info->altivec_save_offset + info->altivec_size;
27625 int ptr_off;
27626 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27627 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27628
27629 if (end_save + frame_off != 0)
27630 {
27631 rtx offset = GEN_INT (end_save + frame_off);
27632
27633 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27634 }
27635 else
27636 emit_move_insn (ptr_reg, frame_reg_rtx);
27637
27638 ptr_off = -end_save;
27639 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27640 info->altivec_save_offset + ptr_off,
27641 0, V4SImode, SAVRES_VR);
27642 }
27643 else
27644 {
27645 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27646 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27647 {
27648 rtx addr, areg, mem, insn;
27649 rtx reg = gen_rtx_REG (V4SImode, i);
27650 HOST_WIDE_INT offset
27651 = (info->altivec_save_offset + frame_off
27652 + 16 * (i - info->first_altivec_reg_save));
27653
27654 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27655 {
27656 mem = gen_frame_mem (V4SImode,
27657 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27658 GEN_INT (offset)));
27659 insn = gen_rtx_SET (reg, mem);
27660 }
27661 else
27662 {
27663 areg = gen_rtx_REG (Pmode, 0);
27664 emit_move_insn (areg, GEN_INT (offset));
27665
27666 /* AltiVec addressing mode is [reg+reg]. */
27667 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27668 mem = gen_frame_mem (V4SImode, addr);
27669
27670 /* Rather than emitting a generic move, force use of the
27671 lvx instruction, which we always want. In particular we
27672 don't want lxvd2x/xxpermdi for little endian. */
27673 insn = gen_altivec_lvx_v4si_internal (reg, mem);
27674 }
27675
27676 (void) emit_insn (insn);
27677 }
27678 }
27679
27680 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27681 if (((strategy & REST_INLINE_VRS) == 0
27682 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
27683 && (flag_shrink_wrap
27684 || (offset_below_red_zone_p
27685 (info->altivec_save_offset
27686 + 16 * (i - info->first_altivec_reg_save))))
27687 && save_reg_p (i))
27688 {
27689 rtx reg = gen_rtx_REG (V4SImode, i);
27690 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27691 }
27692 }
27693
27694 /* Restore VRSAVE if we must do so before adjusting the stack. */
27695 if (info->vrsave_size != 0
27696 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27697 || (DEFAULT_ABI != ABI_V4
27698 && offset_below_red_zone_p (info->vrsave_save_offset))))
27699 {
27700 rtx reg;
27701
27702 if (frame_reg_rtx == sp_reg_rtx)
27703 {
27704 if (use_backchain_to_restore_sp)
27705 {
27706 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27707 emit_move_insn (frame_reg_rtx,
27708 gen_rtx_MEM (Pmode, sp_reg_rtx));
27709 frame_off = 0;
27710 }
27711 else if (frame_pointer_needed)
27712 frame_reg_rtx = hard_frame_pointer_rtx;
27713 }
27714
27715 reg = gen_rtx_REG (SImode, 12);
27716 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27717 info->vrsave_save_offset + frame_off));
27718
27719 emit_insn (generate_set_vrsave (reg, info, 1));
27720 }
27721
27722 insn = NULL_RTX;
27723 /* If we have a large stack frame, restore the old stack pointer
27724 using the backchain. */
27725 if (use_backchain_to_restore_sp)
27726 {
27727 if (frame_reg_rtx == sp_reg_rtx)
27728 {
27729 /* Under V.4, don't reset the stack pointer until after we're done
27730 loading the saved registers. */
27731 if (DEFAULT_ABI == ABI_V4)
27732 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27733
27734 insn = emit_move_insn (frame_reg_rtx,
27735 gen_rtx_MEM (Pmode, sp_reg_rtx));
27736 frame_off = 0;
27737 }
27738 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27739 && DEFAULT_ABI == ABI_V4)
27740 /* frame_reg_rtx has been set up by the altivec restore. */
27741 ;
27742 else
27743 {
27744 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
27745 frame_reg_rtx = sp_reg_rtx;
27746 }
27747 }
27748 /* If we have a frame pointer, we can restore the old stack pointer
27749 from it. */
27750 else if (frame_pointer_needed)
27751 {
27752 frame_reg_rtx = sp_reg_rtx;
27753 if (DEFAULT_ABI == ABI_V4)
27754 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27755 /* Prevent reordering memory accesses against stack pointer restore. */
27756 else if (cfun->calls_alloca
27757 || offset_below_red_zone_p (-info->total_size))
27758 rs6000_emit_stack_tie (frame_reg_rtx, true);
27759
27760 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
27761 GEN_INT (info->total_size)));
27762 frame_off = 0;
27763 }
27764 else if (info->push_p
27765 && DEFAULT_ABI != ABI_V4
27766 && !crtl->calls_eh_return)
27767 {
27768 /* Prevent reordering memory accesses against stack pointer restore. */
27769 if (cfun->calls_alloca
27770 || offset_below_red_zone_p (-info->total_size))
27771 rs6000_emit_stack_tie (frame_reg_rtx, false);
27772 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
27773 GEN_INT (info->total_size)));
27774 frame_off = 0;
27775 }
27776 if (insn && frame_reg_rtx == sp_reg_rtx)
27777 {
27778 if (cfa_restores)
27779 {
27780 REG_NOTES (insn) = cfa_restores;
27781 cfa_restores = NULL_RTX;
27782 }
27783 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27784 RTX_FRAME_RELATED_P (insn) = 1;
27785 }
27786
27787 /* Restore AltiVec registers if we have not done so already. */
27788 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27789 && info->altivec_size != 0
27790 && (DEFAULT_ABI == ABI_V4
27791 || !offset_below_red_zone_p (info->altivec_save_offset)))
27792 {
27793 int i;
27794
27795 if ((strategy & REST_INLINE_VRS) == 0)
27796 {
27797 int end_save = info->altivec_save_offset + info->altivec_size;
27798 int ptr_off;
27799 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27800 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27801 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27802
27803 if (end_save + frame_off != 0)
27804 {
27805 rtx offset = GEN_INT (end_save + frame_off);
27806
27807 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27808 }
27809 else
27810 emit_move_insn (ptr_reg, frame_reg_rtx);
27811
27812 ptr_off = -end_save;
27813 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27814 info->altivec_save_offset + ptr_off,
27815 0, V4SImode, SAVRES_VR);
27816 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27817 {
27818 /* Frame reg was clobbered by out-of-line save. Restore it
27819 from ptr_reg, and if we are calling out-of-line gpr or
27820 fpr restore set up the correct pointer and offset. */
27821 unsigned newptr_regno = 1;
27822 if (!restoring_GPRs_inline)
27823 {
27824 bool lr = info->gp_save_offset + info->gp_size == 0;
27825 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
27826 newptr_regno = ptr_regno_for_savres (sel);
27827 end_save = info->gp_save_offset + info->gp_size;
27828 }
27829 else if (!restoring_FPRs_inline)
27830 {
27831 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
27832 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27833 newptr_regno = ptr_regno_for_savres (sel);
27834 end_save = info->fp_save_offset + info->fp_size;
27835 }
27836
27837 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
27838 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
27839
27840 if (end_save + ptr_off != 0)
27841 {
27842 rtx offset = GEN_INT (end_save + ptr_off);
27843
27844 frame_off = -end_save;
27845 if (TARGET_32BIT)
27846 emit_insn (gen_addsi3_carry (frame_reg_rtx,
27847 ptr_reg, offset));
27848 else
27849 emit_insn (gen_adddi3_carry (frame_reg_rtx,
27850 ptr_reg, offset));
27851 }
27852 else
27853 {
27854 frame_off = ptr_off;
27855 emit_move_insn (frame_reg_rtx, ptr_reg);
27856 }
27857 }
27858 }
27859 else
27860 {
27861 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27862 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27863 {
27864 rtx addr, areg, mem, insn;
27865 rtx reg = gen_rtx_REG (V4SImode, i);
27866 HOST_WIDE_INT offset
27867 = (info->altivec_save_offset + frame_off
27868 + 16 * (i - info->first_altivec_reg_save));
27869
27870 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27871 {
27872 mem = gen_frame_mem (V4SImode,
27873 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27874 GEN_INT (offset)));
27875 insn = gen_rtx_SET (reg, mem);
27876 }
27877 else
27878 {
27879 areg = gen_rtx_REG (Pmode, 0);
27880 emit_move_insn (areg, GEN_INT (offset));
27881
27882 /* AltiVec addressing mode is [reg+reg]. */
27883 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27884 mem = gen_frame_mem (V4SImode, addr);
27885
27886 /* Rather than emitting a generic move, force use of the
27887 lvx instruction, which we always want. In particular we
27888 don't want lxvd2x/xxpermdi for little endian. */
27889 insn = gen_altivec_lvx_v4si_internal (reg, mem);
27890 }
27891
27892 (void) emit_insn (insn);
27893 }
27894 }
27895
27896 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27897 if (((strategy & REST_INLINE_VRS) == 0
27898 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
27899 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
27900 && save_reg_p (i))
27901 {
27902 rtx reg = gen_rtx_REG (V4SImode, i);
27903 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27904 }
27905 }
27906
27907 /* Restore VRSAVE if we have not done so already. */
27908 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27909 && info->vrsave_size != 0
27910 && (DEFAULT_ABI == ABI_V4
27911 || !offset_below_red_zone_p (info->vrsave_save_offset)))
27912 {
27913 rtx reg;
27914
27915 reg = gen_rtx_REG (SImode, 12);
27916 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27917 info->vrsave_save_offset + frame_off));
27918
27919 emit_insn (generate_set_vrsave (reg, info, 1));
27920 }
27921
27922 /* If we exit by an out-of-line restore function on ABI_V4 then that
27923 function will deallocate the stack, so we don't need to worry
27924 about the unwinder restoring cr from an invalid stack frame
27925 location. */
27926 exit_func = (!restoring_FPRs_inline
27927 || (!restoring_GPRs_inline
27928 && info->first_fp_reg_save == 64));
27929
27930 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
27931 *separate* slots if the routine calls __builtin_eh_return, so
27932 that they can be independently restored by the unwinder. */
27933 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27934 {
27935 int i, cr_off = info->ehcr_offset;
27936
27937 for (i = 0; i < 8; i++)
27938 if (!call_used_regs[CR0_REGNO + i])
27939 {
27940 rtx reg = gen_rtx_REG (SImode, 0);
27941 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27942 cr_off + frame_off));
27943
27944 insn = emit_insn (gen_movsi_to_cr_one
27945 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27946
27947 if (!exit_func && flag_shrink_wrap)
27948 {
27949 add_reg_note (insn, REG_CFA_RESTORE,
27950 gen_rtx_REG (SImode, CR0_REGNO + i));
27951
27952 RTX_FRAME_RELATED_P (insn) = 1;
27953 }
27954
27955 cr_off += reg_size;
27956 }
27957 }
27958
27959 /* Get the old lr if we saved it. If we are restoring registers
27960 out-of-line, then the out-of-line routines can do this for us. */
27961 if (restore_lr && restoring_GPRs_inline)
27962 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
27963
27964 /* Get the old cr if we saved it. */
27965 if (info->cr_save_p)
27966 {
27967 unsigned cr_save_regno = 12;
27968
27969 if (!restoring_GPRs_inline)
27970 {
27971 /* Ensure we don't use the register used by the out-of-line
27972 gpr register restore below. */
27973 bool lr = info->gp_save_offset + info->gp_size == 0;
27974 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
27975 int gpr_ptr_regno = ptr_regno_for_savres (sel);
27976
27977 if (gpr_ptr_regno == 12)
27978 cr_save_regno = 11;
27979 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
27980 }
27981 else if (REGNO (frame_reg_rtx) == 12)
27982 cr_save_regno = 11;
27983
27984 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
27985 info->cr_save_offset + frame_off,
27986 exit_func);
27987 }
27988
27989 /* Set LR here to try to overlap restores below. */
27990 if (restore_lr && restoring_GPRs_inline)
27991 restore_saved_lr (0, exit_func);
27992
27993 /* Load exception handler data registers, if needed. */
27994 if (crtl->calls_eh_return)
27995 {
27996 unsigned int i, regno;
27997
27998 if (TARGET_AIX)
27999 {
28000 rtx reg = gen_rtx_REG (reg_mode, 2);
28001 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28002 frame_off + RS6000_TOC_SAVE_SLOT));
28003 }
28004
28005 for (i = 0; ; ++i)
28006 {
28007 rtx mem;
28008
28009 regno = EH_RETURN_DATA_REGNO (i);
28010 if (regno == INVALID_REGNUM)
28011 break;
28012
28013 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28014 info->ehrd_offset + frame_off
28015 + reg_size * (int) i);
28016
28017 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28018 }
28019 }
28020
28021 /* Restore GPRs. This is done as a PARALLEL if we are using
28022 the load-multiple instructions. */
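/* E.g. a single "lmw 14,<offset>(1)"-style insn on 32-bit targets;
   load multiple is not available in 64-bit mode (a sketch).  */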
28023 if (!restoring_GPRs_inline)
28024 {
28025 /* We are jumping to an out-of-line function. */
28026 rtx ptr_reg;
28027 int end_save = info->gp_save_offset + info->gp_size;
28028 bool can_use_exit = end_save == 0;
28029 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28030 int ptr_off;
28031
28032 /* Emit stack reset code if we need it. */
28033 ptr_regno = ptr_regno_for_savres (sel);
28034 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28035 if (can_use_exit)
28036 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28037 else if (end_save + frame_off != 0)
28038 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28039 GEN_INT (end_save + frame_off)));
28040 else if (REGNO (frame_reg_rtx) != ptr_regno)
28041 emit_move_insn (ptr_reg, frame_reg_rtx);
28042 if (REGNO (frame_reg_rtx) == ptr_regno)
28043 frame_off = -end_save;
28044
28045 if (can_use_exit && info->cr_save_p)
28046 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28047
28048 ptr_off = -end_save;
28049 rs6000_emit_savres_rtx (info, ptr_reg,
28050 info->gp_save_offset + ptr_off,
28051 info->lr_save_offset + ptr_off,
28052 reg_mode, sel);
28053 }
28054 else if (using_load_multiple)
28055 {
28056 rtvec p;
28057 p = rtvec_alloc (32 - info->first_gp_reg_save);
28058 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28059 RTVEC_ELT (p, i)
28060 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28061 frame_reg_rtx,
28062 info->gp_save_offset + frame_off + reg_size * i);
28063 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28064 }
28065 else
28066 {
28067 int offset = info->gp_save_offset + frame_off;
28068 for (i = info->first_gp_reg_save; i < 32; i++)
28069 {
28070 if (save_reg_p (i)
28071 && !cfun->machine->gpr_is_wrapped_separately[i])
28072 {
28073 rtx reg = gen_rtx_REG (reg_mode, i);
28074 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28075 }
28076
28077 offset += reg_size;
28078 }
28079 }
28080
28081 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28082 {
28083 /* If the frame pointer was used then we can't delay emitting
28084 a REG_CFA_DEF_CFA note. This must happen on the insn that
28085 restores the frame pointer, r31. We may have already emitted
28086 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28087 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28088 be harmless if emitted. */
28089 if (frame_pointer_needed)
28090 {
28091 insn = get_last_insn ();
28092 add_reg_note (insn, REG_CFA_DEF_CFA,
28093 plus_constant (Pmode, frame_reg_rtx, frame_off));
28094 RTX_FRAME_RELATED_P (insn) = 1;
28095 }
28096
28097 /* Set up cfa_restores. We always need these when
28098 shrink-wrapping. If not shrink-wrapping then we only need
28099 the cfa_restore when the stack location is no longer valid.
28100 The cfa_restores must be emitted on or before the insn that
28101 invalidates the stack, and of course must not be emitted
28102 before the insn that actually does the restore. The latter
28103 is why it is a bad idea to emit the cfa_restores as a group
28104 on the last instruction here that actually does a restore:
28105 That insn may be reordered with respect to others doing
28106 restores. */
28107 if (flag_shrink_wrap
28108 && !restoring_GPRs_inline
28109 && info->first_fp_reg_save == 64)
28110 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28111
28112 for (i = info->first_gp_reg_save; i < 32; i++)
28113 if (save_reg_p (i)
28114 && !cfun->machine->gpr_is_wrapped_separately[i])
28115 {
28116 rtx reg = gen_rtx_REG (reg_mode, i);
28117 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28118 }
28119 }
28120
28121 if (!restoring_GPRs_inline
28122 && info->first_fp_reg_save == 64)
28123 {
28124 /* We are jumping to an out-of-line function. */
28125 if (cfa_restores)
28126 emit_cfa_restores (cfa_restores);
28127 return;
28128 }
28129
28130 if (restore_lr && !restoring_GPRs_inline)
28131 {
28132 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28133 restore_saved_lr (0, exit_func);
28134 }
28135
28136 /* Restore fpr's if we need to do it without calling a function. */
28137 if (restoring_FPRs_inline)
28138 {
28139 int offset = info->fp_save_offset + frame_off;
28140 for (i = info->first_fp_reg_save; i < 64; i++)
28141 {
28142 if (save_reg_p (i)
28143 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28144 {
28145 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28146 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28147 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28148 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28149 cfa_restores);
28150 }
28151
28152 offset += fp_reg_size;
28153 }
28154 }
28155
28156 /* If we saved cr, restore it here. Just those that were used. */
28157 if (info->cr_save_p)
28158 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28159
28160 /* If this is V.4, unwind the stack pointer after all of the loads
28161 have been done, or set up r11 if we are restoring fp out of line. */
28162 ptr_regno = 1;
28163 if (!restoring_FPRs_inline)
28164 {
28165 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28166 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28167 ptr_regno = ptr_regno_for_savres (sel);
28168 }
28169
28170 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28171 if (REGNO (frame_reg_rtx) == ptr_regno)
28172 frame_off = 0;
28173
28174 if (insn && restoring_FPRs_inline)
28175 {
28176 if (cfa_restores)
28177 {
28178 REG_NOTES (insn) = cfa_restores;
28179 cfa_restores = NULL_RTX;
28180 }
28181 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28182 RTX_FRAME_RELATED_P (insn) = 1;
28183 }
28184
28185 if (crtl->calls_eh_return)
28186 {
28187 rtx sa = EH_RETURN_STACKADJ_RTX;
28188 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28189 }
28190
28191 if (!sibcall && restoring_FPRs_inline)
28192 {
28193 if (cfa_restores)
28194 {
28195 /* We can't hang the cfa_restores off a simple return,
28196 since the shrink-wrap code sometimes uses an existing
28197 return. This means there might be a path from
28198 pre-prologue code to this return, and dwarf2cfi code
28199 wants the eh_frame unwinder state to be the same on
28200 all paths to any point. So we need to emit the
28201 cfa_restores before the return. For -m64 we really
28202 don't need epilogue cfa_restores at all, except for
28203 this irritating dwarf2cfi-with-shrink-wrap
28204 requirement; the stack red-zone means eh_frame info
28205 from the prologue telling the unwinder to restore
28206 from the stack is perfectly good right to the end of
28207 the function. */
28208 emit_insn (gen_blockage ());
28209 emit_cfa_restores (cfa_restores);
28210 cfa_restores = NULL_RTX;
28211 }
28212
28213 emit_jump_insn (targetm.gen_simple_return ());
28214 }
28215
28216 if (!sibcall && !restoring_FPRs_inline)
28217 {
28218 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28219 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28220 int elt = 0;
28221 RTVEC_ELT (p, elt++) = ret_rtx;
28222 if (lr)
28223 RTVEC_ELT (p, elt++)
28224 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28225
28226 /* We have to restore more than two FP registers, so branch to the
28227 restore function. It will return to our caller. */
28228 int i;
28229 int reg;
28230 rtx sym;
28231
28232 if (flag_shrink_wrap)
28233 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28234
28235 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28236 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28237 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28238 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28239
28240 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28241 {
28242 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28243
28244 RTVEC_ELT (p, elt++)
28245 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28246 if (flag_shrink_wrap
28247 && save_reg_p (info->first_fp_reg_save + i))
28248 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28249 }
28250
28251 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28252 }
28253
28254 if (cfa_restores)
28255 {
28256 if (sibcall)
28257 /* Ensure the cfa_restores are hung off an insn that won't
28258 be reordered above other restores. */
28259 emit_insn (gen_blockage ());
28260
28261 emit_cfa_restores (cfa_restores);
28262 }
28263 }
28264
28265 /* Write function epilogue. */
28266
28267 static void
28268 rs6000_output_function_epilogue (FILE *file)
28269 {
28270 #if TARGET_MACHO
28271 macho_branch_islands ();
28272
28273 {
28274 rtx_insn *insn = get_last_insn ();
28275 rtx_insn *deleted_debug_label = NULL;
28276
28277 /* Mach-O doesn't support labels at the end of objects, so if
28278 it looks like we might want one, take special action.
28279
28280 First, collect any sequence of deleted debug labels. */
28281 while (insn
28282 && NOTE_P (insn)
28283 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28284 {
28285 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28286 notes; instead just set their CODE_LABEL_NUMBER to -1, as
28287 otherwise there would be code generation differences
28288 between -g and -g0. */
28289 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28290 deleted_debug_label = insn;
28291 insn = PREV_INSN (insn);
28292 }
28293
28294 /* Second, if we have:
28295 label:
28296 barrier
28297 then this needs to be detected, so skip past the barrier. */
28298
28299 if (insn && BARRIER_P (insn))
28300 insn = PREV_INSN (insn);
28301
28302 /* Up to now we've only seen notes or barriers. */
28303 if (insn)
28304 {
28305 if (LABEL_P (insn)
28306 || (NOTE_P (insn)
28307 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28308 /* Trailing label: <barrier>. */
28309 fputs ("\tnop\n", file);
28310 else
28311 {
28312 /* Lastly, see if we have a completely empty function body. */
28313 while (insn && ! INSN_P (insn))
28314 insn = PREV_INSN (insn);
28315 /* If we don't find any insns, we've got an empty function body;
28316 i.e. completely empty, without a return or branch. This is
28317 taken as the case where a function body has been removed
28318 because it contains an inline __builtin_unreachable(). GCC
28319 states that reaching __builtin_unreachable() means UB so we're
28320 not obliged to do anything special; however, we want
28321 non-zero-sized function bodies. To meet this, and help the
28322 user out, let's trap the case. */
28323 if (insn == NULL)
28324 fputs ("\ttrap\n", file);
28325 }
28326 }
28327 else if (deleted_debug_label)
28328 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28329 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28330 CODE_LABEL_NUMBER (insn) = -1;
28331 }
28332 #endif
28333
28334 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28335 on its format.
28336
28337 We don't output a traceback table if -finhibit-size-directive was
28338 used. The documentation for -finhibit-size-directive reads
28339 ``don't output a @code{.size} assembler directive, or anything
28340 else that would cause trouble if the function is split in the
28341 middle, and the two halves are placed at locations far apart in
28342 memory.'' The traceback table has this property, since it
28343 includes the offset from the start of the function to the
28344 traceback table itself.
28345
28346 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28347 different traceback table. */
28348 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28349 && ! flag_inhibit_size_directive
28350 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28351 {
28352 const char *fname = NULL;
28353 const char *language_string = lang_hooks.name;
28354 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28355 int i;
28356 int optional_tbtab;
28357 rs6000_stack_t *info = rs6000_stack_info ();
28358
28359 if (rs6000_traceback == traceback_full)
28360 optional_tbtab = 1;
28361 else if (rs6000_traceback == traceback_part)
28362 optional_tbtab = 0;
28363 else
28364 optional_tbtab = !optimize_size && !TARGET_ELF;
28365
28366 if (optional_tbtab)
28367 {
28368 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28369 while (*fname == '.') /* V.4 encodes . in the name */
28370 fname++;
28371
28372 /* Need label immediately before tbtab, so we can compute
28373 its offset from the function start. */
28374 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28375 ASM_OUTPUT_LABEL (file, fname);
28376 }
28377
28378 /* The .tbtab pseudo-op can only be used for the first eight
28379 expressions, since it can't handle the possibly variable
28380 length fields that follow. However, if you omit the optional
28381 fields, the assembler outputs zeros for all optional fields
28382 anyway, giving each variable length field its minimum length
28383 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28384 pseudo-op at all. */
28385
28386 /* An all-zero word flags the start of the tbtab, for debuggers
28387 that have to find it by searching forward from the entry
28388 point or from the current pc. */
28389 fputs ("\t.long 0\n", file);
28390
28391 /* Tbtab format type. Use format type 0. */
28392 fputs ("\t.byte 0,", file);
28393
28394 /* Language type. Unfortunately, there does not seem to be any
28395 official way to discover the language being compiled, so we
28396 use language_string.
28397 C is 0. Fortran is 1. Ada is 3. C++ is 9.
28398 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28399 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28400 either, so for now use 0. */
28401 if (lang_GNU_C ()
28402 || ! strcmp (language_string, "GNU GIMPLE")
28403 || ! strcmp (language_string, "GNU Go")
28404 || ! strcmp (language_string, "libgccjit"))
28405 i = 0;
28406 else if (! strcmp (language_string, "GNU F77")
28407 || lang_GNU_Fortran ())
28408 i = 1;
28409 else if (! strcmp (language_string, "GNU Ada"))
28410 i = 3;
28411 else if (lang_GNU_CXX ()
28412 || ! strcmp (language_string, "GNU Objective-C++"))
28413 i = 9;
28414 else if (! strcmp (language_string, "GNU Java"))
28415 i = 13;
28416 else if (! strcmp (language_string, "GNU Objective-C"))
28417 i = 14;
28418 else
28419 gcc_unreachable ();
28420 fprintf (file, "%d,", i);
28421
28422 /* 8 single bit fields: global linkage (not set for C extern linkage,
28423 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28424 from start of procedure stored in tbtab, internal function, function
28425 has controlled storage, function has no toc, function uses fp,
28426 function logs/aborts fp operations. */
28427 /* Assume that fp operations are used if any fp reg must be saved. */
28428 fprintf (file, "%d,",
28429 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28430
28431 /* 6 bitfields: function is interrupt handler, name present in
28432 proc table, function calls alloca, on condition directives
28433 (controls stack walks, 3 bits), saves condition reg, saves
28434 link reg. */
28435 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28436 set up as a frame pointer, even when there is no alloca call. */
28437 fprintf (file, "%d,",
28438 ((optional_tbtab << 6)
28439 | ((optional_tbtab & frame_pointer_needed) << 5)
28440 | (info->cr_save_p << 1)
28441 | (info->lr_save_p)));
28442
28443 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28444 (6 bits). */
28445 fprintf (file, "%d,",
28446 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28447
28448 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28449 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28450
28451 if (optional_tbtab)
28452 {
28453 /* Compute the parameter info from the function decl argument
28454 list. */
28455 tree decl;
28456 int next_parm_info_bit = 31;
28457
28458 for (decl = DECL_ARGUMENTS (current_function_decl);
28459 decl; decl = DECL_CHAIN (decl))
28460 {
28461 rtx parameter = DECL_INCOMING_RTL (decl);
28462 machine_mode mode = GET_MODE (parameter);
28463
28464 if (GET_CODE (parameter) == REG)
28465 {
28466 if (SCALAR_FLOAT_MODE_P (mode))
28467 {
28468 int bits;
28469
28470 float_parms++;
28471
28472 switch (mode)
28473 {
28474 case E_SFmode:
28475 case E_SDmode:
28476 bits = 0x2;
28477 break;
28478
28479 case E_DFmode:
28480 case E_DDmode:
28481 case E_TFmode:
28482 case E_TDmode:
28483 case E_IFmode:
28484 case E_KFmode:
28485 bits = 0x3;
28486 break;
28487
28488 default:
28489 gcc_unreachable ();
28490 }
28491
28492 /* If only one bit will fit, don't or in this entry. */
28493 if (next_parm_info_bit > 0)
28494 parm_info |= (bits << (next_parm_info_bit - 1));
28495 next_parm_info_bit -= 2;
28496 }
28497 else
28498 {
28499 fixed_parms += ((GET_MODE_SIZE (mode)
28500 + (UNITS_PER_WORD - 1))
28501 / UNITS_PER_WORD);
28502 next_parm_info_bit -= 1;
28503 }
28504 }
28505 }
28506 }
28507
28508 /* Number of fixed point parameters. */
28509 /* This is actually the number of words of fixed point parameters; thus
28510 an 8-byte struct counts as 2, and thus the maximum value is 8. */
28511 fprintf (file, "%d,", fixed_parms);
28512
28513 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28514 all on stack. */
28515 /* This is actually the number of fp registers that hold parameters;
28516 and thus the maximum value is 13. */
28517 /* Set parameters on stack bit if parameters are not in their original
28518 registers, regardless of whether they are on the stack? Xlc
28519 seems to set the bit when not optimizing. */
28520 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28521
28522 if (optional_tbtab)
28523 {
28524 /* Optional fields follow. Some are variable length. */
28525
28526 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28527 float, 11 double float. */
28528 /* There is an entry for each parameter in a register, in the order
28529 that they occur in the parameter list. Any intervening arguments
28530 on the stack are ignored. If the list overflows a long (max
28531 possible length 34 bits) then completely leave off all elements
28532 that don't fit. */
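/* E.g. foo (int, double, float) gives bits 0,11,10 from bit 31
   downward, i.e. parm_info == 0x70000000 (a worked example of the
   encoding built by the loop above).  */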
28533 /* Only emit this long if there was at least one parameter. */
28534 if (fixed_parms || float_parms)
28535 fprintf (file, "\t.long %d\n", parm_info);
28536
28537 /* Offset from start of code to tb table. */
28538 fputs ("\t.long ", file);
28539 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28540 RS6000_OUTPUT_BASENAME (file, fname);
28541 putc ('-', file);
28542 rs6000_output_function_entry (file, fname);
28543 putc ('\n', file);
28544
28545 /* Interrupt handler mask. */
28546 /* Omit this long, since we never set the interrupt handler bit
28547 above. */
28548
28549 /* Number of CTL (controlled storage) anchors. */
28550 /* Omit this long, since the has_ctl bit is never set above. */
28551
28552 /* Displacement into stack of each CTL anchor. */
28553 /* Omit this list of longs, because there are no CTL anchors. */
28554
28555 /* Length of function name. */
28556 if (*fname == '*')
28557 ++fname;
28558 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28559
28560 /* Function name. */
28561 assemble_string (fname, strlen (fname));
28562
28563 /* Register for alloca automatic storage; this is always reg 31.
28564 Only emit this if the alloca bit was set above. */
28565 if (frame_pointer_needed)
28566 fputs ("\t.byte 31\n", file);
28567
28568 fputs ("\t.align 2\n", file);
28569 }
28570 }
28571
28572 /* Arrange to define .LCTOC1 label, if not already done. */
28573 if (need_toc_init)
28574 {
28575 need_toc_init = 0;
28576 if (!toc_initialized)
28577 {
28578 switch_to_section (toc_section);
28579 switch_to_section (current_function_section ());
28580 }
28581 }
28582 }
28583
28584 /* -fsplit-stack support. */
28585
28586 /* A SYMBOL_REF for __morestack. */
28587 static GTY(()) rtx morestack_ref;
28588
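/* Emit RT = RA + C, using the add insn for the target word size.  */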
28589 static rtx
28590 gen_add3_const (rtx rt, rtx ra, long c)
28591 {
28592 if (TARGET_64BIT)
28593 return gen_adddi3 (rt, ra, GEN_INT (c));
28594 else
28595 return gen_addsi3 (rt, ra, GEN_INT (c));
28596 }
28597
28598 /* Emit -fsplit-stack prologue, which goes before the regular function
28599 prologue (at local entry point in the case of ELFv2). */
28600
28601 void
28602 rs6000_expand_split_stack_prologue (void)
28603 {
28604 rs6000_stack_t *info = rs6000_stack_info ();
28605 unsigned HOST_WIDE_INT allocate;
28606 long alloc_hi, alloc_lo;
28607 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
28608 rtx_insn *insn;
28609
28610 gcc_assert (flag_split_stack && reload_completed);
28611
28612 if (!info->push_p)
28613 return;
28614
28615 if (global_regs[29])
28616 {
28617 error ("%qs uses register r29", "-fsplit-stack");
28618 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
28619 "conflicts with %qD", global_regs_decl[29]);
28620 }
28621
28622 allocate = info->total_size;
28623 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
28624 {
28625 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
28626 return;
28627 }
28628 if (morestack_ref == NULL_RTX)
28629 {
28630 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
28631 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
28632 | SYMBOL_FLAG_FUNCTION);
28633 }
28634
28635 r0 = gen_rtx_REG (Pmode, 0);
28636 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28637 r12 = gen_rtx_REG (Pmode, 12);
28638 emit_insn (gen_load_split_stack_limit (r0));
28639 /* Always emit two insns here to calculate the requested stack,
28640 so that the linker can edit them when adjusting size for calling
28641 non-split-stack code. */
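/* E.g. for allocate == 0x12340 the pair is roughly:
	addis 12,1,-1		# alloc_hi == -0x10000
	addi 12,12,-9024	# alloc_lo == -0x2340  */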
28642 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
28643 alloc_lo = -allocate - alloc_hi;
28644 if (alloc_hi != 0)
28645 {
28646 emit_insn (gen_add3_const (r12, r1, alloc_hi));
28647 if (alloc_lo != 0)
28648 emit_insn (gen_add3_const (r12, r12, alloc_lo));
28649 else
28650 emit_insn (gen_nop ());
28651 }
28652 else
28653 {
28654 emit_insn (gen_add3_const (r12, r1, alloc_lo));
28655 emit_insn (gen_nop ());
28656 }
28657
28658 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
28659 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
28660 ok_label = gen_label_rtx ();
28661 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28662 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
28663 gen_rtx_LABEL_REF (VOIDmode, ok_label),
28664 pc_rtx);
28665 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28666 JUMP_LABEL (insn) = ok_label;
28667 /* Mark the jump as very likely to be taken. */
28668 add_reg_br_prob_note (insn, profile_probability::very_likely ());
28669
28670 lr = gen_rtx_REG (Pmode, LR_REGNO);
28671 insn = emit_move_insn (r0, lr);
28672 RTX_FRAME_RELATED_P (insn) = 1;
28673 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
28674 RTX_FRAME_RELATED_P (insn) = 1;
28675
28676 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
28677 const0_rtx, const0_rtx));
28678 call_fusage = NULL_RTX;
28679 use_reg (&call_fusage, r12);
28680 /* Say the call uses r0, even though it doesn't, to stop regrename
28681 from twiddling with the insns saving lr, trashing args for cfun.
28682 The insns restoring lr are similarly protected by making
28683 split_stack_return use r0. */
28684 use_reg (&call_fusage, r0);
28685 add_function_usage_to (insn, call_fusage);
28686 /* Indicate that this function can't jump to non-local gotos. */
28687 make_reg_eh_region_note_nothrow_nononlocal (insn);
28688 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
28689 insn = emit_move_insn (lr, r0);
28690 add_reg_note (insn, REG_CFA_RESTORE, lr);
28691 RTX_FRAME_RELATED_P (insn) = 1;
28692 emit_insn (gen_split_stack_return ());
28693
28694 emit_label (ok_label);
28695 LABEL_NUSES (ok_label) = 1;
28696 }
28697
28698 /* Return the internal arg pointer used for function incoming
28699 arguments. When -fsplit-stack, the arg pointer is r12 so we need
28700 to copy it to a pseudo in order for it to be preserved over calls
28701 and suchlike. We'd really like to use a pseudo here for the
28702 internal arg pointer but data-flow analysis is not prepared to
28703 accept pseudos as live at the beginning of a function. */
28704
28705 static rtx
28706 rs6000_internal_arg_pointer (void)
28707 {
28708 if (flag_split_stack
28709 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
28710 == NULL))
28712 {
28713 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
28714 {
28715 rtx pat;
28716
28717 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
28718 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
28719
28720 /* Put the pseudo initialization right after the note at the
28721 beginning of the function. */
28722 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
28723 gen_rtx_REG (Pmode, 12));
28724 push_topmost_sequence ();
28725 emit_insn_after (pat, get_insns ());
28726 pop_topmost_sequence ();
28727 }
28728 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
28729 FIRST_PARM_OFFSET (current_function_decl));
28730 return copy_to_reg (ret);
28731 }
28732 return virtual_incoming_args_rtx;
28733 }
28734
28735 /* We may have to tell the dataflow pass that the split stack prologue
28736 is initializing a register. */
28737
28738 static void
28739 rs6000_live_on_entry (bitmap regs)
28740 {
28741 if (flag_split_stack)
28742 bitmap_set_bit (regs, 12);
28743 }
28744
28745 /* Emit -fsplit-stack dynamic stack allocation space check. */
28746
28747 void
28748 rs6000_split_stack_space_check (rtx size, rtx label)
28749 {
28750 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28751 rtx limit = gen_reg_rtx (Pmode);
28752 rtx requested = gen_reg_rtx (Pmode);
28753 rtx cmp = gen_reg_rtx (CCUNSmode);
28754 rtx jump;
28755
28756 emit_insn (gen_load_split_stack_limit (limit));
28757 if (CONST_INT_P (size))
28758 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
28759 else
28760 {
28761 size = force_reg (Pmode, size);
28762 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
28763 }
28764 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
28765 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28766 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
28767 gen_rtx_LABEL_REF (VOIDmode, label),
28768 pc_rtx);
28769 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28770 JUMP_LABEL (jump) = label;
28771 }
28772 \f
28773 /* A C compound statement that outputs the assembler code for a thunk
28774 function, used to implement C++ virtual function calls with
28775 multiple inheritance. The thunk acts as a wrapper around a virtual
28776 function, adjusting the implicit object parameter before handing
28777 control off to the real function.
28778
28779 First, emit code to add the integer DELTA to the location that
28780 contains the incoming first argument. Assume that this argument
28781 contains a pointer, and is the one used to pass the `this' pointer
28782 in C++. This is the incoming argument *before* the function
28783 prologue, e.g. `%o0' on a sparc. The addition must preserve the
28784 values of all other incoming arguments.
28785
28786 After the addition, emit code to jump to FUNCTION, which is a
28787 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
28788 not touch the return address. Hence returning from FUNCTION will
28789 return to whoever called the current `thunk'.
28790
28791 The effect must be as if FUNCTION had been called directly with the
28792 adjusted first argument. This macro is responsible for emitting
28793 all of the code for a thunk function; output_function_prologue()
28794 and output_function_epilogue() are not invoked.
28795
28796 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
28797 been extracted from it.) It might possibly be useful on some
28798 targets, but probably not.
28799
28800 If you do not define this macro, the target-independent code in the
28801 C++ frontend will generate a less efficient heavyweight thunk that
28802 calls FUNCTION instead of jumping to it. The generic approach does
28803 not support varargs. */
28804
28805 static void
28806 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
28807 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
28808 tree function)
28809 {
28810 rtx this_rtx, funexp;
28811 rtx_insn *insn;
28812
28813 reload_completed = 1;
28814 epilogue_completed = 1;
28815
28816 /* Mark the end of the (empty) prologue. */
28817 emit_note (NOTE_INSN_PROLOGUE_END);
28818
28819 /* Find the "this" pointer. If the function returns a structure,
28820 the structure return pointer is in r3. */
28821 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
28822 this_rtx = gen_rtx_REG (Pmode, 4);
28823 else
28824 this_rtx = gen_rtx_REG (Pmode, 3);
28825
28826 /* Apply the constant offset, if required. */
28827 if (delta)
28828 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
28829
28830 /* Apply the offset from the vtable, if required. */
28831 if (vcall_offset)
28832 {
28833 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
28834 rtx tmp = gen_rtx_REG (Pmode, 12);
28835
28836 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
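      /* If VCALL_OFFSET does not fit in a signed 16-bit displacement (the
	 range of a D-form memory access), add it with a separate
	 instruction and then load through the register.  */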
28837 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
28838 {
28839 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
28840 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
28841 }
28842 else
28843 {
28844 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
28845
28846 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
28847 }
28848 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
28849 }
28850
28851 /* Generate a tail call to the target function. */
28852 if (!TREE_USED (function))
28853 {
28854 assemble_external (function);
28855 TREE_USED (function) = 1;
28856 }
28857 funexp = XEXP (DECL_RTL (function), 0);
28858 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
28859
28860 #if TARGET_MACHO
28861 if (MACHOPIC_INDIRECT)
28862 funexp = machopic_indirect_call_target (funexp);
28863 #endif
28864
  /* gen_sibcall expects reload to convert a scratch pseudo to LR, so we
     must generate the sibcall RTL explicitly.  */
28867 insn = emit_call_insn (
28868 gen_rtx_PARALLEL (VOIDmode,
28869 gen_rtvec (3,
28870 gen_rtx_CALL (VOIDmode,
28871 funexp, const0_rtx),
28872 gen_rtx_USE (VOIDmode, const0_rtx),
28873 simple_return_rtx)));
28874 SIBLING_CALL_P (insn) = 1;
28875 emit_barrier ();
28876
  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
28881 insn = get_insns ();
28882 shorten_branches (insn);
28883 final_start_function (insn, file, 1);
28884 final (insn, file, 1);
28885 final_end_function ();
28886
28887 reload_completed = 0;
28888 epilogue_completed = 0;
28889 }
28890 \f
28891 /* A quick summary of the various types of 'constant-pool tables'
28892 under PowerPC:
28893
28894 Target Flags Name One table per
28895 AIX (none) AIX TOC object file
28896 AIX -mfull-toc AIX TOC object file
28897 AIX -mminimal-toc AIX minimal TOC translation unit
28898 SVR4/EABI (none) SVR4 SDATA object file
28899 SVR4/EABI -fpic SVR4 pic object file
28900 SVR4/EABI -fPIC SVR4 PIC translation unit
28901 SVR4/EABI -mrelocatable EABI TOC function
28902 SVR4/EABI -maix AIX TOC object file
28903 SVR4/EABI -maix -mminimal-toc
28904 AIX minimal TOC translation unit
28905
28906 Name Reg. Set by entries contains:
28907 made by addrs? fp? sum?
28908
28909 AIX TOC 2 crt0 as Y option option
28910 AIX minimal TOC 30 prolog gcc Y Y option
28911 SVR4 SDATA 13 crt0 gcc N Y N
28912 SVR4 pic 30 prolog ld Y not yet N
28913 SVR4 PIC 30 prolog gcc Y option option
28914 EABI TOC 30 prolog gcc Y option option
28915
28916 */
28917
28918 /* Hash functions for the hash table. */
28919
28920 static unsigned
28921 rs6000_hash_constant (rtx k)
28922 {
28923 enum rtx_code code = GET_CODE (k);
28924 machine_mode mode = GET_MODE (k);
28925 unsigned result = (code << 3) ^ mode;
28926 const char *format;
28927 int flen, fidx;
28928
28929 format = GET_RTX_FORMAT (code);
28930 flen = strlen (format);
28931 fidx = 0;
28932
28933 switch (code)
28934 {
28935 case LABEL_REF:
28936 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
28937
28938 case CONST_WIDE_INT:
28939 {
28940 int i;
28941 flen = CONST_WIDE_INT_NUNITS (k);
28942 for (i = 0; i < flen; i++)
28943 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
28944 return result;
28945 }
28946
28947 case CONST_DOUBLE:
28948 if (mode != VOIDmode)
28949 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
28950 flen = 2;
28951 break;
28952
28953 case CODE_LABEL:
28954 fidx = 3;
28955 break;
28956
28957 default:
28958 break;
28959 }
28960
28961 for (; fidx < flen; fidx++)
28962 switch (format[fidx])
28963 {
28964 case 's':
28965 {
28966 unsigned i, len;
28967 const char *str = XSTR (k, fidx);
28968 len = strlen (str);
28969 result = result * 613 + len;
28970 for (i = 0; i < len; i++)
28971 result = result * 613 + (unsigned) str[i];
28972 break;
28973 }
28974 case 'u':
28975 case 'e':
28976 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
28977 break;
28978 case 'i':
28979 case 'n':
28980 result = result * 613 + (unsigned) XINT (k, fidx);
28981 break;
28982 case 'w':
28983 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
28984 result = result * 613 + (unsigned) XWINT (k, fidx);
28985 else
28986 {
28987 size_t i;
28988 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
28989 result = result * 613 + (unsigned) (XWINT (k, fidx)
28990 >> CHAR_BIT * i);
28991 }
28992 break;
28993 case '0':
28994 break;
28995 default:
28996 gcc_unreachable ();
28997 }
28998
28999 return result;
29000 }
29001
29002 hashval_t
29003 toc_hasher::hash (toc_hash_struct *thc)
29004 {
29005 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29006 }
29007
29008 /* Compare H1 and H2 for equivalence. */
29009
29010 bool
29011 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29012 {
29013 rtx r1 = h1->key;
29014 rtx r2 = h2->key;
29015
  if (h1->key_mode != h2->key_mode)
    return false;
29018
29019 return rtx_equal_p (r1, r2);
29020 }
29021
29022 /* These are the names given by the C++ front-end to vtables, and
29023 vtable-like objects. Ideally, this logic should not be here;
29024 instead, there should be some programmatic way of inquiring as
29025 to whether or not an object is a vtable. */
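/* In Itanium C++ ABI mangling, "_ZTV" prefixes vtables, "_ZTT" VTTs,
   "_ZTI" typeinfo objects and "_ZTC" construction vtables; "_vt." is the
   vtable prefix of the old g++ mangling scheme.  */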
29026
29027 #define VTABLE_NAME_P(NAME) \
29028 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29029 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29030 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29031 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29032 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
29033
29034 #ifdef NO_DOLLAR_IN_LABEL
29035 /* Return a GGC-allocated character string translating dollar signs in
29036 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
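/* For example, "foo$bar$baz" becomes "foo_bar_baz"; a name whose first
   character is '$' is returned unchanged.  */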
29037
29038 const char *
29039 rs6000_xcoff_strip_dollar (const char *name)
29040 {
29041 char *strip, *p;
29042 const char *q;
29043 size_t len;
29044
29045 q = (const char *) strchr (name, '$');
29046
29047 if (q == 0 || q == name)
29048 return name;
29049
29050 len = strlen (name);
29051 strip = XALLOCAVEC (char, len + 1);
29052 strcpy (strip, name);
29053 p = strip + (q - name);
29054 while (p)
29055 {
29056 *p = '_';
29057 p = strchr (p + 1, '$');
29058 }
29059
29060 return ggc_alloc_string (strip, len);
29061 }
29062 #endif
29063
29064 void
29065 rs6000_output_symbol_ref (FILE *file, rtx x)
29066 {
29067 const char *name = XSTR (x, 0);
29068
29069 /* Currently C++ toc references to vtables can be emitted before it
29070 is decided whether the vtable is public or private. If this is
29071 the case, then the linker will eventually complain that there is
29072 a reference to an unknown section. Thus, for vtables only,
29073 we emit the TOC reference to reference the identifier and not the
29074 symbol. */
29075 if (VTABLE_NAME_P (name))
29076 {
29077 RS6000_OUTPUT_BASENAME (file, name);
29078 }
29079 else
29080 assemble_name (file, name);
29081 }
29082
29083 /* Output a TOC entry. We derive the entry name from what is being
29084 written. */
29085
29086 void
29087 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29088 {
29089 char buf[256];
29090 const char *name = buf;
29091 rtx base = x;
29092 HOST_WIDE_INT offset = 0;
29093
29094 gcc_assert (!TARGET_NO_TOC);
29095
29096 /* When the linker won't eliminate them, don't output duplicate
29097 TOC entries (this happens on AIX if there is any kind of TOC,
29098 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29099 CODE_LABELs. */
29100 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29101 {
29102 struct toc_hash_struct *h;
29103
29104 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29105 time because GGC is not initialized at that point. */
29106 if (toc_hash_table == NULL)
29107 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29108
29109 h = ggc_alloc<toc_hash_struct> ();
29110 h->key = x;
29111 h->key_mode = mode;
29112 h->labelno = labelno;
29113
29114 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29115 if (*found == NULL)
29116 *found = h;
29117 else /* This is indeed a duplicate.
29118 Set this label equal to that label. */
29119 {
29120 fputs ("\t.set ", file);
29121 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29122 fprintf (file, "%d,", labelno);
29123 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29124 fprintf (file, "%d\n", ((*found)->labelno));
29125
29126 #ifdef HAVE_AS_TLS
29127 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29128 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29129 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29130 {
29131 fputs ("\t.set ", file);
29132 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29133 fprintf (file, "%d,", labelno);
29134 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29135 fprintf (file, "%d\n", ((*found)->labelno));
29136 }
29137 #endif
29138 return;
29139 }
29140 }
29141
29142 /* If we're going to put a double constant in the TOC, make sure it's
29143 aligned properly when strict alignment is on. */
29144 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29145 && STRICT_ALIGNMENT
29146 && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
    ASM_OUTPUT_ALIGN (file, 3);
29150
29151 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29152
29153 /* Handle FP constants specially. Note that if we have a minimal
29154 TOC, things we put here aren't actually in the TOC, so we can allow
29155 FP constants. */
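  /* As an illustration, on a 64-bit ELF target the DFmode constant 1.0
     comes out as the value 0x3ff0000000000000 after DOUBLE_INT_ASM_OP,
     while a full AIX TOC gets a named entry of the form
     ".tc FD_3ff00000_0[TC],0x3ff0000000000000".  */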
  if (GET_CODE (x) == CONST_DOUBLE
      && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
	  || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29159 {
29160 long k[4];
29161
29162 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29163 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29164 else
29165 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29166
29167 if (TARGET_64BIT)
29168 {
29169 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29170 fputs (DOUBLE_INT_ASM_OP, file);
29171 else
29172 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29173 k[0] & 0xffffffff, k[1] & 0xffffffff,
29174 k[2] & 0xffffffff, k[3] & 0xffffffff);
29175 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29176 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29177 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29178 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29179 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29180 return;
29181 }
29182 else
29183 {
29184 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29185 fputs ("\t.long ", file);
29186 else
29187 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29188 k[0] & 0xffffffff, k[1] & 0xffffffff,
29189 k[2] & 0xffffffff, k[3] & 0xffffffff);
29190 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29191 k[0] & 0xffffffff, k[1] & 0xffffffff,
29192 k[2] & 0xffffffff, k[3] & 0xffffffff);
29193 return;
29194 }
29195 }
  else if (GET_CODE (x) == CONST_DOUBLE
	   && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29198 {
29199 long k[2];
29200
29201 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29202 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29203 else
29204 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29205
29206 if (TARGET_64BIT)
29207 {
29208 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29209 fputs (DOUBLE_INT_ASM_OP, file);
29210 else
29211 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29212 k[0] & 0xffffffff, k[1] & 0xffffffff);
29213 fprintf (file, "0x%lx%08lx\n",
29214 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29215 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29216 return;
29217 }
29218 else
29219 {
29220 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29221 fputs ("\t.long ", file);
29222 else
29223 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29224 k[0] & 0xffffffff, k[1] & 0xffffffff);
29225 fprintf (file, "0x%lx,0x%lx\n",
29226 k[0] & 0xffffffff, k[1] & 0xffffffff);
29227 return;
29228 }
29229 }
  else if (GET_CODE (x) == CONST_DOUBLE
	   && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29232 {
29233 long l;
29234
29235 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29236 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29237 else
29238 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29239
29240 if (TARGET_64BIT)
29241 {
29242 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29243 fputs (DOUBLE_INT_ASM_OP, file);
29244 else
29245 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29246 if (WORDS_BIG_ENDIAN)
29247 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29248 else
29249 fprintf (file, "0x%lx\n", l & 0xffffffff);
29250 return;
29251 }
29252 else
29253 {
29254 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29255 fputs ("\t.long ", file);
29256 else
29257 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29258 fprintf (file, "0x%lx\n", l & 0xffffffff);
29259 return;
29260 }
29261 }
29262 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29263 {
29264 unsigned HOST_WIDE_INT low;
29265 HOST_WIDE_INT high;
29266
29267 low = INTVAL (x) & 0xffffffff;
29268 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29269
29270 /* TOC entries are always Pmode-sized, so when big-endian
29271 smaller integer constants in the TOC need to be padded.
29272 (This is still a win over putting the constants in
29273 a separate constant pool, because then we'd have
29274 to have both a TOC entry _and_ the actual constant.)
29275
29276 For a 32-bit target, CONST_INT values are loaded and shifted
29277 entirely within `low' and can be stored in one TOC entry. */
29278
29279 /* It would be easy to make this work, but it doesn't now. */
29280 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29281
29282 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29283 {
29284 low |= high << 32;
29285 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29286 high = (HOST_WIDE_INT) low >> 32;
29287 low &= 0xffffffff;
29288 }
29289
29290 if (TARGET_64BIT)
29291 {
29292 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29293 fputs (DOUBLE_INT_ASM_OP, file);
29294 else
29295 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29296 (long) high & 0xffffffff, (long) low & 0xffffffff);
29297 fprintf (file, "0x%lx%08lx\n",
29298 (long) high & 0xffffffff, (long) low & 0xffffffff);
29299 return;
29300 }
29301 else
29302 {
29303 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29304 {
29305 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29306 fputs ("\t.long ", file);
29307 else
29308 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29309 (long) high & 0xffffffff, (long) low & 0xffffffff);
29310 fprintf (file, "0x%lx,0x%lx\n",
29311 (long) high & 0xffffffff, (long) low & 0xffffffff);
29312 }
29313 else
29314 {
29315 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29316 fputs ("\t.long ", file);
29317 else
29318 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29319 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29320 }
29321 return;
29322 }
29323 }
29324
29325 if (GET_CODE (x) == CONST)
29326 {
29327 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29328 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29329
29330 base = XEXP (XEXP (x, 0), 0);
29331 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29332 }
29333
29334 switch (GET_CODE (base))
29335 {
29336 case SYMBOL_REF:
29337 name = XSTR (base, 0);
29338 break;
29339
29340 case LABEL_REF:
29341 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29342 CODE_LABEL_NUMBER (XEXP (base, 0)));
29343 break;
29344
29345 case CODE_LABEL:
29346 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29347 break;
29348
29349 default:
29350 gcc_unreachable ();
29351 }
29352
29353 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29354 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29355 else
29356 {
29357 fputs ("\t.tc ", file);
29358 RS6000_OUTPUT_BASENAME (file, name);
29359
29360 if (offset < 0)
29361 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29362 else if (offset)
29363 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29364
29365 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29366 after other TOC symbols, reducing overflow of small TOC access
29367 to [TC] symbols. */
29368 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29369 ? "[TE]," : "[TC],", file);
29370 }
29371
29372 /* Currently C++ toc references to vtables can be emitted before it
29373 is decided whether the vtable is public or private. If this is
29374 the case, then the linker will eventually complain that there is
29375 a TOC reference to an unknown section. Thus, for vtables only,
29376 we emit the TOC reference to reference the symbol and not the
29377 section. */
29378 if (VTABLE_NAME_P (name))
29379 {
29380 RS6000_OUTPUT_BASENAME (file, name);
29381 if (offset < 0)
29382 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29383 else if (offset > 0)
29384 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29385 }
29386 else
29387 output_addr_const (file, x);
29388
29389 #if HAVE_AS_TLS
29390 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29391 {
29392 switch (SYMBOL_REF_TLS_MODEL (base))
29393 {
29394 case 0:
29395 break;
29396 case TLS_MODEL_LOCAL_EXEC:
29397 fputs ("@le", file);
29398 break;
29399 case TLS_MODEL_INITIAL_EXEC:
29400 fputs ("@ie", file);
29401 break;
29402 /* Use global-dynamic for local-dynamic. */
29403 case TLS_MODEL_GLOBAL_DYNAMIC:
29404 case TLS_MODEL_LOCAL_DYNAMIC:
29405 putc ('\n', file);
29406 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29407 fputs ("\t.tc .", file);
29408 RS6000_OUTPUT_BASENAME (file, name);
29409 fputs ("[TC],", file);
29410 output_addr_const (file, x);
29411 fputs ("@m", file);
29412 break;
29413 default:
29414 gcc_unreachable ();
29415 }
29416 }
29417 #endif
29418
29419 putc ('\n', file);
29420 }
29421 \f
29422 /* Output an assembler pseudo-op to write an ASCII string of N characters
29423 starting at P to FILE.
29424
29425 On the RS/6000, we have to do this using the .byte operation and
29426 write out special characters outside the quoted string.
29427 Also, the assembler is broken; very long strings are truncated,
29428 so we must artificially break them up early. */
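/* For example, the three input bytes 'a', 0, 'b' come out as

	.byte "a"
	.byte 0
	.byte "b"

   since the NUL forces the quoted run to be closed and reopened.  */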
29429
29430 void
29431 output_ascii (FILE *file, const char *p, int n)
29432 {
29433 char c;
29434 int i, count_string;
29435 const char *for_string = "\t.byte \"";
29436 const char *for_decimal = "\t.byte ";
29437 const char *to_close = NULL;
29438
29439 count_string = 0;
29440 for (i = 0; i < n; i++)
29441 {
29442 c = *p++;
29443 if (c >= ' ' && c < 0177)
29444 {
29445 if (for_string)
29446 fputs (for_string, file);
29447 putc (c, file);
29448
29449 /* Write two quotes to get one. */
29450 if (c == '"')
29451 {
29452 putc (c, file);
29453 ++count_string;
29454 }
29455
29456 for_string = NULL;
29457 for_decimal = "\"\n\t.byte ";
29458 to_close = "\"\n";
29459 ++count_string;
29460
29461 if (count_string >= 512)
29462 {
29463 fputs (to_close, file);
29464
29465 for_string = "\t.byte \"";
29466 for_decimal = "\t.byte ";
29467 to_close = NULL;
29468 count_string = 0;
29469 }
29470 }
29471 else
29472 {
29473 if (for_decimal)
29474 fputs (for_decimal, file);
29475 fprintf (file, "%d", c);
29476
29477 for_string = "\n\t.byte \"";
29478 for_decimal = ", ";
29479 to_close = "\n";
29480 count_string = 0;
29481 }
29482 }
29483
29484 /* Now close the string if we have written one. Then end the line. */
29485 if (to_close)
29486 fputs (to_close, file);
29487 }
29488 \f
29489 /* Generate a unique section name for FILENAME for a section type
29490 represented by SECTION_DESC. Output goes into BUF.
29491
29492 SECTION_DESC can be any string, as long as it is different for each
29493 possible section type.
29494
29495 We name the section in the same manner as xlc. The name begins with an
29496 underscore followed by the filename (after stripping any leading directory
29497 names) with the last period replaced by the string SECTION_DESC. If
29498 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29499 the name. */
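/* For example, FILENAME "dir/foo.c" with SECTION_DESC "bss_" produces
   "_foobss_"; non-alphanumeric characters other than the last period are
   dropped.  */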
29500
29501 void
29502 rs6000_gen_section_name (char **buf, const char *filename,
29503 const char *section_desc)
29504 {
29505 const char *q, *after_last_slash, *last_period = 0;
29506 char *p;
29507 int len;
29508
29509 after_last_slash = filename;
29510 for (q = filename; *q; q++)
29511 {
29512 if (*q == '/')
29513 after_last_slash = q + 1;
29514 else if (*q == '.')
29515 last_period = q;
29516 }
29517
29518 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29519 *buf = (char *) xmalloc (len);
29520
29521 p = *buf;
29522 *p++ = '_';
29523
29524 for (q = after_last_slash; *q; q++)
29525 {
29526 if (q == last_period)
29527 {
29528 strcpy (p, section_desc);
29529 p += strlen (section_desc);
29530 break;
29531 }
29532
29533 else if (ISALNUM (*q))
29534 *p++ = *q;
29535 }
29536
29537 if (last_period == 0)
29538 strcpy (p, section_desc);
29539 else
29540 *p = '\0';
29541 }
29542 \f
29543 /* Emit profile function. */
29544
29545 void
29546 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29547 {
29548 /* Non-standard profiling for kernels, which just saves LR then calls
29549 _mcount without worrying about arg saves. The idea is to change
29550 the function prologue as little as possible as it isn't easy to
29551 account for arg save/restore code added just for _mcount. */
29552 if (TARGET_PROFILE_KERNEL)
29553 return;
29554
29555 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29556 {
29557 #ifndef NO_PROFILE_COUNTERS
29558 # define NO_PROFILE_COUNTERS 0
29559 #endif
29560 if (NO_PROFILE_COUNTERS)
29561 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29562 LCT_NORMAL, VOIDmode);
29563 else
29564 {
29565 char buf[30];
29566 const char *label_name;
29567 rtx fun;
29568
29569 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29570 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29571 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29572
29573 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29574 LCT_NORMAL, VOIDmode, fun, Pmode);
29575 }
29576 }
29577 else if (DEFAULT_ABI == ABI_DARWIN)
29578 {
29579 const char *mcount_name = RS6000_MCOUNT;
29580 int caller_addr_regno = LR_REGNO;
29581
29582 /* Be conservative and always set this, at least for now. */
29583 crtl->uses_pic_offset_table = 1;
29584
29585 #if TARGET_MACHO
29586 /* For PIC code, set up a stub and collect the caller's address
29587 from r0, which is where the prologue puts it. */
29588 if (MACHOPIC_INDIRECT
29589 && crtl->uses_pic_offset_table)
29590 caller_addr_regno = 0;
29591 #endif
29592 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29593 LCT_NORMAL, VOIDmode,
29594 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
29595 }
29596 }
29597
29598 /* Write function profiler code. */
29599
29600 void
29601 output_function_profiler (FILE *file, int labelno)
29602 {
29603 char buf[100];
29604
29605 switch (DEFAULT_ABI)
29606 {
29607 default:
29608 gcc_unreachable ();
29609
29610 case ABI_V4:
29611 if (!TARGET_32BIT)
29612 {
29613 warning (0, "no profiling of 64-bit code for this ABI");
29614 return;
29615 }
29616 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29617 fprintf (file, "\tmflr %s\n", reg_names[0]);
29618 if (NO_PROFILE_COUNTERS)
29619 {
29620 asm_fprintf (file, "\tstw %s,4(%s)\n",
29621 reg_names[0], reg_names[1]);
29622 }
29623 else if (TARGET_SECURE_PLT && flag_pic)
29624 {
29625 if (TARGET_LINK_STACK)
29626 {
29627 char name[32];
29628 get_ppc476_thunk_name (name);
29629 asm_fprintf (file, "\tbl %s\n", name);
29630 }
29631 else
29632 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
29633 asm_fprintf (file, "\tstw %s,4(%s)\n",
29634 reg_names[0], reg_names[1]);
29635 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29636 asm_fprintf (file, "\taddis %s,%s,",
29637 reg_names[12], reg_names[12]);
29638 assemble_name (file, buf);
29639 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
29640 assemble_name (file, buf);
29641 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
29642 }
29643 else if (flag_pic == 1)
29644 {
29645 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
29646 asm_fprintf (file, "\tstw %s,4(%s)\n",
29647 reg_names[0], reg_names[1]);
29648 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29649 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
29650 assemble_name (file, buf);
29651 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
29652 }
29653 else if (flag_pic > 1)
29654 {
29655 asm_fprintf (file, "\tstw %s,4(%s)\n",
29656 reg_names[0], reg_names[1]);
29657 /* Now, we need to get the address of the label. */
29658 if (TARGET_LINK_STACK)
29659 {
29660 char name[32];
29661 get_ppc476_thunk_name (name);
29662 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
29663 assemble_name (file, buf);
29664 fputs ("-.\n1:", file);
29665 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29666 asm_fprintf (file, "\taddi %s,%s,4\n",
29667 reg_names[11], reg_names[11]);
29668 }
29669 else
29670 {
29671 fputs ("\tbcl 20,31,1f\n\t.long ", file);
29672 assemble_name (file, buf);
29673 fputs ("-.\n1:", file);
29674 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29675 }
29676 asm_fprintf (file, "\tlwz %s,0(%s)\n",
29677 reg_names[0], reg_names[11]);
29678 asm_fprintf (file, "\tadd %s,%s,%s\n",
29679 reg_names[0], reg_names[0], reg_names[11]);
29680 }
29681 else
29682 {
29683 asm_fprintf (file, "\tlis %s,", reg_names[12]);
29684 assemble_name (file, buf);
29685 fputs ("@ha\n", file);
29686 asm_fprintf (file, "\tstw %s,4(%s)\n",
29687 reg_names[0], reg_names[1]);
29688 asm_fprintf (file, "\tla %s,", reg_names[0]);
29689 assemble_name (file, buf);
29690 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
29691 }
29692
29693 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
29694 fprintf (file, "\tbl %s%s\n",
29695 RS6000_MCOUNT, flag_pic ? "@plt" : "");
29696 break;
29697
29698 case ABI_AIX:
29699 case ABI_ELFv2:
29700 case ABI_DARWIN:
29701 /* Don't do anything, done in output_profile_hook (). */
29702 break;
29703 }
29704 }
29705
29706 \f
29707
29708 /* The following variable value is the last issued insn. */
29709
29710 static rtx_insn *last_scheduled_insn;
29711
/* The following variable helps to balance issuing of load and
   store instructions.  */
29714
29715 static int load_store_pendulum;
29716
29717 /* The following variable helps pair divide insns during scheduling. */
29718 static int divide_cnt;
29719 /* The following variable helps pair and alternate vector and vector load
29720 insns during scheduling. */
29721 static int vec_pairing;
29722
29723
29724 /* Power4 load update and store update instructions are cracked into a
29725 load or store and an integer insn which are executed in the same cycle.
29726 Branches have their own dispatch slot which does not count against the
29727 GCC issue rate, but it changes the program flow so there are no other
29728 instructions to issue in this cycle. */
29729
29730 static int
29731 rs6000_variable_issue_1 (rtx_insn *insn, int more)
29732 {
29733 last_scheduled_insn = insn;
29734 if (GET_CODE (PATTERN (insn)) == USE
29735 || GET_CODE (PATTERN (insn)) == CLOBBER)
29736 {
29737 cached_can_issue_more = more;
29738 return cached_can_issue_more;
29739 }
29740
29741 if (insn_terminates_group_p (insn, current_group))
29742 {
29743 cached_can_issue_more = 0;
29744 return cached_can_issue_more;
29745 }
29746
  /* If the insn has no reservation (it was not recognized), leave the
     issue count unchanged.  */
29748 if (recog_memoized (insn) < 0)
29749 return more;
29750
29751 if (rs6000_sched_groups)
29752 {
29753 if (is_microcoded_insn (insn))
29754 cached_can_issue_more = 0;
29755 else if (is_cracked_insn (insn))
29756 cached_can_issue_more = more > 2 ? more - 2 : 0;
29757 else
29758 cached_can_issue_more = more - 1;
29759
29760 return cached_can_issue_more;
29761 }
29762
29763 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
29764 return 0;
29765
29766 cached_can_issue_more = more - 1;
29767 return cached_can_issue_more;
29768 }
29769
29770 static int
29771 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
29772 {
29773 int r = rs6000_variable_issue_1 (insn, more);
29774 if (verbose)
29775 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
29776 return r;
29777 }
29778
29779 /* Adjust the cost of a scheduling dependency. Return the new cost of
29780 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
29781
29782 static int
29783 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
29784 unsigned int)
29785 {
29786 enum attr_type attr_type;
29787
29788 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
29789 return cost;
29790
29791 switch (dep_type)
29792 {
29793 case REG_DEP_TRUE:
29794 {
29795 /* Data dependency; DEP_INSN writes a register that INSN reads
29796 some cycles later. */
29797
29798 /* Separate a load from a narrower, dependent store. */
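	  /* A load that overlaps a preceding, narrower store cannot be
	     satisfied by store forwarding on these processors; it gets
	     rejected and retried once the store has completed, which is
	     expensive, so pad the latency to keep the pair apart.  */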
29799 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
29800 && GET_CODE (PATTERN (insn)) == SET
29801 && GET_CODE (PATTERN (dep_insn)) == SET
29802 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
29803 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
29804 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
29805 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
29806 return cost + 14;
29807
29808 attr_type = get_attr_type (insn);
29809
29810 switch (attr_type)
29811 {
29812 case TYPE_JMPREG:
29813 /* Tell the first scheduling pass about the latency between
29814 a mtctr and bctr (and mtlr and br/blr). The first
29815 scheduling pass will not know about this latency since
29816 the mtctr instruction, which has the latency associated
29817 to it, will be generated by reload. */
29818 return 4;
29819 case TYPE_BRANCH:
29820 /* Leave some extra cycles between a compare and its
29821 dependent branch, to inhibit expensive mispredicts. */
29822 if ((rs6000_tune == PROCESSOR_PPC603
29823 || rs6000_tune == PROCESSOR_PPC604
29824 || rs6000_tune == PROCESSOR_PPC604e
29825 || rs6000_tune == PROCESSOR_PPC620
29826 || rs6000_tune == PROCESSOR_PPC630
29827 || rs6000_tune == PROCESSOR_PPC750
29828 || rs6000_tune == PROCESSOR_PPC7400
29829 || rs6000_tune == PROCESSOR_PPC7450
29830 || rs6000_tune == PROCESSOR_PPCE5500
29831 || rs6000_tune == PROCESSOR_PPCE6500
29832 || rs6000_tune == PROCESSOR_POWER4
29833 || rs6000_tune == PROCESSOR_POWER5
29834 || rs6000_tune == PROCESSOR_POWER7
29835 || rs6000_tune == PROCESSOR_POWER8
29836 || rs6000_tune == PROCESSOR_POWER9
29837 || rs6000_tune == PROCESSOR_CELL)
29838 && recog_memoized (dep_insn)
29839 && (INSN_CODE (dep_insn) >= 0))
29840
29841 switch (get_attr_type (dep_insn))
29842 {
29843 case TYPE_CMP:
29844 case TYPE_FPCOMPARE:
29845 case TYPE_CR_LOGICAL:
29846 return cost + 2;
29847 case TYPE_EXTS:
29848 case TYPE_MUL:
29849 if (get_attr_dot (dep_insn) == DOT_YES)
29850 return cost + 2;
29851 else
29852 break;
29853 case TYPE_SHIFT:
29854 if (get_attr_dot (dep_insn) == DOT_YES
29855 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
29856 return cost + 2;
29857 else
29858 break;
29859 default:
29860 break;
29861 }
29862 break;
29863
29864 case TYPE_STORE:
29865 case TYPE_FPSTORE:
29866 if ((rs6000_tune == PROCESSOR_POWER6)
29867 && recog_memoized (dep_insn)
29868 && (INSN_CODE (dep_insn) >= 0))
29869 {
29870
29871 if (GET_CODE (PATTERN (insn)) != SET)
29872 /* If this happens, we have to extend this to schedule
29873 optimally. Return default for now. */
29874 return cost;
29875
29876 /* Adjust the cost for the case where the value written
29877 by a fixed point operation is used as the address
29878 gen value on a store. */
29879 switch (get_attr_type (dep_insn))
29880 {
29881 case TYPE_LOAD:
29882 case TYPE_CNTLZ:
29883 {
29884 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29885 return get_attr_sign_extend (dep_insn)
29886 == SIGN_EXTEND_YES ? 6 : 4;
29887 break;
29888 }
29889 case TYPE_SHIFT:
29890 {
29891 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29892 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
29893 6 : 3;
29894 break;
29895 }
29896 case TYPE_INTEGER:
29897 case TYPE_ADD:
29898 case TYPE_LOGICAL:
29899 case TYPE_EXTS:
29900 case TYPE_INSERT:
29901 {
29902 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29903 return 3;
29904 break;
29905 }
29906 case TYPE_STORE:
29907 case TYPE_FPLOAD:
29908 case TYPE_FPSTORE:
29909 {
29910 if (get_attr_update (dep_insn) == UPDATE_YES
29911 && ! rs6000_store_data_bypass_p (dep_insn, insn))
29912 return 3;
29913 break;
29914 }
29915 case TYPE_MUL:
29916 {
29917 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29918 return 17;
29919 break;
29920 }
29921 case TYPE_DIV:
29922 {
29923 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29924 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
29925 break;
29926 }
29927 default:
29928 break;
29929 }
29930 }
29931 break;
29932
29933 case TYPE_LOAD:
29934 if ((rs6000_tune == PROCESSOR_POWER6)
29935 && recog_memoized (dep_insn)
29936 && (INSN_CODE (dep_insn) >= 0))
29937 {
29938
29939 /* Adjust the cost for the case where the value written
29940 by a fixed point instruction is used within the address
29941 gen portion of a subsequent load(u)(x) */
29942 switch (get_attr_type (dep_insn))
29943 {
29944 case TYPE_LOAD:
29945 case TYPE_CNTLZ:
29946 {
29947 if (set_to_load_agen (dep_insn, insn))
29948 return get_attr_sign_extend (dep_insn)
29949 == SIGN_EXTEND_YES ? 6 : 4;
29950 break;
29951 }
29952 case TYPE_SHIFT:
29953 {
29954 if (set_to_load_agen (dep_insn, insn))
29955 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
29956 6 : 3;
29957 break;
29958 }
29959 case TYPE_INTEGER:
29960 case TYPE_ADD:
29961 case TYPE_LOGICAL:
29962 case TYPE_EXTS:
29963 case TYPE_INSERT:
29964 {
29965 if (set_to_load_agen (dep_insn, insn))
29966 return 3;
29967 break;
29968 }
29969 case TYPE_STORE:
29970 case TYPE_FPLOAD:
29971 case TYPE_FPSTORE:
29972 {
29973 if (get_attr_update (dep_insn) == UPDATE_YES
29974 && set_to_load_agen (dep_insn, insn))
29975 return 3;
29976 break;
29977 }
29978 case TYPE_MUL:
29979 {
29980 if (set_to_load_agen (dep_insn, insn))
29981 return 17;
29982 break;
29983 }
29984 case TYPE_DIV:
29985 {
29986 if (set_to_load_agen (dep_insn, insn))
29987 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
29988 break;
29989 }
29990 default:
29991 break;
29992 }
29993 }
29994 break;
29995
29996 case TYPE_FPLOAD:
29997 if ((rs6000_tune == PROCESSOR_POWER6)
29998 && get_attr_update (insn) == UPDATE_NO
29999 && recog_memoized (dep_insn)
30000 && (INSN_CODE (dep_insn) >= 0)
30001 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30002 return 2;
30003
30004 default:
30005 break;
30006 }
30007
30008 /* Fall out to return default cost. */
30009 }
30010 break;
30011
30012 case REG_DEP_OUTPUT:
30013 /* Output dependency; DEP_INSN writes a register that INSN writes some
30014 cycles later. */
30015 if ((rs6000_tune == PROCESSOR_POWER6)
30016 && recog_memoized (dep_insn)
30017 && (INSN_CODE (dep_insn) >= 0))
30018 {
30019 attr_type = get_attr_type (insn);
30020
30021 switch (attr_type)
30022 {
30023 case TYPE_FP:
30024 case TYPE_FPSIMPLE:
30025 if (get_attr_type (dep_insn) == TYPE_FP
30026 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30027 return 1;
30028 break;
30029 case TYPE_FPLOAD:
30030 if (get_attr_update (insn) == UPDATE_NO
30031 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30032 return 2;
30033 break;
30034 default:
30035 break;
30036 }
30037 }
30038 /* Fall through, no cost for output dependency. */
30039 /* FALLTHRU */
30040
30041 case REG_DEP_ANTI:
30042 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30043 cycles later. */
30044 return 0;
30045
30046 default:
30047 gcc_unreachable ();
30048 }
30049
30050 return cost;
30051 }
30052
30053 /* Debug version of rs6000_adjust_cost. */
30054
30055 static int
30056 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30057 int cost, unsigned int dw)
30058 {
30059 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30060
30061 if (ret != cost)
30062 {
30063 const char *dep;
30064
30065 switch (dep_type)
30066 {
	default: dep = "unknown dependency"; break;
	case REG_DEP_TRUE: dep = "data dependency"; break;
	case REG_DEP_OUTPUT: dep = "output dependency"; break;
	case REG_DEP_ANTI: dep = "anti dependency"; break;
30071 }
30072
30073 fprintf (stderr,
30074 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30075 "%s, insn:\n", ret, cost, dep);
30076
30077 debug_rtx (insn);
30078 }
30079
30080 return ret;
30081 }
30082
/* Return true if INSN is microcoded on the current target,
   false otherwise.  */
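/* For example, on Power4/Power5 an algebraic load with update (lhau) or
   an update-form indexed load or store (lwzux, stwux) is split into
   multiple internal operations and dispatched alone in its group, as is
   mfcr.  */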
30085
30086 static bool
30087 is_microcoded_insn (rtx_insn *insn)
30088 {
30089 if (!insn || !NONDEBUG_INSN_P (insn)
30090 || GET_CODE (PATTERN (insn)) == USE
30091 || GET_CODE (PATTERN (insn)) == CLOBBER)
30092 return false;
30093
30094 if (rs6000_tune == PROCESSOR_CELL)
30095 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30096
30097 if (rs6000_sched_groups
30098 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30099 {
30100 enum attr_type type = get_attr_type (insn);
30101 if ((type == TYPE_LOAD
30102 && get_attr_update (insn) == UPDATE_YES
30103 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30104 || ((type == TYPE_LOAD || type == TYPE_STORE)
30105 && get_attr_update (insn) == UPDATE_YES
30106 && get_attr_indexed (insn) == INDEXED_YES)
30107 || type == TYPE_MFCR)
30108 return true;
30109 }
30110
30111 return false;
30112 }
30113
30114 /* The function returns true if INSN is cracked into 2 instructions
30115 by the processor (and therefore occupies 2 issue slots). */
30116
30117 static bool
30118 is_cracked_insn (rtx_insn *insn)
30119 {
30120 if (!insn || !NONDEBUG_INSN_P (insn)
30121 || GET_CODE (PATTERN (insn)) == USE
30122 || GET_CODE (PATTERN (insn)) == CLOBBER)
30123 return false;
30124
30125 if (rs6000_sched_groups
30126 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30127 {
30128 enum attr_type type = get_attr_type (insn);
30129 if ((type == TYPE_LOAD
30130 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30131 && get_attr_update (insn) == UPDATE_NO)
30132 || (type == TYPE_LOAD
30133 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30134 && get_attr_update (insn) == UPDATE_YES
30135 && get_attr_indexed (insn) == INDEXED_NO)
30136 || (type == TYPE_STORE
30137 && get_attr_update (insn) == UPDATE_YES
30138 && get_attr_indexed (insn) == INDEXED_NO)
30139 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30140 && get_attr_update (insn) == UPDATE_YES)
30141 || (type == TYPE_CR_LOGICAL
30142 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30143 || (type == TYPE_EXTS
30144 && get_attr_dot (insn) == DOT_YES)
30145 || (type == TYPE_SHIFT
30146 && get_attr_dot (insn) == DOT_YES
30147 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30148 || (type == TYPE_MUL
30149 && get_attr_dot (insn) == DOT_YES)
30150 || type == TYPE_DIV
30151 || (type == TYPE_INSERT
30152 && get_attr_size (insn) == SIZE_32))
30153 return true;
30154 }
30155
30156 return false;
30157 }
30158
30159 /* The function returns true if INSN can be issued only from
30160 the branch slot. */
30161
30162 static bool
30163 is_branch_slot_insn (rtx_insn *insn)
30164 {
30165 if (!insn || !NONDEBUG_INSN_P (insn)
30166 || GET_CODE (PATTERN (insn)) == USE
30167 || GET_CODE (PATTERN (insn)) == CLOBBER)
30168 return false;
30169
30170 if (rs6000_sched_groups)
30171 {
30172 enum attr_type type = get_attr_type (insn);
30173 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30174 return true;
30175 return false;
30176 }
30177
30178 return false;
30179 }
30180
/* Return true if OUT_INSN sets a value that is used in the address
   generation computation of IN_INSN.  */
30183 static bool
30184 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30185 {
30186 rtx out_set, in_set;
30187
30188 /* For performance reasons, only handle the simple case where
30189 both loads are a single_set. */
30190 out_set = single_set (out_insn);
30191 if (out_set)
30192 {
30193 in_set = single_set (in_insn);
30194 if (in_set)
30195 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30196 }
30197
30198 return false;
30199 }
30200
/* Try to determine the base, offset and size parts of the given MEM.
   Return true if successful, false if any of the values could not be
   determined.

   This function only looks for REG or REG+CONST address forms.
   A REG+REG address form will return false.  */
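/* For example, (mem:DI (plus:DI (reg:DI 3) (const_int 16))) with a known
   size gives *BASE = r3, *OFFSET = 16 and *SIZE = 8.  */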
30207
30208 static bool
30209 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30210 HOST_WIDE_INT *size)
30211 {
  rtx addr_rtx;
  if (MEM_SIZE_KNOWN_P (mem))
    *size = MEM_SIZE (mem);
  else
    return false;

  addr_rtx = XEXP (mem, 0);
30219 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30220 addr_rtx = XEXP (addr_rtx, 1);
30221
30222 *offset = 0;
30223 while (GET_CODE (addr_rtx) == PLUS
30224 && CONST_INT_P (XEXP (addr_rtx, 1)))
30225 {
30226 *offset += INTVAL (XEXP (addr_rtx, 1));
30227 addr_rtx = XEXP (addr_rtx, 0);
30228 }
30229 if (!REG_P (addr_rtx))
30230 return false;
30231
30232 *base = addr_rtx;
30233 return true;
30234 }
30235
/* Return true if the target storage location of MEM1 is adjacent to
   the target storage location of MEM2.  */
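/* For example, an 8-byte access at 16(r1) and an 8-byte access at 24(r1)
   are adjacent; accesses through different base registers are never
   considered adjacent, even if the registers hold equal values.  */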
30239
30240 static bool
30241 adjacent_mem_locations (rtx mem1, rtx mem2)
30242 {
30243 rtx reg1, reg2;
30244 HOST_WIDE_INT off1, size1, off2, size2;
30245
30246 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30247 && get_memref_parts (mem2, &reg2, &off2, &size2))
30248 return ((REGNO (reg1) == REGNO (reg2))
30249 && ((off1 + size1 == off2)
30250 || (off2 + size2 == off1)));
30251
30252 return false;
30253 }
30254
30255 /* This function returns true if it can be determined that the two MEM
30256 locations overlap by at least 1 byte based on base reg/offset/size. */
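/* For example, a 16-byte access at 0(r9) overlaps an 8-byte access at
   8(r9); with different base registers no overlap can be proven from
   base/offset alone, so false is returned.  */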
30257
30258 static bool
30259 mem_locations_overlap (rtx mem1, rtx mem2)
30260 {
30261 rtx reg1, reg2;
30262 HOST_WIDE_INT off1, size1, off2, size2;
30263
30264 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30265 && get_memref_parts (mem2, &reg2, &off2, &size2))
30266 return ((REGNO (reg1) == REGNO (reg2))
30267 && (((off1 <= off2) && (off1 + size1 > off2))
30268 || ((off2 <= off1) && (off2 + size2 > off1))));
30269
30270 return false;
30271 }
30272
30273 /* A C statement (sans semicolon) to update the integer scheduling
30274 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30275 INSN earlier, reduce the priority to execute INSN later. Do not
30276 define this macro if you do not need to adjust the scheduling
30277 priorities of insns. */
30278
30279 static int
30280 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30281 {
30282 rtx load_mem, str_mem;
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     after other integer operations.  */
30287
30288 #if 0
30289 if (! INSN_P (insn))
30290 return priority;
30291
30292 if (GET_CODE (PATTERN (insn)) == USE)
30293 return priority;
30294
30295 switch (rs6000_tune) {
30296 case PROCESSOR_PPC750:
30297 switch (get_attr_type (insn))
30298 {
30299 default:
30300 break;
30301
30302 case TYPE_MUL:
30303 case TYPE_DIV:
30304 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30305 priority, priority);
30306 if (priority >= 0 && priority < 0x01000000)
30307 priority >>= 3;
30308 break;
30309 }
30310 }
30311 #endif
30312
30313 if (insn_must_be_first_in_group (insn)
30314 && reload_completed
30315 && current_sched_info->sched_max_insns_priority
30316 && rs6000_sched_restricted_insns_priority)
30317 {
30318
30319 /* Prioritize insns that can be dispatched only in the first
30320 dispatch slot. */
30321 if (rs6000_sched_restricted_insns_priority == 1)
30322 /* Attach highest priority to insn. This means that in
30323 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30324 precede 'priority' (critical path) considerations. */
30325 return current_sched_info->sched_max_insns_priority;
30326 else if (rs6000_sched_restricted_insns_priority == 2)
30327 /* Increase priority of insn by a minimal amount. This means that in
30328 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30329 considerations precede dispatch-slot restriction considerations. */
30330 return (priority + 1);
30331 }
30332
30333 if (rs6000_tune == PROCESSOR_POWER6
30334 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30335 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30336 /* Attach highest priority to insn if the scheduler has just issued two
30337 stores and this instruction is a load, or two loads and this instruction
30338 is a store. Power6 wants loads and stores scheduled alternately
30339 when possible */
30340 return current_sched_info->sched_max_insns_priority;
30341
30342 return priority;
30343 }
30344
30345 /* Return true if the instruction is nonpipelined on the Cell. */
30346 static bool
30347 is_nonpipeline_insn (rtx_insn *insn)
30348 {
30349 enum attr_type type;
30350 if (!insn || !NONDEBUG_INSN_P (insn)
30351 || GET_CODE (PATTERN (insn)) == USE
30352 || GET_CODE (PATTERN (insn)) == CLOBBER)
30353 return false;
30354
30355 type = get_attr_type (insn);
30356 if (type == TYPE_MUL
30357 || type == TYPE_DIV
30358 || type == TYPE_SDIV
30359 || type == TYPE_DDIV
30360 || type == TYPE_SSQRT
30361 || type == TYPE_DSQRT
30362 || type == TYPE_MFCR
30363 || type == TYPE_MFCRF
30364 || type == TYPE_MFJMPR)
30365 {
30366 return true;
30367 }
30368 return false;
30369 }
30370
30371
30372 /* Return how many instructions the machine can issue per cycle. */
30373
30374 static int
30375 rs6000_issue_rate (void)
30376 {
30377 /* Unless scheduling for register pressure, use issue rate of 1 for
30378 first scheduling pass to decrease degradation. */
30379 if (!reload_completed && !flag_sched_pressure)
30380 return 1;
30381
  switch (rs6000_tune)
    {
30383 case PROCESSOR_RS64A:
30384 case PROCESSOR_PPC601: /* ? */
30385 case PROCESSOR_PPC7450:
30386 return 3;
30387 case PROCESSOR_PPC440:
30388 case PROCESSOR_PPC603:
30389 case PROCESSOR_PPC750:
30390 case PROCESSOR_PPC7400:
30391 case PROCESSOR_PPC8540:
30392 case PROCESSOR_PPC8548:
30393 case PROCESSOR_CELL:
30394 case PROCESSOR_PPCE300C2:
30395 case PROCESSOR_PPCE300C3:
30396 case PROCESSOR_PPCE500MC:
30397 case PROCESSOR_PPCE500MC64:
30398 case PROCESSOR_PPCE5500:
30399 case PROCESSOR_PPCE6500:
30400 case PROCESSOR_TITAN:
30401 return 2;
30402 case PROCESSOR_PPC476:
30403 case PROCESSOR_PPC604:
30404 case PROCESSOR_PPC604e:
30405 case PROCESSOR_PPC620:
30406 case PROCESSOR_PPC630:
30407 return 4;
30408 case PROCESSOR_POWER4:
30409 case PROCESSOR_POWER5:
30410 case PROCESSOR_POWER6:
30411 case PROCESSOR_POWER7:
30412 return 5;
30413 case PROCESSOR_POWER8:
30414 return 7;
30415 case PROCESSOR_POWER9:
30416 return 6;
30417 default:
30418 return 1;
30419 }
30420 }
30421
30422 /* Return how many instructions to look ahead for better insn
30423 scheduling. */
30424
30425 static int
30426 rs6000_use_sched_lookahead (void)
30427 {
30428 switch (rs6000_tune)
30429 {
30430 case PROCESSOR_PPC8540:
30431 case PROCESSOR_PPC8548:
30432 return 4;
30433
30434 case PROCESSOR_CELL:
30435 return (reload_completed ? 8 : 0);
30436
30437 default:
30438 return 0;
30439 }
30440 }
30441
/* We are choosing an insn from the ready queue.  Return zero if INSN
   can be chosen; nonzero otherwise.  */
30444 static int
30445 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30446 {
30447 if (ready_index == 0)
30448 return 0;
30449
30450 if (rs6000_tune != PROCESSOR_CELL)
30451 return 0;
30452
30453 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30454
30455 if (!reload_completed
30456 || is_nonpipeline_insn (insn)
30457 || is_microcoded_insn (insn))
30458 return 1;
30459
30460 return 0;
30461 }
30462
30463 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30464 and return true. */
30465
30466 static bool
30467 find_mem_ref (rtx pat, rtx *mem_ref)
30468 {
30469 const char * fmt;
30470 int i, j;
30471
30472 /* stack_tie does not produce any real memory traffic. */
30473 if (tie_operand (pat, VOIDmode))
30474 return false;
30475
30476 if (GET_CODE (pat) == MEM)
30477 {
30478 *mem_ref = pat;
30479 return true;
30480 }
30481
30482 /* Recursively process the pattern. */
30483 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30484
30485 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30486 {
30487 if (fmt[i] == 'e')
30488 {
30489 if (find_mem_ref (XEXP (pat, i), mem_ref))
30490 return true;
30491 }
30492 else if (fmt[i] == 'E')
30493 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30494 {
30495 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30496 return true;
30497 }
30498 }
30499
30500 return false;
30501 }
30502
30503 /* Determine if PAT is a PATTERN of a load insn. */
30504
30505 static bool
30506 is_load_insn1 (rtx pat, rtx *load_mem)
30507 {
  if (!pat)
30509 return false;
30510
30511 if (GET_CODE (pat) == SET)
30512 return find_mem_ref (SET_SRC (pat), load_mem);
30513
30514 if (GET_CODE (pat) == PARALLEL)
30515 {
30516 int i;
30517
30518 for (i = 0; i < XVECLEN (pat, 0); i++)
30519 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30520 return true;
30521 }
30522
30523 return false;
30524 }
30525
30526 /* Determine if INSN loads from memory. */
30527
30528 static bool
30529 is_load_insn (rtx insn, rtx *load_mem)
30530 {
30531 if (!insn || !INSN_P (insn))
30532 return false;
30533
30534 if (CALL_P (insn))
30535 return false;
30536
30537 return is_load_insn1 (PATTERN (insn), load_mem);
30538 }
30539
30540 /* Determine if PAT is a PATTERN of a store insn. */
30541
30542 static bool
30543 is_store_insn1 (rtx pat, rtx *str_mem)
30544 {
  if (!pat)
30546 return false;
30547
30548 if (GET_CODE (pat) == SET)
30549 return find_mem_ref (SET_DEST (pat), str_mem);
30550
30551 if (GET_CODE (pat) == PARALLEL)
30552 {
30553 int i;
30554
30555 for (i = 0; i < XVECLEN (pat, 0); i++)
30556 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30557 return true;
30558 }
30559
30560 return false;
30561 }
30562
30563 /* Determine if INSN stores to memory. */
30564
30565 static bool
30566 is_store_insn (rtx insn, rtx *str_mem)
30567 {
30568 if (!insn || !INSN_P (insn))
30569 return false;
30570
30571 return is_store_insn1 (PATTERN (insn), str_mem);
30572 }
30573
30574 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30575
30576 static bool
30577 is_power9_pairable_vec_type (enum attr_type type)
30578 {
30579 switch (type)
30580 {
30581 case TYPE_VECSIMPLE:
30582 case TYPE_VECCOMPLEX:
30583 case TYPE_VECDIV:
30584 case TYPE_VECCMP:
30585 case TYPE_VECPERM:
30586 case TYPE_VECFLOAT:
30587 case TYPE_VECFDIV:
30588 case TYPE_VECDOUBLE:
30589 return true;
30590 default:
30591 break;
30592 }
30593 return false;
30594 }
30595
30596 /* Returns whether the dependence between INSN and NEXT is considered
30597 costly by the given target. */
30598
30599 static bool
30600 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
30601 {
30602 rtx insn;
30603 rtx next;
30604 rtx load_mem, str_mem;
30605
30606 /* If the flag is not enabled - no dependence is considered costly;
30607 allow all dependent insns in the same group.
30608 This is the most aggressive option. */
30609 if (rs6000_sched_costly_dep == no_dep_costly)
30610 return false;
30611
30612 /* If the flag is set to 1 - a dependence is always considered costly;
30613 do not allow dependent instructions in the same group.
30614 This is the most conservative option. */
30615 if (rs6000_sched_costly_dep == all_deps_costly)
30616 return true;
30617
30618 insn = DEP_PRO (dep);
30619 next = DEP_CON (dep);
30620
30621 if (rs6000_sched_costly_dep == store_to_load_dep_costly
30622 && is_load_insn (next, &load_mem)
30623 && is_store_insn (insn, &str_mem))
30624 /* Prevent load after store in the same group. */
30625 return true;
30626
30627 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
30628 && is_load_insn (next, &load_mem)
30629 && is_store_insn (insn, &str_mem)
30630 && DEP_TYPE (dep) == REG_DEP_TRUE
      && mem_locations_overlap (str_mem, load_mem))
30632 /* Prevent load after store in the same group if it is a true
30633 dependence. */
30634 return true;
30635
30636 /* The flag is set to X; dependences with latency >= X are considered costly,
30637 and will not be scheduled in the same group. */
30638 if (rs6000_sched_costly_dep <= max_dep_latency
30639 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
30640 return true;
30641
30642 return false;
30643 }
30644
30645 /* Return the next insn after INSN that is found before TAIL is reached,
30646 skipping any "non-active" insns - insns that will not actually occupy
30647 an issue slot. Return NULL_RTX if such an insn is not found. */
30648
30649 static rtx_insn *
30650 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
30651 {
30652 if (insn == NULL_RTX || insn == tail)
30653 return NULL;
30654
30655 while (1)
30656 {
30657 insn = NEXT_INSN (insn);
30658 if (insn == NULL_RTX || insn == tail)
30659 return NULL;
30660
30661 if (CALL_P (insn)
30662 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
30663 || (NONJUMP_INSN_P (insn)
30664 && GET_CODE (PATTERN (insn)) != USE
30665 && GET_CODE (PATTERN (insn)) != CLOBBER
30666 && INSN_CODE (insn) != CODE_FOR_stack_tie))
30667 break;
30668 }
30669 return insn;
30670 }
30671
30672 /* Do Power9 specific sched_reorder2 reordering of ready list. */
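/* The scheduler issues the insn at the highest index of the ready list
   first, so moving a candidate "to the end of the list" below makes it
   the next insn to be chosen.  */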
30673
30674 static int
30675 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
30676 {
30677 int pos;
30678 int i;
30679 rtx_insn *tmp;
30680 enum attr_type type, type2;
30681
30682 type = get_attr_type (last_scheduled_insn);
30683
30684 /* Try to issue fixed point divides back-to-back in pairs so they will be
30685 routed to separate execution units and execute in parallel. */
30686 if (type == TYPE_DIV && divide_cnt == 0)
30687 {
30688 /* First divide has been scheduled. */
30689 divide_cnt = 1;
30690
30691 /* Scan the ready list looking for another divide, if found move it
30692 to the end of the list so it is chosen next. */
30693 pos = lastpos;
30694 while (pos >= 0)
30695 {
30696 if (recog_memoized (ready[pos]) >= 0
30697 && get_attr_type (ready[pos]) == TYPE_DIV)
30698 {
30699 tmp = ready[pos];
30700 for (i = pos; i < lastpos; i++)
30701 ready[i] = ready[i + 1];
30702 ready[lastpos] = tmp;
30703 break;
30704 }
30705 pos--;
30706 }
30707 }
30708 else
30709 {
30710 /* Last insn was the 2nd divide or not a divide, reset the counter. */
30711 divide_cnt = 0;
30712
30713 /* The best dispatch throughput for vector and vector load insns can be
30714 achieved by interleaving a vector and vector load such that they'll
30715 dispatch to the same superslice. If this pairing cannot be achieved
30716 then it is best to pair vector insns together and vector load insns
30717 together.
30718
30719 To aid in this pairing, vec_pairing maintains the current state with
30720 the following values:
30721
30722 0 : Initial state, no vecload/vector pairing has been started.
30723
30724 1 : A vecload or vector insn has been issued and a candidate for
30725 pairing has been found and moved to the end of the ready
30726 list. */
30727 if (type == TYPE_VECLOAD)
30728 {
30729 /* Issued a vecload. */
30730 if (vec_pairing == 0)
30731 {
30732 int vecload_pos = -1;
30733 /* We issued a single vecload, look for a vector insn to pair it
30734 with. If one isn't found, try to pair another vecload. */
30735 pos = lastpos;
30736 while (pos >= 0)
30737 {
30738 if (recog_memoized (ready[pos]) >= 0)
30739 {
30740 type2 = get_attr_type (ready[pos]);
30741 if (is_power9_pairable_vec_type (type2))
30742 {
30743 /* Found a vector insn to pair with, move it to the
30744 end of the ready list so it is scheduled next. */
30745 tmp = ready[pos];
30746 for (i = pos; i < lastpos; i++)
30747 ready[i] = ready[i + 1];
30748 ready[lastpos] = tmp;
30749 vec_pairing = 1;
30750 return cached_can_issue_more;
30751 }
30752 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
30753 /* Remember position of first vecload seen. */
30754 vecload_pos = pos;
30755 }
30756 pos--;
30757 }
30758 if (vecload_pos >= 0)
30759 {
30760 /* Didn't find a vector to pair with but did find a vecload,
30761 move it to the end of the ready list. */
30762 tmp = ready[vecload_pos];
30763 for (i = vecload_pos; i < lastpos; i++)
30764 ready[i] = ready[i + 1];
30765 ready[lastpos] = tmp;
30766 vec_pairing = 1;
30767 return cached_can_issue_more;
30768 }
30769 }
30770 }
30771 else if (is_power9_pairable_vec_type (type))
30772 {
30773 /* Issued a vector operation. */
30774 if (vec_pairing == 0)
30775 {
30776 int vec_pos = -1;
30777 /* We issued a single vector insn, look for a vecload to pair it
30778 with. If one isn't found, try to pair another vector. */
30779 pos = lastpos;
30780 while (pos >= 0)
30781 {
30782 if (recog_memoized (ready[pos]) >= 0)
30783 {
30784 type2 = get_attr_type (ready[pos]);
30785 if (type2 == TYPE_VECLOAD)
30786 {
30787 /* Found a vecload insn to pair with, move it to the
30788 end of the ready list so it is scheduled next. */
30789 tmp = ready[pos];
30790 for (i = pos; i < lastpos; i++)
30791 ready[i] = ready[i + 1];
30792 ready[lastpos] = tmp;
30793 vec_pairing = 1;
30794 return cached_can_issue_more;
30795 }
30796 else if (is_power9_pairable_vec_type (type2)
30797 && vec_pos == -1)
30798 /* Remember position of first vector insn seen. */
30799 vec_pos = pos;
30800 }
30801 pos--;
30802 }
30803 if (vec_pos >= 0)
30804 {
30805 /* Didn't find a vecload to pair with but did find a vector
30806 insn, move it to the end of the ready list. */
30807 tmp = ready[vec_pos];
30808 for (i = vec_pos; i < lastpos; i++)
30809 ready[i] = ready[i + 1];
30810 ready[lastpos] = tmp;
30811 vec_pairing = 1;
30812 return cached_can_issue_more;
30813 }
30814 }
30815 }
30816
30817 /* We've either finished a vec/vecload pair, couldn't find an insn to
30818 	 continue the current pair, or the last insn had nothing to do
30819 	 with pairing.  In any case, reset the state.  */
30820 vec_pairing = 0;
30821 }
30822
30823 return cached_can_issue_more;
30824 }
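
/* Every scan in power9_sched_reorder2 ends with the same "rotate the
   chosen insn to the back of the ready list" step.  A standalone
   sketch of that operation (GCC keeps it inline above):  */
static void ATTRIBUTE_UNUSED
rotate_insn_to_back (rtx_insn **ready, int pos, int lastpos)
{
  rtx_insn *tmp = ready[pos];
  /* Slide the tail of the list down one slot...  */
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  /* ...and park the chosen insn at the back, where the scheduler
     picks first.  */
  ready[lastpos] = tmp;
}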
30825
30826 /* We are about to begin issuing insns for this clock cycle. */
30827
30828 static int
30829 rs6000_sched_reorder (FILE *dump, int sched_verbose,
30830 		      rtx_insn **ready,
30831 		      int *pn_ready,
30832 		      int clock_var ATTRIBUTE_UNUSED)
30833 {
30834 int n_ready = *pn_ready;
30835
30836 if (sched_verbose)
30837 fprintf (dump, "// rs6000_sched_reorder :\n");
30838
30839   /* Reorder the ready list if the second-to-last ready insn
30840      is a non-pipelined insn.  */
30841 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
30842 {
30843 if (is_nonpipeline_insn (ready[n_ready - 1])
30844 && (recog_memoized (ready[n_ready - 2]) > 0))
30845 /* Simply swap first two insns. */
30846 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
30847 }
30848
30849 if (rs6000_tune == PROCESSOR_POWER6)
30850 load_store_pendulum = 0;
30851
30852 return rs6000_issue_rate ();
30853 }
30854
30855 /* Like rs6000_sched_reorder, but called after issuing each insn. */
30856
30857 static int
30858 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
30859 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
30860 {
30861 if (sched_verbose)
30862 fprintf (dump, "// rs6000_sched_reorder2 :\n");
30863
30864   /* For Power6, we need to handle some special cases to try to keep the
30865 store queue from overflowing and triggering expensive flushes.
30866
30867 This code monitors how load and store instructions are being issued
30868 and skews the ready list one way or the other to increase the likelihood
30869 that a desired instruction is issued at the proper time.
30870
30871 A couple of things are done. First, we maintain a "load_store_pendulum"
30872 to track the current state of load/store issue.
30873
30874 - If the pendulum is at zero, then no loads or stores have been
30875 issued in the current cycle so we do nothing.
30876
30877 - If the pendulum is 1, then a single load has been issued in this
30878 cycle and we attempt to locate another load in the ready list to
30879 issue with it.
30880
30881 - If the pendulum is -2, then two stores have already been
30882 issued in this cycle, so we increase the priority of the first load
30883        in the ready list to increase its likelihood of being chosen first
30884 in the next cycle.
30885
30886 - If the pendulum is -1, then a single store has been issued in this
30887 cycle and we attempt to locate another store in the ready list to
30888 issue with it, preferring a store to an adjacent memory location to
30889 facilitate store pairing in the store queue.
30890
30891 - If the pendulum is 2, then two loads have already been
30892 issued in this cycle, so we increase the priority of the first store
30893        in the ready list to increase its likelihood of being chosen first
30894 in the next cycle.
30895
30896      - If the pendulum is < -2 or > 2, then do nothing.
30897
30898      Note: This code covers the most common scenarios.  There exist
30899      non-load/store instructions which make use of the LSU and which
30900 would need to be accounted for to strictly model the behavior
30901 of the machine. Those instructions are currently unaccounted
30902 for to help minimize compile time overhead of this code.
30903 */
30904 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
30905 {
30906 int pos;
30907 int i;
30908 rtx_insn *tmp;
30909 rtx load_mem, str_mem;
30910
30911 if (is_store_insn (last_scheduled_insn, &str_mem))
30912 /* Issuing a store, swing the load_store_pendulum to the left */
30913 load_store_pendulum--;
30914 else if (is_load_insn (last_scheduled_insn, &load_mem))
30915 /* Issuing a load, swing the load_store_pendulum to the right */
30916 load_store_pendulum++;
30917 else
30918 return cached_can_issue_more;
30919
30920 /* If the pendulum is balanced, or there is only one instruction on
30921 the ready list, then all is well, so return. */
30922 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
30923 return cached_can_issue_more;
30924
30925 if (load_store_pendulum == 1)
30926 {
30927 /* A load has been issued in this cycle. Scan the ready list
30928 for another load to issue with it */
30929 	  pos = *pn_ready - 1;
30930
30931 while (pos >= 0)
30932 {
30933 if (is_load_insn (ready[pos], &load_mem))
30934 {
30935 		  /* Found a load.  Move it to the head of the ready list,
30936 		     and adjust its priority so that it is more likely to
30937 		     stay there.  */
30938 		  tmp = ready[pos];
30939 		  for (i = pos; i < *pn_ready - 1; i++)
30940 		    ready[i] = ready[i + 1];
30941 		  ready[*pn_ready - 1] = tmp;
30942
30943 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
30944 INSN_PRIORITY (tmp)++;
30945 break;
30946 }
30947 pos--;
30948 }
30949 }
30950 else if (load_store_pendulum == -2)
30951 {
30952 /* Two stores have been issued in this cycle. Increase the
30953 priority of the first load in the ready list to favor it for
30954 issuing in the next cycle. */
30955 	  pos = *pn_ready - 1;
30956
30957 while (pos >= 0)
30958 {
30959 if (is_load_insn (ready[pos], &load_mem)
30960 && !sel_sched_p ()
30961 && INSN_PRIORITY_KNOWN (ready[pos]))
30962 {
30963 INSN_PRIORITY (ready[pos])++;
30964
30965 /* Adjust the pendulum to account for the fact that a load
30966 was found and increased in priority. This is to prevent
30967 increasing the priority of multiple loads */
30968 load_store_pendulum--;
30969
30970 break;
30971 }
30972 pos--;
30973 }
30974 }
30975 else if (load_store_pendulum == -1)
30976 {
30977 /* A store has been issued in this cycle. Scan the ready list for
30978 another store to issue with it, preferring a store to an adjacent
30979 memory location */
30980 int first_store_pos = -1;
30981
30982 	  pos = *pn_ready - 1;
30983
30984 while (pos >= 0)
30985 {
30986 if (is_store_insn (ready[pos], &str_mem))
30987 {
30988 rtx str_mem2;
30989 /* Maintain the index of the first store found on the
30990 list */
30991 if (first_store_pos == -1)
30992 first_store_pos = pos;
30993
30994 if (is_store_insn (last_scheduled_insn, &str_mem2)
30995 && adjacent_mem_locations (str_mem, str_mem2))
30996 {
30997 		      /* Found an adjacent store.  Move it to the head of the
30998 			 ready list, and adjust its priority so that it is
30999 			 more likely to stay there.  */
31000 		      tmp = ready[pos];
31001 		      for (i = pos; i < *pn_ready - 1; i++)
31002 			ready[i] = ready[i + 1];
31003 		      ready[*pn_ready - 1] = tmp;
31004
31005 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31006 INSN_PRIORITY (tmp)++;
31007
31008 first_store_pos = -1;
31009
31010 break;
31011 		    }
31012 }
31013 pos--;
31014 }
31015
31016 if (first_store_pos >= 0)
31017 {
31018 /* An adjacent store wasn't found, but a non-adjacent store was,
31019 so move the non-adjacent store to the front of the ready
31020 list, and adjust its priority so that it is more likely to
31021 stay there. */
31022 tmp = ready[first_store_pos];
31023 	      for (i = first_store_pos; i < *pn_ready - 1; i++)
31024 		ready[i] = ready[i + 1];
31025 	      ready[*pn_ready - 1] = tmp;
31026 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31027 INSN_PRIORITY (tmp)++;
31028 }
31029 }
31030 else if (load_store_pendulum == 2)
31031 {
31032 /* Two loads have been issued in this cycle. Increase the priority
31033 of the first store in the ready list to favor it for issuing in
31034 the next cycle. */
31035 	  pos = *pn_ready - 1;
31036
31037 while (pos >= 0)
31038 {
31039 if (is_store_insn (ready[pos], &str_mem)
31040 && !sel_sched_p ()
31041 && INSN_PRIORITY_KNOWN (ready[pos]))
31042 {
31043 INSN_PRIORITY (ready[pos])++;
31044
31045 /* Adjust the pendulum to account for the fact that a store
31046 was found and increased in priority. This is to prevent
31047 increasing the priority of multiple stores */
31048 load_store_pendulum++;
31049
31050 break;
31051 }
31052 pos--;
31053 }
31054 }
31055 }
31056
31057   /* Do Power9-specific reordering if necessary.  */
31058 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31059 && recog_memoized (last_scheduled_insn) >= 0)
31060 return power9_sched_reorder2 (ready, *pn_ready - 1);
31061
31062 return cached_can_issue_more;
31063 }
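
/* The Power6 pendulum transitions described above, reduced to a bare
   sketch (illustrative; the real hook also rotates and re-prioritizes
   ready-list entries):  */
static int ATTRIBUTE_UNUSED
pendulum_after_issue (int pendulum, bool issued_store, bool issued_load)
{
  if (issued_store)
    return pendulum - 1;	/* Swing left: one more store this cycle.  */
  if (issued_load)
    return pendulum + 1;	/* Swing right: one more load this cycle.  */
  return pendulum;		/* Non-LSU insn: state unchanged.  */
}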
31064
31065 /* Return whether the presence of INSN causes a dispatch group termination
31066 of group WHICH_GROUP.
31067
31068 If WHICH_GROUP == current_group, this function will return true if INSN
31069    causes the termination of the current group (i.e., the dispatch group to
31070 which INSN belongs). This means that INSN will be the last insn in the
31071 group it belongs to.
31072
31073 If WHICH_GROUP == previous_group, this function will return true if INSN
31074    causes the termination of the previous group (i.e., the dispatch group that
31075 precedes the group to which INSN belongs). This means that INSN will be
31076    the first insn in the group it belongs to.  */
31077
31078 static bool
31079 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31080 {
31081 bool first, last;
31082
31083 if (! insn)
31084 return false;
31085
31086 first = insn_must_be_first_in_group (insn);
31087 last = insn_must_be_last_in_group (insn);
31088
31089 if (first && last)
31090 return true;
31091
31092 if (which_group == current_group)
31093 return last;
31094 else if (which_group == previous_group)
31095 return first;
31096
31097 return false;
31098 }
31099
31100
31101 static bool
31102 insn_must_be_first_in_group (rtx_insn *insn)
31103 {
31104 enum attr_type type;
31105
31106 if (!insn
31107 || NOTE_P (insn)
31108 || DEBUG_INSN_P (insn)
31109 || GET_CODE (PATTERN (insn)) == USE
31110 || GET_CODE (PATTERN (insn)) == CLOBBER)
31111 return false;
31112
31113 switch (rs6000_tune)
31114 {
31115 case PROCESSOR_POWER5:
31116 if (is_cracked_insn (insn))
31117 return true;
31118 /* FALLTHRU */
31119 case PROCESSOR_POWER4:
31120 if (is_microcoded_insn (insn))
31121 return true;
31122
31123 if (!rs6000_sched_groups)
31124 return false;
31125
31126 type = get_attr_type (insn);
31127
31128 switch (type)
31129 {
31130 case TYPE_MFCR:
31131 case TYPE_MFCRF:
31132 case TYPE_MTCR:
31133 case TYPE_CR_LOGICAL:
31134 case TYPE_MTJMPR:
31135 case TYPE_MFJMPR:
31136 case TYPE_DIV:
31137 case TYPE_LOAD_L:
31138 case TYPE_STORE_C:
31139 case TYPE_ISYNC:
31140 case TYPE_SYNC:
31141 return true;
31142 default:
31143 break;
31144 }
31145 break;
31146 case PROCESSOR_POWER6:
31147 type = get_attr_type (insn);
31148
31149 switch (type)
31150 {
31151 case TYPE_EXTS:
31152 case TYPE_CNTLZ:
31153 case TYPE_TRAP:
31154 case TYPE_MUL:
31155 case TYPE_INSERT:
31156 case TYPE_FPCOMPARE:
31157 case TYPE_MFCR:
31158 case TYPE_MTCR:
31159 case TYPE_MFJMPR:
31160 case TYPE_MTJMPR:
31161 case TYPE_ISYNC:
31162 case TYPE_SYNC:
31163 case TYPE_LOAD_L:
31164 case TYPE_STORE_C:
31165 return true;
31166 case TYPE_SHIFT:
31167 if (get_attr_dot (insn) == DOT_NO
31168 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31169 return true;
31170 else
31171 break;
31172 case TYPE_DIV:
31173 if (get_attr_size (insn) == SIZE_32)
31174 return true;
31175 else
31176 break;
31177 case TYPE_LOAD:
31178 case TYPE_STORE:
31179 case TYPE_FPLOAD:
31180 case TYPE_FPSTORE:
31181 if (get_attr_update (insn) == UPDATE_YES)
31182 return true;
31183 else
31184 break;
31185 default:
31186 break;
31187 }
31188 break;
31189 case PROCESSOR_POWER7:
31190 type = get_attr_type (insn);
31191
31192 switch (type)
31193 {
31194 case TYPE_CR_LOGICAL:
31195 case TYPE_MFCR:
31196 case TYPE_MFCRF:
31197 case TYPE_MTCR:
31198 case TYPE_DIV:
31199 case TYPE_ISYNC:
31200 case TYPE_LOAD_L:
31201 case TYPE_STORE_C:
31202 case TYPE_MFJMPR:
31203 case TYPE_MTJMPR:
31204 return true;
31205 case TYPE_MUL:
31206 case TYPE_SHIFT:
31207 case TYPE_EXTS:
31208 if (get_attr_dot (insn) == DOT_YES)
31209 return true;
31210 else
31211 break;
31212 case TYPE_LOAD:
31213 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31214 || get_attr_update (insn) == UPDATE_YES)
31215 return true;
31216 else
31217 break;
31218 case TYPE_STORE:
31219 case TYPE_FPLOAD:
31220 case TYPE_FPSTORE:
31221 if (get_attr_update (insn) == UPDATE_YES)
31222 return true;
31223 else
31224 break;
31225 default:
31226 break;
31227 }
31228 break;
31229 case PROCESSOR_POWER8:
31230 type = get_attr_type (insn);
31231
31232 switch (type)
31233 {
31234 case TYPE_CR_LOGICAL:
31235 case TYPE_MFCR:
31236 case TYPE_MFCRF:
31237 case TYPE_MTCR:
31238 case TYPE_SYNC:
31239 case TYPE_ISYNC:
31240 case TYPE_LOAD_L:
31241 case TYPE_STORE_C:
31242 case TYPE_VECSTORE:
31243 case TYPE_MFJMPR:
31244 case TYPE_MTJMPR:
31245 return true;
31246 case TYPE_SHIFT:
31247 case TYPE_EXTS:
31248 case TYPE_MUL:
31249 if (get_attr_dot (insn) == DOT_YES)
31250 return true;
31251 else
31252 break;
31253 case TYPE_LOAD:
31254 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31255 || get_attr_update (insn) == UPDATE_YES)
31256 return true;
31257 else
31258 break;
31259 case TYPE_STORE:
31260 if (get_attr_update (insn) == UPDATE_YES
31261 && get_attr_indexed (insn) == INDEXED_YES)
31262 return true;
31263 else
31264 break;
31265 default:
31266 break;
31267 }
31268 break;
31269 default:
31270 break;
31271 }
31272
31273 return false;
31274 }
31275
31276 static bool
31277 insn_must_be_last_in_group (rtx_insn *insn)
31278 {
31279 enum attr_type type;
31280
31281 if (!insn
31282 || NOTE_P (insn)
31283 || DEBUG_INSN_P (insn)
31284 || GET_CODE (PATTERN (insn)) == USE
31285 || GET_CODE (PATTERN (insn)) == CLOBBER)
31286 return false;
31287
31288 switch (rs6000_tune) {
31289 case PROCESSOR_POWER4:
31290 case PROCESSOR_POWER5:
31291 if (is_microcoded_insn (insn))
31292 return true;
31293
31294 if (is_branch_slot_insn (insn))
31295 return true;
31296
31297 break;
31298 case PROCESSOR_POWER6:
31299 type = get_attr_type (insn);
31300
31301 switch (type)
31302 {
31303 case TYPE_EXTS:
31304 case TYPE_CNTLZ:
31305 case TYPE_TRAP:
31306 case TYPE_MUL:
31307 case TYPE_FPCOMPARE:
31308 case TYPE_MFCR:
31309 case TYPE_MTCR:
31310 case TYPE_MFJMPR:
31311 case TYPE_MTJMPR:
31312 case TYPE_ISYNC:
31313 case TYPE_SYNC:
31314 case TYPE_LOAD_L:
31315 case TYPE_STORE_C:
31316 return true;
31317 case TYPE_SHIFT:
31318 if (get_attr_dot (insn) == DOT_NO
31319 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31320 return true;
31321 else
31322 break;
31323 case TYPE_DIV:
31324 if (get_attr_size (insn) == SIZE_32)
31325 return true;
31326 else
31327 break;
31328 default:
31329 break;
31330 }
31331 break;
31332 case PROCESSOR_POWER7:
31333 type = get_attr_type (insn);
31334
31335 switch (type)
31336 {
31337 case TYPE_ISYNC:
31338 case TYPE_SYNC:
31339 case TYPE_LOAD_L:
31340 case TYPE_STORE_C:
31341 return true;
31342 case TYPE_LOAD:
31343 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31344 && get_attr_update (insn) == UPDATE_YES)
31345 return true;
31346 else
31347 break;
31348 case TYPE_STORE:
31349 if (get_attr_update (insn) == UPDATE_YES
31350 && get_attr_indexed (insn) == INDEXED_YES)
31351 return true;
31352 else
31353 break;
31354 default:
31355 break;
31356 }
31357 break;
31358 case PROCESSOR_POWER8:
31359 type = get_attr_type (insn);
31360
31361 switch (type)
31362 {
31363 case TYPE_MFCR:
31364 case TYPE_MTCR:
31365 case TYPE_ISYNC:
31366 case TYPE_SYNC:
31367 case TYPE_LOAD_L:
31368 case TYPE_STORE_C:
31369 return true;
31370 case TYPE_LOAD:
31371 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31372 && get_attr_update (insn) == UPDATE_YES)
31373 return true;
31374 else
31375 break;
31376 case TYPE_STORE:
31377 if (get_attr_update (insn) == UPDATE_YES
31378 && get_attr_indexed (insn) == INDEXED_YES)
31379 return true;
31380 else
31381 break;
31382 default:
31383 break;
31384 }
31385 break;
31386 default:
31387 break;
31388 }
31389
31390 return false;
31391 }
31392
31393 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31394 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31395
31396 static bool
31397 is_costly_group (rtx *group_insns, rtx next_insn)
31398 {
31399 int i;
31400 int issue_rate = rs6000_issue_rate ();
31401
31402 for (i = 0; i < issue_rate; i++)
31403 {
31404 sd_iterator_def sd_it;
31405 dep_t dep;
31406 rtx insn = group_insns[i];
31407
31408 if (!insn)
31409 continue;
31410
31411 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31412 {
31413 rtx next = DEP_CON (dep);
31414
31415 if (next == next_insn
31416 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31417 return true;
31418 }
31419 }
31420
31421 return false;
31422 }
31423
31424 /* Helper for the function redefine_groups.
31425 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31426 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31427 to keep it "far" (in a separate group) from GROUP_INSNS, following
31428 one of the following schemes, depending on the value of the flag
31429    -minsert-sched-nops=X:
31430 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31431 in order to force NEXT_INSN into a separate group.
31432 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31433 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31434 insertion (has a group just ended, how many vacant issue slots remain in the
31435 last group, and how many dispatch groups were encountered so far). */
31436
31437 static int
31438 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31439 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31440 int *group_count)
31441 {
31442 rtx nop;
31443 bool force;
31444 int issue_rate = rs6000_issue_rate ();
31445 bool end = *group_end;
31446 int i;
31447
31448 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31449 return can_issue_more;
31450
31451 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31452 return can_issue_more;
31453
31454 force = is_costly_group (group_insns, next_insn);
31455 if (!force)
31456 return can_issue_more;
31457
31458 if (sched_verbose > 6)
31459     fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
31460 	     *group_count, can_issue_more);
31461
31462 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31463 {
31464 if (*group_end)
31465 can_issue_more = 0;
31466
31467 /* Since only a branch can be issued in the last issue_slot, it is
31468 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31469 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31470 in this case the last nop will start a new group and the branch
31471 will be forced to the new group. */
31472 if (can_issue_more && !is_branch_slot_insn (next_insn))
31473 can_issue_more--;
31474
31475 /* Do we have a special group ending nop? */
31476 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
31477 || rs6000_tune == PROCESSOR_POWER8)
31478 {
31479 nop = gen_group_ending_nop ();
31480 emit_insn_before (nop, next_insn);
31481 can_issue_more = 0;
31482 }
31483 else
31484 while (can_issue_more > 0)
31485 {
31486 nop = gen_nop ();
31487 emit_insn_before (nop, next_insn);
31488 can_issue_more--;
31489 }
31490
31491 *group_end = true;
31492 return 0;
31493 }
31494
31495 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31496 {
31497 int n_nops = rs6000_sched_insert_nops;
31498
31499 /* Nops can't be issued from the branch slot, so the effective
31500 issue_rate for nops is 'issue_rate - 1'. */
31501 if (can_issue_more == 0)
31502 can_issue_more = issue_rate;
31503 can_issue_more--;
31504 if (can_issue_more == 0)
31505 {
31506 can_issue_more = issue_rate - 1;
31507 (*group_count)++;
31508 end = true;
31509 for (i = 0; i < issue_rate; i++)
31510 {
31511 group_insns[i] = 0;
31512 }
31513 }
31514
31515 while (n_nops > 0)
31516 {
31517 nop = gen_nop ();
31518 emit_insn_before (nop, next_insn);
31519 if (can_issue_more == issue_rate - 1) /* new group begins */
31520 end = false;
31521 can_issue_more--;
31522 if (can_issue_more == 0)
31523 {
31524 can_issue_more = issue_rate - 1;
31525 (*group_count)++;
31526 end = true;
31527 for (i = 0; i < issue_rate; i++)
31528 {
31529 group_insns[i] = 0;
31530 }
31531 }
31532 n_nops--;
31533 }
31534
31535 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31536 can_issue_more++;
31537
31538 /* Is next_insn going to start a new group? */
31539 *group_end
31540 = (end
31541 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31542 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31543 	   || (can_issue_more < issue_rate
31544 	       && insn_terminates_group_p (next_insn, previous_group)));
31545 if (*group_end && end)
31546 (*group_count)--;
31547
31548 if (sched_verbose > 6)
31549 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31550 *group_count, can_issue_more);
31551 return can_issue_more;
31552 }
31553
31554 return can_issue_more;
31555 }
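
/* Worked example of scheme (1) above, assuming issue_rate == 5 as on
   POWER4/5: with three vacant slots and a non-branch NEXT_INSN, two
   nops suffice, because only a branch may occupy the final slot.  A
   sketch of that count (illustrative only):  */
static int ATTRIBUTE_UNUSED
nops_to_force_new_group (int can_issue_more, bool next_is_branch)
{
  return next_is_branch ? can_issue_more : can_issue_more - 1;
}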
31556
31557 /* This function tries to synch the dispatch groups that the compiler "sees"
31558 with the dispatch groups that the processor dispatcher is expected to
31559 form in practice. It tries to achieve this synchronization by forcing the
31560 estimated processor grouping on the compiler (as opposed to the function
31561    'pad_groups' which tries to force the scheduler's grouping on the processor).
31562
31563 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31564 examines the (estimated) dispatch groups that will be formed by the processor
31565 dispatcher. It marks these group boundaries to reflect the estimated
31566 processor grouping, overriding the grouping that the scheduler had marked.
31567 Depending on the value of the flag '-minsert-sched-nops' this function can
31568 force certain insns into separate groups or force a certain distance between
31569 them by inserting nops, for example, if there exists a "costly dependence"
31570 between the insns.
31571
31572 The function estimates the group boundaries that the processor will form as
31573 follows: It keeps track of how many vacant issue slots are available after
31574 each insn. A subsequent insn will start a new group if one of the following
31575 4 cases applies:
31576 - no more vacant issue slots remain in the current dispatch group.
31577 - only the last issue slot, which is the branch slot, is vacant, but the next
31578 insn is not a branch.
31579    - only the last 2 or fewer issue slots, including the branch slot, are vacant,
31580 which means that a cracked insn (which occupies two issue slots) can't be
31581 issued in this group.
31582    - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31583 start a new group. */
31584
31585 static int
31586 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31587 rtx_insn *tail)
31588 {
31589 rtx_insn *insn, *next_insn;
31590 int issue_rate;
31591 int can_issue_more;
31592 int slot, i;
31593 bool group_end;
31594 int group_count = 0;
31595 rtx *group_insns;
31596
31597 /* Initialize. */
31598 issue_rate = rs6000_issue_rate ();
31599 group_insns = XALLOCAVEC (rtx, issue_rate);
31600 for (i = 0; i < issue_rate; i++)
31601 {
31602 group_insns[i] = 0;
31603 }
31604 can_issue_more = issue_rate;
31605 slot = 0;
31606 insn = get_next_active_insn (prev_head_insn, tail);
31607 group_end = false;
31608
31609 while (insn != NULL_RTX)
31610 {
31611 slot = (issue_rate - can_issue_more);
31612 group_insns[slot] = insn;
31613 can_issue_more =
31614 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31615 if (insn_terminates_group_p (insn, current_group))
31616 can_issue_more = 0;
31617
31618 next_insn = get_next_active_insn (insn, tail);
31619 if (next_insn == NULL_RTX)
31620 return group_count + 1;
31621
31622 /* Is next_insn going to start a new group? */
31623 group_end
31624 = (can_issue_more == 0
31625 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31626 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31627 	  || (can_issue_more < issue_rate
31628 	      && insn_terminates_group_p (next_insn, previous_group)));
31629
31630 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
31631 next_insn, &group_end, can_issue_more,
31632 &group_count);
31633
31634 if (group_end)
31635 {
31636 group_count++;
31637 can_issue_more = 0;
31638 for (i = 0; i < issue_rate; i++)
31639 {
31640 group_insns[i] = 0;
31641 }
31642 }
31643
31644 if (GET_MODE (next_insn) == TImode && can_issue_more)
31645 PUT_MODE (next_insn, VOIDmode);
31646 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
31647 PUT_MODE (next_insn, TImode);
31648
31649 insn = next_insn;
31650 if (can_issue_more == 0)
31651 can_issue_more = issue_rate;
31652 } /* while */
31653
31654 return group_count;
31655 }
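
/* The four "starts a new group" tests from the commentary above,
   collected into one predicate for illustration (this simply mirrors
   the expression computed inside the loop of redefine_groups):  */
static bool ATTRIBUTE_UNUSED
starts_new_group_p (int can_issue_more, int issue_rate, rtx_insn *next_insn)
{
  return (can_issue_more == 0
	  || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
	  || (can_issue_more <= 2 && is_cracked_insn (next_insn))
	  || (can_issue_more < issue_rate
	      && insn_terminates_group_p (next_insn, previous_group)));
}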
31656
31657 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
31658 dispatch group boundaries that the scheduler had marked. Pad with nops
31659 any dispatch groups which have vacant issue slots, in order to force the
31660 scheduler's grouping on the processor dispatcher. The function
31661 returns the number of dispatch groups found. */
31662
31663 static int
31664 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31665 rtx_insn *tail)
31666 {
31667 rtx_insn *insn, *next_insn;
31668 rtx nop;
31669 int issue_rate;
31670 int can_issue_more;
31671 int group_end;
31672 int group_count = 0;
31673
31674 /* Initialize issue_rate. */
31675 issue_rate = rs6000_issue_rate ();
31676 can_issue_more = issue_rate;
31677
31678 insn = get_next_active_insn (prev_head_insn, tail);
31679 next_insn = get_next_active_insn (insn, tail);
31680
31681 while (insn != NULL_RTX)
31682 {
31683 can_issue_more =
31684 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31685
31686 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
31687
31688 if (next_insn == NULL_RTX)
31689 break;
31690
31691 if (group_end)
31692 {
31693 /* If the scheduler had marked group termination at this location
31694 (between insn and next_insn), and neither insn nor next_insn will
31695 force group termination, pad the group with nops to force group
31696 termination. */
31697 if (can_issue_more
31698 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
31699 && !insn_terminates_group_p (insn, current_group)
31700 && !insn_terminates_group_p (next_insn, previous_group))
31701 {
31702 if (!is_branch_slot_insn (next_insn))
31703 can_issue_more--;
31704
31705 while (can_issue_more)
31706 {
31707 nop = gen_nop ();
31708 emit_insn_before (nop, next_insn);
31709 can_issue_more--;
31710 }
31711 }
31712
31713 can_issue_more = issue_rate;
31714 group_count++;
31715 }
31716
31717 insn = next_insn;
31718 next_insn = get_next_active_insn (insn, tail);
31719 }
31720
31721 return group_count;
31722 }
31723
31724 /* We're beginning a new block. Initialize data structures as necessary. */
31725
31726 static void
31727 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
31728 int sched_verbose ATTRIBUTE_UNUSED,
31729 int max_ready ATTRIBUTE_UNUSED)
31730 {
31731 last_scheduled_insn = NULL;
31732 load_store_pendulum = 0;
31733 divide_cnt = 0;
31734 vec_pairing = 0;
31735 }
31736
31737 /* The following function is called at the end of scheduling BB.
31738    After reload, it inserts nops to enforce the insn group bundling.  */
31739
31740 static void
31741 rs6000_sched_finish (FILE *dump, int sched_verbose)
31742 {
31743 int n_groups;
31744
31745 if (sched_verbose)
31746 fprintf (dump, "=== Finishing schedule.\n");
31747
31748 if (reload_completed && rs6000_sched_groups)
31749 {
31750       /* Do not run the sched_finish hook when selective scheduling is enabled.  */
31751 if (sel_sched_p ())
31752 return;
31753
31754 if (rs6000_sched_insert_nops == sched_finish_none)
31755 return;
31756
31757 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
31758 n_groups = pad_groups (dump, sched_verbose,
31759 current_sched_info->prev_head,
31760 current_sched_info->next_tail);
31761 else
31762 n_groups = redefine_groups (dump, sched_verbose,
31763 current_sched_info->prev_head,
31764 current_sched_info->next_tail);
31765
31766 if (sched_verbose >= 6)
31767 {
31768 fprintf (dump, "ngroups = %d\n", n_groups);
31769 print_rtl (dump, current_sched_info->prev_head);
31770 fprintf (dump, "Done finish_sched\n");
31771 }
31772 }
31773 }
31774
31775 struct rs6000_sched_context
31776 {
31777 short cached_can_issue_more;
31778 rtx_insn *last_scheduled_insn;
31779 int load_store_pendulum;
31780 int divide_cnt;
31781 int vec_pairing;
31782 };
31783
31784 typedef struct rs6000_sched_context rs6000_sched_context_def;
31785 typedef rs6000_sched_context_def *rs6000_sched_context_t;
31786
31787 /* Allocate storage for a new scheduling context.  */
31788 static void *
31789 rs6000_alloc_sched_context (void)
31790 {
31791 return xmalloc (sizeof (rs6000_sched_context_def));
31792 }
31793
31794 /* If CLEAN_P is true, initialize _SC with clean data;
31795    otherwise initialize it from the global context.  */
31796 static void
31797 rs6000_init_sched_context (void *_sc, bool clean_p)
31798 {
31799 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
31800
31801 if (clean_p)
31802 {
31803 sc->cached_can_issue_more = 0;
31804 sc->last_scheduled_insn = NULL;
31805 sc->load_store_pendulum = 0;
31806 sc->divide_cnt = 0;
31807 sc->vec_pairing = 0;
31808 }
31809 else
31810 {
31811 sc->cached_can_issue_more = cached_can_issue_more;
31812 sc->last_scheduled_insn = last_scheduled_insn;
31813 sc->load_store_pendulum = load_store_pendulum;
31814 sc->divide_cnt = divide_cnt;
31815 sc->vec_pairing = vec_pairing;
31816 }
31817 }
31818
31819 /* Sets the global scheduling context to the one pointed to by _SC. */
31820 static void
31821 rs6000_set_sched_context (void *_sc)
31822 {
31823 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
31824
31825 gcc_assert (sc != NULL);
31826
31827 cached_can_issue_more = sc->cached_can_issue_more;
31828 last_scheduled_insn = sc->last_scheduled_insn;
31829 load_store_pendulum = sc->load_store_pendulum;
31830 divide_cnt = sc->divide_cnt;
31831 vec_pairing = sc->vec_pairing;
31832 }
31833
31834 /* Free _SC. */
31835 static void
31836 rs6000_free_sched_context (void *_sc)
31837 {
31838 gcc_assert (_sc != NULL);
31839
31840 free (_sc);
31841 }
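
/* Illustrative round trip through the four context hooks above; the
   selective scheduler drives an equivalent sequence.  A sketch, not a
   real caller:  */
static void ATTRIBUTE_UNUSED
sched_context_round_trip (void)
{
  void *ctx = rs6000_alloc_sched_context ();
  rs6000_init_sched_context (ctx, false);  /* Snapshot the global state.  */
  /* ... scheduling of another region happens here ...  */
  rs6000_set_sched_context (ctx);	   /* Restore the snapshot.  */
  rs6000_free_sched_context (ctx);
}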
31842
31843 static bool
31844 rs6000_sched_can_speculate_insn (rtx_insn *insn)
31845 {
31846 switch (get_attr_type (insn))
31847 {
31848 case TYPE_DIV:
31849 case TYPE_SDIV:
31850 case TYPE_DDIV:
31851 case TYPE_VECDIV:
31852 case TYPE_SSQRT:
31853 case TYPE_DSQRT:
31854 return false;
31855
31856 default:
31857 return true;
31858 }
31859 }
31860 \f
31861 /* Length in bytes of the trampoline for entering a nested function.  */
31862
31863 int
31864 rs6000_trampoline_size (void)
31865 {
31866 int ret = 0;
31867
31868 switch (DEFAULT_ABI)
31869 {
31870 default:
31871 gcc_unreachable ();
31872
31873 case ABI_AIX:
31874 ret = (TARGET_32BIT) ? 12 : 24;
31875 break;
31876
31877 case ABI_ELFv2:
31878 gcc_assert (!TARGET_32BIT);
31879 ret = 32;
31880 break;
31881
31882 case ABI_DARWIN:
31883 case ABI_V4:
31884 ret = (TARGET_32BIT) ? 40 : 48;
31885 break;
31886 }
31887
31888 return ret;
31889 }
31890
31891 /* Emit RTL insns to initialize the variable parts of a trampoline.
31892    M_TRAMP is the trampoline memory, FNDECL is the nested function and
31893    CXT is an RTX for its static chain value.  */
31894
31895 static void
31896 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
31897 {
31898 int regsize = (TARGET_32BIT) ? 4 : 8;
31899 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
31900 rtx ctx_reg = force_reg (Pmode, cxt);
31901 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
31902
31903 switch (DEFAULT_ABI)
31904 {
31905 default:
31906 gcc_unreachable ();
31907
31908     /* Under AIX, just build the 3-word function descriptor.  */
31909 case ABI_AIX:
31910 {
31911 rtx fnmem, fn_reg, toc_reg;
31912
31913 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
31914 error ("you cannot take the address of a nested function if you use "
31915 "the %qs option", "-mno-pointers-to-nested-functions");
31916
31917 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
31918 fn_reg = gen_reg_rtx (Pmode);
31919 toc_reg = gen_reg_rtx (Pmode);
31920
31921 /* Macro to shorten the code expansions below. */
31922 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
31923
31924 m_tramp = replace_equiv_address (m_tramp, addr);
31925
31926 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
31927 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
31928 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
31929 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
31930 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
31931
31932 # undef MEM_PLUS
31933 }
31934 break;
31935
31936 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
31937 case ABI_ELFv2:
31938 case ABI_DARWIN:
31939 case ABI_V4:
31940 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
31941 LCT_NORMAL, VOIDmode,
31942 addr, Pmode,
31943 GEN_INT (rs6000_trampoline_size ()), SImode,
31944 fnaddr, Pmode,
31945 ctx_reg, Pmode);
31946 break;
31947 }
31948 }
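
/* A plain C view of the AIX-style trampoline written above
   (illustrative only; each field is `regsize' bytes wide, so void *
   stands in for the target register width):  */
struct aix_trampoline_sketch
{
  void *code_addr;	/* Entry point, copied from FNADDR's descriptor.  */
  void *toc_value;	/* TOC pointer, copied from the same descriptor.  */
  void *static_chain;	/* CXT, the only part unique to this trampoline.  */
};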
31949
31950 \f
31951 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
31952 identifier as an argument, so the front end shouldn't look it up. */
31953
31954 static bool
31955 rs6000_attribute_takes_identifier_p (const_tree attr_id)
31956 {
31957 return is_attribute_p ("altivec", attr_id);
31958 }
31959
31960 /* Handle the "altivec" attribute. The attribute may have
31961 arguments as follows:
31962
31963 __attribute__((altivec(vector__)))
31964 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
31965 __attribute__((altivec(bool__))) (always followed by 'unsigned')
31966
31967 and may appear more than once (e.g., 'vector bool char') in a
31968 given declaration. */
31969
31970 static tree
31971 rs6000_handle_altivec_attribute (tree *node,
31972 tree name ATTRIBUTE_UNUSED,
31973 tree args,
31974 int flags ATTRIBUTE_UNUSED,
31975 bool *no_add_attrs)
31976 {
31977 tree type = *node, result = NULL_TREE;
31978 machine_mode mode;
31979 int unsigned_p;
31980 char altivec_type
31981 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
31982 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
31983 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
31984 : '?');
31985
31986 while (POINTER_TYPE_P (type)
31987 || TREE_CODE (type) == FUNCTION_TYPE
31988 || TREE_CODE (type) == METHOD_TYPE
31989 || TREE_CODE (type) == ARRAY_TYPE)
31990 type = TREE_TYPE (type);
31991
31992 mode = TYPE_MODE (type);
31993
31994 /* Check for invalid AltiVec type qualifiers. */
31995 if (type == long_double_type_node)
31996 error ("use of %<long double%> in AltiVec types is invalid");
31997 else if (type == boolean_type_node)
31998 error ("use of boolean types in AltiVec types is invalid");
31999 else if (TREE_CODE (type) == COMPLEX_TYPE)
32000 error ("use of %<complex%> in AltiVec types is invalid");
32001 else if (DECIMAL_FLOAT_MODE_P (mode))
32002 error ("use of decimal floating point types in AltiVec types is invalid");
32003 else if (!TARGET_VSX)
32004 {
32005 if (type == long_unsigned_type_node || type == long_integer_type_node)
32006 {
32007 if (TARGET_64BIT)
32008 error ("use of %<long%> in AltiVec types is invalid for "
32009 "64-bit code without %qs", "-mvsx");
32010 else if (rs6000_warn_altivec_long)
32011 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32012 "use %<int%>");
32013 }
32014 else if (type == long_long_unsigned_type_node
32015 || type == long_long_integer_type_node)
32016 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32017 "-mvsx");
32018 else if (type == double_type_node)
32019 error ("use of %<double%> in AltiVec types is invalid without %qs",
32020 "-mvsx");
32021 }
32022
32023 switch (altivec_type)
32024 {
32025 case 'v':
32026 unsigned_p = TYPE_UNSIGNED (type);
32027 switch (mode)
32028 {
32029 case E_TImode:
32030 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32031 break;
32032 case E_DImode:
32033 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32034 break;
32035 case E_SImode:
32036 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32037 break;
32038 case E_HImode:
32039 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32040 break;
32041 case E_QImode:
32042 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32043 break;
32044 case E_SFmode: result = V4SF_type_node; break;
32045 case E_DFmode: result = V2DF_type_node; break;
32046 /* If the user says 'vector int bool', we may be handed the 'bool'
32047 attribute _before_ the 'vector' attribute, and so select the
32048 proper type in the 'b' case below. */
32049 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32050 case E_V2DImode: case E_V2DFmode:
32051 result = type;
32052 default: break;
32053 }
32054 break;
32055 case 'b':
32056 switch (mode)
32057 {
32058 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32059 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32060 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32061 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32062 default: break;
32063 }
32064 break;
32065 case 'p':
32066 switch (mode)
32067 {
32068 case E_V8HImode: result = pixel_V8HI_type_node;
32069 default: break;
32070 }
32071 default: break;
32072 }
32073
32074 /* Propagate qualifiers attached to the element type
32075 onto the vector type. */
32076 if (result && result != type && TYPE_QUALS (type))
32077 result = build_qualified_type (result, TYPE_QUALS (type));
32078
32079 *no_add_attrs = true; /* No need to hang on to the attribute. */
32080
32081 if (result)
32082 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32083
32084 return NULL_TREE;
32085 }
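
/* Usage sketch: the forms this handler receives.  The `vector',
   `pixel' and `bool' context-sensitive keywords expand to exactly
   these attributes (the typedef names here are hypothetical):  */
typedef __attribute__ ((altivec (vector__))) int sketch_v4si;
typedef __attribute__ ((altivec (pixel__))) unsigned short sketch_pixel;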
32086
32087 /* AltiVec defines five built-in scalar types that serve as vector
32088 elements; we must teach the compiler how to mangle them. The 128-bit
32089 floating point mangling is target-specific as well. */
32090
32091 static const char *
32092 rs6000_mangle_type (const_tree type)
32093 {
32094 type = TYPE_MAIN_VARIANT (type);
32095
32096 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32097 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32098 return NULL;
32099
32100 if (type == bool_char_type_node) return "U6__boolc";
32101 if (type == bool_short_type_node) return "U6__bools";
32102 if (type == pixel_type_node) return "u7__pixel";
32103 if (type == bool_int_type_node) return "U6__booli";
32104 if (type == bool_long_long_type_node) return "U6__boolx";
32105
32106 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32107 return "g";
32108 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32109 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32110
32111 /* For all other types, use the default mangling. */
32112 return NULL;
32113 }
32114
32115 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32116 struct attribute_spec.handler. */
32117
32118 static tree
32119 rs6000_handle_longcall_attribute (tree *node, tree name,
32120 tree args ATTRIBUTE_UNUSED,
32121 int flags ATTRIBUTE_UNUSED,
32122 bool *no_add_attrs)
32123 {
32124 if (TREE_CODE (*node) != FUNCTION_TYPE
32125 && TREE_CODE (*node) != FIELD_DECL
32126 && TREE_CODE (*node) != TYPE_DECL)
32127 {
32128 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32129 name);
32130 *no_add_attrs = true;
32131 }
32132
32133 return NULL_TREE;
32134 }
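
/* Usage sketch for the attribute handled above (hypothetical
   declaration); rs6000_longcall_ref below materializes the
   register-indirect sequence such a call requires:  */
extern void far_away_function (void) __attribute__ ((longcall));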
32135
32136 /* Set longcall attributes on all functions declared when
32137 rs6000_default_long_calls is true. */
32138 static void
32139 rs6000_set_default_type_attributes (tree type)
32140 {
32141 if (rs6000_default_long_calls
32142 && (TREE_CODE (type) == FUNCTION_TYPE
32143 || TREE_CODE (type) == METHOD_TYPE))
32144 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32145 NULL_TREE,
32146 TYPE_ATTRIBUTES (type));
32147
32148 #if TARGET_MACHO
32149 darwin_set_default_type_attributes (type);
32150 #endif
32151 }
32152
32153 /* Return a reference suitable for calling a function with the
32154 longcall attribute. */
32155
32156 rtx
32157 rs6000_longcall_ref (rtx call_ref)
32158 {
32159 const char *call_name;
32160 tree node;
32161
32162 if (GET_CODE (call_ref) != SYMBOL_REF)
32163 return call_ref;
32164
32165   /* System V adds '.' to the internal name, so skip any leading dots.  */
32166 call_name = XSTR (call_ref, 0);
32167 if (*call_name == '.')
32168 {
32169 while (*call_name == '.')
32170 call_name++;
32171
32172 node = get_identifier (call_name);
32173 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32174 }
32175
32176 return force_reg (Pmode, call_ref);
32177 }
32178 \f
32179 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32180 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32181 #endif
32182
32183 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32184 struct attribute_spec.handler. */
32185 static tree
32186 rs6000_handle_struct_attribute (tree *node, tree name,
32187 tree args ATTRIBUTE_UNUSED,
32188 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32189 {
32190 tree *type = NULL;
32191 if (DECL_P (*node))
32192 {
32193 if (TREE_CODE (*node) == TYPE_DECL)
32194 type = &TREE_TYPE (*node);
32195 }
32196 else
32197 type = node;
32198
32199 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32200 || TREE_CODE (*type) == UNION_TYPE)))
32201 {
32202 warning (OPT_Wattributes, "%qE attribute ignored", name);
32203 *no_add_attrs = true;
32204 }
32205
32206 else if ((is_attribute_p ("ms_struct", name)
32207 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32208 || ((is_attribute_p ("gcc_struct", name)
32209 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32210 {
32211 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32212 name);
32213 *no_add_attrs = true;
32214 }
32215
32216 return NULL_TREE;
32217 }
32218
32219 static bool
32220 rs6000_ms_bitfield_layout_p (const_tree record_type)
32221 {
32222   return ((TARGET_USE_MS_BITFIELD_LAYOUT
32223 	   && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32224 	  || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
32225 }
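
/* Usage sketch for the struct-layout attributes handled above
   (hypothetical type): request MS bit-field packing for one record
   while the rest of the translation unit keeps the GCC rules.  */
struct __attribute__ ((ms_struct)) ms_layout_sketch
{
  char tag;
  int field : 5;	/* Allocated with MS bit-field rules.  */
};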
32226 \f
32227 #ifdef USING_ELFOS_H
32228
32229 /* A get_unnamed_section callback, used for switching to toc_section. */
32230
32231 static void
32232 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32233 {
32234 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32235 && TARGET_MINIMAL_TOC)
32236 {
32237 if (!toc_initialized)
32238 {
32239 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32240 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32241 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32242 fprintf (asm_out_file, "\t.tc ");
32243 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32244 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32245 fprintf (asm_out_file, "\n");
32246
32247 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32248 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32249 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32250 fprintf (asm_out_file, " = .+32768\n");
32251 toc_initialized = 1;
32252 }
32253 else
32254 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32255 }
32256 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32257 {
32258 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32259 if (!toc_initialized)
32260 {
32261 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32262 toc_initialized = 1;
32263 }
32264 }
32265 else
32266 {
32267 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32268 if (!toc_initialized)
32269 {
32270 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32271 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32272 fprintf (asm_out_file, " = .+32768\n");
32273 toc_initialized = 1;
32274 }
32275 }
32276 }
32277
32278 /* Implement TARGET_ASM_INIT_SECTIONS. */
32279
32280 static void
32281 rs6000_elf_asm_init_sections (void)
32282 {
32283 toc_section
32284 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32285
32286 sdata2_section
32287 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32288 SDATA2_SECTION_ASM_OP);
32289 }
32290
32291 /* Implement TARGET_SELECT_RTX_SECTION. */
32292
32293 static section *
32294 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32295 unsigned HOST_WIDE_INT align)
32296 {
32297 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32298 return toc_section;
32299 else
32300 return default_elf_select_rtx_section (mode, x, align);
32301 }
32302 \f
32303 /* For a SYMBOL_REF, set generic flags and then perform some
32304 target-specific processing.
32305
32306 When the AIX ABI is requested on a non-AIX system, replace the
32307 function name with the real name (with a leading .) rather than the
32308 function descriptor name. This saves a lot of overriding code to
32309 read the prefixes. */
32310
32311 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32312 static void
32313 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32314 {
32315 default_encode_section_info (decl, rtl, first);
32316
32317 if (first
32318 && TREE_CODE (decl) == FUNCTION_DECL
32319 && !TARGET_AIX
32320 && DEFAULT_ABI == ABI_AIX)
32321 {
32322 rtx sym_ref = XEXP (rtl, 0);
32323 size_t len = strlen (XSTR (sym_ref, 0));
32324 char *str = XALLOCAVEC (char, len + 2);
32325 str[0] = '.';
32326 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32327 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32328 }
32329 }
32330
32331 static inline bool
32332 compare_section_name (const char *section, const char *templ)
32333 {
32334 int len;
32335
32336 len = strlen (templ);
32337 return (strncmp (section, templ, len) == 0
32338 && (section[len] == 0 || section[len] == '.'));
32339 }
32340
32341 bool
32342 rs6000_elf_in_small_data_p (const_tree decl)
32343 {
32344 if (rs6000_sdata == SDATA_NONE)
32345 return false;
32346
32347 /* We want to merge strings, so we never consider them small data. */
32348 if (TREE_CODE (decl) == STRING_CST)
32349 return false;
32350
32351 /* Functions are never in the small data area. */
32352 if (TREE_CODE (decl) == FUNCTION_DECL)
32353 return false;
32354
32355 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32356 {
32357 const char *section = DECL_SECTION_NAME (decl);
32358 if (compare_section_name (section, ".sdata")
32359 || compare_section_name (section, ".sdata2")
32360 || compare_section_name (section, ".gnu.linkonce.s")
32361 || compare_section_name (section, ".sbss")
32362 || compare_section_name (section, ".sbss2")
32363 || compare_section_name (section, ".gnu.linkonce.sb")
32364 || strcmp (section, ".PPC.EMB.sdata0") == 0
32365 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32366 return true;
32367 }
32368 else
32369 {
32370 /* If we are told not to put readonly data in sdata, then don't. */
32371 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
32372 && !rs6000_readonly_in_sdata)
32373 return false;
32374
32375 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32376
32377 if (size > 0
32378 && size <= g_switch_value
32379 /* If it's not public, and we're not going to reference it there,
32380 there's no need to put it in the small data section. */
32381 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32382 return true;
32383 }
32384
32385 return false;
32386 }
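
/* Illustrative declarations, assuming -msdata=data and -G 8: the
   scalar below would qualify for the small data area, the array
   would not (these objects are hypothetical, not part of GCC).  */
int sdata_sketch_counter;	/* Size 4 <= g_switch_value.  */
int sdata_sketch_table[16];	/* Size 64 > g_switch_value.  */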
32387
32388 #endif /* USING_ELFOS_H */
32389 \f
32390 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32391
32392 static bool
32393 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32394 {
32395 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32396 }
32397
32398 /* Do not place thread-local symbols refs in the object blocks. */
32399
32400 static bool
32401 rs6000_use_blocks_for_decl_p (const_tree decl)
32402 {
32403 return !DECL_THREAD_LOCAL_P (decl);
32404 }
32405 \f
32406 /* Return a REG that occurs in ADDR with coefficient 1.
32407 ADDR can be effectively incremented by incrementing REG.
32408
32409 r0 is special and we must not select it as an address
32410 register by this routine since our caller will try to
32411 increment the returned register via an "la" instruction. */
32412
32413 rtx
32414 find_addr_reg (rtx addr)
32415 {
32416 while (GET_CODE (addr) == PLUS)
32417 {
32418 if (GET_CODE (XEXP (addr, 0)) == REG
32419 && REGNO (XEXP (addr, 0)) != 0)
32420 addr = XEXP (addr, 0);
32421 else if (GET_CODE (XEXP (addr, 1)) == REG
32422 && REGNO (XEXP (addr, 1)) != 0)
32423 addr = XEXP (addr, 1);
32424 else if (CONSTANT_P (XEXP (addr, 0)))
32425 addr = XEXP (addr, 1);
32426 else if (CONSTANT_P (XEXP (addr, 1)))
32427 addr = XEXP (addr, 0);
32428 else
32429 gcc_unreachable ();
32430 }
32431 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32432 return addr;
32433 }
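
/* Example use of find_addr_reg (a hypothetical caller): for the
   address (plus (reg 9) (const_int 8)) the walk above returns
   register 9.  */
static rtx ATTRIBUTE_UNUSED
find_addr_reg_example (void)
{
  rtx addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 9), GEN_INT (8));
  return find_addr_reg (addr);	/* Yields (reg 9).  */
}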
32434
32435 void
32436 rs6000_fatal_bad_address (rtx op)
32437 {
32438 fatal_insn ("bad address", op);
32439 }
32440
32441 #if TARGET_MACHO
32442
32443 typedef struct branch_island_d {
32444 tree function_name;
32445 tree label_name;
32446 int line_number;
32447 } branch_island;
32448
32449
32450 static vec<branch_island, va_gc> *branch_islands;
32451
32452 /* Remember to generate a branch island for far calls to the given
32453 function. */
32454
32455 static void
32456 add_compiler_branch_island (tree label_name, tree function_name,
32457 int line_number)
32458 {
32459 branch_island bi = {function_name, label_name, line_number};
32460 vec_safe_push (branch_islands, bi);
32461 }
32462
32463 /* Generate far-jump branch islands for everything recorded in
32464 branch_islands. Invoked immediately after the last instruction of
32465 the epilogue has been emitted; the branch islands must be appended
32466 to, and contiguous with, the function body. Mach-O stubs are
32467 generated in machopic_output_stub(). */
32468
32469 static void
32470 macho_branch_islands (void)
32471 {
32472 char tmp_buf[512];
32473
32474 while (!vec_safe_is_empty (branch_islands))
32475 {
32476 branch_island *bi = &branch_islands->last ();
32477 const char *label = IDENTIFIER_POINTER (bi->label_name);
32478 const char *name = IDENTIFIER_POINTER (bi->function_name);
32479 char name_buf[512];
32480 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32481 if (name[0] == '*' || name[0] == '&')
32482 strcpy (name_buf, name+1);
32483 else
32484 {
32485 name_buf[0] = '_';
32486 strcpy (name_buf+1, name);
32487 }
32488 strcpy (tmp_buf, "\n");
32489 strcat (tmp_buf, label);
32490 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32491 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32492 dbxout_stabd (N_SLINE, bi->line_number);
32493 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32494 if (flag_pic)
32495 {
32496 if (TARGET_LINK_STACK)
32497 {
32498 char name[32];
32499 get_ppc476_thunk_name (name);
32500 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32501 strcat (tmp_buf, name);
32502 strcat (tmp_buf, "\n");
32503 strcat (tmp_buf, label);
32504 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32505 }
32506 else
32507 {
32508 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32509 strcat (tmp_buf, label);
32510 strcat (tmp_buf, "_pic\n");
32511 strcat (tmp_buf, label);
32512 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32513 }
32514
32515 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32516 strcat (tmp_buf, name_buf);
32517 strcat (tmp_buf, " - ");
32518 strcat (tmp_buf, label);
32519 strcat (tmp_buf, "_pic)\n");
32520
32521 strcat (tmp_buf, "\tmtlr r0\n");
32522
32523 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32524 strcat (tmp_buf, name_buf);
32525 strcat (tmp_buf, " - ");
32526 strcat (tmp_buf, label);
32527 strcat (tmp_buf, "_pic)\n");
32528
32529 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32530 }
32531 else
32532 {
32533 strcat (tmp_buf, ":\nlis r12,hi16(");
32534 strcat (tmp_buf, name_buf);
32535 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32536 strcat (tmp_buf, name_buf);
32537 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32538 }
32539 output_asm_insn (tmp_buf, 0);
32540 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32541 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32542 dbxout_stabd (N_SLINE, bi->line_number);
32543 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32544 branch_islands->pop ();
32545 }
32546 }
32547
32548 /* NO_PREVIOUS_DEF checks whether the function name already appears in
32549    the list of branch islands recorded so far.  */
32550
32551 static int
32552 no_previous_def (tree function_name)
32553 {
32554 branch_island *bi;
32555 unsigned ix;
32556
32557 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32558 if (function_name == bi->function_name)
32559 return 0;
32560 return 1;
32561 }
32562
32563 /* GET_PREV_LABEL gets the label name from the previous definition of
32564 the function. */
32565
32566 static tree
32567 get_prev_label (tree function_name)
32568 {
32569 branch_island *bi;
32570 unsigned ix;
32571
32572 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32573 if (function_name == bi->function_name)
32574 return bi->label_name;
32575 return NULL_TREE;
32576 }
32577
32578 /* INSN is either a function call or a millicode call. It may have an
32579 unconditional jump in its delay slot.
32580
32581 CALL_DEST is the routine we are calling. */
32582
32583 char *
32584 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
32585 int cookie_operand_number)
32586 {
32587 static char buf[256];
32588 if (darwin_emit_branch_islands
32589 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
32590 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
32591 {
32592 tree labelname;
32593 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
32594
32595 if (no_previous_def (funname))
32596 {
32597 rtx label_rtx = gen_label_rtx ();
32598 char *label_buf, temp_buf[256];
32599 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32600 CODE_LABEL_NUMBER (label_rtx));
32601 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32602 labelname = get_identifier (label_buf);
32603 add_compiler_branch_island (labelname, funname, insn_line (insn));
32604 }
32605 else
32606 labelname = get_prev_label (funname);
32607
32608 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
32609 instruction will reach 'foo', otherwise link as 'bl L42'".
32610 "L42" should be a 'branch island', that will do a far jump to
32611 'foo'. Branch islands are generated in
32612 macho_branch_islands(). */
32613 sprintf (buf, "jbsr %%z%d,%.246s",
32614 dest_operand_number, IDENTIFIER_POINTER (labelname));
32615 }
32616 else
32617 sprintf (buf, "bl %%z%d", dest_operand_number);
32618 return buf;
32619 }
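
/* For illustration only (hypothetical operand number 0 and island label
   "L42"): a long call formats buf as "jbsr %z0,L42", letting the linker
   emit "bl foo" when a 'bl' reaches and "bl L42" otherwise, while a
   short call is simply "bl %z0".  */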
32620
32621 /* Generate PIC and indirect symbol stubs. */
32622
32623 void
32624 machopic_output_stub (FILE *file, const char *symb, const char *stub)
32625 {
32626 unsigned int length;
32627 char *symbol_name, *lazy_ptr_name;
32628 char *local_label_0;
32629 static int label = 0;
32630
32631 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
32632 symb = (*targetm.strip_name_encoding) (symb);
32633
32635 length = strlen (symb);
32636 symbol_name = XALLOCAVEC (char, length + 32);
32637 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
32638
32639 lazy_ptr_name = XALLOCAVEC (char, length + 32);
32640 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
32641
32642 if (flag_pic == 2)
32643 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
32644 else
32645 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
32646
32647 if (flag_pic == 2)
32648 {
32649 fprintf (file, "\t.align 5\n");
32650
32651 fprintf (file, "%s:\n", stub);
32652 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32653
32654 label++;
32655 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
32656 sprintf (local_label_0, "\"L%011d$spb\"", label);
32657
32658 fprintf (file, "\tmflr r0\n");
32659 if (TARGET_LINK_STACK)
32660 {
32661 char name[32];
32662 get_ppc476_thunk_name (name);
32663 fprintf (file, "\tbl %s\n", name);
32664 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32665 }
32666 else
32667 {
32668 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
32669 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32670 }
32671 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
32672 lazy_ptr_name, local_label_0);
32673 fprintf (file, "\tmtlr r0\n");
32674 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
32675 (TARGET_64BIT ? "ldu" : "lwzu"),
32676 lazy_ptr_name, local_label_0);
32677 fprintf (file, "\tmtctr r12\n");
32678 fprintf (file, "\tbctr\n");
32679 }
32680 else
32681 {
32682 fprintf (file, "\t.align 4\n");
32683
32684 fprintf (file, "%s:\n", stub);
32685 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32686
32687 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
32688 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
32689 (TARGET_64BIT ? "ldu" : "lwzu"),
32690 lazy_ptr_name);
32691 fprintf (file, "\tmtctr r12\n");
32692 fprintf (file, "\tbctr\n");
32693 }
32694
32695 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
32696 fprintf (file, "%s:\n", lazy_ptr_name);
32697 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32698 fprintf (file, "%sdyld_stub_binding_helper\n",
32699 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
32700 }
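
/* For illustration only (hypothetical symbol "bar", 32-bit, non-PIC;
   the exact label spellings come from GEN_SYMBOL_NAME_FOR_SYMBOL and
   GEN_LAZY_PTR_NAME_FOR_SYMBOL), the stub emitted above is roughly:

	.align 4
   L_bar$stub:
	.indirect_symbol _bar
	lis r11,ha16(L_bar$lazy_ptr)
	lwzu r12,lo16(L_bar$lazy_ptr)(r11)
	mtctr r12
	bctr
   L_bar$lazy_ptr:
	.indirect_symbol _bar
	.long dyld_stub_binding_helper  */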
32701
32702 /* Legitimize PIC addresses. If the address is already
32703 position-independent, we return ORIG. Newly generated
32704 position-independent addresses go into a reg. This is REG if
32705 nonzero, otherwise we allocate register(s) as necessary. */
32706
32707 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
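/* For example, SMALL_INT accepts exactly the signed 16-bit range
   -32768 .. 32767: adding 0x8000 maps that range onto 0 .. 0xffff, and
   any other value overflows the unsigned comparison.  */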
32708
32709 rtx
32710 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
32711 rtx reg)
32712 {
32713 rtx base, offset;
32714
32715 if (reg == NULL && !reload_completed)
32716 reg = gen_reg_rtx (Pmode);
32717
32718 if (GET_CODE (orig) == CONST)
32719 {
32720 rtx reg_temp;
32721
32722 if (GET_CODE (XEXP (orig, 0)) == PLUS
32723 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
32724 return orig;
32725
32726 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
32727
32728 /* Use a different reg for the intermediate value, as
32729 it will be marked UNCHANGING. */
32730 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
32731 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
32732 Pmode, reg_temp);
32733 offset =
32734 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
32735 Pmode, reg);
32736
32737 if (GET_CODE (offset) == CONST_INT)
32738 {
32739 if (SMALL_INT (offset))
32740 return plus_constant (Pmode, base, INTVAL (offset));
32741 else if (!reload_completed)
32742 offset = force_reg (Pmode, offset);
32743 else
32744 {
32745 rtx mem = force_const_mem (Pmode, orig);
32746 return machopic_legitimize_pic_address (mem, Pmode, reg);
32747 }
32748 }
32749 return gen_rtx_PLUS (Pmode, base, offset);
32750 }
32751
32752 /* Fall back on generic machopic code. */
32753 return machopic_legitimize_pic_address (orig, mode, reg);
32754 }
32755
32756 /* Output a .machine directive for the Darwin assembler, and call
32757 the generic start_file routine. */
32758
32759 static void
32760 rs6000_darwin_file_start (void)
32761 {
32762 static const struct
32763 {
32764 const char *arg;
32765 const char *name;
32766 HOST_WIDE_INT if_set;
32767 } mapping[] = {
32768 { "ppc64", "ppc64", MASK_64BIT },
32769 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
32770 { "power4", "ppc970", 0 },
32771 { "G5", "ppc970", 0 },
32772 { "7450", "ppc7450", 0 },
32773 { "7400", "ppc7400", MASK_ALTIVEC },
32774 { "G4", "ppc7400", 0 },
32775 { "750", "ppc750", 0 },
32776 { "740", "ppc750", 0 },
32777 { "G3", "ppc750", 0 },
32778 { "604e", "ppc604e", 0 },
32779 { "604", "ppc604", 0 },
32780 { "603e", "ppc603", 0 },
32781 { "603", "ppc603", 0 },
32782 { "601", "ppc601", 0 },
32783 { NULL, "ppc", 0 } };
32784 const char *cpu_id = "";
32785 size_t i;
32786
32787 rs6000_file_start ();
32788 darwin_file_start ();
32789
32790 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
32791
32792 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
32793 cpu_id = rs6000_default_cpu;
32794
32795 if (global_options_set.x_rs6000_cpu_index)
32796 cpu_id = processor_target_table[rs6000_cpu_index].name;
32797
32798 /* Look through the mapping array. Pick the first name that either
32799 matches the argument, has a bit set in IF_SET that is also set
32800 in the target flags, or has a NULL name. */
32801
32802 i = 0;
32803 while (mapping[i].arg != NULL
32804 && strcmp (mapping[i].arg, cpu_id) != 0
32805 && (mapping[i].if_set & rs6000_isa_flags) == 0)
32806 i++;
32807
32808 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
32809 }
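
/* For example, -mcpu=G5 stops the scan above at a "ppc970" row
   (matching either the cpu string or one of its ISA flag bits) and
   emits "\t.machine ppc970"; with nothing matched, the NULL sentinel
   row falls back to ".machine ppc".  */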
32810
32811 #endif /* TARGET_MACHO */
32812
32813 #if TARGET_ELF
32814 static int
32815 rs6000_elf_reloc_rw_mask (void)
32816 {
32817 if (flag_pic)
32818 return 3;
32819 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32820 return 2;
32821 else
32822 return 0;
32823 }
32824
32825 /* Record an element in the table of global constructors. SYMBOL is
32826 a SYMBOL_REF of the function to be called; PRIORITY is a number
32827 between 0 and MAX_INIT_PRIORITY.
32828
32829 This differs from default_named_section_asm_out_constructor in
32830 that we have special handling for -mrelocatable. */
32831
32832 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
32833 static void
32834 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
32835 {
32836 const char *section = ".ctors";
32837 char buf[18];
32838
32839 if (priority != DEFAULT_INIT_PRIORITY)
32840 {
32841 sprintf (buf, ".ctors.%.5u",
32842 /* Invert the numbering so the linker puts us in the proper
32843 order; constructors are run from right to left, and the
32844 linker sorts in increasing order. */
32845 MAX_INIT_PRIORITY - priority);
32846 section = buf;
32847 }
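
/* Worked example, with MAX_INIT_PRIORITY == 65535: priority 101 maps
   to ".ctors.65434" and priority 65535 to ".ctors.00000". The linker's
   increasing sort places ".ctors.65434" nearer the end of .ctors, and
   right-to-left execution then runs the priority-101 constructor
   first, as the smaller init_priority requires.  */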
32848
32849 switch_to_section (get_section (section, SECTION_WRITE, NULL));
32850 assemble_align (POINTER_SIZE);
32851
32852 if (DEFAULT_ABI == ABI_V4
32853 && (TARGET_RELOCATABLE || flag_pic > 1))
32854 {
32855 fputs ("\t.long (", asm_out_file);
32856 output_addr_const (asm_out_file, symbol);
32857 fputs (")@fixup\n", asm_out_file);
32858 }
32859 else
32860 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
32861 }
32862
32863 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
32864 static void
32865 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
32866 {
32867 const char *section = ".dtors";
32868 char buf[18];
32869
32870 if (priority != DEFAULT_INIT_PRIORITY)
32871 {
32872 sprintf (buf, ".dtors.%.5u",
32873 /* Invert the numbering so the linker puts us in the proper
32874 order; constructors are run from right to left, and the
32875 linker sorts in increasing order. */
32876 MAX_INIT_PRIORITY - priority);
32877 section = buf;
32878 }
32879
32880 switch_to_section (get_section (section, SECTION_WRITE, NULL));
32881 assemble_align (POINTER_SIZE);
32882
32883 if (DEFAULT_ABI == ABI_V4
32884 && (TARGET_RELOCATABLE || flag_pic > 1))
32885 {
32886 fputs ("\t.long (", asm_out_file);
32887 output_addr_const (asm_out_file, symbol);
32888 fputs (")@fixup\n", asm_out_file);
32889 }
32890 else
32891 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
32892 }
32893
32894 void
32895 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
32896 {
32897 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
32898 {
32899 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
32900 ASM_OUTPUT_LABEL (file, name);
32901 fputs (DOUBLE_INT_ASM_OP, file);
32902 rs6000_output_function_entry (file, name);
32903 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
32904 if (DOT_SYMBOLS)
32905 {
32906 fputs ("\t.size\t", file);
32907 assemble_name (file, name);
32908 fputs (",24\n\t.type\t.", file);
32909 assemble_name (file, name);
32910 fputs (",@function\n", file);
32911 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
32912 {
32913 fputs ("\t.globl\t.", file);
32914 assemble_name (file, name);
32915 putc ('\n', file);
32916 }
32917 }
32918 else
32919 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
32920 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
32921 rs6000_output_function_entry (file, name);
32922 fputs (":\n", file);
32923 return;
32924 }
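
/* For illustration (hypothetical function "foo", DOT_SYMBOLS in effect,
   and assuming DOUBLE_INT_ASM_OP is "\t.quad\t"), the ELFv1 descriptor
   emitted above is roughly:

	.section ".opd","aw"
	.align 3
   foo:
	.quad .foo,.TOC.@tocbase,0
	.previous
	.size foo,24
	.type .foo,@function
	.globl .foo
   .foo:  */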
32925
32926 int uses_toc;
32927 if (DEFAULT_ABI == ABI_V4
32928 && (TARGET_RELOCATABLE || flag_pic > 1)
32929 && !TARGET_SECURE_PLT
32930 && (!constant_pool_empty_p () || crtl->profile)
32931 && (uses_toc = uses_TOC ()))
32932 {
32933 char buf[256];
32934
32935 if (uses_toc == 2)
32936 switch_to_other_text_partition ();
32937 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
32938
32939 fprintf (file, "\t.long ");
32940 assemble_name (file, toc_label_name);
32941 need_toc_init = 1;
32942 putc ('-', file);
32943 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
32944 assemble_name (file, buf);
32945 putc ('\n', file);
32946 if (uses_toc == 2)
32947 switch_to_other_text_partition ();
32948 }
32949
32950 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
32951 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
32952
32953 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
32954 {
32955 char buf[256];
32956
32957 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
32958
32959 fprintf (file, "\t.quad .TOC.-");
32960 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
32961 assemble_name (file, buf);
32962 putc ('\n', file);
32963 }
32964
32965 if (DEFAULT_ABI == ABI_AIX)
32966 {
32967 const char *desc_name, *orig_name;
32968
32969 orig_name = (*targetm.strip_name_encoding) (name);
32970 desc_name = orig_name;
32971 while (*desc_name == '.')
32972 desc_name++;
32973
32974 if (TREE_PUBLIC (decl))
32975 fprintf (file, "\t.globl %s\n", desc_name);
32976
32977 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32978 fprintf (file, "%s:\n", desc_name);
32979 fprintf (file, "\t.long %s\n", orig_name);
32980 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
32981 fputs ("\t.long 0\n", file);
32982 fprintf (file, "\t.previous\n");
32983 }
32984 ASM_OUTPUT_LABEL (file, name);
32985 }
32986
32987 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
32988 static void
32989 rs6000_elf_file_end (void)
32990 {
32991 #ifdef HAVE_AS_GNU_ATTRIBUTE
32992 /* ??? The value emitted depends on options active at file end.
32993 Assume anyone using #pragma or attributes that might change
32994 options knows what they are doing. */
32995 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
32996 && rs6000_passes_float)
32997 {
32998 int fp;
32999
33000 if (TARGET_HARD_FLOAT)
33001 fp = 1;
33002 else
33003 fp = 2;
33004 if (rs6000_passes_long_double)
33005 {
33006 if (!TARGET_LONG_DOUBLE_128)
33007 fp |= 2 * 4;
33008 else if (TARGET_IEEEQUAD)
33009 fp |= 3 * 4;
33010 else
33011 fp |= 1 * 4;
33012 }
33013 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33014 }
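
/* For example, hard float with 128-bit IBM long double gives
   fp = 1 | 1*4 == 5, so "\t.gnu_attribute 4, 5" is emitted above.  */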
33015 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33016 {
33017 if (rs6000_passes_vector)
33018 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33019 (TARGET_ALTIVEC_ABI ? 2 : 1));
33020 if (rs6000_returns_struct)
33021 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33022 aix_struct_return ? 2 : 1);
33023 }
33024 #endif
33025 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33026 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33027 file_end_indicate_exec_stack ();
33028 #endif
33029
33030 if (flag_split_stack)
33031 file_end_indicate_split_stack ();
33032
33033 if (cpu_builtin_p)
33034 {
33035 /* We have expanded a CPU builtin, so we need to emit a reference to
33036 the special symbol that LIBC uses to declare it supports the
33037 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33038 switch_to_section (data_section);
33039 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33040 fprintf (asm_out_file, "\t%s %s\n",
33041 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33042 }
33043 }
33044 #endif
33045
33046 #if TARGET_XCOFF
33047
33048 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33049 #define HAVE_XCOFF_DWARF_EXTRAS 0
33050 #endif
33051
33052 static enum unwind_info_type
33053 rs6000_xcoff_debug_unwind_info (void)
33054 {
33055 return UI_NONE;
33056 }
33057
33058 static void
33059 rs6000_xcoff_asm_output_anchor (rtx symbol)
33060 {
33061 char buffer[100];
33062
33063 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33064 SYMBOL_REF_BLOCK_OFFSET (symbol));
33065 fprintf (asm_out_file, "%s", SET_ASM_OP);
33066 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33067 fprintf (asm_out_file, ",");
33068 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33069 fprintf (asm_out_file, "\n");
33070 }
33071
33072 static void
33073 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33074 {
33075 fputs (GLOBAL_ASM_OP, stream);
33076 RS6000_OUTPUT_BASENAME (stream, name);
33077 putc ('\n', stream);
33078 }
33079
33080 /* A get_unnamed_section callback, used for read-only sections.
33081 DIRECTIVE points to the section string variable. */
33082
33083 static void
33084 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33085 {
33086 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33087 *(const char *const *) directive,
33088 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33089 }
33090
33091 /* Likewise for read-write sections. */
33092
33093 static void
33094 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33095 {
33096 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33097 *(const char *const *) directive,
33098 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33099 }
33100
33101 static void
33102 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33103 {
33104 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33105 *(const char *const *) directive,
33106 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33107 }
33108
33109 /* A get_unnamed_section callback, used for switching to toc_section. */
33110
33111 static void
33112 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33113 {
33114 if (TARGET_MINIMAL_TOC)
33115 {
33116 /* toc_section is always selected at least once from
33117 rs6000_xcoff_file_start, so this is guaranteed to
33118 always be defined once and only once in each file. */
33119 if (!toc_initialized)
33120 {
33121 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33122 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33123 toc_initialized = 1;
33124 }
33125 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33126 (TARGET_32BIT ? "" : ",3"));
33127 }
33128 else
33129 fputs ("\t.toc\n", asm_out_file);
33130 }
33131
33132 /* Implement TARGET_ASM_INIT_SECTIONS. */
33133
33134 static void
33135 rs6000_xcoff_asm_init_sections (void)
33136 {
33137 read_only_data_section
33138 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33139 &xcoff_read_only_section_name);
33140
33141 private_data_section
33142 = get_unnamed_section (SECTION_WRITE,
33143 rs6000_xcoff_output_readwrite_section_asm_op,
33144 &xcoff_private_data_section_name);
33145
33146 tls_data_section
33147 = get_unnamed_section (SECTION_TLS,
33148 rs6000_xcoff_output_tls_section_asm_op,
33149 &xcoff_tls_data_section_name);
33150
33151 tls_private_data_section
33152 = get_unnamed_section (SECTION_TLS,
33153 rs6000_xcoff_output_tls_section_asm_op,
33154 &xcoff_private_data_section_name);
33155
33156 read_only_private_data_section
33157 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33158 &xcoff_private_data_section_name);
33159
33160 toc_section
33161 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33162
33163 readonly_data_section = read_only_data_section;
33164 }
33165
33166 static int
33167 rs6000_xcoff_reloc_rw_mask (void)
33168 {
33169 return 3;
33170 }
33171
33172 static void
33173 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33174 tree decl ATTRIBUTE_UNUSED)
33175 {
33176 int smclass;
33177 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33178
33179 if (flags & SECTION_EXCLUDE)
33180 smclass = 4;
33181 else if (flags & SECTION_DEBUG)
33182 {
33183 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33184 return;
33185 }
33186 else if (flags & SECTION_CODE)
33187 smclass = 0;
33188 else if (flags & SECTION_TLS)
33189 smclass = 3;
33190 else if (flags & SECTION_WRITE)
33191 smclass = 2;
33192 else
33193 smclass = 1;
33194
33195 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33196 (flags & SECTION_CODE) ? "." : "",
33197 name, suffix[smclass], flags & SECTION_ENTSIZE);
33198 }
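
/* For example, a hypothetical writable data section named "mysec"
   whose SECTION_ENTSIZE bits hold 3 is emitted above as
   "\t.csect mysec[RW],3".  */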
33199
33200 #define IN_NAMED_SECTION(DECL) \
33201 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33202 && DECL_SECTION_NAME (DECL) != NULL)
33203
33204 static section *
33205 rs6000_xcoff_select_section (tree decl, int reloc,
33206 unsigned HOST_WIDE_INT align)
33207 {
33208 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33209 named section. */
33210 if (align > BIGGEST_ALIGNMENT)
33211 {
33212 resolve_unique_section (decl, reloc, true);
33213 if (IN_NAMED_SECTION (decl))
33214 return get_named_section (decl, NULL, reloc);
33215 }
33216
33217 if (decl_readonly_section (decl, reloc))
33218 {
33219 if (TREE_PUBLIC (decl))
33220 return read_only_data_section;
33221 else
33222 return read_only_private_data_section;
33223 }
33224 else
33225 {
33226 #if HAVE_AS_TLS
33227 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33228 {
33229 if (TREE_PUBLIC (decl))
33230 return tls_data_section;
33231 else if (bss_initializer_p (decl))
33232 {
33233 /* Convert to COMMON to emit in BSS. */
33234 DECL_COMMON (decl) = 1;
33235 return tls_comm_section;
33236 }
33237 else
33238 return tls_private_data_section;
33239 }
33240 else
33241 #endif
33242 if (TREE_PUBLIC (decl))
33243 return data_section;
33244 else
33245 return private_data_section;
33246 }
33247 }
33248
33249 static void
33250 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33251 {
33252 const char *name;
33253
33254 /* Use select_section for private data and uninitialized data with
33255 alignment <= BIGGEST_ALIGNMENT. */
33256 if (!TREE_PUBLIC (decl)
33257 || DECL_COMMON (decl)
33258 || (DECL_INITIAL (decl) == NULL_TREE
33259 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33260 || DECL_INITIAL (decl) == error_mark_node
33261 || (flag_zero_initialized_in_bss
33262 && initializer_zerop (DECL_INITIAL (decl))))
33263 return;
33264
33265 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33266 name = (*targetm.strip_name_encoding) (name);
33267 set_decl_section_name (decl, name);
33268 }
33269
33270 /* Select section for constant in constant pool.
33271
33272 On RS/6000, all constants are in the private read-only data area.
33273 However, if this is being placed in the TOC it must be output as a
33274 toc entry. */
33275
33276 static section *
33277 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33278 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33279 {
33280 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33281 return toc_section;
33282 else
33283 return read_only_private_data_section;
33284 }
33285
33286 /* Remove any trailing [DS] or the like from the symbol name. */
33287
33288 static const char *
33289 rs6000_xcoff_strip_name_encoding (const char *name)
33290 {
33291 size_t len;
33292 if (*name == '*')
33293 name++;
33294 len = strlen (name);
33295 if (name[len - 1] == ']')
33296 return ggc_alloc_string (name, len - 4);
33297 else
33298 return name;
33299 }
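
/* For example, "*foo[DS]" is stripped to "foo": the leading '*' is
   skipped and the four-character mapping class "[DS]" is dropped,
   while a plain name such as "bar" is returned unchanged.  */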
33300
33301 /* Section attributes. AIX is always PIC. */
33302
33303 static unsigned int
33304 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33305 {
33306 unsigned int align;
33307 unsigned int flags = default_section_type_flags (decl, name, reloc);
33308
33309 /* Align to at least UNIT size. */
33310 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33311 align = MIN_UNITS_PER_WORD;
33312 else
33313 /* Increase alignment of large objects if not already stricter. */
33314 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33315 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33316 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33317
33318 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33319 }
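
/* For example, a 16-byte-aligned object stores exact_log2 (16) == 4
   in the SECTION_ENTSIZE bits, which rs6000_xcoff_asm_named_section
   above prints as the csect alignment operand.  */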
33320
33321 /* Output at beginning of assembler file.
33322
33323 Initialize the section names for the RS/6000 at this point.
33324
33325 Specify filename, including full path, to assembler.
33326
33327 We want to go into the TOC section so at least one .toc will be emitted.
33328 Also, in order to output proper .bs/.es pairs, we need at least one static
33329 [RW] section emitted.
33330
33331 Finally, declare mcount when profiling to make the assembler happy. */
33332
33333 static void
33334 rs6000_xcoff_file_start (void)
33335 {
33336 rs6000_gen_section_name (&xcoff_bss_section_name,
33337 main_input_filename, ".bss_");
33338 rs6000_gen_section_name (&xcoff_private_data_section_name,
33339 main_input_filename, ".rw_");
33340 rs6000_gen_section_name (&xcoff_read_only_section_name,
33341 main_input_filename, ".ro_");
33342 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33343 main_input_filename, ".tls_");
33344 rs6000_gen_section_name (&xcoff_tbss_section_name,
33345 main_input_filename, ".tbss_[UL]");
33346
33347 fputs ("\t.file\t", asm_out_file);
33348 output_quoted_string (asm_out_file, main_input_filename);
33349 fputc ('\n', asm_out_file);
33350 if (write_symbols != NO_DEBUG)
33351 switch_to_section (private_data_section);
33352 switch_to_section (toc_section);
33353 switch_to_section (text_section);
33354 if (profile_flag)
33355 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33356 rs6000_file_start ();
33357 }
33358
33359 /* Output at end of assembler file.
33360 On the RS/6000, referencing data should automatically pull in text. */
33361
33362 static void
33363 rs6000_xcoff_file_end (void)
33364 {
33365 switch_to_section (text_section);
33366 fputs ("_section_.text:\n", asm_out_file);
33367 switch_to_section (data_section);
33368 fputs (TARGET_32BIT
33369 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33370 asm_out_file);
33371 }
33372
33373 struct declare_alias_data
33374 {
33375 FILE *file;
33376 bool function_descriptor;
33377 };
33378
33379 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33380
33381 static bool
33382 rs6000_declare_alias (struct symtab_node *n, void *d)
33383 {
33384 struct declare_alias_data *data = (struct declare_alias_data *)d;
33385 /* The main symbol is output specially, because the varasm machinery does
33386 part of the job for us; we do not need to emit .globl/.lglobl and such. */
33387 if (!n->alias || n->weakref)
33388 return false;
33389
33390 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33391 return false;
33392
33393 /* Prevent assemble_alias from trying to use .set pseudo operation
33394 that does not behave as expected by the middle-end. */
33395 TREE_ASM_WRITTEN (n->decl) = true;
33396
33397 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33398 char *buffer = (char *) alloca (strlen (name) + 2);
33399 char *p;
33400 int dollar_inside = 0;
33401
33402 strcpy (buffer, name);
33403 p = strchr (buffer, '$');
33404 while (p) {
33405 *p = '_';
33406 dollar_inside++;
33407 p = strchr (p + 1, '$');
33408 }
33409 if (TREE_PUBLIC (n->decl))
33410 {
33411 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33412 {
33413 if (dollar_inside) {
33414 if (data->function_descriptor)
33415 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33416 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33417 }
33418 if (data->function_descriptor)
33419 {
33420 fputs ("\t.globl .", data->file);
33421 RS6000_OUTPUT_BASENAME (data->file, buffer);
33422 putc ('\n', data->file);
33423 }
33424 fputs ("\t.globl ", data->file);
33425 RS6000_OUTPUT_BASENAME (data->file, buffer);
33426 putc ('\n', data->file);
33427 }
33428 #ifdef ASM_WEAKEN_DECL
33429 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33430 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33431 #endif
33432 }
33433 else
33434 {
33435 if (dollar_inside)
33436 {
33437 if (data->function_descriptor)
33438 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33439 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33440 }
33441 if (data->function_descriptor)
33442 {
33443 fputs ("\t.lglobl .", data->file);
33444 RS6000_OUTPUT_BASENAME (data->file, buffer);
33445 putc ('\n', data->file);
33446 }
33447 fputs ("\t.lglobl ", data->file);
33448 RS6000_OUTPUT_BASENAME (data->file, buffer);
33449 putc ('\n', data->file);
33450 }
33451 if (data->function_descriptor)
33452 fputs (".", data->file);
33453 RS6000_OUTPUT_BASENAME (data->file, buffer);
33454 fputs (":\n", data->file);
33455 return false;
33456 }
33457
33458
33459 #ifdef HAVE_GAS_HIDDEN
33460 /* Helper function to calculate visibility of a DECL
33461 and return the value as a const string. */
33462
33463 static const char *
33464 rs6000_xcoff_visibility (tree decl)
33465 {
33466 static const char * const visibility_types[] = {
33467 "", ",protected", ",hidden", ",internal"
33468 };
33469
33470 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33471 return visibility_types[vis];
33472 }
33473 #endif
33474
33475
33476 /* This function produces the initial definition of a function name.
33477 On the RS/6000, we need to place an extra '.' in the function name and
33478 output the function descriptor.
33479 Dollar signs are converted to underscores.
33480
33481 The csect for the function will have already been created when
33482 text_section was selected. We do have to go back to that csect, however.
33483
33484 The third and fourth parameters to the .function pseudo-op (16 and 044)
33485 are placeholders which no longer have any use.
33486
33487 Because the AIX assembler's .set command has unexpected semantics, we output
33488 all aliases as alternative labels in front of the definition. */
33489
33490 void
33491 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33492 {
33493 char *buffer = (char *) alloca (strlen (name) + 1);
33494 char *p;
33495 int dollar_inside = 0;
33496 struct declare_alias_data data = {file, false};
33497
33498 strcpy (buffer, name);
33499 p = strchr (buffer, '$');
33500 while (p) {
33501 *p = '_';
33502 dollar_inside++;
33503 p = strchr (p + 1, '$');
33504 }
33505 if (TREE_PUBLIC (decl))
33506 {
33507 if (!RS6000_WEAK || !DECL_WEAK (decl))
33508 {
33509 if (dollar_inside) {
33510 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33511 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33512 }
33513 fputs ("\t.globl .", file);
33514 RS6000_OUTPUT_BASENAME (file, buffer);
33515 #ifdef HAVE_GAS_HIDDEN
33516 fputs (rs6000_xcoff_visibility (decl), file);
33517 #endif
33518 putc ('\n', file);
33519 }
33520 }
33521 else
33522 {
33523 if (dollar_inside) {
33524 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33525 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33526 }
33527 fputs ("\t.lglobl .", file);
33528 RS6000_OUTPUT_BASENAME (file, buffer);
33529 putc ('\n', file);
33530 }
33531 fputs ("\t.csect ", file);
33532 RS6000_OUTPUT_BASENAME (file, buffer);
33533 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33534 RS6000_OUTPUT_BASENAME (file, buffer);
33535 fputs (":\n", file);
33536 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33537 &data, true);
33538 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33539 RS6000_OUTPUT_BASENAME (file, buffer);
33540 fputs (", TOC[tc0], 0\n", file);
33541 in_section = NULL;
33542 switch_to_section (function_section (decl));
33543 putc ('.', file);
33544 RS6000_OUTPUT_BASENAME (file, buffer);
33545 fputs (":\n", file);
33546 data.function_descriptor = true;
33547 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33548 &data, true);
33549 if (!DECL_IGNORED_P (decl))
33550 {
33551 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33552 xcoffout_declare_function (file, decl, buffer);
33553 else if (write_symbols == DWARF2_DEBUG)
33554 {
33555 name = (*targetm.strip_name_encoding) (name);
33556 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33557 }
33558 }
33559 return;
33560 }
33561
33562
33563 /* Output assembly language to globalize a symbol from a DECL,
33564 possibly with visibility. */
33565
33566 void
33567 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
33568 {
33569 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
33570 fputs (GLOBAL_ASM_OP, stream);
33571 RS6000_OUTPUT_BASENAME (stream, name);
33572 #ifdef HAVE_GAS_HIDDEN
33573 fputs (rs6000_xcoff_visibility (decl), stream);
33574 #endif
33575 putc ('\n', stream);
33576 }
33577
33578 /* Output assembly language to define a symbol as COMMON from a DECL,
33579 possibly with visibility. */
33580
33581 void
33582 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
33583 tree decl ATTRIBUTE_UNUSED,
33584 const char *name,
33585 unsigned HOST_WIDE_INT size,
33586 unsigned HOST_WIDE_INT align)
33587 {
33588 unsigned HOST_WIDE_INT align2 = 2;
33589
33590 if (align > 32)
33591 align2 = floor_log2 (align / BITS_PER_UNIT);
33592 else if (size > 4)
33593 align2 = 3;
33594
33595 fputs (COMMON_ASM_OP, stream);
33596 RS6000_OUTPUT_BASENAME (stream, name);
33597
33598 fprintf (stream,
33599 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
33600 size, align2);
33601
33602 #ifdef HAVE_GAS_HIDDEN
33603 if (decl != NULL)
33604 fputs (rs6000_xcoff_visibility (decl), stream);
33605 #endif
33606 putc ('\n', stream);
33607 }
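
/* For example, assuming COMMON_ASM_OP is "\t.comm\t": a 16-byte object
   with 64-bit alignment takes the floor_log2 (64/8) == 3 path above and
   is emitted as "\t.comm\tfoo,16,3" (hypothetical name "foo").  */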
33608
33609 /* This function produces the initial definition of an object (variable) name.
33610 Because the AIX assembler's .set command has unexpected semantics, we output
33611 all aliases as alternative labels in front of the definition. */
33612
33613 void
33614 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
33615 {
33616 struct declare_alias_data data = {file, false};
33617 RS6000_OUTPUT_BASENAME (file, name);
33618 fputs (":\n", file);
33619 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33620 &data, true);
33621 }
33622
33623 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
33624
33625 void
33626 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
33627 {
33628 fputs (integer_asm_op (size, FALSE), file);
33629 assemble_name (file, label);
33630 fputs ("-$", file);
33631 }
33632
33633 /* Output a symbol offset relative to the dbase for the current object.
33634 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
33635 signed offsets.
33636
33637 __gcc_unwind_dbase is embedded in all executables/libraries through
33638 libgcc/config/rs6000/crtdbase.S. */
33639
33640 void
33641 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
33642 {
33643 fputs (integer_asm_op (size, FALSE), file);
33644 assemble_name (file, label);
33645 fputs("-__gcc_unwind_dbase", file);
33646 }
33647
33648 #ifdef HAVE_AS_TLS
33649 static void
33650 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
33651 {
33652 rtx symbol;
33653 int flags;
33654 const char *symname;
33655
33656 default_encode_section_info (decl, rtl, first);
33657
33658 /* Careful not to prod global register variables. */
33659 if (!MEM_P (rtl))
33660 return;
33661 symbol = XEXP (rtl, 0);
33662 if (GET_CODE (symbol) != SYMBOL_REF)
33663 return;
33664
33665 flags = SYMBOL_REF_FLAGS (symbol);
33666
33667 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33668 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
33669
33670 SYMBOL_REF_FLAGS (symbol) = flags;
33671
33672 /* Append mapping class to extern decls. */
33673 symname = XSTR (symbol, 0);
33674 if (decl /* sync condition with assemble_external () */
33675 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
33676 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
33677 || TREE_CODE (decl) == FUNCTION_DECL)
33678 && symname[strlen (symname) - 1] != ']')
33679 {
33680 char *newname = (char *) alloca (strlen (symname) + 5);
33681 strcpy (newname, symname);
33682 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
33683 ? "[DS]" : "[UA]"));
33684 XSTR (symbol, 0) = ggc_strdup (newname);
33685 }
33686 }
33687 #endif /* HAVE_AS_TLS */
33688 #endif /* TARGET_XCOFF */
33689
33690 void
33691 rs6000_asm_weaken_decl (FILE *stream, tree decl,
33692 const char *name, const char *val)
33693 {
33694 fputs ("\t.weak\t", stream);
33695 RS6000_OUTPUT_BASENAME (stream, name);
33696 if (decl && TREE_CODE (decl) == FUNCTION_DECL
33697 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
33698 {
33699 if (TARGET_XCOFF)
33700 fputs ("[DS]", stream);
33701 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
33702 if (TARGET_XCOFF)
33703 fputs (rs6000_xcoff_visibility (decl), stream);
33704 #endif
33705 fputs ("\n\t.weak\t.", stream);
33706 RS6000_OUTPUT_BASENAME (stream, name);
33707 }
33708 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
33709 if (TARGET_XCOFF)
33710 fputs (rs6000_xcoff_visibility (decl), stream);
33711 #endif
33712 fputc ('\n', stream);
33713 if (val)
33714 {
33715 #ifdef ASM_OUTPUT_DEF
33716 ASM_OUTPUT_DEF (stream, name, val);
33717 #endif
33718 if (decl && TREE_CODE (decl) == FUNCTION_DECL
33719 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
33720 {
33721 fputs ("\t.set\t.", stream);
33722 RS6000_OUTPUT_BASENAME (stream, name);
33723 fputs (",.", stream);
33724 RS6000_OUTPUT_BASENAME (stream, val);
33725 fputc ('\n', stream);
33726 }
33727 }
33728 }
33729
33730
33731 /* Return true if INSN should not be copied. */
33732
33733 static bool
33734 rs6000_cannot_copy_insn_p (rtx_insn *insn)
33735 {
33736 return recog_memoized (insn) >= 0
33737 && get_attr_cannot_copy (insn);
33738 }
33739
33740 /* Compute a (partial) cost for rtx X. Return true if the complete
33741 cost has been computed, and false if subexpressions should be
33742 scanned. In either case, *TOTAL contains the cost result. */
33743
33744 static bool
33745 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
33746 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
33747 {
33748 int code = GET_CODE (x);
33749
33750 switch (code)
33751 {
33752 /* On the RS/6000, if it is valid in the insn, it is free. */
33753 case CONST_INT:
33754 if (((outer_code == SET
33755 || outer_code == PLUS
33756 || outer_code == MINUS)
33757 && (satisfies_constraint_I (x)
33758 || satisfies_constraint_L (x)))
33759 || (outer_code == AND
33760 && (satisfies_constraint_K (x)
33761 || (mode == SImode
33762 ? satisfies_constraint_L (x)
33763 : satisfies_constraint_J (x))))
33764 || ((outer_code == IOR || outer_code == XOR)
33765 && (satisfies_constraint_K (x)
33766 || (mode == SImode
33767 ? satisfies_constraint_L (x)
33768 : satisfies_constraint_J (x))))
33769 || outer_code == ASHIFT
33770 || outer_code == ASHIFTRT
33771 || outer_code == LSHIFTRT
33772 || outer_code == ROTATE
33773 || outer_code == ROTATERT
33774 || outer_code == ZERO_EXTRACT
33775 || (outer_code == MULT
33776 && satisfies_constraint_I (x))
33777 || ((outer_code == DIV || outer_code == UDIV
33778 || outer_code == MOD || outer_code == UMOD)
33779 && exact_log2 (INTVAL (x)) >= 0)
33780 || (outer_code == COMPARE
33781 && (satisfies_constraint_I (x)
33782 || satisfies_constraint_K (x)))
33783 || ((outer_code == EQ || outer_code == NE)
33784 && (satisfies_constraint_I (x)
33785 || satisfies_constraint_K (x)
33786 || (mode == SImode
33787 ? satisfies_constraint_L (x)
33788 : satisfies_constraint_J (x))))
33789 || (outer_code == GTU
33790 && satisfies_constraint_I (x))
33791 || (outer_code == LTU
33792 && satisfies_constraint_P (x)))
33793 {
33794 *total = 0;
33795 return true;
33796 }
33797 else if ((outer_code == PLUS
33798 && reg_or_add_cint_operand (x, VOIDmode))
33799 || (outer_code == MINUS
33800 && reg_or_sub_cint_operand (x, VOIDmode))
33801 || ((outer_code == SET
33802 || outer_code == IOR
33803 || outer_code == XOR)
33804 && (INTVAL (x)
33805 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
33806 {
33807 *total = COSTS_N_INSNS (1);
33808 return true;
33809 }
33810 /* FALLTHRU */
33811
33812 case CONST_DOUBLE:
33813 case CONST_WIDE_INT:
33814 case CONST:
33815 case HIGH:
33816 case SYMBOL_REF:
33817 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
33818 return true;
33819
33820 case MEM:
33821 /* When optimizing for size, MEM should be slightly more expensive
33822 than generating the address, e.g., (plus (reg) (const)).
33823 L1 cache latency is about two instructions. */
33824 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
33825 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
33826 *total += COSTS_N_INSNS (100);
33827 return true;
33828
33829 case LABEL_REF:
33830 *total = 0;
33831 return true;
33832
33833 case PLUS:
33834 case MINUS:
33835 if (FLOAT_MODE_P (mode))
33836 *total = rs6000_cost->fp;
33837 else
33838 *total = COSTS_N_INSNS (1);
33839 return false;
33840
33841 case MULT:
33842 if (GET_CODE (XEXP (x, 1)) == CONST_INT
33843 && satisfies_constraint_I (XEXP (x, 1)))
33844 {
33845 if (INTVAL (XEXP (x, 1)) >= -256
33846 && INTVAL (XEXP (x, 1)) <= 255)
33847 *total = rs6000_cost->mulsi_const9;
33848 else
33849 *total = rs6000_cost->mulsi_const;
33850 }
33851 else if (mode == SFmode)
33852 *total = rs6000_cost->fp;
33853 else if (FLOAT_MODE_P (mode))
33854 *total = rs6000_cost->dmul;
33855 else if (mode == DImode)
33856 *total = rs6000_cost->muldi;
33857 else
33858 *total = rs6000_cost->mulsi;
33859 return false;
33860
33861 case FMA:
33862 if (mode == SFmode)
33863 *total = rs6000_cost->fp;
33864 else
33865 *total = rs6000_cost->dmul;
33866 break;
33867
33868 case DIV:
33869 case MOD:
33870 if (FLOAT_MODE_P (mode))
33871 {
33872 *total = mode == DFmode ? rs6000_cost->ddiv
33873 : rs6000_cost->sdiv;
33874 return false;
33875 }
33876 /* FALLTHRU */
33877
33878 case UDIV:
33879 case UMOD:
33880 if (GET_CODE (XEXP (x, 1)) == CONST_INT
33881 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
33882 {
33883 if (code == DIV || code == MOD)
33884 /* Shift, addze */
33885 *total = COSTS_N_INSNS (2);
33886 else
33887 /* Shift */
33888 *total = COSTS_N_INSNS (1);
33889 }
33890 else
33891 {
33892 if (GET_MODE (XEXP (x, 1)) == DImode)
33893 *total = rs6000_cost->divdi;
33894 else
33895 *total = rs6000_cost->divsi;
33896 }
33897 /* Add in shift and subtract for MOD unless we have a mod instruction. */
33898 if (!TARGET_MODULO && (code == MOD || code == UMOD))
33899 *total += COSTS_N_INSNS (2);
33900 return false;
33901
33902 case CTZ:
33903 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
33904 return false;
33905
33906 case FFS:
33907 *total = COSTS_N_INSNS (4);
33908 return false;
33909
33910 case POPCOUNT:
33911 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
33912 return false;
33913
33914 case PARITY:
33915 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
33916 return false;
33917
33918 case NOT:
33919 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
33920 *total = 0;
33921 else
33922 *total = COSTS_N_INSNS (1);
33923 return false;
33924
33925 case AND:
33926 if (CONST_INT_P (XEXP (x, 1)))
33927 {
33928 rtx left = XEXP (x, 0);
33929 rtx_code left_code = GET_CODE (left);
33930
33931 /* rotate-and-mask: 1 insn. */
33932 if ((left_code == ROTATE
33933 || left_code == ASHIFT
33934 || left_code == LSHIFTRT)
33935 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
33936 {
33937 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
33938 if (!CONST_INT_P (XEXP (left, 1)))
33939 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
33940 *total += COSTS_N_INSNS (1);
33941 return true;
33942 }
33943
33944 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
33945 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
33946 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
33947 || (val & 0xffff) == val
33948 || (val & 0xffff0000) == val
33949 || ((val & 0xffff) == 0 && mode == SImode))
33950 {
33951 *total = rtx_cost (left, mode, AND, 0, speed);
33952 *total += COSTS_N_INSNS (1);
33953 return true;
33954 }
33955
33956 /* 2 insns. */
33957 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
33958 {
33959 *total = rtx_cost (left, mode, AND, 0, speed);
33960 *total += COSTS_N_INSNS (2);
33961 return true;
33962 }
33963 }
33964
33965 *total = COSTS_N_INSNS (1);
33966 return false;
33967
33968 case IOR:
33969 /* FIXME */
33970 *total = COSTS_N_INSNS (1);
33971 return true;
33972
33973 case CLZ:
33974 case XOR:
33975 case ZERO_EXTRACT:
33976 *total = COSTS_N_INSNS (1);
33977 return false;
33978
33979 case ASHIFT:
33980 /* The EXTSWSLI instruction is a combined instruction. Don't count the
33981 sign extend and the shift separately within the insn. */
33982 if (TARGET_EXTSWSLI && mode == DImode
33983 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
33984 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
33985 {
33986 *total = 0;
33987 return false;
33988 }
33989 /* fall through */
33990
33991 case ASHIFTRT:
33992 case LSHIFTRT:
33993 case ROTATE:
33994 case ROTATERT:
33995 /* Handle mul_highpart. */
33996 if (outer_code == TRUNCATE
33997 && GET_CODE (XEXP (x, 0)) == MULT)
33998 {
33999 if (mode == DImode)
34000 *total = rs6000_cost->muldi;
34001 else
34002 *total = rs6000_cost->mulsi;
34003 return true;
34004 }
34005 else if (outer_code == AND)
34006 *total = 0;
34007 else
34008 *total = COSTS_N_INSNS (1);
34009 return false;
34010
34011 case SIGN_EXTEND:
34012 case ZERO_EXTEND:
34013 if (GET_CODE (XEXP (x, 0)) == MEM)
34014 *total = 0;
34015 else
34016 *total = COSTS_N_INSNS (1);
34017 return false;
34018
34019 case COMPARE:
34020 case NEG:
34021 case ABS:
34022 if (!FLOAT_MODE_P (mode))
34023 {
34024 *total = COSTS_N_INSNS (1);
34025 return false;
34026 }
34027 /* FALLTHRU */
34028
34029 case FLOAT:
34030 case UNSIGNED_FLOAT:
34031 case FIX:
34032 case UNSIGNED_FIX:
34033 case FLOAT_TRUNCATE:
34034 *total = rs6000_cost->fp;
34035 return false;
34036
34037 case FLOAT_EXTEND:
34038 if (mode == DFmode)
34039 *total = rs6000_cost->sfdf_convert;
34040 else
34041 *total = rs6000_cost->fp;
34042 return false;
34043
34044 case UNSPEC:
34045 switch (XINT (x, 1))
34046 {
34047 case UNSPEC_FRSP:
34048 *total = rs6000_cost->fp;
34049 return true;
34050
34051 default:
34052 break;
34053 }
34054 break;
34055
34056 case CALL:
34057 case IF_THEN_ELSE:
34058 if (!speed)
34059 {
34060 *total = COSTS_N_INSNS (1);
34061 return true;
34062 }
34063 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34064 {
34065 *total = rs6000_cost->fp;
34066 return false;
34067 }
34068 break;
34069
34070 case NE:
34071 case EQ:
34072 case GTU:
34073 case LTU:
34074 /* Carry bit requires mode == Pmode.
34075 NEG or PLUS already counted so only add one. */
34076 if (mode == Pmode
34077 && (outer_code == NEG || outer_code == PLUS))
34078 {
34079 *total = COSTS_N_INSNS (1);
34080 return true;
34081 }
34082 /* FALLTHRU */
34083
34084 case GT:
34085 case LT:
34086 case UNORDERED:
34087 if (outer_code == SET)
34088 {
34089 if (XEXP (x, 1) == const0_rtx)
34090 {
34091 *total = COSTS_N_INSNS (2);
34092 return true;
34093 }
34094 else
34095 {
34096 *total = COSTS_N_INSNS (3);
34097 return false;
34098 }
34099 }
34100 /* CC COMPARE. */
34101 if (outer_code == COMPARE)
34102 {
34103 *total = 0;
34104 return true;
34105 }
34106 break;
34107
34108 default:
34109 break;
34110 }
34111
34112 return false;
34113 }
34114
34115 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34116
34117 static bool
34118 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34119 int opno, int *total, bool speed)
34120 {
34121 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34122
34123 fprintf (stderr,
34124 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34125 "opno = %d, total = %d, speed = %s, x:\n",
34126 ret ? "complete" : "scan inner",
34127 GET_MODE_NAME (mode),
34128 GET_RTX_NAME (outer_code),
34129 opno,
34130 *total,
34131 speed ? "true" : "false");
34132
34133 debug_rtx (x);
34134
34135 return ret;
34136 }
34137
34138 static int
34139 rs6000_insn_cost (rtx_insn *insn, bool speed)
34140 {
34141 if (recog_memoized (insn) < 0)
34142 return 0;
34143
34144 if (!speed)
34145 return get_attr_length (insn);
34146
34147 int cost = get_attr_cost (insn);
34148 if (cost > 0)
34149 return cost;
34150
34151 int n = get_attr_length (insn) / 4;
34152 enum attr_type type = get_attr_type (insn);
34153
34154 switch (type)
34155 {
34156 case TYPE_LOAD:
34157 case TYPE_FPLOAD:
34158 case TYPE_VECLOAD:
34159 cost = COSTS_N_INSNS (n + 1);
34160 break;
34161
34162 case TYPE_MUL:
34163 switch (get_attr_size (insn))
34164 {
34165 case SIZE_8:
34166 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34167 break;
34168 case SIZE_16:
34169 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34170 break;
34171 case SIZE_32:
34172 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34173 break;
34174 case SIZE_64:
34175 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34176 break;
34177 default:
34178 gcc_unreachable ();
34179 }
34180 break;
34181 case TYPE_DIV:
34182 switch (get_attr_size (insn))
34183 {
34184 case SIZE_32:
34185 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34186 break;
34187 case SIZE_64:
34188 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34189 break;
34190 default:
34191 gcc_unreachable ();
34192 }
34193 break;
34194
34195 case TYPE_FP:
34196 cost = n * rs6000_cost->fp;
34197 break;
34198 case TYPE_DMUL:
34199 cost = n * rs6000_cost->dmul;
34200 break;
34201 case TYPE_SDIV:
34202 cost = n * rs6000_cost->sdiv;
34203 break;
34204 case TYPE_DDIV:
34205 cost = n * rs6000_cost->ddiv;
34206 break;
34207
34208 case TYPE_SYNC:
34209 case TYPE_LOAD_L:
34210 case TYPE_MFCR:
34211 case TYPE_MFCRF:
34212 cost = COSTS_N_INSNS (n + 2);
34213 break;
34214
34215 default:
34216 cost = COSTS_N_INSNS (n);
34217 }
34218
34219 return cost;
34220 }
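
/* For example, an 8-byte (two instruction) load sequence has n == 2
   and type TYPE_LOAD, so its speed cost above is COSTS_N_INSNS (3),
   reflecting roughly one extra cycle of load latency.  */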
34221
34222 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34223
34224 static int
34225 rs6000_debug_address_cost (rtx x, machine_mode mode,
34226 addr_space_t as, bool speed)
34227 {
34228 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34229
34230 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34231 ret, speed ? "true" : "false");
34232 debug_rtx (x);
34233
34234 return ret;
34235 }
34236
34237
34238 /* A C expression returning the cost of moving data from a register of class
34239 FROM to one of class TO. */
34240
34241 static int
34242 rs6000_register_move_cost (machine_mode mode,
34243 reg_class_t from, reg_class_t to)
34244 {
34245 int ret;
34246
34247 if (TARGET_DEBUG_COST)
34248 dbg_cost_ctrl++;
34249
34250 /* Moves from/to GENERAL_REGS. */
34251 if (reg_classes_intersect_p (to, GENERAL_REGS)
34252 || reg_classes_intersect_p (from, GENERAL_REGS))
34253 {
34254 reg_class_t rclass = from;
34255
34256 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34257 rclass = to;
34258
34259 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34260 ret = (rs6000_memory_move_cost (mode, rclass, false)
34261 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34262
34263 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34264 shift. */
34265 else if (rclass == CR_REGS)
34266 ret = 4;
34267
34268 /* For those processors that have slow LR/CTR moves, make them more
34269 expensive than memory in order to bias spills to memory.  */
34270 else if ((rs6000_tune == PROCESSOR_POWER6
34271 || rs6000_tune == PROCESSOR_POWER7
34272 || rs6000_tune == PROCESSOR_POWER8
34273 || rs6000_tune == PROCESSOR_POWER9)
34274 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34275 ret = 6 * hard_regno_nregs (0, mode);
34276
34277 else
34278 /* A move will cost one instruction per GPR moved. */
34279 ret = 2 * hard_regno_nregs (0, mode);
34280 }
34281
34282 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34283 else if (VECTOR_MEM_VSX_P (mode)
34284 && reg_classes_intersect_p (to, VSX_REGS)
34285 && reg_classes_intersect_p (from, VSX_REGS))
34286 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34287
34288 /* Moving between two similar registers is just one instruction. */
34289 else if (reg_classes_intersect_p (to, from))
34290 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34291
34292 /* Everything else has to go through GENERAL_REGS. */
34293 else
34294 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34295 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34296
34297 if (TARGET_DEBUG_COST)
34298 {
34299 if (dbg_cost_ctrl == 1)
34300 fprintf (stderr,
34301 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34302 ret, GET_MODE_NAME (mode), reg_class_names[from],
34303 reg_class_names[to]);
34304 dbg_cost_ctrl--;
34305 }
34306
34307 return ret;
34308 }
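
/* For example, a GPR-to-GPR move of DImode on a 32-bit target needs
   two registers, so the default arm above returns 2 * 2 == 4; a move
   through CR_REGS instead costs 4 because of the extra shift.  */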
34309
34310 /* A C expression returning the cost of moving data of MODE from a register to
34311 or from memory. */
34312
34313 static int
34314 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34315 bool in ATTRIBUTE_UNUSED)
34316 {
34317 int ret;
34318
34319 if (TARGET_DEBUG_COST)
34320 dbg_cost_ctrl++;
34321
34322 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34323 ret = 4 * hard_regno_nregs (0, mode);
34324 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34325 || reg_classes_intersect_p (rclass, VSX_REGS)))
34326 ret = 4 * hard_regno_nregs (32, mode);
34327 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34328 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34329 else
34330 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34331
34332 if (TARGET_DEBUG_COST)
34333 {
34334 if (dbg_cost_ctrl == 1)
34335 fprintf (stderr,
34336 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34337 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34338 dbg_cost_ctrl--;
34339 }
34340
34341 return ret;
34342 }
34343
34344 /* Return the decl of a target-specific builtin that implements the
34345 reciprocal of the function FNDECL, or NULL_TREE if not available. */
34346
34347 static tree
34348 rs6000_builtin_reciprocal (tree fndecl)
34349 {
34350 switch (DECL_FUNCTION_CODE (fndecl))
34351 {
34352 case VSX_BUILTIN_XVSQRTDP:
34353 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34354 return NULL_TREE;
34355
34356 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34357
34358 case VSX_BUILTIN_XVSQRTSP:
34359 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34360 return NULL_TREE;
34361
34362 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34363
34364 default:
34365 return NULL_TREE;
34366 }
34367 }
34368
34369 /* Load up a constant. If the mode is a vector mode, splat the value across
34370 all of the vector elements. */
34371
34372 static rtx
34373 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34374 {
34375 rtx reg;
34376
34377 if (mode == SFmode || mode == DFmode)
34378 {
34379 rtx d = const_double_from_real_value (dconst, mode);
34380 reg = force_reg (mode, d);
34381 }
34382 else if (mode == V4SFmode)
34383 {
34384 rtx d = const_double_from_real_value (dconst, SFmode);
34385 rtvec v = gen_rtvec (4, d, d, d, d);
34386 reg = gen_reg_rtx (mode);
34387 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34388 }
34389 else if (mode == V2DFmode)
34390 {
34391 rtx d = const_double_from_real_value (dconst, DFmode);
34392 rtvec v = gen_rtvec (2, d, d);
34393 reg = gen_reg_rtx (mode);
34394 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34395 }
34396 else
34397 gcc_unreachable ();
34398
34399 return reg;
34400 }
34401
34402 /* Generate an FMA instruction. */
34403
34404 static void
34405 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34406 {
34407 machine_mode mode = GET_MODE (target);
34408 rtx dst;
34409
34410 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34411 gcc_assert (dst != NULL);
34412
34413 if (dst != target)
34414 emit_move_insn (target, dst);
34415 }
34416
34417 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34418
34419 static void
34420 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34421 {
34422 machine_mode mode = GET_MODE (dst);
34423 rtx r;
34424
34425 /* This is a tad more complicated, since the fnma_optab is for
34426 a different expression: fma(-m1, m2, a), which is the same
34427 thing except in the case of signed zeros.
34428
34429 Fortunately we know that if FMA is supported that FNMSUB is
34430 also supported in the ISA. Just expand it directly. */
34431
34432 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34433
34434 r = gen_rtx_NEG (mode, a);
34435 r = gen_rtx_FMA (mode, m1, m2, r);
34436 r = gen_rtx_NEG (mode, r);
34437 emit_insn (gen_rtx_SET (dst, r));
34438 }
34439
34440 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34441 add a reg_note saying that this was a division. Support both scalar and
34442 vector divide. Assumes no trapping math and finite arguments. */
34443
34444 void
34445 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34446 {
34447 machine_mode mode = GET_MODE (dst);
34448 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34449 int i;
34450
34451 /* Low precision estimates guarantee 5 bits of accuracy. High
34452 precision estimates guarantee 14 bits of accuracy. SFmode
34453 requires 23 bits of accuracy. DFmode requires 52 bits of
34454 accuracy. Each pass at least doubles the accuracy, leading
34455 to the following. */
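
/* A sketch of why each pass at least doubles the accuracy: write the
   current estimate as x_i = (1 - e)/d. Then e_i = 1 - d*x_i = e and
   x_(i+1) = x_i + e_i*x_i = (1 - e)*(1 + e)/d = (1 - e*e)/d, so the
   relative error is squared on every pass.  */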
34456 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34457 if (mode == DFmode || mode == V2DFmode)
34458 passes++;
34459
34460 enum insn_code code = optab_handler (smul_optab, mode);
34461 insn_gen_fn gen_mul = GEN_FCN (code);
34462
34463 gcc_assert (code != CODE_FOR_nothing);
34464
34465 one = rs6000_load_constant_and_splat (mode, dconst1);
34466
34467 /* x0 = 1./d estimate */
34468 x0 = gen_reg_rtx (mode);
34469 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34470 UNSPEC_FRES)));
34471
34472 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34473 if (passes > 1) {
34474
34475 /* e0 = 1. - d * x0 */
34476 e0 = gen_reg_rtx (mode);
34477 rs6000_emit_nmsub (e0, d, x0, one);
34478
34479 /* x1 = x0 + e0 * x0 */
34480 x1 = gen_reg_rtx (mode);
34481 rs6000_emit_madd (x1, e0, x0, x0);
34482
34483 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34484 ++i, xprev = xnext, eprev = enext) {
34485
34486 /* enext = eprev * eprev */
34487 enext = gen_reg_rtx (mode);
34488 emit_insn (gen_mul (enext, eprev, eprev));
34489
34490 /* xnext = xprev + enext * xprev */
34491 xnext = gen_reg_rtx (mode);
34492 rs6000_emit_madd (xnext, enext, xprev, xprev);
34493 }
34494
34495 } else
34496 xprev = x0;
34497
34498 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34499
34500 /* u = n * xprev */
34501 u = gen_reg_rtx (mode);
34502 emit_insn (gen_mul (u, n, xprev));
34503
34504 /* v = n - (d * u) */
34505 v = gen_reg_rtx (mode);
34506 rs6000_emit_nmsub (v, d, u, n);
34507
34508 /* dst = (v * xprev) + u */
34509 rs6000_emit_madd (dst, v, xprev, u);
34510
34511 if (note_p)
34512 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
34513 }
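
/* A standalone sketch, not part of the build, of the sequence emitted
   above, in plain double arithmetic.  The crude 5-bit rounding below is
   a hypothetical stand-in for the fres/xsredp hardware estimate; compile
   separately with e.g. "cc -std=c99 demo.c -lm".  */
#if 0
#include <math.h>
#include <stdio.h>

static double
swdiv (double n, double d)
{
  int passes = 4;				/* 3, plus 1 for DFmode */
  double x = nearbyint ((1.0 / d) * 32.0) / 32.0; /* x0 ~ 1/d, ~5 bits */
  double e = fma (-d, x, 1.0);			/* e0 = 1. - d * x0 */
  x = fma (e, x, x);				/* x1 = x0 + e0 * x0 */
  for (int i = 0; i < passes - 2; i++)
    {
      e = e * e;				/* enext = eprev * eprev */
      x = fma (e, x, x);			/* xnext = xprev + enext * xprev */
    }
  double u = n * x;				/* u = n * xprev */
  double v = fma (-d, u, n);			/* v = n - (d * u) */
  return fma (v, x, u);				/* dst = (v * xprev) + u */
}

int
main (void)
{
  printf ("%.17g\n", swdiv (1.0, 3.0));		/* ~1/3 to full precision */
  return 0;
}
#endif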
34514
34515 /* Goldschmidt's Algorithm for single/double-precision floating point
34516 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34517
34518 void
34519 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34520 {
34521 machine_mode mode = GET_MODE (src);
34522 rtx e = gen_reg_rtx (mode);
34523 rtx g = gen_reg_rtx (mode);
34524 rtx h = gen_reg_rtx (mode);
34525
34526 /* Low precision estimates guarantee 5 bits of accuracy. High
34527 precision estimates guarantee 14 bits of accuracy. SFmode
34528 requires 23 bits of accuracy. DFmode requires 52 bits of
34529 accuracy. Each pass at least doubles the accuracy, leading
 34530      to the following (e.g. 5 bits reach 40 after three passes, covering
 34530      SFmode's 23; DFmode's 52 needs one pass more).  */
34531 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34532 if (mode == DFmode || mode == V2DFmode)
34533 passes++;
34534
34535 int i;
34536 rtx mhalf;
34537 enum insn_code code = optab_handler (smul_optab, mode);
34538 insn_gen_fn gen_mul = GEN_FCN (code);
34539
34540 gcc_assert (code != CODE_FOR_nothing);
34541
34542 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34543
34544 /* e = rsqrt estimate */
34545 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34546 UNSPEC_RSQRT)));
34547
 34548   /* If (src == 0.0), the rsqrt estimate E is Inf; filter it to 0 so that G = E * SRC below is 0, not Inf * 0 = NaN, and sqrt(0.0) comes out 0.  */
34549 if (!recip)
34550 {
34551 rtx zero = force_reg (mode, CONST0_RTX (mode));
34552
34553 if (mode == SFmode)
34554 {
34555 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34556 e, zero, mode, 0);
34557 if (target != e)
34558 emit_move_insn (e, target);
34559 }
34560 else
34561 {
34562 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34563 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34564 }
34565 }
34566
34567 /* g = sqrt estimate. */
34568 emit_insn (gen_mul (g, e, src));
34569 /* h = 1/(2*sqrt) estimate. */
34570 emit_insn (gen_mul (h, e, mhalf));
34571
34572 if (recip)
34573 {
34574 if (passes == 1)
34575 {
34576 rtx t = gen_reg_rtx (mode);
34577 rs6000_emit_nmsub (t, g, h, mhalf);
 34578 	  /* Apply the correction directly to the rsqrt estimate E.  */
34579 rs6000_emit_madd (dst, e, t, e);
34580 }
34581 else
34582 {
34583 for (i = 0; i < passes; i++)
34584 {
34585 rtx t1 = gen_reg_rtx (mode);
34586 rtx g1 = gen_reg_rtx (mode);
34587 rtx h1 = gen_reg_rtx (mode);
34588
34589 rs6000_emit_nmsub (t1, g, h, mhalf);
34590 rs6000_emit_madd (g1, g, t1, g);
34591 rs6000_emit_madd (h1, h, t1, h);
34592
34593 g = g1;
34594 h = h1;
34595 }
 34596 	  /* Multiply by 2: H approximates 1/(2*sqrt), so 2*H is rsqrt.  */
34597 emit_insn (gen_add3_insn (dst, h, h));
34598 }
34599 }
34600 else
34601 {
34602 rtx t = gen_reg_rtx (mode);
34603 rs6000_emit_nmsub (t, g, h, mhalf);
34604 rs6000_emit_madd (dst, g, t, g);
34605 }
34606
34607 return;
34608 }
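
/* A standalone sketch, not part of the build, of the Goldschmidt
   iteration emitted above for the rsqrt case, in plain double
   arithmetic.  The crude rounding is a hypothetical stand-in for the
   frsqrte estimate.  */
#if 0
#include <math.h>
#include <stdio.h>

static double
swrsqrt (double src)
{
  int passes = 4;			/* 3, plus 1 for DFmode */
  double e = nearbyint ((1.0 / sqrt (src)) * 32.0) / 32.0; /* ~5 bits */
  double g = e * src;			/* g ~ sqrt(src) */
  double h = e * 0.5;			/* h ~ 1/(2*sqrt(src)) */
  for (int i = 0; i < passes; i++)
    {
      double t = fma (-g, h, 0.5);	/* t = 1/2 - g*h */
      g = fma (g, t, g);		/* g = g + g*t */
      h = fma (h, t, h);		/* h = h + h*t */
    }
  return h + h;				/* 2*h ~ 1/sqrt(src) */
}

int
main (void)
{
  printf ("%.17g vs %.17g\n", swrsqrt (2.0), 1.0 / sqrt (2.0));
  return 0;
}
#endif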
34609
34610 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34611 (Power7) targets. DST is the target, and SRC is the argument operand. */
34612
34613 void
34614 rs6000_emit_popcount (rtx dst, rtx src)
34615 {
34616 machine_mode mode = GET_MODE (dst);
34617 rtx tmp1, tmp2;
34618
34619 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
34620 if (TARGET_POPCNTD)
34621 {
34622 if (mode == SImode)
34623 emit_insn (gen_popcntdsi2 (dst, src));
34624 else
34625 emit_insn (gen_popcntddi2 (dst, src));
34626 return;
34627 }
34628
34629 tmp1 = gen_reg_rtx (mode);
34630
34631 if (mode == SImode)
34632 {
34633 emit_insn (gen_popcntbsi2 (tmp1, src));
34634 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
34635 NULL_RTX, 0);
34636 tmp2 = force_reg (SImode, tmp2);
34637 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
34638 }
34639 else
34640 {
34641 emit_insn (gen_popcntbdi2 (tmp1, src));
34642 tmp2 = expand_mult (DImode, tmp1,
34643 GEN_INT ((HOST_WIDE_INT)
34644 0x01010101 << 32 | 0x01010101),
34645 NULL_RTX, 0);
34646 tmp2 = force_reg (DImode, tmp2);
34647 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
34648 }
34649 }
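
/* A standalone sketch, not part of the build, of the fallback sequence
   above: popcntb leaves each byte's population count in that byte, the
   multiply by 0x01010101 accumulates all four counts into the top byte,
   and the shift extracts it.  The helper emulates what the insn does.  */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t
popcntb (uint32_t x)
{
  uint32_t r = 0;
  for (int b = 0; b < 4; b++)
    {
      uint32_t byte = (x >> (8 * b)) & 0xff, n = 0;
      for (; byte; byte >>= 1)
	n += byte & 1;
      r |= n << (8 * b);
    }
  return r;
}

int
main (void)
{
  uint32_t src = 0xf00f0ff0;
  uint32_t tmp = popcntb (src);			/* per-byte counts */
  uint32_t dst = (tmp * 0x01010101u) >> 24;	/* sum lands in top byte */
  printf ("popcount(0x%08x) = %u\n", src, dst);	/* prints 16 */
  return 0;
}
#endif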
34650
34651
34652 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
34653 target, and SRC is the argument operand. */
34654
34655 void
34656 rs6000_emit_parity (rtx dst, rtx src)
34657 {
34658 machine_mode mode = GET_MODE (dst);
34659 rtx tmp;
34660
34661 tmp = gen_reg_rtx (mode);
34662
34663 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
34664 if (TARGET_CMPB)
34665 {
34666 if (mode == SImode)
34667 {
34668 emit_insn (gen_popcntbsi2 (tmp, src));
34669 emit_insn (gen_paritysi2_cmpb (dst, tmp));
34670 }
34671 else
34672 {
34673 emit_insn (gen_popcntbdi2 (tmp, src));
34674 emit_insn (gen_paritydi2_cmpb (dst, tmp));
34675 }
34676 return;
34677 }
34678
34679 if (mode == SImode)
34680 {
34681 /* Is mult+shift >= shift+xor+shift+xor? */
34682 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
34683 {
34684 rtx tmp1, tmp2, tmp3, tmp4;
34685
34686 tmp1 = gen_reg_rtx (SImode);
34687 emit_insn (gen_popcntbsi2 (tmp1, src));
34688
34689 tmp2 = gen_reg_rtx (SImode);
34690 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
34691 tmp3 = gen_reg_rtx (SImode);
34692 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
34693
34694 tmp4 = gen_reg_rtx (SImode);
34695 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
34696 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
34697 }
34698 else
34699 rs6000_emit_popcount (tmp, src);
34700 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
34701 }
34702 else
34703 {
34704 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
34705 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
34706 {
34707 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
34708
34709 tmp1 = gen_reg_rtx (DImode);
34710 emit_insn (gen_popcntbdi2 (tmp1, src));
34711
34712 tmp2 = gen_reg_rtx (DImode);
34713 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
34714 tmp3 = gen_reg_rtx (DImode);
34715 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
34716
34717 tmp4 = gen_reg_rtx (DImode);
34718 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
34719 tmp5 = gen_reg_rtx (DImode);
34720 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
34721
34722 tmp6 = gen_reg_rtx (DImode);
34723 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
34724 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
34725 }
34726 else
34727 rs6000_emit_popcount (tmp, src);
34728 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
34729 }
34730 }
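
/* A standalone sketch, not part of the build, of the SImode shift/xor
   fallback above.  The prefix fold stands in for popcntbsi2: it leaves
   each byte's parity in bit 0 of that byte, which is all the later
   folds and the final mask look at.  */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint32_t src = 0x80000001;	/* two bits set => even parity */
  uint32_t t = src ^ (src >> 1);
  t ^= t >> 2;
  t ^= t >> 4;			/* per-byte parity now in bit 0 of each byte */
  t ^= t >> 16;			/* the two xor/shift folds from the code above */
  t ^= t >> 8;
  printf ("parity = %u\n", t & 1);	/* prints 0 */
  return 0;
}
#endif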
34731
34732 /* Expand an Altivec constant permutation for little endian mode.
34733 OP0 and OP1 are the input vectors and TARGET is the output vector.
34734 SEL specifies the constant permutation vector.
34735
34736 There are two issues: First, the two input operands must be
34737 swapped so that together they form a double-wide array in LE
34738 order. Second, the vperm instruction has surprising behavior
34739 in LE mode: it interprets the elements of the source vectors
34740 in BE mode ("left to right") and interprets the elements of
34741 the destination vector in LE mode ("right to left"). To
34742 correct for this, we must subtract each element of the permute
34743 control vector from 31.
34744
34745 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
34746 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
34747 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
34748 serve as the permute control vector. Then, in BE mode,
34749
34750 vperm 9,10,11,12
34751
34752 places the desired result in vr9. However, in LE mode the
34753 vector contents will be
34754
34755 vr10 = 00000003 00000002 00000001 00000000
34756 vr11 = 00000007 00000006 00000005 00000004
34757
34758 The result of the vperm using the same permute control vector is
34759
34760 vr9 = 05000000 07000000 01000000 03000000
34761
34762 That is, the leftmost 4 bytes of vr10 are interpreted as the
34763 source for the rightmost 4 bytes of vr9, and so on.
34764
34765 If we change the permute control vector to
34766
 34767      vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
34768
34769 and issue
34770
34771 vperm 9,11,10,12
34772
34773 we get the desired
34774
34775 vr9 = 00000006 00000004 00000002 00000000. */
34776
34777 static void
34778 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
34779 const vec_perm_indices &sel)
34780 {
34781 unsigned int i;
34782 rtx perm[16];
34783 rtx constv, unspec;
34784
34785 /* Unpack and adjust the constant selector. */
34786 for (i = 0; i < 16; ++i)
34787 {
34788 unsigned int elt = 31 - (sel[i] & 31);
34789 perm[i] = GEN_INT (elt);
34790 }
34791
34792 /* Expand to a permute, swapping the inputs and using the
34793 adjusted selector. */
34794 if (!REG_P (op0))
34795 op0 = force_reg (V16QImode, op0);
34796 if (!REG_P (op1))
34797 op1 = force_reg (V16QImode, op1);
34798
34799 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
34800 constv = force_reg (V16QImode, constv);
34801 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
34802 UNSPEC_VPERM);
34803 if (!REG_P (target))
34804 {
34805 rtx tmp = gen_reg_rtx (V16QImode);
34806 emit_move_insn (tmp, unspec);
34807 unspec = tmp;
34808 }
34809
34810 emit_move_insn (target, unspec);
34811 }
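
/* A standalone sketch, not part of the build, checking the adjustment
   above: with the two inputs swapped and every selector element
   replaced by 31 - element, a BE-semantics vperm produces the LE
   result.  Here sel picks the even bytes of op0:op1 in LE numbering.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned char op0[16], op1[16], sel[16], res[16];
  for (int i = 0; i < 16; i++)
    {
      op0[i] = i;		/* first input, LE element order */
      op1[i] = 16 + i;		/* second input */
      sel[i] = 2 * i;		/* desired result: 0 2 4 ... 30 */
    }
  for (int i = 0; i < 16; i++)
    {
      unsigned char c = 31 - sel[i];	/* adjusted control element */
      /* A BE vperm over the swapped pair op1:op0 reads element c
	 "left to right", i.e. LE element 15 - c of op1 when c < 16,
	 else LE element 31 - c of op0.  */
      res[i] = c < 16 ? op1[15 - c] : op0[31 - c];
    }
  for (int i = 0; i < 16; i++)
    printf ("%d%c", res[i], i == 15 ? '\n' : ' ');
  return 0;
}
#endif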
34812
34813 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
34814 permute control vector. But here it's not a constant, so we must
34815 generate a vector NAND or NOR to do the adjustment. */
34816
34817 void
34818 altivec_expand_vec_perm_le (rtx operands[4])
34819 {
34820 rtx notx, iorx, unspec;
34821 rtx target = operands[0];
34822 rtx op0 = operands[1];
34823 rtx op1 = operands[2];
34824 rtx sel = operands[3];
34825 rtx tmp = target;
34826 rtx norreg = gen_reg_rtx (V16QImode);
34827 machine_mode mode = GET_MODE (target);
34828
34829 /* Get everything in regs so the pattern matches. */
34830 if (!REG_P (op0))
34831 op0 = force_reg (mode, op0);
34832 if (!REG_P (op1))
34833 op1 = force_reg (mode, op1);
34834 if (!REG_P (sel))
34835 sel = force_reg (V16QImode, sel);
34836 if (!REG_P (target))
34837 tmp = gen_reg_rtx (mode);
34838
34839 if (TARGET_P9_VECTOR)
34840 {
34841 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
34842 UNSPEC_VPERMR);
34843 }
34844 else
34845 {
34846 /* Invert the selector with a VNAND if available, else a VNOR.
34847 The VNAND is preferred for future fusion opportunities. */
34848 notx = gen_rtx_NOT (V16QImode, sel);
34849 iorx = (TARGET_P8_VECTOR
34850 ? gen_rtx_IOR (V16QImode, notx, notx)
34851 : gen_rtx_AND (V16QImode, notx, notx));
34852 emit_insn (gen_rtx_SET (norreg, iorx));
34853
34854 /* Permute with operands reversed and adjusted selector. */
34855 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
34856 UNSPEC_VPERM);
34857 }
34858
34859 /* Copy into target, possibly by way of a register. */
34860 if (!REG_P (target))
34861 {
34862 emit_move_insn (tmp, unspec);
34863 unspec = tmp;
34864 }
34865
34866 emit_move_insn (target, unspec);
34867 }
34868
34869 /* Expand an Altivec constant permutation. Return true if we match
34870 an efficient implementation; false to fall back to VPERM.
34871
34872 OP0 and OP1 are the input vectors and TARGET is the output vector.
34873 SEL specifies the constant permutation vector. */
34874
34875 static bool
34876 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
34877 const vec_perm_indices &sel)
34878 {
34879 struct altivec_perm_insn {
34880 HOST_WIDE_INT mask;
34881 enum insn_code impl;
34882 unsigned char perm[16];
34883 };
34884 static const struct altivec_perm_insn patterns[] = {
34885 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
34886 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
34887 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
34888 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
34889 { OPTION_MASK_ALTIVEC,
34890 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
34891 : CODE_FOR_altivec_vmrglb_direct),
34892 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
34893 { OPTION_MASK_ALTIVEC,
34894 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
34895 : CODE_FOR_altivec_vmrglh_direct),
34896 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
34897 { OPTION_MASK_ALTIVEC,
34898 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
34899 : CODE_FOR_altivec_vmrglw_direct),
34900 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
34901 { OPTION_MASK_ALTIVEC,
34902 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
34903 : CODE_FOR_altivec_vmrghb_direct),
34904 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
34905 { OPTION_MASK_ALTIVEC,
34906 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
34907 : CODE_FOR_altivec_vmrghh_direct),
34908 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
34909 { OPTION_MASK_ALTIVEC,
34910 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
34911 : CODE_FOR_altivec_vmrghw_direct),
34912 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
34913 { OPTION_MASK_P8_VECTOR,
34914 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
34915 : CODE_FOR_p8_vmrgow_v4sf_direct),
34916 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
34917 { OPTION_MASK_P8_VECTOR,
34918 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
34919 : CODE_FOR_p8_vmrgew_v4sf_direct),
34920 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
34921 };
34922
34923 unsigned int i, j, elt, which;
34924 unsigned char perm[16];
34925 rtx x;
34926 bool one_vec;
34927
34928 /* Unpack the constant selector. */
34929 for (i = which = 0; i < 16; ++i)
34930 {
34931 elt = sel[i] & 31;
34932 which |= (elt < 16 ? 1 : 2);
34933 perm[i] = elt;
34934 }
34935
34936 /* Simplify the constant selector based on operands. */
34937 switch (which)
34938 {
34939 default:
34940 gcc_unreachable ();
34941
34942 case 3:
34943 one_vec = false;
34944 if (!rtx_equal_p (op0, op1))
34945 break;
34946 /* FALLTHRU */
34947
34948 case 2:
34949 for (i = 0; i < 16; ++i)
34950 perm[i] &= 15;
34951 op0 = op1;
34952 one_vec = true;
34953 break;
34954
34955 case 1:
34956 op1 = op0;
34957 one_vec = true;
34958 break;
34959 }
34960
34961 /* Look for splat patterns. */
34962 if (one_vec)
34963 {
34964 elt = perm[0];
34965
34966 for (i = 0; i < 16; ++i)
34967 if (perm[i] != elt)
34968 break;
34969 if (i == 16)
34970 {
34971 if (!BYTES_BIG_ENDIAN)
34972 elt = 15 - elt;
34973 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
34974 return true;
34975 }
34976
34977 if (elt % 2 == 0)
34978 {
34979 for (i = 0; i < 16; i += 2)
34980 if (perm[i] != elt || perm[i + 1] != elt + 1)
34981 break;
34982 if (i == 16)
34983 {
34984 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
34985 x = gen_reg_rtx (V8HImode);
34986 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
34987 GEN_INT (field)));
34988 emit_move_insn (target, gen_lowpart (V16QImode, x));
34989 return true;
34990 }
34991 }
34992
34993 if (elt % 4 == 0)
34994 {
34995 for (i = 0; i < 16; i += 4)
34996 if (perm[i] != elt
34997 || perm[i + 1] != elt + 1
34998 || perm[i + 2] != elt + 2
34999 || perm[i + 3] != elt + 3)
35000 break;
35001 if (i == 16)
35002 {
35003 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35004 x = gen_reg_rtx (V4SImode);
35005 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35006 GEN_INT (field)));
35007 emit_move_insn (target, gen_lowpart (V16QImode, x));
35008 return true;
35009 }
35010 }
35011 }
35012
35013 /* Look for merge and pack patterns. */
35014 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35015 {
35016 bool swapped;
35017
35018 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35019 continue;
35020
35021 elt = patterns[j].perm[0];
35022 if (perm[0] == elt)
35023 swapped = false;
35024 else if (perm[0] == elt + 16)
35025 swapped = true;
35026 else
35027 continue;
35028 for (i = 1; i < 16; ++i)
35029 {
35030 elt = patterns[j].perm[i];
35031 if (swapped)
35032 elt = (elt >= 16 ? elt - 16 : elt + 16);
35033 else if (one_vec && elt >= 16)
35034 elt -= 16;
35035 if (perm[i] != elt)
35036 break;
35037 }
35038 if (i == 16)
35039 {
35040 enum insn_code icode = patterns[j].impl;
35041 machine_mode omode = insn_data[icode].operand[0].mode;
35042 machine_mode imode = insn_data[icode].operand[1].mode;
35043
 35044 	  /* For little-endian, don't use vpkuwum and vpkuhum if the
 35045 	     underlying vector type is not V4SI and V8HI, respectively.
 35046 	     For example, using vpkuwum with a V8HI picks up the even
 35047 	     halfwords in BE numbering (i.e. the odd halfwords in LE
 35048 	     numbering) when the even halfwords in LE numbering are what we need.  */
35049 if (!BYTES_BIG_ENDIAN
35050 && icode == CODE_FOR_altivec_vpkuwum_direct
35051 && ((GET_CODE (op0) == REG
35052 && GET_MODE (op0) != V4SImode)
35053 || (GET_CODE (op0) == SUBREG
35054 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35055 continue;
35056 if (!BYTES_BIG_ENDIAN
35057 && icode == CODE_FOR_altivec_vpkuhum_direct
35058 && ((GET_CODE (op0) == REG
35059 && GET_MODE (op0) != V8HImode)
35060 || (GET_CODE (op0) == SUBREG
35061 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35062 continue;
35063
35064 /* For little-endian, the two input operands must be swapped
35065 (or swapped back) to ensure proper right-to-left numbering
35066 from 0 to 2N-1. */
35067 if (swapped ^ !BYTES_BIG_ENDIAN)
35068 std::swap (op0, op1);
35069 if (imode != V16QImode)
35070 {
35071 op0 = gen_lowpart (imode, op0);
35072 op1 = gen_lowpart (imode, op1);
35073 }
35074 if (omode == V16QImode)
35075 x = target;
35076 else
35077 x = gen_reg_rtx (omode);
35078 emit_insn (GEN_FCN (icode) (x, op0, op1));
35079 if (omode != V16QImode)
35080 emit_move_insn (target, gen_lowpart (V16QImode, x));
35081 return true;
35082 }
35083 }
35084
35085 if (!BYTES_BIG_ENDIAN)
35086 {
35087 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35088 return true;
35089 }
35090
35091 return false;
35092 }
35093
35094 /* Expand a VSX Permute Doubleword constant permutation.
35095 Return true if we match an efficient implementation. */
35096
35097 static bool
35098 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35099 unsigned char perm0, unsigned char perm1)
35100 {
35101 rtx x;
35102
35103 /* If both selectors come from the same operand, fold to single op. */
35104 if ((perm0 & 2) == (perm1 & 2))
35105 {
35106 if (perm0 & 2)
35107 op0 = op1;
35108 else
35109 op1 = op0;
35110 }
35111 /* If both operands are equal, fold to simpler permutation. */
35112 if (rtx_equal_p (op0, op1))
35113 {
35114 perm0 = perm0 & 1;
35115 perm1 = (perm1 & 1) + 2;
35116 }
35117 /* If the first selector comes from the second operand, swap. */
35118 else if (perm0 & 2)
35119 {
35120 if (perm1 & 2)
35121 return false;
35122 perm0 -= 2;
35123 perm1 += 2;
35124 std::swap (op0, op1);
35125 }
35126 /* If the second selector does not come from the second operand, fail. */
35127 else if ((perm1 & 2) == 0)
35128 return false;
35129
35130 /* Success! */
35131 if (target != NULL)
35132 {
35133 machine_mode vmode, dmode;
35134 rtvec v;
35135
35136 vmode = GET_MODE (target);
35137 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35138 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35139 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35140 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35141 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35142 emit_insn (gen_rtx_SET (target, x));
35143 }
35144 return true;
35145 }
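
/* A standalone sketch, not part of the build, of the folding above.
   perm0/perm1 in 0..3 index the doublewords of op0:op1; pointer
   equality stands in for rtx_equal_p, and the failing cases are
   omitted since this input does not reach them.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const double a[2] = { 10.0, 11.0 }, b[2] = { 20.0, 21.0 };
  const double *op0 = a, *op1 = b;
  unsigned char perm0 = 3, perm1 = 1;	/* want { op1[1], op0[1] } */

  if ((perm0 & 2) == (perm1 & 2))	/* both from one operand: fold */
    {
      if (perm0 & 2)
	op0 = op1;
      else
	op1 = op0;
    }
  if (op0 == op1)			/* equal operands: simplify */
    {
      perm0 &= 1;
      perm1 = (perm1 & 1) + 2;
    }
  else if (perm0 & 2)			/* first from second operand: swap */
    {
      const double *t = op0;
      perm0 -= 2;
      perm1 += 2;
      op0 = op1;
      op1 = t;
    }
  const double cat[4] = { op0[0], op0[1], op1[0], op1[1] };
  printf ("{ %g, %g }\n", cat[perm0], cat[perm1]);	/* { 21, 11 } */
  return 0;
}
#endif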
35146
35147 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35148
35149 static bool
35150 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35151 rtx op1, const vec_perm_indices &sel)
35152 {
35153 bool testing_p = !target;
35154
35155 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35156 if (TARGET_ALTIVEC && testing_p)
35157 return true;
35158
35159 /* Check for ps_merge* or xxpermdi insns. */
35160 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35161 {
35162 if (testing_p)
35163 {
35164 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35165 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35166 }
35167 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35168 return true;
35169 }
35170
35171 if (TARGET_ALTIVEC)
35172 {
35173 /* Force the target-independent code to lower to V16QImode. */
35174 if (vmode != V16QImode)
35175 return false;
35176 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35177 return true;
35178 }
35179
35180 return false;
35181 }
35182
35183 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35184 OP0 and OP1 are the input vectors and TARGET is the output vector.
35185 PERM specifies the constant permutation vector. */
35186
35187 static void
35188 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35189 machine_mode vmode, const vec_perm_builder &perm)
35190 {
35191 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35192 if (x != target)
35193 emit_move_insn (target, x);
35194 }
35195
35196 /* Expand an extract even operation. */
35197
35198 void
35199 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35200 {
35201 machine_mode vmode = GET_MODE (target);
35202 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35203 vec_perm_builder perm (nelt, nelt, 1);
35204
35205 for (i = 0; i < nelt; i++)
35206 perm.quick_push (i * 2);
35207
35208 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35209 }
35210
35211 /* Expand a vector interleave operation. */
35212
35213 void
35214 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35215 {
35216 machine_mode vmode = GET_MODE (target);
35217 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35218 vec_perm_builder perm (nelt, nelt, 1);
35219
35220 high = (highp ? 0 : nelt / 2);
35221 for (i = 0; i < nelt / 2; i++)
35222 {
35223 perm.quick_push (i + high);
35224 perm.quick_push (i + nelt + high);
35225 }
35226
35227 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35228 }
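
/* A standalone sketch, not part of the build, of the index vectors the
   two expanders above push for a 4-element vector: elements 0..3 name
   op0's lanes, 4..7 name op1's.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned nelt = 4, perm[4], i, high;

  for (i = 0; i < nelt; i++)	/* extract_even: 0 2 4 6 */
    perm[i] = i * 2;
  for (i = 0; i < nelt; i++)
    printf ("%u ", perm[i]);
  printf ("\n");

  high = 0;			/* interleave with highp: 0 4 1 5 */
  for (i = 0; i < nelt / 2; i++)
    {
      perm[2 * i] = i + high;
      perm[2 * i + 1] = i + nelt + high;
    }
  for (i = 0; i < nelt; i++)
    printf ("%u ", perm[i]);
  printf ("\n");
  return 0;
}
#endif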
35229
 35230 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT.  */
35231 void
35232 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35233 {
35234 HOST_WIDE_INT hwi_scale (scale);
35235 REAL_VALUE_TYPE r_pow;
35236 rtvec v = rtvec_alloc (2);
35237 rtx elt;
35238 rtx scale_vec = gen_reg_rtx (V2DFmode);
35239 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35240 elt = const_double_from_real_value (r_pow, DFmode);
35241 RTVEC_ELT (v, 0) = elt;
35242 RTVEC_ELT (v, 1) = elt;
35243 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35244 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35245 }
35246
35247 /* Return an RTX representing where to find the function value of a
35248 function returning MODE. */
35249 static rtx
35250 rs6000_complex_function_value (machine_mode mode)
35251 {
35252 unsigned int regno;
35253 rtx r1, r2;
35254 machine_mode inner = GET_MODE_INNER (mode);
35255 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35256
35257 if (TARGET_FLOAT128_TYPE
35258 && (mode == KCmode
35259 || (mode == TCmode && TARGET_IEEEQUAD)))
35260 regno = ALTIVEC_ARG_RETURN;
35261
35262 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35263 regno = FP_ARG_RETURN;
35264
35265 else
35266 {
35267 regno = GP_ARG_RETURN;
35268
35269 /* 32-bit is OK since it'll go in r3/r4. */
35270 if (TARGET_32BIT && inner_bytes >= 4)
35271 return gen_rtx_REG (mode, regno);
35272 }
35273
35274 if (inner_bytes >= 8)
35275 return gen_rtx_REG (mode, regno);
35276
35277 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35278 const0_rtx);
35279 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35280 GEN_INT (inner_bytes));
35281 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35282 }
35283
35284 /* Return an rtx describing a return value of MODE as a PARALLEL
35285 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35286 stride REG_STRIDE. */
35287
35288 static rtx
35289 rs6000_parallel_return (machine_mode mode,
35290 int n_elts, machine_mode elt_mode,
35291 unsigned int regno, unsigned int reg_stride)
35292 {
35293 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35294
35295 int i;
35296 for (i = 0; i < n_elts; i++)
35297 {
35298 rtx r = gen_rtx_REG (elt_mode, regno);
35299 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35300 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35301 regno += reg_stride;
35302 }
35303
35304 return par;
35305 }
35306
35307 /* Target hook for TARGET_FUNCTION_VALUE.
35308
35309 An integer value is in r3 and a floating-point value is in fp1,
35310 unless -msoft-float. */
35311
35312 static rtx
35313 rs6000_function_value (const_tree valtype,
35314 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35315 bool outgoing ATTRIBUTE_UNUSED)
35316 {
35317 machine_mode mode;
35318 unsigned int regno;
35319 machine_mode elt_mode;
35320 int n_elts;
35321
35322 /* Special handling for structs in darwin64. */
35323 if (TARGET_MACHO
35324 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35325 {
35326 CUMULATIVE_ARGS valcum;
35327 rtx valret;
35328
35329 valcum.words = 0;
35330 valcum.fregno = FP_ARG_MIN_REG;
35331 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35332 /* Do a trial code generation as if this were going to be passed as
35333 an argument; if any part goes in memory, we return NULL. */
35334 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35335 if (valret)
35336 return valret;
35337 /* Otherwise fall through to standard ABI rules. */
35338 }
35339
35340 mode = TYPE_MODE (valtype);
35341
 35342   /* The ELFv2 ABI returns homogeneous floating-point and vector aggregates in registers.  */
35343 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35344 {
35345 int first_reg, n_regs;
35346
35347 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35348 {
35349 /* _Decimal128 must use even/odd register pairs. */
35350 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35351 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35352 }
35353 else
35354 {
35355 first_reg = ALTIVEC_ARG_RETURN;
35356 n_regs = 1;
35357 }
35358
35359 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35360 }
35361
 35362   /* Some return value types need to be split under the 32-bit ABI with -mpowerpc64.  */
35363 if (TARGET_32BIT && TARGET_POWERPC64)
35364 switch (mode)
35365 {
35366 default:
35367 break;
35368 case E_DImode:
35369 case E_SCmode:
35370 case E_DCmode:
35371 case E_TCmode:
35372 int count = GET_MODE_SIZE (mode) / 4;
35373 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35374 }
35375
35376 if ((INTEGRAL_TYPE_P (valtype)
35377 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35378 || POINTER_TYPE_P (valtype))
35379 mode = TARGET_32BIT ? SImode : DImode;
35380
35381 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35382 /* _Decimal128 must use an even/odd register pair. */
35383 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35384 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35385 && !FLOAT128_VECTOR_P (mode))
35386 regno = FP_ARG_RETURN;
35387 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35388 && targetm.calls.split_complex_arg)
35389 return rs6000_complex_function_value (mode);
35390 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35391 return register is used in both cases, and we won't see V2DImode/V2DFmode
35392 for pure altivec, combine the two cases. */
35393 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35394 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35395 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35396 regno = ALTIVEC_ARG_RETURN;
35397 else
35398 regno = GP_ARG_RETURN;
35399
35400 return gen_rtx_REG (mode, regno);
35401 }
35402
35403 /* Define how to find the value returned by a library function
35404 assuming the value has mode MODE. */
35405 rtx
35406 rs6000_libcall_value (machine_mode mode)
35407 {
35408 unsigned int regno;
35409
 35410   /* Long long return values need to be split under the 32-bit ABI with -mpowerpc64.  */
35411 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35412 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35413
35414 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35415 /* _Decimal128 must use an even/odd register pair. */
35416 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35417 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
35418 regno = FP_ARG_RETURN;
35419 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35420 return register is used in both cases, and we won't see V2DImode/V2DFmode
35421 for pure altivec, combine the two cases. */
35422 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35423 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35424 regno = ALTIVEC_ARG_RETURN;
35425 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35426 return rs6000_complex_function_value (mode);
35427 else
35428 regno = GP_ARG_RETURN;
35429
35430 return gen_rtx_REG (mode, regno);
35431 }
35432
 35433 /* Compute register pressure classes.  We implement the target hook to avoid
 35434    IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
 35435    lead to incorrect estimates of the number of available registers and
 35436    therefore increased register pressure and spilling.  */
35437 static int
35438 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35439 {
35440 int n;
35441
35442 n = 0;
35443 pressure_classes[n++] = GENERAL_REGS;
35444 if (TARGET_VSX)
35445 pressure_classes[n++] = VSX_REGS;
35446 else
35447 {
35448 if (TARGET_ALTIVEC)
35449 pressure_classes[n++] = ALTIVEC_REGS;
35450 if (TARGET_HARD_FLOAT)
35451 pressure_classes[n++] = FLOAT_REGS;
35452 }
35453 pressure_classes[n++] = CR_REGS;
35454 pressure_classes[n++] = SPECIAL_REGS;
35455
35456 return n;
35457 }
35458
35459 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35460 Frame pointer elimination is automatically handled.
35461
35462 For the RS/6000, if frame pointer elimination is being done, we would like
35463 to convert ap into fp, not sp.
35464
 35465    We need r30 if -mminimal-toc was specified and there are constant-pool
35466 references. */
35467
35468 static bool
35469 rs6000_can_eliminate (const int from, const int to)
35470 {
35471 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35472 ? ! frame_pointer_needed
35473 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35474 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35475 || constant_pool_empty_p ()
35476 : true);
35477 }
35478
35479 /* Define the offset between two registers, FROM to be eliminated and its
35480 replacement TO, at the start of a routine. */
35481 HOST_WIDE_INT
35482 rs6000_initial_elimination_offset (int from, int to)
35483 {
35484 rs6000_stack_t *info = rs6000_stack_info ();
35485 HOST_WIDE_INT offset;
35486
35487 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35488 offset = info->push_p ? 0 : -info->total_size;
35489 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35490 {
35491 offset = info->push_p ? 0 : -info->total_size;
35492 if (FRAME_GROWS_DOWNWARD)
35493 offset += info->fixed_size + info->vars_size + info->parm_size;
35494 }
35495 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35496 offset = FRAME_GROWS_DOWNWARD
35497 ? info->fixed_size + info->vars_size + info->parm_size
35498 : 0;
35499 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35500 offset = info->total_size;
35501 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35502 offset = info->push_p ? info->total_size : 0;
35503 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35504 offset = 0;
35505 else
35506 gcc_unreachable ();
35507
35508 return offset;
35509 }
35510
35511 /* Fill in sizes of registers used by unwinder. */
35512
35513 static void
35514 rs6000_init_dwarf_reg_sizes_extra (tree address)
35515 {
35516 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35517 {
35518 int i;
35519 machine_mode mode = TYPE_MODE (char_type_node);
35520 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35521 rtx mem = gen_rtx_MEM (BLKmode, addr);
35522 rtx value = gen_int_mode (16, mode);
35523
35524 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35525 The unwinder still needs to know the size of Altivec registers. */
35526
35527 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35528 {
35529 int column = DWARF_REG_TO_UNWIND_COLUMN
35530 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35531 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35532
35533 emit_move_insn (adjust_address (mem, mode, offset), value);
35534 }
35535 }
35536 }
35537
35538 /* Map internal gcc register numbers to debug format register numbers.
35539 FORMAT specifies the type of debug register number to use:
35540 0 -- debug information, except for frame-related sections
35541 1 -- DWARF .debug_frame section
35542 2 -- DWARF .eh_frame section */
35543
35544 unsigned int
35545 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35546 {
 35547   /* We use the internal register number for non-DWARF debug
 35548      information, and also for .eh_frame.  */
35549 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35550 return regno;
35551
35552 /* On some platforms, we use the standard DWARF register
35553 numbering for .debug_info and .debug_frame. */
35554 #ifdef RS6000_USE_DWARF_NUMBERING
35555 if (regno <= 63)
35556 return regno;
35557 if (regno == LR_REGNO)
35558 return 108;
35559 if (regno == CTR_REGNO)
35560 return 109;
35561 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35562 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35563 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35564 to the DWARF reg for CR. */
35565 if (format == 1 && regno == CR2_REGNO)
35566 return 64;
35567 if (CR_REGNO_P (regno))
35568 return regno - CR0_REGNO + 86;
35569 if (regno == CA_REGNO)
35570 return 101; /* XER */
35571 if (ALTIVEC_REGNO_P (regno))
35572 return regno - FIRST_ALTIVEC_REGNO + 1124;
35573 if (regno == VRSAVE_REGNO)
35574 return 356;
35575 if (regno == VSCR_REGNO)
35576 return 67;
35577 #endif
35578 return regno;
35579 }
35580
 35581 /* Target hook for eh_return_filter_mode.  */
35582 static scalar_int_mode
35583 rs6000_eh_return_filter_mode (void)
35584 {
35585 return TARGET_32BIT ? SImode : word_mode;
35586 }
35587
35588 /* Target hook for translate_mode_attribute. */
35589 static machine_mode
35590 rs6000_translate_mode_attribute (machine_mode mode)
35591 {
35592 if ((FLOAT128_IEEE_P (mode)
35593 && ieee128_float_type_node == long_double_type_node)
35594 || (FLOAT128_IBM_P (mode)
35595 && ibm128_float_type_node == long_double_type_node))
35596 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
35597 return mode;
35598 }
35599
35600 /* Target hook for scalar_mode_supported_p. */
35601 static bool
35602 rs6000_scalar_mode_supported_p (scalar_mode mode)
35603 {
35604 /* -m32 does not support TImode. This is the default, from
35605 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35606 same ABI as for -m32. But default_scalar_mode_supported_p allows
35607 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35608 for -mpowerpc64. */
35609 if (TARGET_32BIT && mode == TImode)
35610 return false;
35611
35612 if (DECIMAL_FLOAT_MODE_P (mode))
35613 return default_decimal_float_supported_p ();
35614 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
35615 return true;
35616 else
35617 return default_scalar_mode_supported_p (mode);
35618 }
35619
35620 /* Target hook for vector_mode_supported_p. */
35621 static bool
35622 rs6000_vector_mode_supported_p (machine_mode mode)
35623 {
35624 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35625 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35626 double-double. */
35627 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
35628 return true;
35629
35630 else
35631 return false;
35632 }
35633
35634 /* Target hook for floatn_mode. */
35635 static opt_scalar_float_mode
35636 rs6000_floatn_mode (int n, bool extended)
35637 {
35638 if (extended)
35639 {
35640 switch (n)
35641 {
35642 case 32:
35643 return DFmode;
35644
35645 case 64:
35646 if (TARGET_FLOAT128_TYPE)
35647 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35648 else
35649 return opt_scalar_float_mode ();
35650
35651 case 128:
35652 return opt_scalar_float_mode ();
35653
35654 default:
35655 /* Those are the only valid _FloatNx types. */
35656 gcc_unreachable ();
35657 }
35658 }
35659 else
35660 {
35661 switch (n)
35662 {
35663 case 32:
35664 return SFmode;
35665
35666 case 64:
35667 return DFmode;
35668
35669 case 128:
35670 if (TARGET_FLOAT128_TYPE)
35671 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35672 else
35673 return opt_scalar_float_mode ();
35674
35675 default:
35676 return opt_scalar_float_mode ();
35677 }
35678 }
35679
35680 }
35681
35682 /* Target hook for c_mode_for_suffix. */
35683 static machine_mode
35684 rs6000_c_mode_for_suffix (char suffix)
35685 {
35686 if (TARGET_FLOAT128_TYPE)
35687 {
35688 if (suffix == 'q' || suffix == 'Q')
35689 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35690
35691 /* At the moment, we are not defining a suffix for IBM extended double.
35692 If/when the default for -mabi=ieeelongdouble is changed, and we want
35693 to support __ibm128 constants in legacy library code, we may need to
 35694 	 re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
35695 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
35696 __float80 constants. */
35697 }
35698
35699 return VOIDmode;
35700 }
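
/* An example, not part of this file, of the 'q' suffix mapped above; it
   assumes a target and options where the __float128 keyword is enabled.  */
#if 0
__float128 pi_q = 3.14159265358979323846264338327950288q;
#endif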
35701
35702 /* Target hook for invalid_arg_for_unprototyped_fn. */
35703 static const char *
35704 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
35705 {
35706 return (!rs6000_darwin64_abi
35707 && typelist == 0
35708 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
35709 && (funcdecl == NULL_TREE
35710 || (TREE_CODE (funcdecl) == FUNCTION_DECL
35711 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
35712 ? N_("AltiVec argument passed to unprototyped function")
35713 : NULL;
35714 }
35715
 35716 /* For TARGET_SECURE_PLT 32-bit PIC code we can avoid the PIC register
 35717    setup by calling the hidden function __stack_chk_fail_local instead
 35718    of __stack_chk_fail.  Otherwise it is better to call
 35719    __stack_chk_fail directly.  */
35720
35721 static tree ATTRIBUTE_UNUSED
35722 rs6000_stack_protect_fail (void)
35723 {
35724 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
35725 ? default_hidden_stack_protect_fail ()
35726 : default_external_stack_protect_fail ();
35727 }
35728
35729 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
35730
35731 #if TARGET_ELF
35732 static unsigned HOST_WIDE_INT
35733 rs6000_asan_shadow_offset (void)
35734 {
35735 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
35736 }
35737 #endif
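
/* A standalone sketch, not part of the build, of how the offset is
   consumed: with ASan's usual shadow scale of 3, the shadow byte for
   ADDR lives at (ADDR >> 3) plus the value returned above.  */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint64_t offset = (uint64_t) 1 << 41;	/* the TARGET_64BIT value */
  uint64_t addr = 0x10001234ull;
  printf ("shadow(0x%llx) = 0x%llx\n", (unsigned long long) addr,
	  (unsigned long long) ((addr >> 3) + offset));
  return 0;
}
#endif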
35738 \f
 35739 /* Mask options that we want to support inside attribute((target)) and
 35740    #pragma GCC target operations.  Note that we do not include things like
 35741    64/32-bit, endianness, hard/soft floating point, etc. that would have
 35742    different calling sequences.  */
35743
35744 struct rs6000_opt_mask {
35745 const char *name; /* option name */
35746 HOST_WIDE_INT mask; /* mask to set */
35747 bool invert; /* invert sense of mask */
35748 bool valid_target; /* option is a target option */
35749 };
35750
35751 static struct rs6000_opt_mask const rs6000_opt_masks[] =
35752 {
35753 { "altivec", OPTION_MASK_ALTIVEC, false, true },
35754 { "cmpb", OPTION_MASK_CMPB, false, true },
35755 { "crypto", OPTION_MASK_CRYPTO, false, true },
35756 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
35757 { "dlmzb", OPTION_MASK_DLMZB, false, true },
35758 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
35759 false, true },
35760 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
35761 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
35762 { "fprnd", OPTION_MASK_FPRND, false, true },
35763 { "hard-dfp", OPTION_MASK_DFP, false, true },
35764 { "htm", OPTION_MASK_HTM, false, true },
35765 { "isel", OPTION_MASK_ISEL, false, true },
35766 { "mfcrf", OPTION_MASK_MFCRF, false, true },
35767 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
35768 { "modulo", OPTION_MASK_MODULO, false, true },
35769 { "mulhw", OPTION_MASK_MULHW, false, true },
35770 { "multiple", OPTION_MASK_MULTIPLE, false, true },
35771 { "popcntb", OPTION_MASK_POPCNTB, false, true },
35772 { "popcntd", OPTION_MASK_POPCNTD, false, true },
35773 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
35774 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
35775 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
35776 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
35777 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
35778 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
35779 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
35780 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
35781 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
35782 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
35783 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
35784 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
35785 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
35786 { "string", 0, false, true },
35787 { "update", OPTION_MASK_NO_UPDATE, true , true },
35788 { "vsx", OPTION_MASK_VSX, false, true },
35789 #ifdef OPTION_MASK_64BIT
35790 #if TARGET_AIX_OS
35791 { "aix64", OPTION_MASK_64BIT, false, false },
35792 { "aix32", OPTION_MASK_64BIT, true, false },
35793 #else
35794 { "64", OPTION_MASK_64BIT, false, false },
35795 { "32", OPTION_MASK_64BIT, true, false },
35796 #endif
35797 #endif
35798 #ifdef OPTION_MASK_EABI
35799 { "eabi", OPTION_MASK_EABI, false, false },
35800 #endif
35801 #ifdef OPTION_MASK_LITTLE_ENDIAN
35802 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
35803 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
35804 #endif
35805 #ifdef OPTION_MASK_RELOCATABLE
35806 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
35807 #endif
35808 #ifdef OPTION_MASK_STRICT_ALIGN
35809 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
35810 #endif
35811 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
35812 { "string", 0, false, false },
35813 };
35814
35815 /* Builtin mask mapping for printing the flags. */
35816 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
35817 {
35818 { "altivec", RS6000_BTM_ALTIVEC, false, false },
35819 { "vsx", RS6000_BTM_VSX, false, false },
35820 { "fre", RS6000_BTM_FRE, false, false },
35821 { "fres", RS6000_BTM_FRES, false, false },
35822 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
35823 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
35824 { "popcntd", RS6000_BTM_POPCNTD, false, false },
35825 { "cell", RS6000_BTM_CELL, false, false },
35826 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
35827 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
35828 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
35829 { "crypto", RS6000_BTM_CRYPTO, false, false },
35830 { "htm", RS6000_BTM_HTM, false, false },
35831 { "hard-dfp", RS6000_BTM_DFP, false, false },
35832 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
35833 { "long-double-128", RS6000_BTM_LDBL128, false, false },
35834 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
35835 { "float128", RS6000_BTM_FLOAT128, false, false },
35836 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
35837 };
35838
35839 /* Option variables that we want to support inside attribute((target)) and
35840 #pragma GCC target operations. */
35841
35842 struct rs6000_opt_var {
35843 const char *name; /* option name */
35844 size_t global_offset; /* offset of the option in global_options. */
35845 size_t target_offset; /* offset of the option in target options. */
35846 };
35847
35848 static struct rs6000_opt_var const rs6000_opt_vars[] =
35849 {
35850 { "friz",
35851 offsetof (struct gcc_options, x_TARGET_FRIZ),
35852 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
35853 { "avoid-indexed-addresses",
35854 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
35855 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
35856 { "longcall",
35857 offsetof (struct gcc_options, x_rs6000_default_long_calls),
35858 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
35859 { "optimize-swaps",
35860 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
35861 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
35862 { "allow-movmisalign",
35863 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
35864 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
35865 { "sched-groups",
35866 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
35867 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
35868 { "always-hint",
35869 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
35870 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
35871 { "align-branch-targets",
35872 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
35873 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
35874 { "tls-markers",
35875 offsetof (struct gcc_options, x_tls_markers),
35876 offsetof (struct cl_target_option, x_tls_markers), },
35877 { "sched-prolog",
35878 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
35879 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
35880 { "sched-epilog",
35881 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
35882 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
35883 { "speculate-indirect-jumps",
35884 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
35885 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
35886 };
35887
35888 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
35889 parsing. Return true if there were no errors. */
35890
35891 static bool
35892 rs6000_inner_target_options (tree args, bool attr_p)
35893 {
35894 bool ret = true;
35895
35896 if (args == NULL_TREE)
35897 ;
35898
35899 else if (TREE_CODE (args) == STRING_CST)
35900 {
35901 char *p = ASTRDUP (TREE_STRING_POINTER (args));
35902 char *q;
35903
35904 while ((q = strtok (p, ",")) != NULL)
35905 {
35906 bool error_p = false;
35907 bool not_valid_p = false;
35908 const char *cpu_opt = NULL;
35909
35910 p = NULL;
35911 if (strncmp (q, "cpu=", 4) == 0)
35912 {
35913 int cpu_index = rs6000_cpu_name_lookup (q+4);
35914 if (cpu_index >= 0)
35915 rs6000_cpu_index = cpu_index;
35916 else
35917 {
35918 error_p = true;
35919 cpu_opt = q+4;
35920 }
35921 }
35922 else if (strncmp (q, "tune=", 5) == 0)
35923 {
35924 int tune_index = rs6000_cpu_name_lookup (q+5);
35925 if (tune_index >= 0)
35926 rs6000_tune_index = tune_index;
35927 else
35928 {
35929 error_p = true;
35930 cpu_opt = q+5;
35931 }
35932 }
35933 else
35934 {
35935 size_t i;
35936 bool invert = false;
35937 char *r = q;
35938
35939 error_p = true;
35940 if (strncmp (r, "no-", 3) == 0)
35941 {
35942 invert = true;
35943 r += 3;
35944 }
35945
35946 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
35947 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
35948 {
35949 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
35950
35951 if (!rs6000_opt_masks[i].valid_target)
35952 not_valid_p = true;
35953 else
35954 {
35955 error_p = false;
35956 rs6000_isa_flags_explicit |= mask;
35957
35958 /* VSX needs altivec, so -mvsx automagically sets
35959 altivec and disables -mavoid-indexed-addresses. */
35960 if (!invert)
35961 {
35962 if (mask == OPTION_MASK_VSX)
35963 {
35964 mask |= OPTION_MASK_ALTIVEC;
35965 TARGET_AVOID_XFORM = 0;
35966 }
35967 }
35968
35969 if (rs6000_opt_masks[i].invert)
35970 invert = !invert;
35971
35972 if (invert)
35973 rs6000_isa_flags &= ~mask;
35974 else
35975 rs6000_isa_flags |= mask;
35976 }
35977 break;
35978 }
35979
35980 if (error_p && !not_valid_p)
35981 {
35982 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
35983 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
35984 {
35985 size_t j = rs6000_opt_vars[i].global_offset;
35986 *((int *) ((char *)&global_options + j)) = !invert;
35987 error_p = false;
35988 not_valid_p = false;
35989 break;
35990 }
35991 }
35992 }
35993
35994 if (error_p)
35995 {
35996 const char *eprefix, *esuffix;
35997
35998 ret = false;
35999 if (attr_p)
36000 {
36001 eprefix = "__attribute__((__target__(";
36002 esuffix = ")))";
36003 }
36004 else
36005 {
36006 eprefix = "#pragma GCC target ";
36007 esuffix = "";
36008 }
36009
36010 if (cpu_opt)
36011 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36012 q, esuffix);
36013 else if (not_valid_p)
36014 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36015 else
36016 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36017 }
36018 }
36019 }
36020
36021 else if (TREE_CODE (args) == TREE_LIST)
36022 {
36023 do
36024 {
36025 tree value = TREE_VALUE (args);
36026 if (value)
36027 {
36028 bool ret2 = rs6000_inner_target_options (value, attr_p);
36029 if (!ret2)
36030 ret = false;
36031 }
36032 args = TREE_CHAIN (args);
36033 }
36034 while (args != NULL_TREE);
36035 }
36036
36037 else
36038 {
36039 error ("attribute %<target%> argument not a string");
36040 return false;
36041 }
36042
36043 return ret;
36044 }
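
/* Examples, not part of this file, of attribute strings the parser
   above accepts; the option names come from the tables earlier.  */
#if 0
__attribute__((target ("cpu=power9,htm")))
int with_htm (void) { return 1; }

__attribute__((target ("no-vsx")))	/* "no-" inverts a mask option */
int without_vsx (void) { return 2; }
#endif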
36045
36046 /* Print out the target options as a list for -mdebug=target. */
36047
36048 static void
36049 rs6000_debug_target_options (tree args, const char *prefix)
36050 {
36051 if (args == NULL_TREE)
36052 fprintf (stderr, "%s<NULL>", prefix);
36053
36054 else if (TREE_CODE (args) == STRING_CST)
36055 {
36056 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36057 char *q;
36058
36059 while ((q = strtok (p, ",")) != NULL)
36060 {
36061 p = NULL;
36062 fprintf (stderr, "%s\"%s\"", prefix, q);
36063 prefix = ", ";
36064 }
36065 }
36066
36067 else if (TREE_CODE (args) == TREE_LIST)
36068 {
36069 do
36070 {
36071 tree value = TREE_VALUE (args);
36072 if (value)
36073 {
36074 rs6000_debug_target_options (value, prefix);
36075 prefix = ", ";
36076 }
36077 args = TREE_CHAIN (args);
36078 }
36079 while (args != NULL_TREE);
36080 }
36081
36082 else
36083 gcc_unreachable ();
36084
36085 return;
36086 }
36087
36088 \f
36089 /* Hook to validate attribute((target("..."))). */
36090
36091 static bool
36092 rs6000_valid_attribute_p (tree fndecl,
36093 tree ARG_UNUSED (name),
36094 tree args,
36095 int flags)
36096 {
36097 struct cl_target_option cur_target;
36098 bool ret;
36099 tree old_optimize;
36100 tree new_target, new_optimize;
36101 tree func_optimize;
36102
36103 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36104
36105 if (TARGET_DEBUG_TARGET)
36106 {
36107 tree tname = DECL_NAME (fndecl);
36108 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36109 if (tname)
36110 fprintf (stderr, "function: %.*s\n",
36111 (int) IDENTIFIER_LENGTH (tname),
36112 IDENTIFIER_POINTER (tname));
36113 else
36114 fprintf (stderr, "function: unknown\n");
36115
36116 fprintf (stderr, "args:");
36117 rs6000_debug_target_options (args, " ");
36118 fprintf (stderr, "\n");
36119
36120 if (flags)
36121 fprintf (stderr, "flags: 0x%x\n", flags);
36122
36123 fprintf (stderr, "--------------------\n");
36124 }
36125
36126 /* attribute((target("default"))) does nothing, beyond
36127 affecting multi-versioning. */
36128 if (TREE_VALUE (args)
36129 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36130 && TREE_CHAIN (args) == NULL_TREE
36131 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36132 return true;
36133
36134 old_optimize = build_optimization_node (&global_options);
36135 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36136
36137 /* If the function changed the optimization levels as well as setting target
36138 options, start with the optimizations specified. */
36139 if (func_optimize && func_optimize != old_optimize)
36140 cl_optimization_restore (&global_options,
36141 TREE_OPTIMIZATION (func_optimize));
36142
36143 /* The target attributes may also change some optimization flags, so update
36144 the optimization options if necessary. */
36145 cl_target_option_save (&cur_target, &global_options);
36146 rs6000_cpu_index = rs6000_tune_index = -1;
36147 ret = rs6000_inner_target_options (args, true);
36148
36149 /* Set up any additional state. */
36150 if (ret)
36151 {
36152 ret = rs6000_option_override_internal (false);
36153 new_target = build_target_option_node (&global_options);
36154 }
36155 else
36156 new_target = NULL;
36157
36158 new_optimize = build_optimization_node (&global_options);
36159
36160 if (!new_target)
36161 ret = false;
36162
36163 else if (fndecl)
36164 {
36165 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36166
36167 if (old_optimize != new_optimize)
36168 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36169 }
36170
36171 cl_target_option_restore (&global_options, &cur_target);
36172
36173 if (old_optimize != new_optimize)
36174 cl_optimization_restore (&global_options,
36175 TREE_OPTIMIZATION (old_optimize));
36176
36177 return ret;
36178 }
36179
36180 \f
36181 /* Hook to validate the current #pragma GCC target and set the state, and
36182 update the macros based on what was changed. If ARGS is NULL, then
36183 POP_TARGET is used to reset the options. */
36184
36185 bool
36186 rs6000_pragma_target_parse (tree args, tree pop_target)
36187 {
36188 tree prev_tree = build_target_option_node (&global_options);
36189 tree cur_tree;
36190 struct cl_target_option *prev_opt, *cur_opt;
36191 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36192 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36193
36194 if (TARGET_DEBUG_TARGET)
36195 {
36196 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36197 fprintf (stderr, "args:");
36198 rs6000_debug_target_options (args, " ");
36199 fprintf (stderr, "\n");
36200
36201 if (pop_target)
36202 {
36203 fprintf (stderr, "pop_target:\n");
36204 debug_tree (pop_target);
36205 }
36206 else
36207 fprintf (stderr, "pop_target: <NULL>\n");
36208
36209 fprintf (stderr, "--------------------\n");
36210 }
36211
36212 if (! args)
36213 {
36214 cur_tree = ((pop_target)
36215 ? pop_target
36216 : target_option_default_node);
36217 cl_target_option_restore (&global_options,
36218 TREE_TARGET_OPTION (cur_tree));
36219 }
36220 else
36221 {
36222 rs6000_cpu_index = rs6000_tune_index = -1;
36223 if (!rs6000_inner_target_options (args, false)
36224 || !rs6000_option_override_internal (false)
36225 || (cur_tree = build_target_option_node (&global_options))
36226 == NULL_TREE)
36227 {
36228 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36229 fprintf (stderr, "invalid pragma\n");
36230
36231 return false;
36232 }
36233 }
36234
36235 target_option_current_node = cur_tree;
36236 rs6000_activate_target_options (target_option_current_node);
36237
36238 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36239 change the macros that are defined. */
36240 if (rs6000_target_modify_macros_ptr)
36241 {
36242 prev_opt = TREE_TARGET_OPTION (prev_tree);
36243 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36244 prev_flags = prev_opt->x_rs6000_isa_flags;
36245
36246 cur_opt = TREE_TARGET_OPTION (cur_tree);
36247 cur_flags = cur_opt->x_rs6000_isa_flags;
36248 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36249
36250 diff_bumask = (prev_bumask ^ cur_bumask);
36251 diff_flags = (prev_flags ^ cur_flags);
36252
36253 if ((diff_flags != 0) || (diff_bumask != 0))
36254 {
36255 /* Delete old macros. */
36256 rs6000_target_modify_macros_ptr (false,
36257 prev_flags & diff_flags,
36258 prev_bumask & diff_bumask);
36259
36260 /* Define new macros. */
36261 rs6000_target_modify_macros_ptr (true,
36262 cur_flags & diff_flags,
36263 cur_bumask & diff_bumask);
36264 }
36265 }
36266
36267 return true;
36268 }
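
/* An example, not part of this file, of the pragmas this hook
   validates; on pop_options the saved options arrive as POP_TARGET.  */
#if 0
#pragma GCC push_options
#pragma GCC target ("vsx,power8-vector")
int vsx_code (void) { return 0; }
#pragma GCC pop_options
#endif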
36269
36270 \f
36271 /* Remember the last target of rs6000_set_current_function. */
36272 static GTY(()) tree rs6000_previous_fndecl;
36273
36274 /* Restore target's globals from NEW_TREE and invalidate the
36275 rs6000_previous_fndecl cache. */
36276
36277 void
36278 rs6000_activate_target_options (tree new_tree)
36279 {
36280 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36281 if (TREE_TARGET_GLOBALS (new_tree))
36282 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36283 else if (new_tree == target_option_default_node)
36284 restore_target_globals (&default_target_globals);
36285 else
36286 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36287 rs6000_previous_fndecl = NULL_TREE;
36288 }
36289
36290 /* Establish appropriate back-end context for processing the function
36291 FNDECL. The argument might be NULL to indicate processing at top
36292 level, outside of any function scope. */
36293 static void
36294 rs6000_set_current_function (tree fndecl)
36295 {
36296 if (TARGET_DEBUG_TARGET)
36297 {
36298 fprintf (stderr, "\n==================== rs6000_set_current_function");
36299
36300 if (fndecl)
36301 fprintf (stderr, ", fndecl %s (%p)",
36302 (DECL_NAME (fndecl)
36303 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36304 : "<unknown>"), (void *)fndecl);
36305
36306 if (rs6000_previous_fndecl)
36307 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36308
36309 fprintf (stderr, "\n");
36310 }
36311
36312 /* Only change the context if the function changes. This hook is called
36313 several times in the course of compiling a function, and we don't want to
36314 slow things down too much or call target_reinit when it isn't safe. */
36315 if (fndecl == rs6000_previous_fndecl)
36316 return;
36317
36318 tree old_tree;
36319 if (rs6000_previous_fndecl == NULL_TREE)
36320 old_tree = target_option_current_node;
36321 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36322 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36323 else
36324 old_tree = target_option_default_node;
36325
36326 tree new_tree;
36327 if (fndecl == NULL_TREE)
36328 {
36329 if (old_tree != target_option_current_node)
36330 new_tree = target_option_current_node;
36331 else
36332 new_tree = NULL_TREE;
36333 }
36334 else
36335 {
36336 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36337 if (new_tree == NULL_TREE)
36338 new_tree = target_option_default_node;
36339 }
36340
36341 if (TARGET_DEBUG_TARGET)
36342 {
36343 if (new_tree)
36344 {
36345 fprintf (stderr, "\nnew fndecl target specific options:\n");
36346 debug_tree (new_tree);
36347 }
36348
36349 if (old_tree)
36350 {
36351 fprintf (stderr, "\nold fndecl target specific options:\n");
36352 debug_tree (old_tree);
36353 }
36354
36355 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36356 fprintf (stderr, "--------------------\n");
36357 }
36358
36359 if (new_tree && old_tree != new_tree)
36360 rs6000_activate_target_options (new_tree);
36361
36362 if (fndecl)
36363 rs6000_previous_fndecl = fndecl;
36364 }
36365
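/* A sketch of when this hook matters (hypothetical user code kept under
   #if 0): each definition below carries its own
   DECL_FUNCTION_SPECIFIC_TARGET, so the hook must re-activate the right
   option set as compilation moves between the bodies.  */
#if 0
__attribute__ ((target ("no-vsx"))) void scalar_fn (void) { }
__attribute__ ((target ("cpu=power9"))) void p9_fn (void) { }
#endif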
36366 \f
36367 /* Save the current options */
36368
36369 static void
36370 rs6000_function_specific_save (struct cl_target_option *ptr,
36371 struct gcc_options *opts)
36372 {
36373 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36374 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36375 }
36376
36377 /* Restore the current options */
36378
36379 static void
36380 rs6000_function_specific_restore (struct gcc_options *opts,
36381 struct cl_target_option *ptr)
36382
36383 {
36384 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36385 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36386 (void) rs6000_option_override_internal (false);
36387 }
36388
36389 /* Print the current options */
36390
36391 static void
36392 rs6000_function_specific_print (FILE *file, int indent,
36393 struct cl_target_option *ptr)
36394 {
36395 rs6000_print_isa_options (file, indent, "Isa options set",
36396 ptr->x_rs6000_isa_flags);
36397
36398 rs6000_print_isa_options (file, indent, "Isa options explicit",
36399 ptr->x_rs6000_isa_flags_explicit);
36400 }
36401
36402 /* Helper function to print the current isa or misc options on a line. */
36403
36404 static void
36405 rs6000_print_options_internal (FILE *file,
36406 int indent,
36407 const char *string,
36408 HOST_WIDE_INT flags,
36409 const char *prefix,
36410 const struct rs6000_opt_mask *opts,
36411 size_t num_elements)
36412 {
36413 size_t i;
36414 size_t start_column = 0;
36415 size_t cur_column;
36416 size_t max_column = 120;
36417 size_t prefix_len = strlen (prefix);
36418 size_t comma_len = 0;
36419 const char *comma = "";
36420
36421 if (indent)
36422 start_column += fprintf (file, "%*s", indent, "");
36423
36424 if (!flags)
36425 {
36426 fprintf (file, DEBUG_FMT_S, string, "<none>");
36427 return;
36428 }
36429
36430 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36431
36432 /* Print the various mask options. */
36433 cur_column = start_column;
36434 for (i = 0; i < num_elements; i++)
36435 {
36436 bool invert = opts[i].invert;
36437 const char *name = opts[i].name;
36438 const char *no_str = "";
36439 HOST_WIDE_INT mask = opts[i].mask;
36440 size_t len = comma_len + prefix_len + strlen (name);
36441
36442 if (!invert)
36443 {
36444 if ((flags & mask) == 0)
36445 {
36446 no_str = "no-";
36447 len += sizeof ("no-") - 1;
36448 }
36449
36450 flags &= ~mask;
36451 }
36452
36453 else
36454 {
36455 if ((flags & mask) != 0)
36456 {
36457 no_str = "no-";
36458 len += sizeof ("no-") - 1;
36459 }
36460
36461 flags |= mask;
36462 }
36463
36464 cur_column += len;
36465 if (cur_column > max_column)
36466 {
36467 fprintf (file, ", \\\n%*s", (int)start_column, "");
36468 cur_column = start_column + len;
36469 comma = "";
36470 }
36471
36472 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36473 comma = ", ";
36474 comma_len = sizeof (", ") - 1;
36475 }
36476
36477 fputs ("\n", file);
36478 }
36479
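/* For example (a sketch; the exact flags and mask value depend on the
   configuration), a call with STRING "Isa options set" and PREFIX "-m"
   for a power8 flag mask prints one wrapped line of the form:

     Isa options set = 0x...: -maltivec, -mvsx, -mpower8-vector, \
         -mno-power9-vector, ...

   with a '\' continuation emitted whenever column 120 would be passed.  */
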
36480 /* Helper function to print the current isa options on a line. */
36481
36482 static void
36483 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36484 HOST_WIDE_INT flags)
36485 {
36486 rs6000_print_options_internal (file, indent, string, flags, "-m",
36487 &rs6000_opt_masks[0],
36488 ARRAY_SIZE (rs6000_opt_masks));
36489 }
36490
36491 static void
36492 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36493 HOST_WIDE_INT flags)
36494 {
36495 rs6000_print_options_internal (file, indent, string, flags, "",
36496 &rs6000_builtin_mask_names[0],
36497 ARRAY_SIZE (rs6000_builtin_mask_names));
36498 }
36499
36500 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
36501 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36502 -mupper-regs-df, etc.).
36503
36504 If the user used -mno-power8-vector, we need to turn off all of the implicit
36505 ISA 2.07 and 3.0 options that relate to the vector unit.
36506
36507 If the user used -mno-power9-vector, we need to turn off all of the implicit
36508 ISA 3.0 options that relate to the vector unit.
36509
36510 This function does not handle explicit options such as the user specifying
36511 -mdirect-move. These are handled in rs6000_option_override_internal, and
36512 the appropriate error is given if needed.
36513
36514 We return a mask of all of the implicit options that should not be enabled
36515 by default. */
36516
36517 static HOST_WIDE_INT
36518 rs6000_disable_incompatible_switches (void)
36519 {
36520 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
36521 size_t i, j;
36522
36523 static const struct {
36524 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
36525 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
36526 const char *const name; /* name of the switch. */
36527 } flags[] = {
36528 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
36529 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
36530 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
36531 };
36532
36533 for (i = 0; i < ARRAY_SIZE (flags); i++)
36534 {
36535 HOST_WIDE_INT no_flag = flags[i].no_flag;
36536
36537 if ((rs6000_isa_flags & no_flag) == 0
36538 && (rs6000_isa_flags_explicit & no_flag) != 0)
36539 {
36540 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
36541 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
36542 & rs6000_isa_flags
36543 & dep_flags);
36544
36545 if (set_flags)
36546 {
36547 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
36548 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
36549 {
36550 set_flags &= ~rs6000_opt_masks[j].mask;
36551 error ("%<-mno-%s%> turns off %<-m%s%>",
36552 flags[i].name,
36553 rs6000_opt_masks[j].name);
36554 }
36555
36556 gcc_assert (!set_flags);
36557 }
36558
36559 rs6000_isa_flags &= ~dep_flags;
36560 ignore_masks |= no_flag | dep_flags;
36561 }
36562 }
36563
36564 return ignore_masks;
36565 }
36566
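/* Example of the diagnostic above (a sketch; the exact dependent flags come
   from the OTHER_*_MASKS definitions in the target headers):

     $ gcc -mno-vsx -mdirect-move t.c
     error: '-mno-vsx' turns off '-mdirect-move'

   A dependent option that was only enabled implicitly (e.g. by -mcpu=) is
   not diagnosed; it is silently cleared via the returned ignore mask.  */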
36567 \f
36568 /* Helper function for printing the function name when debugging. */
36569
36570 static const char *
36571 get_decl_name (tree fn)
36572 {
36573 tree name;
36574
36575 if (!fn)
36576 return "<null>";
36577
36578 name = DECL_NAME (fn);
36579 if (!name)
36580 return "<no-name>";
36581
36582 return IDENTIFIER_POINTER (name);
36583 }
36584
36585 /* Return the clone id of the target we are compiling code for in a target
36586 clone. Clone ids run from 0 (default) to CLONE_MAX-1 and form
36587 the priority list for the target clones (ordered from lowest to
36588 highest). */
36589
36590 static int
36591 rs6000_clone_priority (tree fndecl)
36592 {
36593 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36594 HOST_WIDE_INT isa_masks;
36595 int ret = CLONE_DEFAULT;
36596 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
36597 const char *attrs_str = NULL;
36598
36599 attrs = TREE_VALUE (TREE_VALUE (attrs));
36600 attrs_str = TREE_STRING_POINTER (attrs);
36601
36602 /* Return priority zero for the default function. Return the ISA needed
36603 for the function if it is not the default. */
36604 if (strcmp (attrs_str, "default") != 0)
36605 {
36606 if (fn_opts == NULL_TREE)
36607 fn_opts = target_option_default_node;
36608
36609 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
36610 isa_masks = rs6000_isa_flags;
36611 else
36612 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
36613
36614 for (ret = CLONE_MAX - 1; ret != 0; ret--)
36615 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
36616 break;
36617 }
36618
36619 if (TARGET_DEBUG_TARGET)
36620 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
36621 get_decl_name (fndecl), ret);
36622
36623 return ret;
36624 }
36625
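/* Sketch of how the priorities computed above are consumed (hypothetical
   user code kept under #if 0): the generated resolver prefers the
   highest-priority clone that the running CPU supports.  */
#if 0
__attribute__ ((target_clones ("cpu=power9", "cpu=power8", "default")))
long scale (long x) { return 3 * x; }
#endif
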
36626 /* This compares the priority of target features in function DECL1 and DECL2.
36627 It returns a positive value if DECL1 has higher priority, a negative value
36628 if DECL2 has higher priority, and 0 if they are the same. Note, priorities are
36629 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
36630
36631 static int
36632 rs6000_compare_version_priority (tree decl1, tree decl2)
36633 {
36634 int priority1 = rs6000_clone_priority (decl1);
36635 int priority2 = rs6000_clone_priority (decl2);
36636 int ret = priority1 - priority2;
36637
36638 if (TARGET_DEBUG_TARGET)
36639 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
36640 get_decl_name (decl1), get_decl_name (decl2), ret);
36641
36642 return ret;
36643 }
36644
36645 /* Make a dispatcher declaration for the multi-versioned function DECL.
36646 Calls to the DECL function will be replaced with calls to the dispatcher
36647 by the front-end. Returns the decl of the dispatcher function. */
36648
36649 static tree
36650 rs6000_get_function_versions_dispatcher (void *decl)
36651 {
36652 tree fn = (tree) decl;
36653 struct cgraph_node *node = NULL;
36654 struct cgraph_node *default_node = NULL;
36655 struct cgraph_function_version_info *node_v = NULL;
36656 struct cgraph_function_version_info *first_v = NULL;
36657
36658 tree dispatch_decl = NULL;
36659
36660 struct cgraph_function_version_info *default_version_info = NULL;
36661 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
36662
36663 if (TARGET_DEBUG_TARGET)
36664 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
36665 get_decl_name (fn));
36666
36667 node = cgraph_node::get (fn);
36668 gcc_assert (node != NULL);
36669
36670 node_v = node->function_version ();
36671 gcc_assert (node_v != NULL);
36672
36673 if (node_v->dispatcher_resolver != NULL)
36674 return node_v->dispatcher_resolver;
36675
36676 /* Find the default version and make it the first node. */
36677 first_v = node_v;
36678 /* Go to the beginning of the chain. */
36679 while (first_v->prev != NULL)
36680 first_v = first_v->prev;
36681
36682 default_version_info = first_v;
36683 while (default_version_info != NULL)
36684 {
36685 const tree decl2 = default_version_info->this_node->decl;
36686 if (is_function_default_version (decl2))
36687 break;
36688 default_version_info = default_version_info->next;
36689 }
36690
36691 /* If there is no default node, just return NULL. */
36692 if (default_version_info == NULL)
36693 return NULL;
36694
36695 /* Make default info the first node. */
36696 if (first_v != default_version_info)
36697 {
36698 default_version_info->prev->next = default_version_info->next;
36699 if (default_version_info->next)
36700 default_version_info->next->prev = default_version_info->prev;
36701 first_v->prev = default_version_info;
36702 default_version_info->next = first_v;
36703 default_version_info->prev = NULL;
36704 }
36705
36706 default_node = default_version_info->this_node;
36707
36708 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
36709 error_at (DECL_SOURCE_LOCATION (default_node->decl),
36710 "target_clones attribute needs GLIBC (2.23 and newer) that "
36711 "exports hardware capability bits");
36712 #else
36713
36714 if (targetm.has_ifunc_p ())
36715 {
36716 struct cgraph_function_version_info *it_v = NULL;
36717 struct cgraph_node *dispatcher_node = NULL;
36718 struct cgraph_function_version_info *dispatcher_version_info = NULL;
36719
36720 /* Right now, the dispatching is done via ifunc. */
36721 dispatch_decl = make_dispatcher_decl (default_node->decl);
36722
36723 dispatcher_node = cgraph_node::get_create (dispatch_decl);
36724 gcc_assert (dispatcher_node != NULL);
36725 dispatcher_node->dispatcher_function = 1;
36726 dispatcher_version_info
36727 = dispatcher_node->insert_new_function_version ();
36728 dispatcher_version_info->next = default_version_info;
36729 dispatcher_node->definition = 1;
36730
36731 /* Set the dispatcher for all the versions. */
36732 it_v = default_version_info;
36733 while (it_v != NULL)
36734 {
36735 it_v->dispatcher_resolver = dispatch_decl;
36736 it_v = it_v->next;
36737 }
36738 }
36739 else
36740 {
36741 error_at (DECL_SOURCE_LOCATION (default_node->decl),
36742 "multiversioning needs ifunc which is not supported "
36743 "on this target");
36744 }
36745 #endif
36746
36747 return dispatch_decl;
36748 }
36749
36750 /* Make the resolver function decl to dispatch the versions of a multi-
36751 versioned function, DEFAULT_DECL. Create an empty basic block in the
36752 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
36753 function. */
36754
36755 static tree
36756 make_resolver_func (const tree default_decl,
36757 const tree dispatch_decl,
36758 basic_block *empty_bb)
36759 {
36760 /* Make the resolver function static. The resolver function returns
36761 void *. */
36762 tree decl_name = clone_function_name (default_decl, "resolver");
36763 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
36764 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
36765 tree decl = build_fn_decl (resolver_name, type);
36766 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
36767
36768 DECL_NAME (decl) = decl_name;
36769 TREE_USED (decl) = 1;
36770 DECL_ARTIFICIAL (decl) = 1;
36771 DECL_IGNORED_P (decl) = 0;
36772 TREE_PUBLIC (decl) = 0;
36773 DECL_UNINLINABLE (decl) = 1;
36774
36775 /* Resolver is not external, body is generated. */
36776 DECL_EXTERNAL (decl) = 0;
36777 DECL_EXTERNAL (dispatch_decl) = 0;
36778
36779 DECL_CONTEXT (decl) = NULL_TREE;
36780 DECL_INITIAL (decl) = make_node (BLOCK);
36781 DECL_STATIC_CONSTRUCTOR (decl) = 0;
36782
36783 /* Build result decl and add to function_decl. */
36784 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
36785 DECL_ARTIFICIAL (t) = 1;
36786 DECL_IGNORED_P (t) = 1;
36787 DECL_RESULT (decl) = t;
36788
36789 gimplify_function_tree (decl);
36790 push_cfun (DECL_STRUCT_FUNCTION (decl));
36791 *empty_bb = init_lowered_empty_function (decl, false,
36792 profile_count::uninitialized ());
36793
36794 cgraph_node::add_new_function (decl, true);
36795 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
36796
36797 pop_cfun ();
36798
36799 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
36800 DECL_ATTRIBUTES (dispatch_decl)
36801 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
36802
36803 cgraph_node::create_same_body_alias (dispatch_decl, decl);
36804
36805 return decl;
36806 }
36807
36808 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
36809 return a pointer to VERSION_DECL if we are running on a machine that
36810 supports the hardware architecture bits indexed by CLONE_ISA. This function
36811 be called during version dispatch to decide which function version to
36812 execute. It returns the basic block at the end, to which more conditions
36813 can be added. */
36814
36815 static basic_block
36816 add_condition_to_bb (tree function_decl, tree version_decl,
36817 int clone_isa, basic_block new_bb)
36818 {
36819 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
36820
36821 gcc_assert (new_bb != NULL);
36822 gimple_seq gseq = bb_seq (new_bb);
36823
36824
36825 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
36826 build_fold_addr_expr (version_decl));
36827 tree result_var = create_tmp_var (ptr_type_node);
36828 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
36829 gimple *return_stmt = gimple_build_return (result_var);
36830
36831 if (clone_isa == CLONE_DEFAULT)
36832 {
36833 gimple_seq_add_stmt (&gseq, convert_stmt);
36834 gimple_seq_add_stmt (&gseq, return_stmt);
36835 set_bb_seq (new_bb, gseq);
36836 gimple_set_bb (convert_stmt, new_bb);
36837 gimple_set_bb (return_stmt, new_bb);
36838 pop_cfun ();
36839 return new_bb;
36840 }
36841
36842 tree bool_zero = build_int_cst (bool_int_type_node, 0);
36843 tree cond_var = create_tmp_var (bool_int_type_node);
36844 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
36845 const char *arg_str = rs6000_clone_map[clone_isa].name;
36846 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
36847 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
36848 gimple_call_set_lhs (call_cond_stmt, cond_var);
36849
36850 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
36851 gimple_set_bb (call_cond_stmt, new_bb);
36852 gimple_seq_add_stmt (&gseq, call_cond_stmt);
36853
36854 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
36855 NULL_TREE, NULL_TREE);
36856 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
36857 gimple_set_bb (if_else_stmt, new_bb);
36858 gimple_seq_add_stmt (&gseq, if_else_stmt);
36859
36860 gimple_seq_add_stmt (&gseq, convert_stmt);
36861 gimple_seq_add_stmt (&gseq, return_stmt);
36862 set_bb_seq (new_bb, gseq);
36863
36864 basic_block bb1 = new_bb;
36865 edge e12 = split_block (bb1, if_else_stmt);
36866 basic_block bb2 = e12->dest;
36867 e12->flags &= ~EDGE_FALLTHRU;
36868 e12->flags |= EDGE_TRUE_VALUE;
36869
36870 edge e23 = split_block (bb2, return_stmt);
36871 gimple_set_bb (convert_stmt, bb2);
36872 gimple_set_bb (return_stmt, bb2);
36873
36874 basic_block bb3 = e23->dest;
36875 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
36876
36877 remove_edge (e23);
36878 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
36879
36880 pop_cfun ();
36881 return bb3;
36882 }
36883
36884 /* This function generates the dispatch function for multi-versioned functions.
36885 DISPATCH_DECL is the function which will contain the dispatch logic.
36886 FNDECLS_P is a vector holding the function choices for dispatch.
36887 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
36888 code is generated. */
36889
36890 static int
36891 dispatch_function_versions (tree dispatch_decl,
36892 void *fndecls_p,
36893 basic_block *empty_bb)
36894 {
36895 int ix;
36896 tree ele;
36897 vec<tree> *fndecls;
36898 tree clones[CLONE_MAX];
36899
36900 if (TARGET_DEBUG_TARGET)
36901 fputs ("dispatch_function_versions, top\n", stderr);
36902
36903 gcc_assert (dispatch_decl != NULL
36904 && fndecls_p != NULL
36905 && empty_bb != NULL);
36906
36907 /* fndecls_p is actually a vector. */
36908 fndecls = static_cast<vec<tree> *> (fndecls_p);
36909
36910 /* At least one more version other than the default. */
36911 gcc_assert (fndecls->length () >= 2);
36912
36913 /* The first version in the vector is the default decl. */
36914 memset ((void *) clones, '\0', sizeof (clones));
36915 clones[CLONE_DEFAULT] = (*fndecls)[0];
36916
36917 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
36918 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
36919 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
36920 recent glibc. If we ever need to call __builtin_cpu_init, we would need
36921 to insert the code here to do the call. */
36922
36923 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
36924 {
36925 int priority = rs6000_clone_priority (ele);
36926 if (!clones[priority])
36927 clones[priority] = ele;
36928 }
36929
36930 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
36931 if (clones[ix])
36932 {
36933 if (TARGET_DEBUG_TARGET)
36934 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
36935 ix, get_decl_name (clones[ix]));
36936
36937 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
36938 *empty_bb);
36939 }
36940
36941 return 0;
36942 }
36943
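/* The dispatch code built above amounts to this GIMPLE-level sketch
   (hedged; the exact predicate strings come from rs6000_clone_map):

     if (__builtin_cpu_supports ("arch_3_00")) return clone_p9;
     if (__builtin_cpu_supports ("arch_2_07")) return clone_p8;
     return clone_default;  */
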
36944 /* Generate the dispatching code body to dispatch multi-versioned function
36945 DECL. The target hook is called to process the "target" attributes and
36946 provide the code to dispatch the right function at run-time. NODE points
36947 to the dispatcher decl whose body will be created. */
36948
36949 static tree
36950 rs6000_generate_version_dispatcher_body (void *node_p)
36951 {
36952 tree resolver;
36953 basic_block empty_bb;
36954 struct cgraph_node *node = (cgraph_node *) node_p;
36955 struct cgraph_function_version_info *ninfo = node->function_version ();
36956
36957 if (ninfo->dispatcher_resolver)
36958 return ninfo->dispatcher_resolver;
36959
36960 /* node is going to be an alias, so remove the finalized bit. */
36961 node->definition = false;
36962
36963 /* The first version in the chain corresponds to the default version. */
36964 ninfo->dispatcher_resolver = resolver
36965 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
36966
36967 if (TARGET_DEBUG_TARGET)
36968 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
36969 get_decl_name (resolver));
36970
36971 push_cfun (DECL_STRUCT_FUNCTION (resolver));
36972 auto_vec<tree, 2> fn_ver_vec;
36973
36974 for (struct cgraph_function_version_info *vinfo = ninfo->next;
36975 vinfo;
36976 vinfo = vinfo->next)
36977 {
36978 struct cgraph_node *version = vinfo->this_node;
36979 /* Check for virtual functions here again, as by this time it should
36980 have been determined if this function needs a vtable index or
36981 not. This happens for methods in derived classes that override
36982 virtual methods in base classes but are not explicitly marked as
36983 virtual. */
36984 if (DECL_VINDEX (version->decl))
36985 sorry ("Virtual function multiversioning not supported");
36986
36987 fn_ver_vec.safe_push (version->decl);
36988 }
36989
36990 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
36991 cgraph_edge::rebuild_edges ();
36992 pop_cfun ();
36993 return resolver;
36994 }
36995
36996 \f
36997 /* Hook to determine if one function can safely inline another. */
36998
36999 static bool
37000 rs6000_can_inline_p (tree caller, tree callee)
37001 {
37002 bool ret = false;
37003 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37004 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37005
37006 /* If callee has no option attributes, then it is ok to inline. */
37007 if (!callee_tree)
37008 ret = true;
37009
37010 /* If the caller has no option attributes but the callee does, then it is
37011 not ok to inline. */
37012 else if (!caller_tree)
37013 ret = false;
37014
37015 else
37016 {
37017 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37018 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37019
37020 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37021 can inline an altivec function but a non-vsx function can't inline a
37022 vsx function. */
37023 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37024 == callee_opts->x_rs6000_isa_flags)
37025 ret = true;
37026 }
37027
37028 if (TARGET_DEBUG_TARGET)
37029 fprintf (stderr, "rs6000_can_inline_p, caller %s, callee %s, %s inline\n",
37030 get_decl_name (caller), get_decl_name (callee),
37031 (ret ? "can" : "cannot"));
37032
37033 return ret;
37034 }
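
/* Sketch of the subset rule above (editor's illustration, hypothetical
   code kept under #if 0): VSX implies Altivec, so an Altivec-only callee
   may be inlined into a VSX caller, but not the reverse.  */
#if 0
__attribute__ ((target ("altivec"))) static inline int cal (int x)
{ return x + 1; }
__attribute__ ((target ("vsx"))) int ok (int x)
{ return cal (x); }       /* callee flags are a subset: may inline.  */
__attribute__ ((target ("vsx"))) static inline int vcal (int x)
{ return x + 1; }
__attribute__ ((target ("altivec"))) int no (int x)
{ return vcal (x); }      /* callee needs VSX: not inlined.  */
#endif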
37035 \f
37036 /* Allocate a stack temp and fix up the address so it meets the particular
37037 memory requirements (either offsettable or REG+REG addressing). */
37038
37039 rtx
37040 rs6000_allocate_stack_temp (machine_mode mode,
37041 bool offsettable_p,
37042 bool reg_reg_p)
37043 {
37044 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37045 rtx addr = XEXP (stack, 0);
37046 int strict_p = reload_completed;
37047
37048 if (!legitimate_indirect_address_p (addr, strict_p))
37049 {
37050 if (offsettable_p
37051 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37052 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37053
37054 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37055 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37056 }
37057
37058 return stack;
37059 }
37060
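/* Typical use (a sketch, not taken from this file): obtain a scratch stack
   slot whose address is already valid for D-form (offsettable) addressing,
   e.g. when a splitter must bounce a value through memory:

     rtx tmp = rs6000_allocate_stack_temp (DImode, true, false);
     emit_move_insn (tmp, gen_rtx_REG (DImode, 3));  */
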
37061 /* Given a memory reference, if it does not use reg or reg+reg addressing,
37062 convert it to such a form, to deal with memory reference instructions like
37063 STFIWX that only take reg+reg addressing. */
37064
37065 rtx
37066 rs6000_address_for_fpconvert (rtx x)
37067 {
37068 rtx addr;
37069
37070 gcc_assert (MEM_P (x));
37071 addr = XEXP (x, 0);
37072 if (can_create_pseudo_p ()
37073 && ! legitimate_indirect_address_p (addr, reload_completed)
37074 && ! legitimate_indexed_address_p (addr, reload_completed))
37075 {
37076 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37077 {
37078 rtx reg = XEXP (addr, 0);
37079 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37080 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37081 gcc_assert (REG_P (reg));
37082 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37083 addr = reg;
37084 }
37085 else if (GET_CODE (addr) == PRE_MODIFY)
37086 {
37087 rtx reg = XEXP (addr, 0);
37088 rtx expr = XEXP (addr, 1);
37089 gcc_assert (REG_P (reg));
37090 gcc_assert (GET_CODE (expr) == PLUS);
37091 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37092 addr = reg;
37093 }
37094
37095 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37096 }
37097
37098 return x;
37099 }
37100
37101 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37102
37103 On the RS/6000, all integer constants are acceptable, though most won't
37104 be valid for particular insns. Only easy FP constants are acceptable. */
37105
37106 static bool
37107 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37108 {
37109 if (TARGET_ELF && tls_referenced_p (x))
37110 return false;
37111
37112 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37113 || GET_MODE (x) == VOIDmode
37114 || (TARGET_POWERPC64 && mode == DImode)
37115 || easy_fp_constant (x, mode)
37116 || easy_vector_constant (x, mode));
37117 }
37118
37119 \f
37120 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37121
37122 static bool
37123 chain_already_loaded (rtx_insn *last)
37124 {
37125 for (; last != NULL; last = PREV_INSN (last))
37126 {
37127 if (NONJUMP_INSN_P (last))
37128 {
37129 rtx patt = PATTERN (last);
37130
37131 if (GET_CODE (patt) == SET)
37132 {
37133 rtx lhs = XEXP (patt, 0);
37134
37135 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37136 return true;
37137 }
37138 }
37139 }
37140 return false;
37141 }
37142
37143 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37144
37145 void
37146 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37147 {
37148 const bool direct_call_p
37149 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37150 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37151 rtx toc_load = NULL_RTX;
37152 rtx toc_restore = NULL_RTX;
37153 rtx func_addr;
37154 rtx abi_reg = NULL_RTX;
37155 rtx call[4];
37156 int n_call;
37157 rtx insn;
37158
37159 /* Handle longcall attributes. */
37160 if (INTVAL (cookie) & CALL_LONG)
37161 func_desc = rs6000_longcall_ref (func_desc);
37162
37163 /* Handle indirect calls. */
37164 if (GET_CODE (func_desc) != SYMBOL_REF
37165 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
37166 {
37167 /* Save the TOC into its reserved slot before the call,
37168 and prepare to restore it after the call. */
37169 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37170 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37171 rtx stack_toc_mem = gen_frame_mem (Pmode,
37172 gen_rtx_PLUS (Pmode, stack_ptr,
37173 stack_toc_offset));
37174 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37175 gen_rtvec (1, stack_toc_offset),
37176 UNSPEC_TOCSLOT);
37177 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37178
37179 /* Can we optimize saving the TOC in the prologue or
37180 do we need to do it at every call? */
37181 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37182 cfun->machine->save_toc_in_prologue = true;
37183 else
37184 {
37185 MEM_VOLATILE_P (stack_toc_mem) = 1;
37186 emit_move_insn (stack_toc_mem, toc_reg);
37187 }
37188
37189 if (DEFAULT_ABI == ABI_ELFv2)
37190 {
37191 /* A function pointer in the ELFv2 ABI is just a plain address, but
37192 the ABI requires it to be loaded into r12 before the call. */
37193 func_addr = gen_rtx_REG (Pmode, 12);
37194 emit_move_insn (func_addr, func_desc);
37195 abi_reg = func_addr;
37196 }
37197 else
37198 {
37199 /* A function pointer under AIX is a pointer to a data area whose
37200 first word contains the actual address of the function, whose
37201 second word contains a pointer to its TOC, and whose third word
37202 contains a value to place in the static chain register (r11).
37203 Note that if we load the static chain, our "trampoline" need
37204 not have any executable code. */
37205
37206 /* Load up address of the actual function. */
37207 func_desc = force_reg (Pmode, func_desc);
37208 func_addr = gen_reg_rtx (Pmode);
37209 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
37210
37211 /* Prepare to load the TOC of the called function. Note that the
37212 TOC load must happen immediately before the actual call so
37213 that unwinding the TOC registers works correctly. See the
37214 comment in frob_update_context. */
37215 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37216 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37217 gen_rtx_PLUS (Pmode, func_desc,
37218 func_toc_offset));
37219 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37220
37221 /* If we have a static chain, load it up. But, if the call was
37222 originally direct, the 3rd word has not been written since no
37223 trampoline has been built, so we ought not to load it, lest we
37224 overwrite a static chain value. */
37225 if (!direct_call_p
37226 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37227 && !chain_already_loaded (get_current_sequence ()->next->last))
37228 {
37229 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37230 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37231 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37232 gen_rtx_PLUS (Pmode, func_desc,
37233 func_sc_offset));
37234 emit_move_insn (sc_reg, func_sc_mem);
37235 abi_reg = sc_reg;
37236 }
37237 }
37238 }
37239 else
37240 {
37241 /* Direct calls use the TOC: for local calls, the callee will
37242 assume the TOC register is set; for non-local calls, the
37243 PLT stub needs the TOC register. */
37244 abi_reg = toc_reg;
37245 func_addr = func_desc;
37246 }
37247
37248 /* Create the call. */
37249 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
37250 if (value != NULL_RTX)
37251 call[0] = gen_rtx_SET (value, call[0]);
37252 n_call = 1;
37253
37254 if (toc_load)
37255 call[n_call++] = toc_load;
37256 if (toc_restore)
37257 call[n_call++] = toc_restore;
37258
37259 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37260
37261 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37262 insn = emit_call_insn (insn);
37263
37264 /* Mention all registers defined by the ABI to hold information
37265 as uses in CALL_INSN_FUNCTION_USAGE. */
37266 if (abi_reg)
37267 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37268 }
37269
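/* For the indirect-call case above, the emitted insn is a PARALLEL of
   roughly this shape (a sketch; modes and the TOC save offset vary by
   ABI and word size):

     (parallel [(set (reg 3) (call (mem:SI (reg func_addr)) flag))
                (use (mem (plus (reg func_desc) 8)))          ;; TOC load
                (set (reg 2) (unspec [(off)] UNSPEC_TOCSLOT)) ;; TOC restore
                (clobber (reg LR))])  */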
37270 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37271
37272 void
37273 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37274 {
37275 rtx call[2];
37276 rtx insn;
37277
37278 gcc_assert (INTVAL (cookie) == 0);
37279
37280 /* Create the call. */
37281 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
37282 if (value != NULL_RTX)
37283 call[0] = gen_rtx_SET (value, call[0]);
37284
37285 call[1] = simple_return_rtx;
37286
37287 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37288 insn = emit_call_insn (insn);
37289
37290 /* Note use of the TOC register. */
37291 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37292 }
37293
37294 /* Return whether we need to always update the saved TOC pointer when we update
37295 the stack pointer. */
37296
37297 static bool
37298 rs6000_save_toc_in_prologue_p (void)
37299 {
37300 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37301 }
37302
37303 #ifdef HAVE_GAS_HIDDEN
37304 # define USE_HIDDEN_LINKONCE 1
37305 #else
37306 # define USE_HIDDEN_LINKONCE 0
37307 #endif
37308
37309 /* Fills in the label name that should be used for a 476 link stack thunk. */
37310
37311 void
37312 get_ppc476_thunk_name (char name[32])
37313 {
37314 gcc_assert (TARGET_LINK_STACK);
37315
37316 if (USE_HIDDEN_LINKONCE)
37317 sprintf (name, "__ppc476.get_thunk");
37318 else
37319 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37320 }
37321
37322 /* This function emits the simple thunk routine that is used to preserve
37323 the link stack on the 476 cpu. */
37324
37325 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37326 static void
37327 rs6000_code_end (void)
37328 {
37329 char name[32];
37330 tree decl;
37331
37332 if (!TARGET_LINK_STACK)
37333 return;
37334
37335 get_ppc476_thunk_name (name);
37336
37337 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37338 build_function_type_list (void_type_node, NULL_TREE));
37339 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37340 NULL_TREE, void_type_node);
37341 TREE_PUBLIC (decl) = 1;
37342 TREE_STATIC (decl) = 1;
37343
37344 #if RS6000_WEAK
37345 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
37346 {
37347 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37348 targetm.asm_out.unique_section (decl, 0);
37349 switch_to_section (get_named_section (decl, NULL, 0));
37350 DECL_WEAK (decl) = 1;
37351 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37352 targetm.asm_out.globalize_label (asm_out_file, name);
37353 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37354 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37355 }
37356 else
37357 #endif
37358 {
37359 switch_to_section (text_section);
37360 ASM_OUTPUT_LABEL (asm_out_file, name);
37361 }
37362
37363 DECL_INITIAL (decl) = make_node (BLOCK);
37364 current_function_decl = decl;
37365 allocate_struct_function (decl, false);
37366 init_function_start (decl);
37367 first_function_block_is_cold = false;
37368 /* Make sure unwind info is emitted for the thunk if needed. */
37369 final_start_function (emit_barrier (), asm_out_file, 1);
37370
37371 fputs ("\tblr\n", asm_out_file);
37372
37373 final_end_function ();
37374 init_insn_lengths ();
37375 free_after_compilation (cfun);
37376 set_cfun (NULL);
37377 current_function_decl = NULL;
37378 }
37379
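/* The emitted thunk is tiny; with hidden linkonce support on ELF the
   output is roughly (a sketch):

     .weak   __ppc476.get_thunk
     .hidden __ppc476.get_thunk
   __ppc476.get_thunk:
           blr  */
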
37380 /* Add r30 to hard reg set if the prologue sets it up and it is not
37381 pic_offset_table_rtx. */
37382
37383 static void
37384 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37385 {
37386 if (!TARGET_SINGLE_PIC_BASE
37387 && TARGET_TOC
37388 && TARGET_MINIMAL_TOC
37389 && !constant_pool_empty_p ())
37390 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37391 if (cfun->machine->split_stack_argp_used)
37392 add_to_hard_reg_set (&set->set, Pmode, 12);
37393
37394 /* Make sure the hard reg set doesn't include r2, which was possibly added
37395 via PIC_OFFSET_TABLE_REGNUM. */
37396 if (TARGET_TOC)
37397 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
37398 }
37399
37400 \f
37401 /* Helper function for rs6000_split_logical to emit a logical instruction after
37402 splitting the operation into single GPR registers.
37403
37404 DEST is the destination register.
37405 OP1 and OP2 are the input source registers.
37406 CODE is the base operation (AND, IOR, XOR, NOT).
37407 MODE is the machine mode.
37408 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37409 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37410 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37411
37412 static void
37413 rs6000_split_logical_inner (rtx dest,
37414 rtx op1,
37415 rtx op2,
37416 enum rtx_code code,
37417 machine_mode mode,
37418 bool complement_final_p,
37419 bool complement_op1_p,
37420 bool complement_op2_p)
37421 {
37422 rtx bool_rtx;
37423
37424 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37425 if (op2 && GET_CODE (op2) == CONST_INT
37426 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37427 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37428 {
37429 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37430 HOST_WIDE_INT value = INTVAL (op2) & mask;
37431
37432 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37433 if (code == AND)
37434 {
37435 if (value == 0)
37436 {
37437 emit_insn (gen_rtx_SET (dest, const0_rtx));
37438 return;
37439 }
37440
37441 else if (value == mask)
37442 {
37443 if (!rtx_equal_p (dest, op1))
37444 emit_insn (gen_rtx_SET (dest, op1));
37445 return;
37446 }
37447 }
37448
37449 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37450 into separate ORI/ORIS or XORI/XORIS instructions. */
37451 else if (code == IOR || code == XOR)
37452 {
37453 if (value == 0)
37454 {
37455 if (!rtx_equal_p (dest, op1))
37456 emit_insn (gen_rtx_SET (dest, op1));
37457 return;
37458 }
37459 }
37460 }
37461
37462 if (code == AND && mode == SImode
37463 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37464 {
37465 emit_insn (gen_andsi3 (dest, op1, op2));
37466 return;
37467 }
37468
37469 if (complement_op1_p)
37470 op1 = gen_rtx_NOT (mode, op1);
37471
37472 if (complement_op2_p)
37473 op2 = gen_rtx_NOT (mode, op2);
37474
37475 /* For canonical RTL, if only one arm is inverted it is the first. */
37476 if (!complement_op1_p && complement_op2_p)
37477 std::swap (op1, op2);
37478
37479 bool_rtx = ((code == NOT)
37480 ? gen_rtx_NOT (mode, op1)
37481 : gen_rtx_fmt_ee (code, mode, op1, op2));
37482
37483 if (complement_final_p)
37484 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37485
37486 emit_insn (gen_rtx_SET (dest, bool_rtx));
37487 }
37488
37489 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37490 operations are split immediately during RTL generation to allow for more
37491 optimizations of the AND/IOR/XOR.
37492
37493 OPERANDS is an array containing the destination and two input operands.
37494 CODE is the base operation (AND, IOR, XOR, NOT).
37495 MODE is the machine mode.
37496 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37497 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37498 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
37499 CLOBBER_REG is either NULL or a scratch register of type CC to allow
37500 formation of the AND instructions. */
37501
37502 static void
37503 rs6000_split_logical_di (rtx operands[3],
37504 enum rtx_code code,
37505 bool complement_final_p,
37506 bool complement_op1_p,
37507 bool complement_op2_p)
37508 {
37509 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37510 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37511 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37512 enum hi_lo { hi = 0, lo = 1 };
37513 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37514 size_t i;
37515
37516 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37517 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37518 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37519 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37520
37521 if (code == NOT)
37522 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37523 else
37524 {
37525 if (GET_CODE (operands[2]) != CONST_INT)
37526 {
37527 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37528 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37529 }
37530 else
37531 {
37532 HOST_WIDE_INT value = INTVAL (operands[2]);
37533 HOST_WIDE_INT value_hi_lo[2];
37534
37535 gcc_assert (!complement_final_p);
37536 gcc_assert (!complement_op1_p);
37537 gcc_assert (!complement_op2_p);
37538
37539 value_hi_lo[hi] = value >> 32;
37540 value_hi_lo[lo] = value & lower_32bits;
37541
37542 for (i = 0; i < 2; i++)
37543 {
37544 HOST_WIDE_INT sub_value = value_hi_lo[i];
37545
37546 if (sub_value & sign_bit)
37547 sub_value |= upper_32bits;
37548
37549 op2_hi_lo[i] = GEN_INT (sub_value);
37550
37551 /* If this is an AND instruction, check to see if we need to load
37552 the value in a register. */
37553 if (code == AND && sub_value != -1 && sub_value != 0
37554 && !and_operand (op2_hi_lo[i], SImode))
37555 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37556 }
37557 }
37558 }
37559
37560 for (i = 0; i < 2; i++)
37561 {
37562 /* Split large IOR/XOR operations. */
37563 if ((code == IOR || code == XOR)
37564 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37565 && !complement_final_p
37566 && !complement_op1_p
37567 && !complement_op2_p
37568 && !logical_const_operand (op2_hi_lo[i], SImode))
37569 {
37570 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37571 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37572 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37573 rtx tmp = gen_reg_rtx (SImode);
37574
37575 /* Make sure the constant is sign extended. */
37576 if ((hi_16bits & sign_bit) != 0)
37577 hi_16bits |= upper_32bits;
37578
37579 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37580 code, SImode, false, false, false);
37581
37582 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37583 code, SImode, false, false, false);
37584 }
37585 else
37586 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
37587 code, SImode, complement_final_p,
37588 complement_op1_p, complement_op2_p);
37589 }
37590
37591 return;
37592 }
37593
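/* Worked example (a sketch): on a 32-bit target,

     (xor:DI d, s, 0x123456789abcdef0)

   splits in the loop above into two xoris/xori pairs, since neither
   SImode half is a 16-bit logical constant:

     t_hi = s_hi ^ 0x12340000;  d_hi = t_hi ^ 0x5678;   ;; xoris; xori
     t_lo = s_lo ^ 0x9abc0000;  d_lo = t_lo ^ 0xdef0;   ;; xoris; xori  */
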
37594 /* Split the insns that make up boolean operations operating on multiple GPR
37595 registers. The boolean MD patterns ensure that the inputs either are
37596 exactly the same as the output registers, or there is no overlap.
37597
37598 OPERANDS is an array containing the destination and two input operands.
37599 CODE is the base operation (AND, IOR, XOR, NOT).
37600 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37601 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37602 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37603
37604 void
37605 rs6000_split_logical (rtx operands[3],
37606 enum rtx_code code,
37607 bool complement_final_p,
37608 bool complement_op1_p,
37609 bool complement_op2_p)
37610 {
37611 machine_mode mode = GET_MODE (operands[0]);
37612 machine_mode sub_mode;
37613 rtx op0, op1, op2;
37614 int sub_size, regno0, regno1, nregs, i;
37615
37616 /* If this is DImode, use the specialized version that can run before
37617 register allocation. */
37618 if (mode == DImode && !TARGET_POWERPC64)
37619 {
37620 rs6000_split_logical_di (operands, code, complement_final_p,
37621 complement_op1_p, complement_op2_p);
37622 return;
37623 }
37624
37625 op0 = operands[0];
37626 op1 = operands[1];
37627 op2 = (code == NOT) ? NULL_RTX : operands[2];
37628 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
37629 sub_size = GET_MODE_SIZE (sub_mode);
37630 regno0 = REGNO (op0);
37631 regno1 = REGNO (op1);
37632
37633 gcc_assert (reload_completed);
37634 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37635 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37636
37637 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
37638 gcc_assert (nregs > 1);
37639
37640 if (op2 && REG_P (op2))
37641 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
37642
37643 for (i = 0; i < nregs; i++)
37644 {
37645 int offset = i * sub_size;
37646 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
37647 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
37648 rtx sub_op2 = ((code == NOT)
37649 ? NULL_RTX
37650 : simplify_subreg (sub_mode, op2, mode, offset));
37651
37652 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
37653 complement_final_p, complement_op1_p,
37654 complement_op2_p);
37655 }
37656
37657 return;
37658 }
37659
37660 \f
37661 /* Return true if the peephole2 can combine an addis instruction and a
37662 dependent load with an offset into a pair that can be fused together on
37663 a power8. */
37664
37665 bool
37666 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
37667 rtx addis_value, /* addis value. */
37668 rtx target, /* target register that is loaded. */
37669 rtx mem) /* bottom part of the memory addr. */
37670 {
37671 rtx addr;
37672 rtx base_reg;
37673
37674 /* Validate arguments. */
37675 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
37676 return false;
37677
37678 if (!base_reg_operand (target, GET_MODE (target)))
37679 return false;
37680
37681 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
37682 return false;
37683
37684 /* Allow sign/zero extension. */
37685 if (GET_CODE (mem) == ZERO_EXTEND
37686 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
37687 mem = XEXP (mem, 0);
37688
37689 if (!MEM_P (mem))
37690 return false;
37691
37692 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
37693 return false;
37694
37695 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
37696 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
37697 return false;
37698
37699 /* Validate that the register used to load the high value is either the
37700 register being loaded, or we can safely replace its use.
37701
37702 This function is only called from the peephole2 pass and we assume that
37703 there are 2 instructions in the peephole (addis and load), so we want to
37704 check if the target register was not used in the memory address and the
37705 register to hold the addis result is dead after the peephole. */
37706 if (REGNO (addis_reg) != REGNO (target))
37707 {
37708 if (reg_mentioned_p (target, mem))
37709 return false;
37710
37711 if (!peep2_reg_dead_p (2, addis_reg))
37712 return false;
37713
37714 /* If the target register being loaded is the stack pointer, we must
37715 avoid loading any other value into it, even temporarily. */
37716 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
37717 return false;
37718 }
37719
37720 base_reg = XEXP (addr, 0);
37721 return REGNO (addis_reg) == REGNO (base_reg);
37722 }
37723
37724 /* During the peephole2 pass, adjust and expand the insns for a load fusion
37725 sequence. We adjust the addis register to use the target register. If the
37726 load sign-extends, we instead emit a zero-extending load followed by an
37727 explicit sign extension, since the fusion only covers zero-extending
37728 loads.
37729
37730 The operands are:
37731 operands[0] register set with addis (to be replaced with target)
37732 operands[1] value set via addis
37733 operands[2] target register being loaded
37734 operands[3] D-form memory reference using operands[0]. */
37735
37736 void
37737 expand_fusion_gpr_load (rtx *operands)
37738 {
37739 rtx addis_value = operands[1];
37740 rtx target = operands[2];
37741 rtx orig_mem = operands[3];
37742 rtx new_addr, new_mem, orig_addr, offset;
37743 enum rtx_code plus_or_lo_sum;
37744 machine_mode target_mode = GET_MODE (target);
37745 machine_mode extend_mode = target_mode;
37746 machine_mode ptr_mode = Pmode;
37747 enum rtx_code extend = UNKNOWN;
37748
37749 if (GET_CODE (orig_mem) == ZERO_EXTEND
37750 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
37751 {
37752 extend = GET_CODE (orig_mem);
37753 orig_mem = XEXP (orig_mem, 0);
37754 target_mode = GET_MODE (orig_mem);
37755 }
37756
37757 gcc_assert (MEM_P (orig_mem));
37758
37759 orig_addr = XEXP (orig_mem, 0);
37760 plus_or_lo_sum = GET_CODE (orig_addr);
37761 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37762
37763 offset = XEXP (orig_addr, 1);
37764 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37765 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37766
37767 if (extend != UNKNOWN)
37768 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
37769
37770 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
37771 UNSPEC_FUSION_GPR);
37772 emit_insn (gen_rtx_SET (target, new_mem));
37773
37774 if (extend == SIGN_EXTEND)
37775 {
37776 int sub_off = ((BYTES_BIG_ENDIAN)
37777 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
37778 : 0);
37779 rtx sign_reg
37780 = simplify_subreg (target_mode, target, extend_mode, sub_off);
37781
37782 emit_insn (gen_rtx_SET (target,
37783 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
37784 }
37785
37786 return;
37787 }
37788
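/* Resulting RTL (a sketch): the rewritten load is wrapped in
   UNSPEC_FUSION_GPR so the addis/load pair stays together until output:

     (set (reg:DI target)
          (unspec:DI [(mem:DI (lo_sum (addis_value) (offset)))]
                     UNSPEC_FUSION_GPR))  */
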
37789 /* Emit the addis instruction that will be part of a fused instruction
37790 sequence. */
37791
37792 void
37793 emit_fusion_addis (rtx target, rtx addis_value)
37794 {
37795 rtx fuse_ops[10];
37796 const char *addis_str = NULL;
37797
37798 /* Emit the addis instruction. */
37799 fuse_ops[0] = target;
37800 if (satisfies_constraint_L (addis_value))
37801 {
37802 fuse_ops[1] = addis_value;
37803 addis_str = "lis %0,%v1";
37804 }
37805
37806 else if (GET_CODE (addis_value) == PLUS)
37807 {
37808 rtx op0 = XEXP (addis_value, 0);
37809 rtx op1 = XEXP (addis_value, 1);
37810
37811 if (REG_P (op0) && CONST_INT_P (op1)
37812 && satisfies_constraint_L (op1))
37813 {
37814 fuse_ops[1] = op0;
37815 fuse_ops[2] = op1;
37816 addis_str = "addis %0,%1,%v2";
37817 }
37818 }
37819
37820 else if (GET_CODE (addis_value) == HIGH)
37821 {
37822 rtx value = XEXP (addis_value, 0);
37823 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
37824 {
37825 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
37826 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
37827 if (TARGET_ELF)
37828 addis_str = "addis %0,%2,%1@toc@ha";
37829
37830 else if (TARGET_XCOFF)
37831 addis_str = "addis %0,%1@u(%2)";
37832
37833 else
37834 gcc_unreachable ();
37835 }
37836
37837 else if (GET_CODE (value) == PLUS)
37838 {
37839 rtx op0 = XEXP (value, 0);
37840 rtx op1 = XEXP (value, 1);
37841
37842 if (GET_CODE (op0) == UNSPEC
37843 && XINT (op0, 1) == UNSPEC_TOCREL
37844 && CONST_INT_P (op1))
37845 {
37846 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
37847 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
37848 fuse_ops[3] = op1;
37849 if (TARGET_ELF)
37850 addis_str = "addis %0,%2,%1+%3@toc@ha";
37851
37852 else if (TARGET_XCOFF)
37853 addis_str = "addis %0,%1+%3@u(%2)";
37854
37855 else
37856 gcc_unreachable ();
37857 }
37858 }
37859
37860 else if (satisfies_constraint_L (value))
37861 {
37862 fuse_ops[1] = value;
37863 addis_str = "lis %0,%v1";
37864 }
37865
37866 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
37867 {
37868 fuse_ops[1] = value;
37869 addis_str = "lis %0,%1@ha";
37870 }
37871 }
37872
37873 if (!addis_str)
37874 fatal_insn ("Could not generate addis value for fusion", addis_value);
37875
37876 output_asm_insn (addis_str, fuse_ops);
37877 }
37878
37879 /* Emit a D-form load or store instruction that is the second instruction
37880 of a fusion sequence. */
37881
37882 void
37883 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
37884 const char *insn_str)
37885 {
37886 rtx fuse_ops[10];
37887 char insn_template[80];
37888
37889 fuse_ops[0] = load_store_reg;
37890 fuse_ops[1] = addis_reg;
37891
37892 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
37893 {
37894 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
37895 fuse_ops[2] = offset;
37896 output_asm_insn (insn_template, fuse_ops);
37897 }
37898
37899 else if (GET_CODE (offset) == UNSPEC
37900 && XINT (offset, 1) == UNSPEC_TOCREL)
37901 {
37902 if (TARGET_ELF)
37903 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
37904
37905 else if (TARGET_XCOFF)
37906 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37907
37908 else
37909 gcc_unreachable ();
37910
37911 fuse_ops[2] = XVECEXP (offset, 0, 0);
37912 output_asm_insn (insn_template, fuse_ops);
37913 }
37914
37915 else if (GET_CODE (offset) == PLUS
37916 && GET_CODE (XEXP (offset, 0)) == UNSPEC
37917 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
37918 && CONST_INT_P (XEXP (offset, 1)))
37919 {
37920 rtx tocrel_unspec = XEXP (offset, 0);
37921 if (TARGET_ELF)
37922 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
37923
37924 else if (TARGET_XCOFF)
37925 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
37926
37927 else
37928 gcc_unreachable ();
37929
37930 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
37931 fuse_ops[3] = XEXP (offset, 1);
37932 output_asm_insn (insn_template, fuse_ops);
37933 }
37934
37935 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
37936 {
37937 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37938
37939 fuse_ops[2] = offset;
37940 output_asm_insn (insn_template, fuse_ops);
37941 }
37942
37943 else
37944 fatal_insn ("Unable to generate load/store offset for fusion", offset);
37945
37946 return;
37947 }
37948
37949 /* Given an address, convert it into the addis and load offset parts. Addresses
37950 created during the peephole2 process look like:
37951 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
37952 (unspec [(...)] UNSPEC_TOCREL)) */
37953
37954 static void
37955 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
37956 {
37957 rtx hi, lo;
37958
37959 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
37960 {
37961 hi = XEXP (addr, 0);
37962 lo = XEXP (addr, 1);
37963 }
37964 else
37965 gcc_unreachable ();
37966
37967 *p_hi = hi;
37968 *p_lo = lo;
37969 }
37970
37971 /* Return a string to fuse an addis instruction with a GPR load into the same
37972 register that the addis instruction set up. The address that is used
37973 is the logical address that was formed during peephole2:
37974 (lo_sum (high) (low-part))
37975
37976 The code is complicated, so we call output_asm_insn directly, and just
37977 return "". */
37978
37979 const char *
37980 emit_fusion_gpr_load (rtx target, rtx mem)
37981 {
37982 rtx addis_value;
37983 rtx addr;
37984 rtx load_offset;
37985 const char *load_str = NULL;
37986 machine_mode mode;
37987
37988 if (GET_CODE (mem) == ZERO_EXTEND)
37989 mem = XEXP (mem, 0);
37990
37991 gcc_assert (REG_P (target) && MEM_P (mem));
37992
37993 addr = XEXP (mem, 0);
37994 fusion_split_address (addr, &addis_value, &load_offset);
37995
37996 /* Now emit the load instruction to the same register. */
37997 mode = GET_MODE (mem);
37998 switch (mode)
37999 {
38000 case E_QImode:
38001 load_str = "lbz";
38002 break;
38003
38004 case E_HImode:
38005 load_str = "lhz";
38006 break;
38007
38008 case E_SImode:
38009 case E_SFmode:
38010 load_str = "lwz";
38011 break;
38012
38013 case E_DImode:
38014 case E_DFmode:
38015 gcc_assert (TARGET_POWERPC64);
38016 load_str = "ld";
38017 break;
38018
38019 default:
38020 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38021 }
38022
38023 /* Emit the addis instruction. */
38024 emit_fusion_addis (target, addis_value);
38025
38026 /* Emit the D-form load instruction. */
38027 emit_fusion_load_store (target, target, load_offset, load_str);
38028
38029 return "";
38030 }
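
/* Note that the function returns "" because the instruction text has
   already been written with output_asm_insn; an .md output template is
   therefore expected to use it in the style of
   "* return emit_fusion_gpr_load (operands[0], operands[1]);"
   (a sketch of the usage; the actual pattern lives in rs6000.md).  */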
38031 \f
38032
38033 /* Return true if the peephole2 can combine a load or store with the addis
38034    instruction that sets up its base address.  This form of fusion was added
38035    with the ISA 3.0 (power9) hardware.  */
38036
38037 bool
38038 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38039 rtx addis_value, /* addis value. */
38040 rtx dest, /* destination (memory or register). */
38041 rtx src) /* source (register or memory). */
38042 {
38043 rtx addr, mem, offset;
38044 machine_mode mode = GET_MODE (src);
38045
38046 /* Validate arguments. */
38047 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38048 return false;
38049
38050 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38051 return false;
38052
38053 /* Ignore extend operations that are part of the load. */
38054 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38055 src = XEXP (src, 0);
38056
38057 /* Test for memory<-register or register<-memory. */
38058 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38059 {
38060 if (!MEM_P (dest))
38061 return false;
38062
38063 mem = dest;
38064 }
38065
38066 else if (MEM_P (src))
38067 {
38068 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38069 return false;
38070
38071 mem = src;
38072 }
38073
38074 else
38075 return false;
38076
38077 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38078 if (GET_CODE (addr) == PLUS)
38079 {
38080 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38081 return false;
38082
38083 return satisfies_constraint_I (XEXP (addr, 1));
38084 }
38085
38086 else if (GET_CODE (addr) == LO_SUM)
38087 {
38088 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38089 return false;
38090
38091 offset = XEXP (addr, 1);
38092 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38093 return small_toc_ref (offset, GET_MODE (offset));
38094
38095 else if (TARGET_ELF && !TARGET_POWERPC64)
38096 return CONSTANT_P (offset);
38097 }
38098
38099 return false;
38100 }
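
/* For example, the checks above accept a load such as
   (set (reg) (mem (plus (reg addis_reg) (const_int 8)))), since the
   constant offset satisfies the 16-bit "I" constraint, and reject any
   address whose base register is not ADDIS_REG.  */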
38101
38102 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38103 load sequence.
38104
38105 The operands are:
38106 operands[0] register set with addis
38107 operands[1] value set via addis
38108 operands[2] target register being loaded
38109 operands[3] D-form memory reference using operands[0].
38110
38111    This is similar to the fusion introduced with power8, except it applies to
38112    both loads and stores and does not require the result register to be the
38113    same as the base register.  At the moment, we only do this if the register
38114    set with addis is dead.  */
38115
38116 void
38117 expand_fusion_p9_load (rtx *operands)
38118 {
38119 rtx tmp_reg = operands[0];
38120 rtx addis_value = operands[1];
38121 rtx target = operands[2];
38122 rtx orig_mem = operands[3];
38123 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38124 enum rtx_code plus_or_lo_sum;
38125 machine_mode target_mode = GET_MODE (target);
38126 machine_mode extend_mode = target_mode;
38127 machine_mode ptr_mode = Pmode;
38128 enum rtx_code extend = UNKNOWN;
38129
38130 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38131 {
38132 extend = GET_CODE (orig_mem);
38133 orig_mem = XEXP (orig_mem, 0);
38134 target_mode = GET_MODE (orig_mem);
38135 }
38136
38137 gcc_assert (MEM_P (orig_mem));
38138
38139 orig_addr = XEXP (orig_mem, 0);
38140 plus_or_lo_sum = GET_CODE (orig_addr);
38141 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38142
38143 offset = XEXP (orig_addr, 1);
38144 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38145 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38146
38147 if (extend != UNKNOWN)
38148 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
38149
38150 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38151 UNSPEC_FUSION_P9);
38152
38153 set = gen_rtx_SET (target, new_mem);
38154 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38155 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38156 emit_insn (insn);
38157
38158 return;
38159 }
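
/* The insn emitted above is a PARALLEL of roughly this shape (a sketch;
   the memory reference may additionally be wrapped in a float or zero
   extend):

     (parallel [(set (reg target)
		     (unspec [(mem (lo_sum (reg addis_value) (offset)))]
			     UNSPEC_FUSION_P9))
		(clobber (reg tmp))])

   The UNSPEC wrapper is what lets the dedicated fusion patterns, and only
   those patterns, match the combined form.  */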
38160
38161 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38162 store sequence.
38163
38164 The operands are:
38165 operands[0] register set with addis
38166 operands[1] value set via addis
38167 operands[2] target D-form memory being stored to
38168 operands[3] register being stored
38169
38170    This is similar to the fusion introduced with power8, except it applies to
38171    both loads and stores and does not require the result register to be the
38172    same as the base register.  At the moment, we only do this if the register
38173    set with addis is dead.  */
38174
38175 void
38176 expand_fusion_p9_store (rtx *operands)
38177 {
38178 rtx tmp_reg = operands[0];
38179 rtx addis_value = operands[1];
38180 rtx orig_mem = operands[2];
38181 rtx src = operands[3];
38182 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
38183 enum rtx_code plus_or_lo_sum;
38184 machine_mode target_mode = GET_MODE (orig_mem);
38185 machine_mode ptr_mode = Pmode;
38186
38187 gcc_assert (MEM_P (orig_mem));
38188
38189 orig_addr = XEXP (orig_mem, 0);
38190 plus_or_lo_sum = GET_CODE (orig_addr);
38191 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38192
38193 offset = XEXP (orig_addr, 1);
38194 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38195 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38196
38197 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
38198 UNSPEC_FUSION_P9);
38199
38200 set = gen_rtx_SET (new_mem, new_src);
38201 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38202 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38203 emit_insn (insn);
38204
38205 return;
38206 }
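
/* Analogously to the load case, the store emitted above has roughly the
   shape (sketch):

     (parallel [(set (mem (lo_sum (reg addis_value) (offset)))
		     (unspec [(reg src)] UNSPEC_FUSION_P9))
		(clobber (reg tmp))])  */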
38207
38208 /* Return a string to fuse an addis instruction with a load using extended
38209 fusion. The address that is used is the logical address that was formed
38210 during peephole2: (lo_sum (high) (low-part))
38211
38212 The code is complicated, so we call output_asm_insn directly, and just
38213 return "". */
38214
38215 const char *
38216 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
38217 {
38218 machine_mode mode = GET_MODE (reg);
38219 rtx hi;
38220 rtx lo;
38221 rtx addr;
38222 const char *load_string;
38223 int r;
38224
38225 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
38226 {
38227 mem = XEXP (mem, 0);
38228 mode = GET_MODE (mem);
38229 }
38230
38231 if (GET_CODE (reg) == SUBREG)
38232 {
38233 gcc_assert (SUBREG_BYTE (reg) == 0);
38234 reg = SUBREG_REG (reg);
38235 }
38236
38237 if (!REG_P (reg))
38238 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
38239
38240 r = REGNO (reg);
38241 if (FP_REGNO_P (r))
38242 {
38243 if (mode == SFmode)
38244 load_string = "lfs";
38245 else if (mode == DFmode || mode == DImode)
38246 load_string = "lfd";
38247 else
38248 gcc_unreachable ();
38249 }
38250 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38251 {
38252 if (mode == SFmode)
38253 load_string = "lxssp";
38254 else if (mode == DFmode || mode == DImode)
38255 load_string = "lxsd";
38256 else
38257 gcc_unreachable ();
38258 }
38259 else if (INT_REGNO_P (r))
38260 {
38261 switch (mode)
38262 {
38263 case E_QImode:
38264 load_string = "lbz";
38265 break;
38266 case E_HImode:
38267 load_string = "lhz";
38268 break;
38269 case E_SImode:
38270 case E_SFmode:
38271 load_string = "lwz";
38272 break;
38273 case E_DImode:
38274 case E_DFmode:
38275 if (!TARGET_POWERPC64)
38276 gcc_unreachable ();
38277 load_string = "ld";
38278 break;
38279 default:
38280 gcc_unreachable ();
38281 }
38282 }
38283 else
38284 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
38285
38286 if (!MEM_P (mem))
38287 fatal_insn ("emit_fusion_p9_load not MEM", mem);
38288
38289 addr = XEXP (mem, 0);
38290 fusion_split_address (addr, &hi, &lo);
38291
38292 /* Emit the addis instruction. */
38293 emit_fusion_addis (tmp_reg, hi);
38294
38295 /* Emit the D-form load instruction. */
38296 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
38297
38298 return "";
38299 }
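
/* For instance, a DFmode load into a floating-point register through a
   TOC-relative address comes out as something like (symbol and register
   numbers hypothetical):

	addis 11,2,.LC2@toc@ha
	lfd 1,.LC2@toc@l(11)

   where r11 is TMP_REG and is clobbered by the fused sequence.  */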
38300
38301 /* Return a string to fuse an addis instruction with a store using extended
38302 fusion. The address that is used is the logical address that was formed
38303 during peephole2: (lo_sum (high) (low-part))
38304
38305 The code is complicated, so we call output_asm_insn directly, and just
38306 return "". */
38307
38308 const char *
38309 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
38310 {
38311 machine_mode mode = GET_MODE (reg);
38312 rtx hi;
38313 rtx lo;
38314 rtx addr;
38315 const char *store_string;
38316 int r;
38317
38318 if (GET_CODE (reg) == SUBREG)
38319 {
38320 gcc_assert (SUBREG_BYTE (reg) == 0);
38321 reg = SUBREG_REG (reg);
38322 }
38323
38324 if (!REG_P (reg))
38325 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
38326
38327 r = REGNO (reg);
38328 if (FP_REGNO_P (r))
38329 {
38330 if (mode == SFmode)
38331 store_string = "stfs";
38332 else if (mode == DFmode)
38333 store_string = "stfd";
38334 else
38335 gcc_unreachable ();
38336 }
38337 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38338 {
38339 if (mode == SFmode)
38340 store_string = "stxssp";
38341 else if (mode == DFmode || mode == DImode)
38342 store_string = "stxsd";
38343 else
38344 gcc_unreachable ();
38345 }
38346 else if (INT_REGNO_P (r))
38347 {
38348 switch (mode)
38349 {
38350 case E_QImode:
38351 store_string = "stb";
38352 break;
38353 case E_HImode:
38354 store_string = "sth";
38355 break;
38356 case E_SImode:
38357 case E_SFmode:
38358 store_string = "stw";
38359 break;
38360 case E_DImode:
38361 case E_DFmode:
38362 if (!TARGET_POWERPC64)
38363 gcc_unreachable ();
38364 store_string = "std";
38365 break;
38366 default:
38367 gcc_unreachable ();
38368 }
38369 }
38370 else
38371 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
38372
38373 if (!MEM_P (mem))
38374 fatal_insn ("emit_fusion_p9_store not MEM", mem);
38375
38376 addr = XEXP (mem, 0);
38377 fusion_split_address (addr, &hi, &lo);
38378
38379 /* Emit the addis instruction. */
38380 emit_fusion_addis (tmp_reg, hi);
38381
38382   /* Emit the D-form store instruction.  */
38383 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
38384
38385 return "";
38386 }
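
/* The store side mirrors the load example above, e.g. (sketch, names
   hypothetical):

	addis 11,2,.LC3@toc@ha
	stfd 1,.LC3@toc@l(11)  */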
38387
38388 #ifdef RS6000_GLIBC_ATOMIC_FENV
38389 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38390 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38391 #endif
38392
38393 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38394
38395 static void
38396 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38397 {
38398 if (!TARGET_HARD_FLOAT)
38399 {
38400 #ifdef RS6000_GLIBC_ATOMIC_FENV
38401 if (atomic_hold_decl == NULL_TREE)
38402 {
38403 atomic_hold_decl
38404 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38405 get_identifier ("__atomic_feholdexcept"),
38406 build_function_type_list (void_type_node,
38407 double_ptr_type_node,
38408 NULL_TREE));
38409 TREE_PUBLIC (atomic_hold_decl) = 1;
38410 DECL_EXTERNAL (atomic_hold_decl) = 1;
38411 }
38412
38413 if (atomic_clear_decl == NULL_TREE)
38414 {
38415 atomic_clear_decl
38416 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38417 get_identifier ("__atomic_feclearexcept"),
38418 build_function_type_list (void_type_node,
38419 NULL_TREE));
38420 TREE_PUBLIC (atomic_clear_decl) = 1;
38421 DECL_EXTERNAL (atomic_clear_decl) = 1;
38422 }
38423
38424 tree const_double = build_qualified_type (double_type_node,
38425 TYPE_QUAL_CONST);
38426 tree const_double_ptr = build_pointer_type (const_double);
38427 if (atomic_update_decl == NULL_TREE)
38428 {
38429 atomic_update_decl
38430 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38431 get_identifier ("__atomic_feupdateenv"),
38432 build_function_type_list (void_type_node,
38433 const_double_ptr,
38434 NULL_TREE));
38435 TREE_PUBLIC (atomic_update_decl) = 1;
38436 DECL_EXTERNAL (atomic_update_decl) = 1;
38437 }
38438
38439 tree fenv_var = create_tmp_var_raw (double_type_node);
38440 TREE_ADDRESSABLE (fenv_var) = 1;
38441 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38442
38443 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38444 *clear = build_call_expr (atomic_clear_decl, 0);
38445 *update = build_call_expr (atomic_update_decl, 1,
38446 fold_convert (const_double_ptr, fenv_addr));
38447 #endif
38448 return;
38449 }
38450
38451 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38452 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38453 tree call_mffs = build_call_expr (mffs, 0);
38454
38455 /* Generates the equivalent of feholdexcept (&fenv_var)
38456
38457 *fenv_var = __builtin_mffs ();
38458 double fenv_hold;
38459 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
38460 __builtin_mtfsf (0xff, fenv_hold); */
38461
38462 /* Mask to clear everything except for the rounding modes and non-IEEE
38463 arithmetic flag. */
38464 const unsigned HOST_WIDE_INT hold_exception_mask =
38465 HOST_WIDE_INT_C (0xffffffff00000007);
38466
38467 tree fenv_var = create_tmp_var_raw (double_type_node);
38468
38469 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38470
38471 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38472 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38473 build_int_cst (uint64_type_node,
38474 hold_exception_mask));
38475
38476 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38477 fenv_llu_and);
38478
38479 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38480 build_int_cst (unsigned_type_node, 0xff),
38481 fenv_hold_mtfsf);
38482
38483 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38484
38485 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38486
38487 double fenv_clear = __builtin_mffs ();
38488         *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38489 __builtin_mtfsf (0xff, fenv_clear); */
38490
38491   /* Mask to clear the entire FPSCR image in the low word (exception,
38492      enable, status and rounding-mode bits), keeping only the high word.  */
38493 const unsigned HOST_WIDE_INT clear_exception_mask =
38494 HOST_WIDE_INT_C (0xffffffff00000000);
38495
38496 tree fenv_clear = create_tmp_var_raw (double_type_node);
38497
38498 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
38499
38500 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
38501 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
38502 fenv_clean_llu,
38503 build_int_cst (uint64_type_node,
38504 clear_exception_mask));
38505
38506 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38507 fenv_clear_llu_and);
38508
38509 tree clear_mtfsf = build_call_expr (mtfsf, 2,
38510 build_int_cst (unsigned_type_node, 0xff),
38511 fenv_clear_mtfsf);
38512
38513 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
38514
38515 /* Generates the equivalent of feupdateenv (&fenv_var)
38516
38517 double old_fenv = __builtin_mffs ();
38518 double fenv_update;
38519      *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
38520                                 (*(uint64_t*)fenv_var & 0x1ff80fff);
38521 __builtin_mtfsf (0xff, fenv_update); */
38522
38523 const unsigned HOST_WIDE_INT update_exception_mask =
38524 HOST_WIDE_INT_C (0xffffffff1fffff00);
38525 const unsigned HOST_WIDE_INT new_exception_mask =
38526 HOST_WIDE_INT_C (0x1ff80fff);
38527
38528 tree old_fenv = create_tmp_var_raw (double_type_node);
38529 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
38530
38531 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
38532 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
38533 build_int_cst (uint64_type_node,
38534 update_exception_mask));
38535
38536 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38537 build_int_cst (uint64_type_node,
38538 new_exception_mask));
38539
38540 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
38541 old_llu_and, new_llu_and);
38542
38543 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38544 new_llu_mask);
38545
38546 tree update_mtfsf = build_call_expr (mtfsf, 2,
38547 build_int_cst (unsigned_type_node, 0xff),
38548 fenv_update_mtfsf);
38549
38550 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
38551 }
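
/* The middle end wires the three sequences built above around a C11 atomic
   compound assignment on a floating-point object, conceptually (a sketch
   of the protocol, not generated code):

     hold;			-- save the FP environment
     retry:
       compute the new value;
       if the compare-exchange fails:  clear;  goto retry;
     update;			-- merge newly-raised exceptions back  */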
38552
38553 void
38554 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
38555 {
38556 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38557
38558 rtx_tmp0 = gen_reg_rtx (V2DFmode);
38559 rtx_tmp1 = gen_reg_rtx (V2DFmode);
38560
38561   /* The layout of the destination of the vmrgew instruction is:
38562      rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
38563      Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
38564      the vmrgew instruction will be correct.  */
38565 if (BYTES_BIG_ENDIAN)
38566 {
38567 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
38568 GEN_INT (0)));
38569 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
38570 GEN_INT (3)));
38571 }
38572 else
38573 {
38574 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
38575 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
38576 }
38577
38578 rtx_tmp2 = gen_reg_rtx (V4SFmode);
38579 rtx_tmp3 = gen_reg_rtx (V4SFmode);
38580
38581 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
38582 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
38583
38584 if (BYTES_BIG_ENDIAN)
38585 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
38586 else
38587 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
38588 }
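
/* Net effect (assuming the usual vec_float2 element order): on either
   endianness, dst = { (float) src1[0], (float) src1[1],
		       (float) src2[0], (float) src2[1] }.  */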
38589
38590 void
38591 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
38592 {
38593 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38594
38595 rtx_tmp0 = gen_reg_rtx (V2DImode);
38596 rtx_tmp1 = gen_reg_rtx (V2DImode);
38597
38598   /* The layout of the destination of the vmrgew instruction is:
38599      rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
38600      Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
38601      the vmrgew instruction will be correct.  */
38602 if (BYTES_BIG_ENDIAN)
38603 {
38604 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
38605 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
38606 }
38607 else
38608 {
38609 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
38610 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
38611 }
38612
38613 rtx_tmp2 = gen_reg_rtx (V4SFmode);
38614 rtx_tmp3 = gen_reg_rtx (V4SFmode);
38615
38616 if (signed_convert)
38617 {
38618 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
38619 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
38620 }
38621 else
38622 {
38623 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
38624 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
38625 }
38626
38627 if (BYTES_BIG_ENDIAN)
38628 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
38629 else
38630 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
38631 }
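
/* Same data flow as the V2DF variant above, but starting from 64-bit
   integers; e.g. with signed_convert set,
   dst = { (float) src1[0], (float) src1[1],
	   (float) src2[0], (float) src2[1] }
   with each element treated as a signed DImode value (assuming the
   vec_float2 element order).  */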
38632
38633 void
38634 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
38635 rtx src2)
38636 {
38637 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38638
38639 rtx_tmp0 = gen_reg_rtx (V2DFmode);
38640 rtx_tmp1 = gen_reg_rtx (V2DFmode);
38641
38642 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
38643 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
38644
38645 rtx_tmp2 = gen_reg_rtx (V4SImode);
38646 rtx_tmp3 = gen_reg_rtx (V4SImode);
38647
38648 if (signed_convert)
38649 {
38650 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
38651 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
38652 }
38653 else
38654 {
38655 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
38656 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
38657 }
38658
38659 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
38660 }
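
/* This presumably implements vec_signed2/vec_unsigned2: the four 32-bit
   integer conversions of src1's and src2's double elements, merged into a
   single V4SI result via vmrgew.  */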
38661
38662 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
38663
38664 static bool
38665 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
38666 optimization_type opt_type)
38667 {
38668 switch (op)
38669 {
38670 case rsqrt_optab:
38671 return (opt_type == OPTIMIZE_FOR_SPEED
38672 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
38673
38674 default:
38675 return true;
38676 }
38677 }
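
/* E.g. the rsqrt optab is only advertised when optimizing for speed and
   the reciprocal square-root estimate is enabled for the mode in question
   (see RS6000_RECIP_AUTO_RSQRTE_P, typically controlled by the -mrecip
   family of options).  */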
38678
38679 /* Implement TARGET_CONSTANT_ALIGNMENT. */
38680
38681 static HOST_WIDE_INT
38682 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
38683 {
38684 if (TREE_CODE (exp) == STRING_CST
38685 && (STRICT_ALIGNMENT || !optimize_size))
38686 return MAX (align, BITS_PER_WORD);
38687 return align;
38688 }
38689
38690 /* Implement TARGET_STARTING_FRAME_OFFSET. */
38691
38692 static HOST_WIDE_INT
38693 rs6000_starting_frame_offset (void)
38694 {
38695 if (FRAME_GROWS_DOWNWARD)
38696 return 0;
38697 return RS6000_STARTING_FRAME_OFFSET;
38698 }
38699 \f
38700
38701 /* Create an alias for a mangled name where we have changed the mangling (in
38702 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
38703 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
38704
38705 #if TARGET_ELF && RS6000_WEAK
38706 static void
38707 rs6000_globalize_decl_name (FILE * stream, tree decl)
38708 {
38709 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
38710
38711 targetm.asm_out.globalize_label (stream, name);
38712
38713 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
38714 {
38715 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
38716 const char *old_name;
38717
38718 ieee128_mangling_gcc_8_1 = true;
38719 lang_hooks.set_decl_assembler_name (decl);
38720 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
38721 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
38722 ieee128_mangling_gcc_8_1 = false;
38723
38724 if (strcmp (name, old_name) != 0)
38725 {
38726 fprintf (stream, "\t.weak %s\n", old_name);
38727 fprintf (stream, "\t.set %s,%s\n", old_name, name);
38728 }
38729 }
38730 }
38731 #endif
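
/* For a hypothetical C++ function f (__ieee128), the effect is to emit a
   weak alias from the old GCC 8.1 mangling to the current one:

	.weak _Z1fU10__float128
	.set _Z1fU10__float128,_Z1fu9__ieee128  */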
38732
38733 \f
38734 struct gcc_target targetm = TARGET_INITIALIZER;
38735
38736 #include "gt-rs6000.h"