/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2019 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;  /* stack info won't change from here on */
  int first_gp_reg_save;  /* first callee saved GP register used */
  int first_fp_reg_save;  /* first callee saved FP register used */
  int first_altivec_reg_save;  /* first callee saved AltiVec register used */
  int lr_save_p;  /* true if the link reg needs to be saved */
  int cr_save_p;  /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;  /* mask of vec registers to save */
  int push_p;  /* true if we need to allocate stack space */
  int calls_p;  /* true if the function makes any calls */
  int world_save_p;  /* true if we're saving *everything*:
			r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;  /* which ABI to use */
  int gp_save_offset;  /* offset to save GP regs from initial SP */
  int fp_save_offset;  /* offset to save FP regs from initial SP */
  int altivec_save_offset;  /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;  /* offset to save LR from initial SP */
  int cr_save_offset;  /* offset to save CR from initial SP */
  int vrsave_save_offset;  /* offset to save VRSAVE from initial SP */
  int varargs_save_offset;  /* offset to save the varargs registers */
  int ehrd_offset;  /* offset to EH return data */
  int ehcr_offset;  /* offset to EH CR field data */
  int reg_size;  /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;  /* variable save area size */
  int parm_size;  /* outgoing parameter size */
  int save_size;  /* save area size */
  int fixed_size;  /* fixed size of stack frame */
  int gp_size;  /* size of saved GP registers */
  int fp_size;  /* size of saved FP registers */
  int altivec_size;  /* size of saved AltiVec registers */
  int cr_size;  /* size to hold CR if not in fixed area */
  int vrsave_size;  /* size to hold VRSAVE */
  int altivec_padding_size;  /* size of altivec alignment padding */
  HOST_WIDE_INT total_size;  /* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, which the code
   calls so it can get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built-in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in the
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV = 0x001,  /* Use divide estimate.  */
  RECIP_DF_DIV = 0x002,
  RECIP_V4SF_DIV = 0x004,
  RECIP_V2DF_DIV = 0x008,

  RECIP_SF_RSQRT = 0x010,  /* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT = 0x020,
  RECIP_V4SF_RSQRT = 0x040,
  RECIP_V2DF_RSQRT = 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE = 0,
  RECIP_ALL = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
	       | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
	       | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;  /* option name */
  unsigned int mask;  /* mask bits to set */
} recip_options[] = {
  { "all", RECIP_ALL },
  { "none", RECIP_NONE },
  { "div", (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
	    | RECIP_V2DF_DIV) },
  { "divf", (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd", (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt", (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
	      | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
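
/* For example, -mrecip=divf,rsqrtd (a comma-separated list of the option
   names above, each optionally preceded by '!' to disable it) would enable
   the divide estimate for SFmode/V4SFmode and the reciprocal square root
   estimate for DFmode/V2DFmode only.  */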

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9", PPC_PLATFORM_POWER9 },
  { "power8", PPC_PLATFORM_POWER8 },
  { "power7", PPC_PLATFORM_POWER7 },
  { "power6x", PPC_PLATFORM_POWER6X },
  { "power6", PPC_PLATFORM_POWER6 },
  { "power5+", PPC_PLATFORM_POWER5_PLUS },
  { "power5", PPC_PLATFORM_POWER5 },
  { "ppc970", PPC_PLATFORM_PPC970 },
  { "power4", PPC_PLATFORM_POWER4 },
  { "ppca2", PPC_PLATFORM_PPCA2 },
  { "ppc476", PPC_PLATFORM_PPC476 },
  { "ppc464", PPC_PLATFORM_PPC464 },
  { "ppc440", PPC_PLATFORM_PPC440 },
  { "ppc405", PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
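
/* For example, __builtin_cpu_is ("power9") expands to a comparison of the
   AT_PLATFORM value that the C library stores in the TCB against
   PPC_PLATFORM_POWER9 from the table above.  */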

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac", PPC_FEATURE_HAS_4xxMAC, 0 },
  { "altivec", PPC_FEATURE_HAS_ALTIVEC, 0 },
  { "arch_2_05", PPC_FEATURE_ARCH_2_05, 0 },
  { "arch_2_06", PPC_FEATURE_ARCH_2_06, 0 },
  { "archpmu", PPC_FEATURE_PERFMON_COMPAT, 0 },
  { "booke", PPC_FEATURE_BOOKE, 0 },
  { "cellbe", PPC_FEATURE_CELL_BE, 0 },
  { "dfp", PPC_FEATURE_HAS_DFP, 0 },
  { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE, 0 },
  { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE, 0 },
  { "fpu", PPC_FEATURE_HAS_FPU, 0 },
  { "ic_snoop", PPC_FEATURE_ICACHE_SNOOP, 0 },
  { "mmu", PPC_FEATURE_HAS_MMU, 0 },
  { "notb", PPC_FEATURE_NO_TB, 0 },
  { "pa6t", PPC_FEATURE_PA6T, 0 },
  { "power4", PPC_FEATURE_POWER4, 0 },
  { "power5", PPC_FEATURE_POWER5, 0 },
  { "power5+", PPC_FEATURE_POWER5_PLUS, 0 },
  { "power6x", PPC_FEATURE_POWER6_EXT, 0 },
  { "ppc32", PPC_FEATURE_32, 0 },
  { "ppc601", PPC_FEATURE_601_INSTR, 0 },
  { "ppc64", PPC_FEATURE_64, 0 },
  { "ppcle", PPC_FEATURE_PPC_LE, 0 },
  { "smt", PPC_FEATURE_SMT, 0 },
  { "spe", PPC_FEATURE_HAS_SPE, 0 },
  { "true_le", PPC_FEATURE_TRUE_LE, 0 },
  { "ucache", PPC_FEATURE_UNIFIED_CACHE, 0 },
  { "vsx", PPC_FEATURE_HAS_VSX, 0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07", PPC_FEATURE2_ARCH_2_07, 1 },
  { "dscr", PPC_FEATURE2_HAS_DSCR, 1 },
  { "ebb", PPC_FEATURE2_HAS_EBB, 1 },
  { "htm", PPC_FEATURE2_HAS_HTM, 1 },
  { "htm-nosc", PPC_FEATURE2_HTM_NOSC, 1 },
  { "htm-no-suspend", PPC_FEATURE2_HTM_NO_SUSPEND, 1 },
  { "isel", PPC_FEATURE2_HAS_ISEL, 1 },
  { "tar", PPC_FEATURE2_HAS_TAR, 1 },
  { "vcrypto", PPC_FEATURE2_HAS_VEC_CRYPTO, 1 },
  { "arch_3_00", PPC_FEATURE2_ARCH_3_00, 1 },
  { "ieee128", PPC_FEATURE2_HAS_IEEE128, 1 },
  { "darn", PPC_FEATURE2_DARN, 1 },
  { "scv", PPC_FEATURE2_SCV, 1 }
};
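
/* For example, user code can select a run-time path with

     if (__builtin_cpu_supports ("vsx"))
       vsx_path ();
     else
       generic_path ();

   (vsx_path/generic_path being hypothetical user functions); this tests
   PPC_FEATURE_HAS_VSX in the AT_HWCAP value stored in the TCB.  */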

/* On PowerPC, we have a limited number of target clones that we care about,
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,  /* default clone.  */
  CLONE_ISA_2_05,  /* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,  /* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,  /* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,  /* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;  /* rs6000_isa mask */
  const char *name;  /* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },  /* Default options.  */
  { OPTION_MASK_CMPB, "arch_2_05" },  /* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD, "arch_2_06" },  /* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR, "arch_2_07" },  /* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR, "arch_3_00" },  /* ISA 3.00 (power9).  */
};
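
/* For example, a (hypothetical) user function declared as

     __attribute__ ((target_clones ("cpu=power9", "default")))
     long mod3 (long a, long b) { return a % b; }

   gets one clone per applicable entry above, and the resolver chooses
   among them at run time via __builtin_cpu_supports on the names in
   this table.  */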


/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or in legitimate address
   checking.  We only need to worry about GPR, FPR, and Altivec registers here,
   along with an ANY field that is the OR of the 3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,  /* General purpose registers.  */
  RELOAD_REG_FPR,  /* Traditional floating point regs.  */
  RELOAD_REG_VMX,  /* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,  /* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;  /* Register class name.  */
  int reg;  /* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr", FIRST_GPR_REGNO },  /* RELOAD_REG_GPR.  */
  { "Fpr", FIRST_FPR_REGNO },  /* RELOAD_REG_FPR.  */
  { "VMX", FIRST_ALTIVEC_REGNO },  /* RELOAD_REG_VMX.  */
  { "Any", -1 },  /* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* Quad (16-byte aligned) offset
					   addressing.  */

/* Valid addressing mode masks (and reload insns), based on register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;  /* INSN to reload for loading.  */
  enum insn_code reload_store;  /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;  /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;  /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;  /* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG];  /* Valid address masks.  */
  bool scalar_in_vmx_p;  /* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  return store_data_bypass_p (out_insn, in_insn);
}
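
/* Scheduler bypass guards in the rs6000 pipeline descriptions can name this
   function in a define_bypass clause in place of the generic
   store_data_bypass_p, avoiding the assertion failures described above.  */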

\f
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),  /* mulsi */
  COSTS_N_INSNS (1),  /* mulsi_const */
  COSTS_N_INSNS (1),  /* mulsi_const9 */
  COSTS_N_INSNS (1),  /* muldi */
  COSTS_N_INSNS (1),  /* divsi */
  COSTS_N_INSNS (1),  /* divdi */
  COSTS_N_INSNS (1),  /* fp */
  COSTS_N_INSNS (1),  /* dmul */
  COSTS_N_INSNS (1),  /* sdiv */
  COSTS_N_INSNS (1),  /* ddiv */
  32,  /* cache line size */
  0,  /* l1 cache */
  0,  /* l2 cache */
  0,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),  /* mulsi */
  COSTS_N_INSNS (1),  /* mulsi_const */
  COSTS_N_INSNS (1),  /* mulsi_const9 */
  COSTS_N_INSNS (1),  /* muldi */
  COSTS_N_INSNS (1),  /* divsi */
  COSTS_N_INSNS (1),  /* divdi */
  COSTS_N_INSNS (1),  /* fp */
  COSTS_N_INSNS (1),  /* dmul */
  COSTS_N_INSNS (1),  /* sdiv */
  COSTS_N_INSNS (1),  /* ddiv */
  128,  /* cache line size */
  0,  /* l1 cache */
  0,  /* l2 cache */
  0,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),  /* mulsi */
  COSTS_N_INSNS (12),  /* mulsi_const */
  COSTS_N_INSNS (8),  /* mulsi_const9 */
  COSTS_N_INSNS (34),  /* muldi */
  COSTS_N_INSNS (65),  /* divsi */
  COSTS_N_INSNS (67),  /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (31),  /* sdiv */
  COSTS_N_INSNS (31),  /* ddiv */
  128,  /* cache line size */
  128,  /* l1 cache */
  2048,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (2),  /* muldi */
  COSTS_N_INSNS (6),  /* divsi */
  COSTS_N_INSNS (6),  /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (10),  /* sdiv */
  COSTS_N_INSNS (17),  /* ddiv */
  32,  /* cache line size */
  4,  /* l1 cache */
  16,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (33),  /* divsi */
  COSTS_N_INSNS (33),  /* divdi */
  COSTS_N_INSNS (11),  /* fp */
  COSTS_N_INSNS (11),  /* dmul */
  COSTS_N_INSNS (11),  /* sdiv */
  COSTS_N_INSNS (11),  /* ddiv */
  32,  /* cache line size */
  4,  /* l1 cache */
  16,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (35),  /* divsi */
  COSTS_N_INSNS (35),  /* divdi */
  COSTS_N_INSNS (11),  /* fp */
  COSTS_N_INSNS (11),  /* dmul */
  COSTS_N_INSNS (11),  /* sdiv */
  COSTS_N_INSNS (11),  /* ddiv */
  32,  /* cache line size */
  16,  /* l1 cache */
  128,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (3),  /* muldi */
  COSTS_N_INSNS (34),  /* divsi */
  COSTS_N_INSNS (34),  /* divdi */
  COSTS_N_INSNS (5),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (19),  /* sdiv */
  COSTS_N_INSNS (33),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  256,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (11),  /* divsi */
  COSTS_N_INSNS (11),  /* divdi */
  COSTS_N_INSNS (6),  /* fp */
  COSTS_N_INSNS (6),  /* dmul */
  COSTS_N_INSNS (19),  /* sdiv */
  COSTS_N_INSNS (33),  /* ddiv */
  32,  /* l1 cache line size */
  32,  /* l1 cache */
  512,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (5),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (36),  /* divsi */
  COSTS_N_INSNS (36),  /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (17),  /* sdiv */
  COSTS_N_INSNS (31),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  256,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (37),  /* divsi */
  COSTS_N_INSNS (37),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (18),  /* sdiv */
  COSTS_N_INSNS (33),  /* ddiv */
  32,  /* cache line size */
  8,  /* l1 cache */
  64,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (20),  /* divsi */
  COSTS_N_INSNS (20),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (18),  /* sdiv */
  COSTS_N_INSNS (32),  /* ddiv */
  32,  /* cache line size */
  16,  /* l1 cache */
  512,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (2),  /* muldi */
  COSTS_N_INSNS (20),  /* divsi */
  COSTS_N_INSNS (20),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (18),  /* sdiv */
  COSTS_N_INSNS (32),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  1024,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (7),  /* muldi */
  COSTS_N_INSNS (21),  /* divsi */
  COSTS_N_INSNS (37),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (18),  /* sdiv */
  COSTS_N_INSNS (32),  /* ddiv */
  128,  /* cache line size */
  32,  /* l1 cache */
  1024,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (7),  /* muldi */
  COSTS_N_INSNS (21),  /* divsi */
  COSTS_N_INSNS (37),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (17),  /* sdiv */
  COSTS_N_INSNS (21),  /* ddiv */
  128,  /* cache line size */
  64,  /* l1 cache */
  1024,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,  /* mulsi */
  COSTS_N_INSNS (6/2),  /* mulsi_const */
  COSTS_N_INSNS (6/2),  /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,  /* muldi */
  COSTS_N_INSNS (38/2),  /* divsi */
  COSTS_N_INSNS (70/2),  /* divdi */
  COSTS_N_INSNS (10/2),  /* fp */
  COSTS_N_INSNS (10/2),  /* dmul */
  COSTS_N_INSNS (74/2),  /* sdiv */
  COSTS_N_INSNS (74/2),  /* ddiv */
  128,  /* cache line size */
  32,  /* l1 cache */
  512,  /* l2 cache */
  6,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (17),  /* divsi */
  COSTS_N_INSNS (17),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (17),  /* sdiv */
  COSTS_N_INSNS (31),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  512,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (23),  /* divsi */
  COSTS_N_INSNS (23),  /* divdi */
  COSTS_N_INSNS (5),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (21),  /* sdiv */
  COSTS_N_INSNS (35),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  1024,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (19),  /* divsi */
  COSTS_N_INSNS (19),  /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (29),  /* sdiv */
  COSTS_N_INSNS (29),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  256,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (19),  /* divsi */
  COSTS_N_INSNS (19),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (18),  /* sdiv */
  COSTS_N_INSNS (33),  /* ddiv */
  32,  /* cache line size */
  16,  /* l1 cache */
  16,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (14),  /* divsi */
  COSTS_N_INSNS (14),  /* divdi */
  COSTS_N_INSNS (8),  /* fp */
  COSTS_N_INSNS (10),  /* dmul */
  COSTS_N_INSNS (36),  /* sdiv */
  COSTS_N_INSNS (66),  /* ddiv */
  64,  /* cache line size */
  32,  /* l1 cache */
  128,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (14),  /* divsi */
  COSTS_N_INSNS (14),  /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (10),  /* dmul */
  COSTS_N_INSNS (36),  /* sdiv */
  COSTS_N_INSNS (66),  /* ddiv */
  64,  /* cache line size */
  32,  /* l1 cache */
  128,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (14),  /* divsi */
  COSTS_N_INSNS (14),  /* divdi */
  COSTS_N_INSNS (7),  /* fp */
  COSTS_N_INSNS (10),  /* dmul */
  COSTS_N_INSNS (36),  /* sdiv */
  COSTS_N_INSNS (66),  /* ddiv */
  64,  /* cache line size */
  32,  /* l1 cache */
  128,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (14),  /* divsi */
  COSTS_N_INSNS (14),  /* divdi */
  COSTS_N_INSNS (7),  /* fp */
  COSTS_N_INSNS (10),  /* dmul */
  COSTS_N_INSNS (36),  /* sdiv */
  COSTS_N_INSNS (66),  /* ddiv */
  64,  /* cache line size */
  32,  /* l1 cache */
  128,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (5),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (18),  /* divsi */
  COSTS_N_INSNS (18),  /* divdi */
  COSTS_N_INSNS (10),  /* fp */
  COSTS_N_INSNS (10),  /* dmul */
  COSTS_N_INSNS (46),  /* sdiv */
  COSTS_N_INSNS (72),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  512,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (18),  /* divsi */
  COSTS_N_INSNS (34),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (17),  /* sdiv */
  COSTS_N_INSNS (17),  /* ddiv */
  128,  /* cache line size */
  32,  /* l1 cache */
  1024,  /* l2 cache */
  8,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),  /* mulsi */
  COSTS_N_INSNS (8),  /* mulsi_const */
  COSTS_N_INSNS (8),  /* mulsi_const9 */
  COSTS_N_INSNS (8),  /* muldi */
  COSTS_N_INSNS (22),  /* divsi */
  COSTS_N_INSNS (28),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (13),  /* sdiv */
  COSTS_N_INSNS (16),  /* ddiv */
  128,  /* cache line size */
  64,  /* l1 cache */
  2048,  /* l2 cache */
  16,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (2),  /* muldi */
  COSTS_N_INSNS (18),  /* divsi */
  COSTS_N_INSNS (34),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (13),  /* sdiv */
  COSTS_N_INSNS (16),  /* ddiv */
  128,  /* cache line size */
  32,  /* l1 cache */
  256,  /* l2 cache */
  12,  /* prefetch streams */
  COSTS_N_INSNS (3),  /* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (3),  /* muldi */
  COSTS_N_INSNS (19),  /* divsi */
  COSTS_N_INSNS (35),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (14),  /* sdiv */
  COSTS_N_INSNS (17),  /* ddiv */
  128,  /* cache line size */
  32,  /* l1 cache */
  256,  /* l2 cache */
  12,  /* prefetch streams */
  COSTS_N_INSNS (3),  /* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (3),  /* muldi */
  COSTS_N_INSNS (8),  /* divsi */
  COSTS_N_INSNS (12),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (13),  /* sdiv */
  COSTS_N_INSNS (18),  /* ddiv */
  128,  /* cache line size */
  32,  /* l1 cache */
  512,  /* l2 cache */
  8,  /* prefetch streams */
  COSTS_N_INSNS (3),  /* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),  /* mulsi */
  COSTS_N_INSNS (16),  /* mulsi_const */
  COSTS_N_INSNS (16),  /* mulsi_const9 */
  COSTS_N_INSNS (16),  /* muldi */
  COSTS_N_INSNS (22),  /* divsi */
  COSTS_N_INSNS (28),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (59),  /* sdiv */
  COSTS_N_INSNS (72),  /* ddiv */
  64,  /* cache line size */
  16,  /* l1 cache */
  2048,  /* l2 cache */
  16,  /* prefetch streams */
  0,  /* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X
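
/* Thus each RS6000_BUILTIN_n (ENUM, NAME, MASK, ATTR, ICODE) entry in
   rs6000-builtin.def expands, via the definitions above, to an initializer

     { NAME, ICODE, MASK, ATTR },

   so rs6000_builtin_info is indexed in the same order as the rs6000_builtins
   enumeration built from the same file.  */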

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
static tree get_prev_label (tree);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];  /* return value + 3 arguments.  */
  unsigned char uns_p[4];  /* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec", 1, 1, false, true, false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall", 0, 0, false, true, true, false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true, true, false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
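
/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001.  */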
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

1673 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1674 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1675
1676 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1677 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1678 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1679 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1680 rs6000_builtin_support_vector_misalignment
1681 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1682 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1683 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1684 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1685 rs6000_builtin_vectorization_cost
1686 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1687 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1688 rs6000_preferred_simd_mode
1689 #undef TARGET_VECTORIZE_INIT_COST
1690 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1691 #undef TARGET_VECTORIZE_ADD_STMT_COST
1692 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1693 #undef TARGET_VECTORIZE_FINISH_COST
1694 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1695 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1696 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1697
1698 #undef TARGET_INIT_BUILTINS
1699 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1700 #undef TARGET_BUILTIN_DECL
1701 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1702
1703 #undef TARGET_FOLD_BUILTIN
1704 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1705 #undef TARGET_GIMPLE_FOLD_BUILTIN
1706 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1707
1708 #undef TARGET_EXPAND_BUILTIN
1709 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1710
1711 #undef TARGET_MANGLE_TYPE
1712 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1713
1714 #undef TARGET_INIT_LIBFUNCS
1715 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1716
1717 #if TARGET_MACHO
1718 #undef TARGET_BINDS_LOCAL_P
1719 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1720 #endif
1721
1722 #undef TARGET_MS_BITFIELD_LAYOUT_P
1723 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1724
1725 #undef TARGET_ASM_OUTPUT_MI_THUNK
1726 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1727
1728 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1729 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1730
1731 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1732 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1733
1734 #undef TARGET_REGISTER_MOVE_COST
1735 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1736 #undef TARGET_MEMORY_MOVE_COST
1737 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1738 #undef TARGET_CANNOT_COPY_INSN_P
1739 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1740 #undef TARGET_RTX_COSTS
1741 #define TARGET_RTX_COSTS rs6000_rtx_costs
1742 #undef TARGET_ADDRESS_COST
1743 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1744 #undef TARGET_INSN_COST
1745 #define TARGET_INSN_COST rs6000_insn_cost
1746
1747 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1748 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1749
1750 #undef TARGET_PROMOTE_FUNCTION_MODE
1751 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1752
1753 #undef TARGET_RETURN_IN_MEMORY
1754 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1755
1756 #undef TARGET_RETURN_IN_MSB
1757 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1758
1759 #undef TARGET_SETUP_INCOMING_VARARGS
1760 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1761
1762 /* Always strict argument naming on rs6000. */
1763 #undef TARGET_STRICT_ARGUMENT_NAMING
1764 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1765 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1766 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1767 #undef TARGET_SPLIT_COMPLEX_ARG
1768 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1769 #undef TARGET_MUST_PASS_IN_STACK
1770 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1771 #undef TARGET_PASS_BY_REFERENCE
1772 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1773 #undef TARGET_ARG_PARTIAL_BYTES
1774 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1775 #undef TARGET_FUNCTION_ARG_ADVANCE
1776 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1777 #undef TARGET_FUNCTION_ARG
1778 #define TARGET_FUNCTION_ARG rs6000_function_arg
1779 #undef TARGET_FUNCTION_ARG_PADDING
1780 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1781 #undef TARGET_FUNCTION_ARG_BOUNDARY
1782 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1783
1784 #undef TARGET_BUILD_BUILTIN_VA_LIST
1785 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1786
1787 #undef TARGET_EXPAND_BUILTIN_VA_START
1788 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1789
1790 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1791 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1792
1793 #undef TARGET_EH_RETURN_FILTER_MODE
1794 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1795
1796 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1797 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1798
1799 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1800 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1801
1802 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1803 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1804
1805 #undef TARGET_FLOATN_MODE
1806 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1807
1808 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1809 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1810
1811 #undef TARGET_MD_ASM_ADJUST
1812 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1813
1814 #undef TARGET_OPTION_OVERRIDE
1815 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1816
1817 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1818 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1819 rs6000_builtin_vectorized_function
1820
1821 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1822 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1823 rs6000_builtin_md_vectorized_function
1824
1825 #undef TARGET_STACK_PROTECT_GUARD
1826 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1827
1828 #if !TARGET_MACHO
1829 #undef TARGET_STACK_PROTECT_FAIL
1830 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1831 #endif
1832
1833 #ifdef HAVE_AS_TLS
1834 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1835 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1836 #endif
1837
1838 /* Use a 32-bit anchor range. This leads to sequences like:
1839
1840 addis tmp,anchor,high
1841 add dest,tmp,low
1842
1843 where tmp itself acts as an anchor, and can be shared between
1844 accesses to the same 64k page. */
1845 #undef TARGET_MIN_ANCHOR_OFFSET
1846 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1847 #undef TARGET_MAX_ANCHOR_OFFSET
1848 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1849 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1850 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1851 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1852 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1853
1854 #undef TARGET_BUILTIN_RECIPROCAL
1855 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1856
1857 #undef TARGET_SECONDARY_RELOAD
1858 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1859 #undef TARGET_SECONDARY_MEMORY_NEEDED
1860 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1861 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1862 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1863
1864 #undef TARGET_LEGITIMATE_ADDRESS_P
1865 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1866
1867 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1868 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1869
1870 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1871 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1872
1873 #undef TARGET_CAN_ELIMINATE
1874 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1875
1876 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1877 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1878
1879 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1880 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1881
1882 #undef TARGET_TRAMPOLINE_INIT
1883 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1884
1885 #undef TARGET_FUNCTION_VALUE
1886 #define TARGET_FUNCTION_VALUE rs6000_function_value
1887
1888 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1889 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1890
1891 #undef TARGET_OPTION_SAVE
1892 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1893
1894 #undef TARGET_OPTION_RESTORE
1895 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1896
1897 #undef TARGET_OPTION_PRINT
1898 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1899
1900 #undef TARGET_CAN_INLINE_P
1901 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1902
1903 #undef TARGET_SET_CURRENT_FUNCTION
1904 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1905
1906 #undef TARGET_LEGITIMATE_CONSTANT_P
1907 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1908
1909 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1910 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1911
1912 #undef TARGET_CAN_USE_DOLOOP_P
1913 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1914
1915 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1916 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1917
1918 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1919 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1920 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1921 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1922 #undef TARGET_UNWIND_WORD_MODE
1923 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1924
1925 #undef TARGET_OFFLOAD_OPTIONS
1926 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1927
1928 #undef TARGET_C_MODE_FOR_SUFFIX
1929 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1930
1931 #undef TARGET_INVALID_BINARY_OP
1932 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1933
1934 #undef TARGET_OPTAB_SUPPORTED_P
1935 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1936
1937 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1938 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1939
1940 #undef TARGET_COMPARE_VERSION_PRIORITY
1941 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1942
1943 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1944 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1945 rs6000_generate_version_dispatcher_body
1946
1947 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1948 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1949 rs6000_get_function_versions_dispatcher
1950
1951 #undef TARGET_OPTION_FUNCTION_VERSIONS
1952 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1953
1954 #undef TARGET_HARD_REGNO_NREGS
1955 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1956 #undef TARGET_HARD_REGNO_MODE_OK
1957 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1958
1959 #undef TARGET_MODES_TIEABLE_P
1960 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1961
1962 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1963 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1964 rs6000_hard_regno_call_part_clobbered
1965
1966 #undef TARGET_SLOW_UNALIGNED_ACCESS
1967 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1968
1969 #undef TARGET_CAN_CHANGE_MODE_CLASS
1970 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1971
1972 #undef TARGET_CONSTANT_ALIGNMENT
1973 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1974
1975 #undef TARGET_STARTING_FRAME_OFFSET
1976 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1977
1978 #if TARGET_ELF && RS6000_WEAK
1979 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1980 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1981 #endif
1982
1983 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1984 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1985
1986 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1987 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1988 \f
1989
1990 /* Processor table. */
1991 struct rs6000_ptt
1992 {
1993 const char *const name; /* Canonical processor name. */
1994 const enum processor_type processor; /* Processor type enum value. */
1995 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1996 };
1997
1998 static struct rs6000_ptt const processor_target_table[] =
1999 {
2000 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
2001 #include "rs6000-cpus.def"
2002 #undef RS6000_CPU
2003 };
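/* Each RS6000_CPU line in rs6000-cpus.def expands to one row of the table
   above.  As an illustrative sketch (see rs6000-cpus.def for the
   authoritative entries and flag sets), a line such as
   RS6000_CPU ("power8", PROCESSOR_POWER8, MASK_POWERPC64 | ISA_2_7_MASKS_SERVER)
   becomes { "power8", PROCESSOR_POWER8, MASK_POWERPC64 | ISA_2_7_MASKS_SERVER }. */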
2004
2005 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2006 name is invalid. */
2007
2008 static int
2009 rs6000_cpu_name_lookup (const char *name)
2010 {
2011 size_t i;
2012
2013 if (name != NULL)
2014 {
2015 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2016 if (! strcmp (name, processor_target_table[i].name))
2017 return (int)i;
2018 }
2019
2020 return -1;
2021 }
2022
2023 \f
2024 /* Return number of consecutive hard regs needed starting at reg REGNO
2025 to hold something of mode MODE.
2026 This is ordinarily the length in words of a value of mode MODE
2027 but can be less for certain modes in special long registers.
2028
2029 POWER and PowerPC GPRs hold 32 bits worth;
2030 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2031
2032 static int
2033 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2034 {
2035 unsigned HOST_WIDE_INT reg_size;
2036
2037 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2038 128-bit floating point that can go in vector registers, which has VSX
2039 memory addressing. */
2040 if (FP_REGNO_P (regno))
2041 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2042 ? UNITS_PER_VSX_WORD
2043 : UNITS_PER_FP_WORD);
2044
2045 else if (ALTIVEC_REGNO_P (regno))
2046 reg_size = UNITS_PER_ALTIVEC_WORD;
2047
2048 else
2049 reg_size = UNITS_PER_WORD;
2050
2051 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2052 }
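/* Worked example of the rounding above: a 16-byte TImode value needs
   (16 + 4 - 1) / 4 = 4 GPRs on a 32-bit target, (16 + 8 - 1) / 8 = 2 GPRs
   on PowerPC64, and (16 + 16 - 1) / 16 = 1 Altivec register. */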
2053
2054 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2055 MODE. */
2056 static int
2057 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2058 {
2059 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2060
2061 if (COMPLEX_MODE_P (mode))
2062 mode = GET_MODE_INNER (mode);
2063
2064 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2065 register combinations, and we use PTImode where we need to deal with quad
2066 word memory operations. Don't allow quad words in the argument or frame
2067 pointer registers, just registers 0..31. */
2068 if (mode == PTImode)
2069 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2070 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2071 && ((regno & 1) == 0));
2072
2073 /* On VSX implementations, the registers that overlap the FPRs are larger
2074 than on non-VSX implementations. Don't allow an item to be split between
2075 an FP register and an Altivec register. Allow TImode in all VSX
2076 registers if the user asked for it. */
2077 if (TARGET_VSX && VSX_REGNO_P (regno)
2078 && (VECTOR_MEM_VSX_P (mode)
2079 || FLOAT128_VECTOR_P (mode)
2080 || reg_addr[mode].scalar_in_vmx_p
2081 || mode == TImode
2082 || (TARGET_VADDUQM && mode == V1TImode)))
2083 {
2084 if (FP_REGNO_P (regno))
2085 return FP_REGNO_P (last_regno);
2086
2087 if (ALTIVEC_REGNO_P (regno))
2088 {
2089 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2090 return 0;
2091
2092 return ALTIVEC_REGNO_P (last_regno);
2093 }
2094 }
2095
2096 /* The GPRs can hold any mode, but values bigger than one register
2097 cannot go past R31. */
2098 if (INT_REGNO_P (regno))
2099 return INT_REGNO_P (last_regno);
2100
2101 /* The float registers (except for VSX vector modes) can only hold floating
2102 modes and DImode. */
2103 if (FP_REGNO_P (regno))
2104 {
2105 if (FLOAT128_VECTOR_P (mode))
2106 return false;
2107
2108 if (SCALAR_FLOAT_MODE_P (mode)
2109 && (mode != TDmode || (regno % 2) == 0)
2110 && FP_REGNO_P (last_regno))
2111 return 1;
2112
2113 if (GET_MODE_CLASS (mode) == MODE_INT)
2114 {
2115 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2116 return 1;
2117
2118 if (TARGET_P8_VECTOR && (mode == SImode))
2119 return 1;
2120
2121 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2122 return 1;
2123 }
2124
2125 return 0;
2126 }
2127
2128 /* The CR register can only hold CC modes. */
2129 if (CR_REGNO_P (regno))
2130 return GET_MODE_CLASS (mode) == MODE_CC;
2131
2132 if (CA_REGNO_P (regno))
2133 return mode == Pmode || mode == SImode;
2134
2135 /* AltiVec modes only go in AltiVec registers. */
2136 if (ALTIVEC_REGNO_P (regno))
2137 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2138 || mode == V1TImode);
2139
2140 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2141 registers, and the value must be able to fit within the register set. */
2142
2143 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2144 }
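/* Consequences of the tests above, for example: TDmode is accepted in FPRs
   only at even register numbers, SImode is accepted in FPRs only with
   TARGET_P8_VECTOR, and PTImode is accepted only in even/odd GPR pairs. */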
2145
2146 /* Implement TARGET_HARD_REGNO_NREGS. */
2147
2148 static unsigned int
2149 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2150 {
2151 return rs6000_hard_regno_nregs[mode][regno];
2152 }
2153
2154 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2155
2156 static bool
2157 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2158 {
2159 return rs6000_hard_regno_mode_ok_p[mode][regno];
2160 }
2161
2162 /* Implement TARGET_MODES_TIEABLE_P.
2163
2164 PTImode cannot tie with other modes because PTImode is restricted to even
2165 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2166 57744).
2167
2168 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2169 128-bit floating point on VSX systems ties with other vectors. */
2170
2171 static bool
2172 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2173 {
2174 if (mode1 == PTImode)
2175 return mode2 == PTImode;
2176 if (mode2 == PTImode)
2177 return false;
2178
2179 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2180 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2181 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2182 return false;
2183
2184 if (SCALAR_FLOAT_MODE_P (mode1))
2185 return SCALAR_FLOAT_MODE_P (mode2);
2186 if (SCALAR_FLOAT_MODE_P (mode2))
2187 return false;
2188
2189 if (GET_MODE_CLASS (mode1) == MODE_CC)
2190 return GET_MODE_CLASS (mode2) == MODE_CC;
2191 if (GET_MODE_CLASS (mode2) == MODE_CC)
2192 return false;
2193
2194 return true;
2195 }
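/* Examples of the ordering above: V2DFmode ties with V4SImode (both are
   Altivec/VSX vector modes) and SFmode ties with DFmode (both are scalar
   float modes), but TImode does not tie with PTImode, and CCmode ties only
   with the other CC modes. */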
2196
2197 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2198
2199 static bool
2200 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2201 {
2202 if (TARGET_32BIT
2203 && TARGET_POWERPC64
2204 && GET_MODE_SIZE (mode) > 4
2205 && INT_REGNO_P (regno))
2206 return true;
2207
2208 if (TARGET_VSX
2209 && FP_REGNO_P (regno)
2210 && GET_MODE_SIZE (mode) > 8
2211 && !FLOAT128_2REG_P (mode))
2212 return true;
2213
2214 return false;
2215 }
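/* For example, with -m32 -mpowerpc64 a DImode value fits in one 64-bit GPR,
   but the 32-bit ABI only preserves the low 32 bits of that register across
   calls, so the value is part-clobbered.  Similarly, only the lower 64 bits
   of a VSX register that overlaps an FPR survive a call, so any mode wider
   than 8 bytes held there is part-clobbered as well. */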
2216
2217 /* Print interesting facts about registers. */
2218 static void
2219 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2220 {
2221 int r, m;
2222
2223 for (r = first_regno; r <= last_regno; ++r)
2224 {
2225 const char *comma = "";
2226 int len;
2227
2228 if (first_regno == last_regno)
2229 fprintf (stderr, "%s:\t", reg_name);
2230 else
2231 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2232
2233 len = 8;
2234 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2235 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2236 {
2237 if (len > 70)
2238 {
2239 fprintf (stderr, ",\n\t");
2240 len = 8;
2241 comma = "";
2242 }
2243
2244 if (rs6000_hard_regno_nregs[m][r] > 1)
2245 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2246 rs6000_hard_regno_nregs[m][r]);
2247 else
2248 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2249
2250 comma = ", ";
2251 }
2252
2253 if (call_used_regs[r])
2254 {
2255 if (len > 70)
2256 {
2257 fprintf (stderr, ",\n\t");
2258 len = 8;
2259 comma = "";
2260 }
2261
2262 len += fprintf (stderr, "%s%s", comma, "call-used");
2263 comma = ", ";
2264 }
2265
2266 if (fixed_regs[r])
2267 {
2268 if (len > 70)
2269 {
2270 fprintf (stderr, ",\n\t");
2271 len = 8;
2272 comma = "";
2273 }
2274
2275 len += fprintf (stderr, "%s%s", comma, "fixed");
2276 comma = ", ";
2277 }
2278
2279 if (len > 70)
2280 {
2281 fprintf (stderr, ",\n\t");
2282 comma = "";
2283 }
2284
2285 len += fprintf (stderr, "%sreg-class = %s", comma,
2286 reg_class_names[(int)rs6000_regno_regclass[r]]);
2287 comma = ", ";
2288
2289 if (len > 70)
2290 {
2291 fprintf (stderr, ",\n\t");
2292 comma = "";
2293 }
2294
2295 fprintf (stderr, "%sregno = %d\n", comma, r);
2296 }
2297 }
2298
2299 static const char *
2300 rs6000_debug_vector_unit (enum rs6000_vector v)
2301 {
2302 const char *ret;
2303
2304 switch (v)
2305 {
2306 case VECTOR_NONE: ret = "none"; break;
2307 case VECTOR_ALTIVEC: ret = "altivec"; break;
2308 case VECTOR_VSX: ret = "vsx"; break;
2309 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2310 default: ret = "unknown"; break;
2311 }
2312
2313 return ret;
2314 }
2315
2316 /* Inner function printing just the address mask for a particular reload
2317 register class. */
2318 DEBUG_FUNCTION char *
2319 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2320 {
2321 static char ret[8];
2322 char *p = ret;
2323
2324 if ((mask & RELOAD_REG_VALID) != 0)
2325 *p++ = 'v';
2326 else if (keep_spaces)
2327 *p++ = ' ';
2328
2329 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2330 *p++ = 'm';
2331 else if (keep_spaces)
2332 *p++ = ' ';
2333
2334 if ((mask & RELOAD_REG_INDEXED) != 0)
2335 *p++ = 'i';
2336 else if (keep_spaces)
2337 *p++ = ' ';
2338
2339 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2340 *p++ = 'O';
2341 else if ((mask & RELOAD_REG_OFFSET) != 0)
2342 *p++ = 'o';
2343 else if (keep_spaces)
2344 *p++ = ' ';
2345
2346 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2347 *p++ = '+';
2348 else if (keep_spaces)
2349 *p++ = ' ';
2350
2351 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2352 *p++ = '+';
2353 else if (keep_spaces)
2354 *p++ = ' ';
2355
2356 if ((mask & RELOAD_REG_AND_M16) != 0)
2357 *p++ = '&';
2358 else if (keep_spaces)
2359 *p++ = ' ';
2360
2361 *p = '\0';
2362
2363 return ret;
2364 }
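/* For instance, a mask of RELOAD_REG_VALID | RELOAD_REG_INDEXED
   | RELOAD_REG_OFFSET prints as "vio", or as "v io   " (space padded)
   when KEEP_SPACES is true. */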
2365
2366 /* Print the address masks in a human readable fashion. */
2367 DEBUG_FUNCTION void
2368 rs6000_debug_print_mode (ssize_t m)
2369 {
2370 ssize_t rc;
2371 int spaces = 0;
2372
2373 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2374 for (rc = 0; rc < N_RELOAD_REG; rc++)
2375 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2376 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2377
2378 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2379 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2380 {
2381 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2382 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2383 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2384 spaces = 0;
2385 }
2386 else
2387 spaces += sizeof (" Reload=sl") - 1;
2388
2389 if (reg_addr[m].scalar_in_vmx_p)
2390 {
2391 fprintf (stderr, "%*s Upper=y", spaces, "");
2392 spaces = 0;
2393 }
2394 else
2395 spaces += sizeof (" Upper=y") - 1;
2396
2397 if (rs6000_vector_unit[m] != VECTOR_NONE
2398 || rs6000_vector_mem[m] != VECTOR_NONE)
2399 {
2400 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2401 spaces, "",
2402 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2403 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2404 }
2405
2406 fputs ("\n", stderr);
2407 }
2408
2409 #define DEBUG_FMT_ID "%-32s= "
2410 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2411 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2412 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
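/* For example, DEBUG_FMT_S expands to "%-32s= %s\n", so
   fprintf (stderr, DEBUG_FMT_S, "abi", "ELFv2") prints the key padded to 32
   columns followed by "= ELFv2", keeping the '=' column aligned throughout
   the -mdebug=reg output below. */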
2413
2414 /* Print various interesting information with -mdebug=reg. */
2415 static void
2416 rs6000_debug_reg_global (void)
2417 {
2418 static const char *const tf[2] = { "false", "true" };
2419 const char *nl = (const char *)0;
2420 int m;
2421 size_t m1, m2, v;
2422 char costly_num[20];
2423 char nop_num[20];
2424 char flags_buffer[40];
2425 const char *costly_str;
2426 const char *nop_str;
2427 const char *trace_str;
2428 const char *abi_str;
2429 const char *cmodel_str;
2430 struct cl_target_option cl_opts;
2431
2432 /* Modes we want tieable information on. */
2433 static const machine_mode print_tieable_modes[] = {
2434 QImode,
2435 HImode,
2436 SImode,
2437 DImode,
2438 TImode,
2439 PTImode,
2440 SFmode,
2441 DFmode,
2442 TFmode,
2443 IFmode,
2444 KFmode,
2445 SDmode,
2446 DDmode,
2447 TDmode,
2448 V16QImode,
2449 V8HImode,
2450 V4SImode,
2451 V2DImode,
2452 V1TImode,
2453 V32QImode,
2454 V16HImode,
2455 V8SImode,
2456 V4DImode,
2457 V2TImode,
2458 V4SFmode,
2459 V2DFmode,
2460 V8SFmode,
2461 V4DFmode,
2462 CCmode,
2463 CCUNSmode,
2464 CCEQmode,
2465 };
2466
2467 /* Virtual regs we are interested in. */
2468 static const struct {
2469 int regno; /* register number. */
2470 const char *name; /* register name. */
2471 } virtual_regs[] = {
2472 { STACK_POINTER_REGNUM, "stack pointer:" },
2473 { TOC_REGNUM, "toc: " },
2474 { STATIC_CHAIN_REGNUM, "static chain: " },
2475 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2476 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2477 { ARG_POINTER_REGNUM, "arg pointer: " },
2478 { FRAME_POINTER_REGNUM, "frame pointer:" },
2479 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2480 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2481 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2482 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2483 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2484 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2485 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2486 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2487 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2488 };
2489
2490 fputs ("\nHard register information:\n", stderr);
2491 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2492 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2493 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2494 LAST_ALTIVEC_REGNO,
2495 "vs");
2496 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2497 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2498 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2499 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2500 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2501 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2502
2503 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2504 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2505 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2506
2507 fprintf (stderr,
2508 "\n"
2509 "d reg_class = %s\n"
2510 "f reg_class = %s\n"
2511 "v reg_class = %s\n"
2512 "wa reg_class = %s\n"
2513 "wb reg_class = %s\n"
2514 "wd reg_class = %s\n"
2515 "we reg_class = %s\n"
2516 "wf reg_class = %s\n"
2517 "wg reg_class = %s\n"
2518 "wh reg_class = %s\n"
2519 "wi reg_class = %s\n"
2520 "wj reg_class = %s\n"
2521 "wk reg_class = %s\n"
2522 "wl reg_class = %s\n"
2523 "wm reg_class = %s\n"
2524 "wo reg_class = %s\n"
2525 "wp reg_class = %s\n"
2526 "wq reg_class = %s\n"
2527 "wr reg_class = %s\n"
2528 "ws reg_class = %s\n"
2529 "wt reg_class = %s\n"
2530 "wu reg_class = %s\n"
2531 "wv reg_class = %s\n"
2532 "ww reg_class = %s\n"
2533 "wx reg_class = %s\n"
2534 "wy reg_class = %s\n"
2535 "wz reg_class = %s\n"
2536 "wA reg_class = %s\n"
2537 "wH reg_class = %s\n"
2538 "wI reg_class = %s\n"
2539 "wJ reg_class = %s\n"
2540 "wK reg_class = %s\n"
2541 "\n",
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2544 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2573 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2574
2575 nl = "\n";
2576 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2577 rs6000_debug_print_mode (m);
2578
2579 fputs ("\n", stderr);
2580
2581 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2582 {
2583 machine_mode mode1 = print_tieable_modes[m1];
2584 bool first_time = true;
2585
2586 nl = (const char *)0;
2587 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2588 {
2589 machine_mode mode2 = print_tieable_modes[m2];
2590 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2591 {
2592 if (first_time)
2593 {
2594 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2595 nl = "\n";
2596 first_time = false;
2597 }
2598
2599 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2600 }
2601 }
2602
2603 if (!first_time)
2604 fputs ("\n", stderr);
2605 }
2606
2607 if (nl)
2608 fputs (nl, stderr);
2609
2610 if (rs6000_recip_control)
2611 {
2612 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2613
2614 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2615 if (rs6000_recip_bits[m])
2616 {
2617 fprintf (stderr,
2618 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2619 GET_MODE_NAME (m),
2620 (RS6000_RECIP_AUTO_RE_P (m)
2621 ? "auto"
2622 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2623 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2624 ? "auto"
2625 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2626 }
2627
2628 fputs ("\n", stderr);
2629 }
2630
2631 if (rs6000_cpu_index >= 0)
2632 {
2633 const char *name = processor_target_table[rs6000_cpu_index].name;
2634 HOST_WIDE_INT flags
2635 = processor_target_table[rs6000_cpu_index].target_enable;
2636
2637 sprintf (flags_buffer, "-mcpu=%s flags", name);
2638 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2639 }
2640 else
2641 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2642
2643 if (rs6000_tune_index >= 0)
2644 {
2645 const char *name = processor_target_table[rs6000_tune_index].name;
2646 HOST_WIDE_INT flags
2647 = processor_target_table[rs6000_tune_index].target_enable;
2648
2649 sprintf (flags_buffer, "-mtune=%s flags", name);
2650 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2651 }
2652 else
2653 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2654
2655 cl_target_option_save (&cl_opts, &global_options);
2656 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2657 rs6000_isa_flags);
2658
2659 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2660 rs6000_isa_flags_explicit);
2661
2662 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2663 rs6000_builtin_mask);
2664
2665 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2666
2667 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2668 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2669
2670 switch (rs6000_sched_costly_dep)
2671 {
2672 case max_dep_latency:
2673 costly_str = "max_dep_latency";
2674 break;
2675
2676 case no_dep_costly:
2677 costly_str = "no_dep_costly";
2678 break;
2679
2680 case all_deps_costly:
2681 costly_str = "all_deps_costly";
2682 break;
2683
2684 case true_store_to_load_dep_costly:
2685 costly_str = "true_store_to_load_dep_costly";
2686 break;
2687
2688 case store_to_load_dep_costly:
2689 costly_str = "store_to_load_dep_costly";
2690 break;
2691
2692 default:
2693 costly_str = costly_num;
2694 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2695 break;
2696 }
2697
2698 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2699
2700 switch (rs6000_sched_insert_nops)
2701 {
2702 case sched_finish_regroup_exact:
2703 nop_str = "sched_finish_regroup_exact";
2704 break;
2705
2706 case sched_finish_pad_groups:
2707 nop_str = "sched_finish_pad_groups";
2708 break;
2709
2710 case sched_finish_none:
2711 nop_str = "sched_finish_none";
2712 break;
2713
2714 default:
2715 nop_str = nop_num;
2716 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2717 break;
2718 }
2719
2720 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2721
2722 switch (rs6000_sdata)
2723 {
2724 default:
2725 case SDATA_NONE:
2726 break;
2727
2728 case SDATA_DATA:
2729 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2730 break;
2731
2732 case SDATA_SYSV:
2733 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2734 break;
2735
2736 case SDATA_EABI:
2737 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2738 break;
2739
2740 }
2741
2742 switch (rs6000_traceback)
2743 {
2744 case traceback_default: trace_str = "default"; break;
2745 case traceback_none: trace_str = "none"; break;
2746 case traceback_part: trace_str = "part"; break;
2747 case traceback_full: trace_str = "full"; break;
2748 default: trace_str = "unknown"; break;
2749 }
2750
2751 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2752
2753 switch (rs6000_current_cmodel)
2754 {
2755 case CMODEL_SMALL: cmodel_str = "small"; break;
2756 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2757 case CMODEL_LARGE: cmodel_str = "large"; break;
2758 default: cmodel_str = "unknown"; break;
2759 }
2760
2761 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2762
2763 switch (rs6000_current_abi)
2764 {
2765 case ABI_NONE: abi_str = "none"; break;
2766 case ABI_AIX: abi_str = "aix"; break;
2767 case ABI_ELFv2: abi_str = "ELFv2"; break;
2768 case ABI_V4: abi_str = "V4"; break;
2769 case ABI_DARWIN: abi_str = "darwin"; break;
2770 default: abi_str = "unknown"; break;
2771 }
2772
2773 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2774
2775 if (rs6000_altivec_abi)
2776 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2777
2778 if (rs6000_darwin64_abi)
2779 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2780
2781 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2782 (TARGET_SOFT_FLOAT ? "true" : "false"));
2783
2784 if (TARGET_LINK_STACK)
2785 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2786
2787 if (TARGET_P8_FUSION)
2788 {
2789 char options[80];
2790
2791 strcpy (options, "power8");
2792 if (TARGET_P8_FUSION_SIGN)
2793 strcat (options, ", sign");
2794
2795 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2796 }
2797
2798 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2799 TARGET_SECURE_PLT ? "secure" : "bss");
2800 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2801 aix_struct_return ? "aix" : "sysv");
2802 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2803 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2804 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2805 tf[!!rs6000_align_branch_targets]);
2806 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2807 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2808 rs6000_long_double_type_size);
2809 if (rs6000_long_double_type_size > 64)
2810 {
2811 fprintf (stderr, DEBUG_FMT_S, "long double type",
2812 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2813 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2814 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2815 }
2816 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2817 (int)rs6000_sched_restricted_insns_priority);
2818 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2819 (int)END_BUILTINS);
2820 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2821 (int)RS6000_BUILTIN_COUNT);
2822
2823 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2824 (int)TARGET_FLOAT128_ENABLE_TYPE);
2825
2826 if (TARGET_VSX)
2827 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2828 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2829
2830 if (TARGET_DIRECT_MOVE_128)
2831 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2832 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2833 }
2834
2835 \f
2836 /* Update the addr mask bits in reg_addr to help the secondary reload and
2837 legitimate address support figure out the appropriate addressing to
2838 use. */
2839
2840 static void
2841 rs6000_setup_reg_addr_masks (void)
2842 {
2843 ssize_t rc, reg, m, nregs;
2844 addr_mask_type any_addr_mask, addr_mask;
2845
2846 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2847 {
2848 machine_mode m2 = (machine_mode) m;
2849 bool complex_p = false;
2850 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2851 size_t msize;
2852
2853 if (COMPLEX_MODE_P (m2))
2854 {
2855 complex_p = true;
2856 m2 = GET_MODE_INNER (m2);
2857 }
2858
2859 msize = GET_MODE_SIZE (m2);
2860
2861 /* SDmode is special in that we want to access it only via REG+REG
2862 addressing on power7 and above, since we want to use the LFIWZX and
2863 STFIWZX instructions to load it. */
2864 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2865
2866 any_addr_mask = 0;
2867 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2868 {
2869 addr_mask = 0;
2870 reg = reload_reg_map[rc].reg;
2871
2872 /* Can mode values go in the GPR/FPR/Altivec registers? */
2873 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2874 {
2875 bool small_int_vsx_p = (small_int_p
2876 && (rc == RELOAD_REG_FPR
2877 || rc == RELOAD_REG_VMX));
2878
2879 nregs = rs6000_hard_regno_nregs[m][reg];
2880 addr_mask |= RELOAD_REG_VALID;
2881
2882 /* Indicate if the mode takes more than 1 physical register. If
2883 it takes a single register, indicate it can do REG+REG
2884 addressing. Small integers in VSX registers can only do
2885 REG+REG addressing. */
2886 if (small_int_vsx_p)
2887 addr_mask |= RELOAD_REG_INDEXED;
2888 else if (nregs > 1 || m == BLKmode || complex_p)
2889 addr_mask |= RELOAD_REG_MULTIPLE;
2890 else
2891 addr_mask |= RELOAD_REG_INDEXED;
2892
2893 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2894 addressing. If we allow scalars into Altivec registers,
2895 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2896
2897 For VSX systems, we don't allow update addressing for
2898 DFmode/SFmode if those registers can go in both the
2899 traditional floating point registers and Altivec registers.
2900 The load/store instructions for the Altivec registers do not
2901 have update forms. If we allowed update addressing, it seems
2902 to break IV-OPT code using floating point if the index type is
2903 int instead of long (PR target/81550 and target/84042). */
2904
2905 if (TARGET_UPDATE
2906 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2907 && msize <= 8
2908 && !VECTOR_MODE_P (m2)
2909 && !FLOAT128_VECTOR_P (m2)
2910 && !complex_p
2911 && (m != E_DFmode || !TARGET_VSX)
2912 && (m != E_SFmode || !TARGET_P8_VECTOR)
2913 && !small_int_vsx_p)
2914 {
2915 addr_mask |= RELOAD_REG_PRE_INCDEC;
2916
2917 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2918 we don't allow PRE_MODIFY for some multi-register
2919 operations. */
2920 switch (m)
2921 {
2922 default:
2923 addr_mask |= RELOAD_REG_PRE_MODIFY;
2924 break;
2925
2926 case E_DImode:
2927 if (TARGET_POWERPC64)
2928 addr_mask |= RELOAD_REG_PRE_MODIFY;
2929 break;
2930
2931 case E_DFmode:
2932 case E_DDmode:
2933 if (TARGET_HARD_FLOAT)
2934 addr_mask |= RELOAD_REG_PRE_MODIFY;
2935 break;
2936 }
2937 }
2938 }
2939
2940 /* GPR and FPR registers can do REG+OFFSET addressing, except
2941 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2942 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2943 if ((addr_mask != 0) && !indexed_only_p
2944 && msize <= 8
2945 && (rc == RELOAD_REG_GPR
2946 || ((msize == 8 || m2 == SFmode)
2947 && (rc == RELOAD_REG_FPR
2948 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2949 addr_mask |= RELOAD_REG_OFFSET;
2950
2951 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2952 instructions are enabled. The offset for 128-bit VSX registers is
2953 only 12-bits. While GPRs can handle the full offset range, VSX
2954 registers can only handle the restricted range. */
2955 else if ((addr_mask != 0) && !indexed_only_p
2956 && msize == 16 && TARGET_P9_VECTOR
2957 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2958 || (m2 == TImode && TARGET_VSX)))
2959 {
2960 addr_mask |= RELOAD_REG_OFFSET;
2961 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2962 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2963 }
2964
2965 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2966 addressing on 128-bit types. */
2967 if (rc == RELOAD_REG_VMX && msize == 16
2968 && (addr_mask & RELOAD_REG_VALID) != 0)
2969 addr_mask |= RELOAD_REG_AND_M16;
2970
2971 reg_addr[m].addr_mask[rc] = addr_mask;
2972 any_addr_mask |= addr_mask;
2973 }
2974
2975 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2976 }
2977 }
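/* Tracing one case through the loop above (assuming TARGET_VSX and
   TARGET_P9_VECTOR): V2DFmode in RELOAD_REG_VMX occupies a single 16-byte
   register, so it gets RELOAD_REG_VALID and RELOAD_REG_INDEXED, skips the
   update forms because it is a vector mode, then adds RELOAD_REG_OFFSET,
   RELOAD_REG_QUAD_OFFSET and RELOAD_REG_AND_M16, i.e. "v iO  &" in
   rs6000_debug_addr_mask notation. */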
2978
2979 \f
2980 /* Initialize the various global tables that are based on register size. */
2981 static void
2982 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2983 {
2984 ssize_t r, m, c;
2985 int align64;
2986 int align32;
2987
2988 /* Precalculate REGNO_REG_CLASS. */
2989 rs6000_regno_regclass[0] = GENERAL_REGS;
2990 for (r = 1; r < 32; ++r)
2991 rs6000_regno_regclass[r] = BASE_REGS;
2992
2993 for (r = 32; r < 64; ++r)
2994 rs6000_regno_regclass[r] = FLOAT_REGS;
2995
2996 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2997 rs6000_regno_regclass[r] = NO_REGS;
2998
2999 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3000 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3001
3002 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3003 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3004 rs6000_regno_regclass[r] = CR_REGS;
3005
3006 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3007 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3008 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3009 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3010 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3011 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3012 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3013 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3014 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3015 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3016
3017 /* Precalculate register class to simpler reload register class. We don't
3018 need all of the register classes that are combinations of different
3019 classes, just the simple ones that have constraint letters. */
3020 for (c = 0; c < N_REG_CLASSES; c++)
3021 reg_class_to_reg_type[c] = NO_REG_TYPE;
3022
3023 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3024 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3025 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3026 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3027 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3028 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3029 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3030 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3031 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3032 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3033
3034 if (TARGET_VSX)
3035 {
3036 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3037 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3038 }
3039 else
3040 {
3041 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3042 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3043 }
3044
3045 /* Precalculate the valid memory formats as well as the vector information;
3046 this must be set up before the rs6000_hard_regno_nregs_internal calls
3047 below. */
3048 gcc_assert ((int)VECTOR_NONE == 0);
3049 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3050 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
3051
3052 gcc_assert ((int)CODE_FOR_nothing == 0);
3053 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3054
3055 gcc_assert ((int)NO_REGS == 0);
3056 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3057
3058 /* The VSX hardware allows native alignment for vectors, but control here
3059 whether the compiler uses native alignment or still uses 128-bit alignment. */
3060 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3061 {
3062 align64 = 64;
3063 align32 = 32;
3064 }
3065 else
3066 {
3067 align64 = 128;
3068 align32 = 128;
3069 }
3070
3071 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3072 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3073 if (TARGET_FLOAT128_TYPE)
3074 {
3075 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3076 rs6000_vector_align[KFmode] = 128;
3077
3078 if (FLOAT128_IEEE_P (TFmode))
3079 {
3080 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3081 rs6000_vector_align[TFmode] = 128;
3082 }
3083 }
3084
3085 /* V2DF mode, VSX only. */
3086 if (TARGET_VSX)
3087 {
3088 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3089 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3090 rs6000_vector_align[V2DFmode] = align64;
3091 }
3092
3093 /* V4SF mode, either VSX or Altivec. */
3094 if (TARGET_VSX)
3095 {
3096 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3097 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3098 rs6000_vector_align[V4SFmode] = align32;
3099 }
3100 else if (TARGET_ALTIVEC)
3101 {
3102 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3103 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3104 rs6000_vector_align[V4SFmode] = align32;
3105 }
3106
3107 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3108 and stores. */
3109 if (TARGET_ALTIVEC)
3110 {
3111 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3112 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3113 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3114 rs6000_vector_align[V4SImode] = align32;
3115 rs6000_vector_align[V8HImode] = align32;
3116 rs6000_vector_align[V16QImode] = align32;
3117
3118 if (TARGET_VSX)
3119 {
3120 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3121 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3122 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3123 }
3124 else
3125 {
3126 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3127 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3128 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3129 }
3130 }
3131
3132 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3133 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3134 if (TARGET_VSX)
3135 {
3136 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3137 rs6000_vector_unit[V2DImode]
3138 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3139 rs6000_vector_align[V2DImode] = align64;
3140
3141 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3142 rs6000_vector_unit[V1TImode]
3143 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3144 rs6000_vector_align[V1TImode] = 128;
3145 }
3146
3147 /* DFmode, see if we want to use the VSX unit. Memory is handled
3148 differently, so don't set rs6000_vector_mem. */
3149 if (TARGET_VSX)
3150 {
3151 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3152 rs6000_vector_align[DFmode] = 64;
3153 }
3154
3155 /* SFmode, see if we want to use the VSX unit. */
3156 if (TARGET_P8_VECTOR)
3157 {
3158 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3159 rs6000_vector_align[SFmode] = 32;
3160 }
3161
3162 /* Allow TImode in VSX register and set the VSX memory macros. */
3163 if (TARGET_VSX)
3164 {
3165 rs6000_vector_mem[TImode] = VECTOR_VSX;
3166 rs6000_vector_align[TImode] = align64;
3167 }
3168
3169 /* Register class constraints for the constraints that depend on compile
3170 switches. When the VSX code was added, different constraints were added
3171 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3172 of the VSX registers are used. The register classes for scalar floating
3173 point types are set, based on whether we allow that type into the upper
3174 (Altivec) registers. GCC has register classes to target the Altivec
3175 registers for load/store operations, to select using a VSX memory
3176 operation instead of the traditional floating point operation. The
3177 constraints are:
3178
3179 d - Register class to use with traditional DFmode instructions.
3180 f - Register class to use with traditional SFmode instructions.
3181 v - Altivec register.
3182 wa - Any VSX register.
3183 wc - Reserved to represent individual CR bits (used in LLVM).
3184 wd - Preferred register class for V2DFmode.
3185 wf - Preferred register class for V4SFmode.
3186 wg - Float register for power6x move insns.
3187 wh - FP register for direct move instructions.
3188 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3189 wj - FP or VSX register to hold 64-bit integers for direct moves.
3190 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3191 wl - Float register if we can do 32-bit signed int loads.
3192 wm - VSX register for ISA 2.07 direct move operations.
3193 wn - always NO_REGS.
3194 wr - GPR if 64-bit mode is permitted.
3195 ws - Register class to do ISA 2.06 DF operations.
3196 wt - VSX register for TImode in VSX registers.
3197 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3198 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3199 ww - Register class to do SF conversions in with VSX operations.
3200 wx - Float register if we can do 32-bit int stores.
3201 wy - Register class to do ISA 2.07 SF operations.
3202 wz - Float register if we can do 32-bit unsigned int loads.
3203 wH - Altivec register if SImode is allowed in VSX registers.
3204 wI - VSX register if SImode is allowed in VSX registers.
3205 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3206 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3207
3208 if (TARGET_HARD_FLOAT)
3209 {
3210 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3211 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3212 }
3213
3214 if (TARGET_VSX)
3215 {
3216 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3217 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3218 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3219 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3220 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3221 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3222 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3223 }
3224
3225 /* Add conditional constraints based on various options, to allow us to
3226 collapse multiple insn patterns. */
3227 if (TARGET_ALTIVEC)
3228 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3229
3230 if (TARGET_MFPGPR) /* DFmode */
3231 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3232
3233 if (TARGET_LFIWAX)
3234 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3235
3236 if (TARGET_DIRECT_MOVE)
3237 {
3238 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3239 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3240 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3241 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3242 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3243 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3244 }
3245
3246 if (TARGET_POWERPC64)
3247 {
3248 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3249 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3250 }
3251
3252 if (TARGET_P8_VECTOR) /* SFmode */
3253 {
3254 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3255 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3256 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3257 }
3258 else if (TARGET_VSX)
3259 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3260
3261 if (TARGET_STFIWX)
3262 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3263
3264 if (TARGET_LFIWZX)
3265 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3266
3267 if (TARGET_FLOAT128_TYPE)
3268 {
3269 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3270 if (FLOAT128_IEEE_P (TFmode))
3271 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3272 }
3273
3274 if (TARGET_P9_VECTOR)
3275 {
3276 /* Support for new D-form instructions. */
3277 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3278
3279 /* Support for ISA 3.0 (power9) vectors. */
3280 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3281 }
3282
3283 /* Support for new direct moves (ISA 3.0 + 64bit). */
3284 if (TARGET_DIRECT_MOVE_128)
3285 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3286
3287 /* Support small integers in VSX registers. */
3288 if (TARGET_P8_VECTOR)
3289 {
3290 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3291 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3292 if (TARGET_P9_VECTOR)
3293 {
3294 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3295 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3296 }
3297 }
3298
3299 /* Set up the reload helper and direct move functions. */
3300 if (TARGET_VSX || TARGET_ALTIVEC)
3301 {
3302 if (TARGET_64BIT)
3303 {
3304 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3305 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3306 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3307 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3308 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3309 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3310 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3311 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3312 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3313 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3314 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3315 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3316 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3317 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3318 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3319 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3320 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3321 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3322 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3323 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3324
3325 if (FLOAT128_VECTOR_P (KFmode))
3326 {
3327 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3328 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3329 }
3330
3331 if (FLOAT128_VECTOR_P (TFmode))
3332 {
3333 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3334 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3335 }
3336
3337 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3338 available. */
3339 if (TARGET_NO_SDMODE_STACK)
3340 {
3341 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3342 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3343 }
3344
3345 if (TARGET_VSX)
3346 {
3347 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3348 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3349 }
3350
3351 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3352 {
3353 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3354 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3355 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3356 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3357 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3358 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3359 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3360 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3361 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3362
3363 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3364 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3365 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3366 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3367 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3368 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3369 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3370 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3371 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3372
3373 if (FLOAT128_VECTOR_P (KFmode))
3374 {
3375 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3376 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3377 }
3378
3379 if (FLOAT128_VECTOR_P (TFmode))
3380 {
3381 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3382 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3383 }
3384 }
3385 }
3386 else
3387 {
3388 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3389 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3390 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3391 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3392 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3393 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3394 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3395 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3396 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3397 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3398 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3399 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3400 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3401 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3402 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3403 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3404 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3405 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3406 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3407 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3408
3409 if (FLOAT128_VECTOR_P (KFmode))
3410 {
3411 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3412 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3413 }
3414
3415 if (FLOAT128_IEEE_P (TFmode))
3416 {
3417 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3418 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3419 }
3420
3421 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3422 available. */
3423 if (TARGET_NO_SDMODE_STACK)
3424 {
3425 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3426 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3427 }
3428
3429 if (TARGET_VSX)
3430 {
3431 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3432 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3433 }
3434
3435 if (TARGET_DIRECT_MOVE)
3436 {
3437 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3438 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3439 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3440 }
3441 }
3442
3443 reg_addr[DFmode].scalar_in_vmx_p = true;
3444 reg_addr[DImode].scalar_in_vmx_p = true;
3445
3446 if (TARGET_P8_VECTOR)
3447 {
3448 reg_addr[SFmode].scalar_in_vmx_p = true;
3449 reg_addr[SImode].scalar_in_vmx_p = true;
3450
3451 if (TARGET_P9_VECTOR)
3452 {
3453 reg_addr[HImode].scalar_in_vmx_p = true;
3454 reg_addr[QImode].scalar_in_vmx_p = true;
3455 }
3456 }
3457 }
3458
3459 /* Precalculate HARD_REGNO_NREGS. */
3460 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3461 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3462 rs6000_hard_regno_nregs[m][r]
3463 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
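/* For example, with the usual register sizes this computes 1 for DFmode in
   a 64-bit GPR or an FPR, 2 for DFmode in 32-bit GPRs, and 1 for the
   16-byte vector modes in a VSX/Altivec register versus 2 (64-bit) or 4
   (32-bit) in GPRs.  */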
3464
3465 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3466 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3467 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3468 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3469 rs6000_hard_regno_mode_ok_p[m][r] = true;
3470
3471 /* Precalculate CLASS_MAX_NREGS sizes. */
3472 for (c = 0; c < LIM_REG_CLASSES; ++c)
3473 {
3474 int reg_size;
3475
3476 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3477 reg_size = UNITS_PER_VSX_WORD;
3478
3479 else if (c == ALTIVEC_REGS)
3480 reg_size = UNITS_PER_ALTIVEC_WORD;
3481
3482 else if (c == FLOAT_REGS)
3483 reg_size = UNITS_PER_FP_WORD;
3484
3485 else
3486 reg_size = UNITS_PER_WORD;
3487
3488 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3489 {
3490 machine_mode m2 = (machine_mode)m;
3491 int reg_size2 = reg_size;
3492
3493	    /* TDmode and IBM 128-bit floating point always take 2 registers, even
3494	       in VSX.  */
3495 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3496 reg_size2 = UNITS_PER_FP_WORD;
3497
3498 rs6000_class_max_nregs[m][c]
3499 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3500 }
3501 }
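/* The expression above is a ceiling division.  For instance, a 16-byte
   V2DFmode value needs (16 + 16 - 1) / 16 = 1 register in a VSX class but
   (16 + 4 - 1) / 4 = 4 registers in a 32-bit GPR class.  */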
3502
3503	/* Calculate the modes for which to automatically generate code using the
3504	reciprocal divide and square root estimate instructions.  In the future we
3505	may generate these instructions even if the user did not specify
3506	-mrecip; on the older machines the double-precision reciprocal sqrt
3507	estimate is not accurate enough.  */
3508 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3509 if (TARGET_FRES)
3510 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3511 if (TARGET_FRE)
3512 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3513 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3514 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3515 if (VECTOR_UNIT_VSX_P (V2DFmode))
3516 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3517
3518 if (TARGET_FRSQRTES)
3519 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3520 if (TARGET_FRSQRTE)
3521 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3522 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3523 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3524 if (VECTOR_UNIT_VSX_P (V2DFmode))
3525 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3526
3527 if (rs6000_recip_control)
3528 {
3529 if (!flag_finite_math_only)
3530	warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3531	"-ffast-math");
3532 if (flag_trapping_math)
3533 warning (0, "%qs requires %qs or %qs", "-mrecip",
3534 "-fno-trapping-math", "-ffast-math");
3535 if (!flag_reciprocal_math)
3536 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3537 "-ffast-math");
3538 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3539 {
3540 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3541 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3542 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3543
3544 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3545 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3546 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3547
3548 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3549 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3550 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3551
3552 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3553 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3554 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3555
3556 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3557 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3558 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3559
3560 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3561 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3562 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3563
3564 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3565 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3566 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3567
3568 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3569 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3570 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3571 }
3572 }
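/* As an illustrative invocation (option spellings from invoke.texi), a
   command line such as

       gcc -O2 -ffast-math -mrecip=div,rsqrt ...

   would set the AUTO_RE and AUTO_RSQRTE bits above for each mode whose
   HAVE_* bit is set, making the expanders emit fre/frsqrte (or their
   single-precision and vector variants) plus Newton-Raphson refinement
   instead of full divide and square root instructions.  */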
3573
3574	/* Update the addr mask bits in reg_addr to help secondary reload and the
3575	legitimate address support figure out the appropriate addressing to
3576	use.  */
3577 rs6000_setup_reg_addr_masks ();
3578
3579 if (global_init_p || TARGET_DEBUG_TARGET)
3580 {
3581 if (TARGET_DEBUG_REG)
3582 rs6000_debug_reg_global ();
3583
3584 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3585 fprintf (stderr,
3586 "SImode variable mult cost = %d\n"
3587 "SImode constant mult cost = %d\n"
3588 "SImode short constant mult cost = %d\n"
3589 "DImode multipliciation cost = %d\n"
3590 "SImode division cost = %d\n"
3591 "DImode division cost = %d\n"
3592 "Simple fp operation cost = %d\n"
3593 "DFmode multiplication cost = %d\n"
3594 "SFmode division cost = %d\n"
3595 "DFmode division cost = %d\n"
3596 "cache line size = %d\n"
3597 "l1 cache size = %d\n"
3598 "l2 cache size = %d\n"
3599 "simultaneous prefetches = %d\n"
3600 "\n",
3601 rs6000_cost->mulsi,
3602 rs6000_cost->mulsi_const,
3603 rs6000_cost->mulsi_const9,
3604 rs6000_cost->muldi,
3605 rs6000_cost->divsi,
3606 rs6000_cost->divdi,
3607 rs6000_cost->fp,
3608 rs6000_cost->dmul,
3609 rs6000_cost->sdiv,
3610 rs6000_cost->ddiv,
3611 rs6000_cost->cache_line_size,
3612 rs6000_cost->l1_cache_size,
3613 rs6000_cost->l2_cache_size,
3614 rs6000_cost->simultaneous_prefetches);
3615 }
3616 }
3617
3618 #if TARGET_MACHO
3619 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3620
3621 static void
3622 darwin_rs6000_override_options (void)
3623 {
3624	/* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3625	off.  */
3626 rs6000_altivec_abi = 1;
3627 TARGET_ALTIVEC_VRSAVE = 1;
3628 rs6000_current_abi = ABI_DARWIN;
3629
3630 if (DEFAULT_ABI == ABI_DARWIN
3631 && TARGET_64BIT)
3632 darwin_one_byte_bool = 1;
3633
3634 if (TARGET_64BIT && ! TARGET_POWERPC64)
3635 {
3636 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3637 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3638 }
3639 if (flag_mkernel)
3640 {
3641 rs6000_default_long_calls = 1;
3642 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3643 }
3644
3645 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3646 Altivec. */
3647 if (!flag_mkernel && !flag_apple_kext
3648 && TARGET_64BIT
3649 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3650 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3651
3652	/* Unless the user (not the configurer) has explicitly overridden
3653	it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3654	G4 unless targeting the kernel.  */
3655 if (!flag_mkernel
3656 && !flag_apple_kext
3657 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3658 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3659 && ! global_options_set.x_rs6000_cpu_index)
3660 {
3661 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3662 }
3663 }
3664 #endif
3665
3666 /* If not otherwise specified by a target, make 'long double' equivalent to
3667 'double'. */
3668
3669 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3670 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3671 #endif
3672
3673	/* Return the builtin mask for the various options that could affect which
3674	built-in functions are available.  In the past we used target_flags, but
3675	we've run out of bits, and some options are no longer in target_flags.  */
3676
3677 HOST_WIDE_INT
3678 rs6000_builtin_mask_calculate (void)
3679 {
3680 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3681 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3682 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3683 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3684 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3685 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3686 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3687 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3688 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3689 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3690 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3691 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3692 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3693 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3694 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3695 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3696 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3697 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3698 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3699 | ((TARGET_LONG_DOUBLE_128
3700 && TARGET_HARD_FLOAT
3701 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3702 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3703 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3704 }
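/* This mask is compared against the per-builtin masks from
   rs6000-builtin.def when built-in functions are expanded, so that (for
   example) an Altivec builtin is only considered available when
   RS6000_BTM_ALTIVEC is set here.  That is what lets a target attribute or
   pragma toggle builtin availability per function.  */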
3705
3706 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3707 to clobber the XER[CA] bit because clobbering that bit without telling
3708 the compiler worked just fine with versions of GCC before GCC 5, and
3709 breaking a lot of older code in ways that are hard to track down is
3710 not such a great idea. */
3711
3712 static rtx_insn *
3713 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3714 vec<const char *> &/*constraints*/,
3715 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3716 {
3717 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3718 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3719 return NULL;
3720 }
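/* For instance (an illustrative fragment, not code from this file), the
   classic carry idiom

       asm ("addic %0,%1,-1\n\tsubfe %0,%0,%1" : "=&r" (x) : "r" (y));

   computes x = (y != 0) by consuming and setting XER[CA] without declaring
   it; the unconditional clobber added above keeps such code correct.  */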
3721
3722 /* Override command line options.
3723
3724 Combine build-specific configuration information with options
3725 specified on the command line to set various state variables which
3726 influence code generation, optimization, and expansion of built-in
3727	functions.  Ensure that command-line configuration preferences are
3728	compatible with each other and with the build configuration; issue
3729	warnings when adjusting the configuration, and errors when rejecting
3730	it.
3731
3732 Upon entry to this function:
3733
3734 This function is called once at the beginning of
3735 compilation, and then again at the start and end of compiling
3736 each section of code that has a different configuration, as
3737 indicated, for example, by adding the
3738
3739 __attribute__((__target__("cpu=power9")))
3740
3741 qualifier to a function definition or, for example, by bracketing
3742 code between
3743
3744 #pragma GCC target("altivec")
3745
3746 and
3747
3748 #pragma GCC reset_options
3749
3750 directives. Parameter global_init_p is true for the initial
3751 invocation, which initializes global variables, and false for all
3752 subsequent invocations.
3753
3754
3755 Various global state information is assumed to be valid. This
3756 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3757 default CPU specified at build configure time, TARGET_DEFAULT,
3758 representing the default set of option flags for the default
3759 target, and global_options_set.x_rs6000_isa_flags, representing
3760 which options were requested on the command line.
3761
3762 Upon return from this function:
3763
3764 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3765 was set by name on the command line. Additionally, if certain
3766 attributes are automatically enabled or disabled by this function
3767 in order to assure compatibility between options and
3768 configuration, the flags associated with those attributes are
3769 also set. By setting these "explicit bits", we avoid the risk
3770 that other code might accidentally overwrite these particular
3771 attributes with "default values".
3772
3773 The various bits of rs6000_isa_flags are set to indicate the
3774 target options that have been selected for the most current
3775 compilation efforts. This has the effect of also turning on the
3776 associated TARGET_XXX values since these are macros which are
3777 generally defined to test the corresponding bit of the
3778 rs6000_isa_flags variable.
3779
3780 The variable rs6000_builtin_mask is set to represent the target
3781 options for the most current compilation efforts, consistent with
3782 the current contents of rs6000_isa_flags. This variable controls
3783 expansion of built-in functions.
3784
3785 Various other global variables and fields of global structures
3786 (over 50 in all) are initialized to reflect the desired options
3787 for the most current compilation efforts. */
3788
3789 static bool
3790 rs6000_option_override_internal (bool global_init_p)
3791 {
3792 bool ret = true;
3793
3794 HOST_WIDE_INT set_masks;
3795 HOST_WIDE_INT ignore_masks;
3796 int cpu_index = -1;
3797 int tune_index;
3798 struct cl_target_option *main_target_opt
3799 = ((global_init_p || target_option_default_node == NULL)
3800 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3801
3802 /* Print defaults. */
3803 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3804 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3805
3806 /* Remember the explicit arguments. */
3807 if (global_init_p)
3808 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3809
3810 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3811 library functions, so warn about it. The flag may be useful for
3812 performance studies from time to time though, so don't disable it
3813 entirely. */
3814 if (global_options_set.x_rs6000_alignment_flags
3815 && rs6000_alignment_flags == MASK_ALIGN_POWER
3816 && DEFAULT_ABI == ABI_DARWIN
3817 && TARGET_64BIT)
3818 warning (0, "%qs is not supported for 64-bit Darwin;"
3819 " it is incompatible with the installed C and C++ libraries",
3820 "-malign-power");
3821
3822	/* Numerous experiments show that IRA-based loop pressure
3823	calculation works better for RTL loop invariant motion on targets
3824	with enough (>= 32) registers.  It is an expensive optimization,
3825	so it is enabled only when optimizing for peak performance.  */
3826 if (optimize >= 3 && global_init_p
3827 && !global_options_set.x_flag_ira_loop_pressure)
3828 flag_ira_loop_pressure = 1;
3829
3830	/* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3831	for tracebacks to be complete, but not if an -fasynchronous-unwind-tables
3832	option was already specified.  */
3833 if (flag_sanitize & SANITIZE_USER_ADDRESS
3834 && !global_options_set.x_flag_asynchronous_unwind_tables)
3835 flag_asynchronous_unwind_tables = 1;
3836
3837 /* Set the pointer size. */
3838 if (TARGET_64BIT)
3839 {
3840 rs6000_pmode = DImode;
3841 rs6000_pointer_size = 64;
3842 }
3843 else
3844 {
3845 rs6000_pmode = SImode;
3846 rs6000_pointer_size = 32;
3847 }
3848
3849 /* Some OSs don't support saving the high part of 64-bit registers on context
3850 switch. Other OSs don't support saving Altivec registers. On those OSs,
3851 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3852 if the user wants either, the user must explicitly specify them and we
3853 won't interfere with the user's specification. */
3854
3855 set_masks = POWERPC_MASKS;
3856 #ifdef OS_MISSING_POWERPC64
3857 if (OS_MISSING_POWERPC64)
3858 set_masks &= ~OPTION_MASK_POWERPC64;
3859 #endif
3860 #ifdef OS_MISSING_ALTIVEC
3861 if (OS_MISSING_ALTIVEC)
3862 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3863 | OTHER_VSX_VECTOR_MASKS);
3864 #endif
3865
3866	/* Don't let the processor default override options that were given explicitly.  */
3867 set_masks &= ~rs6000_isa_flags_explicit;
3868
3869	/* Process the -mcpu=<xxx> and -mtune=<xxx> arguments.  If the user changed
3870	the cpu in a target attribute or pragma, but did not specify a tuning
3871	option, use the cpu for the tuning option rather than the option specified
3872	with -mtune on the command line.  Treat a '--with-cpu' configure-time
3873	request as an implicit -mcpu.  */
3874 if (rs6000_cpu_index >= 0)
3875 cpu_index = rs6000_cpu_index;
3876 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3877 cpu_index = main_target_opt->x_rs6000_cpu_index;
3878 else if (OPTION_TARGET_CPU_DEFAULT)
3879 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3880
3881 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3882 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3883 with those from the cpu, except for options that were explicitly set. If
3884 we don't have a cpu, do not override the target bits set in
3885 TARGET_DEFAULT. */
3886 if (cpu_index >= 0)
3887 {
3888 rs6000_cpu_index = cpu_index;
3889 rs6000_isa_flags &= ~set_masks;
3890 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3891 & set_masks);
3892 }
3893 else
3894 {
3895 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3896 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3897	target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook.  Now that we use
3898	rs6000_isa_flags, the initialization needs to be done here.
3899
3900 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3901 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3902 HOST_WIDE_INT flags;
3903 if (TARGET_DEFAULT)
3904 flags = TARGET_DEFAULT;
3905 else
3906 {
3907 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3908 const char *default_cpu = (!TARGET_POWERPC64
3909 ? "powerpc"
3910 : (BYTES_BIG_ENDIAN
3911 ? "powerpc64"
3912 : "powerpc64le"));
3913 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3914 flags = processor_target_table[default_cpu_index].target_enable;
3915 }
3916 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3917 }
3918
3919 if (rs6000_tune_index >= 0)
3920 tune_index = rs6000_tune_index;
3921 else if (cpu_index >= 0)
3922 rs6000_tune_index = tune_index = cpu_index;
3923 else
3924 {
3925 size_t i;
3926 enum processor_type tune_proc
3927 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3928
3929 tune_index = -1;
3930 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3931 if (processor_target_table[i].processor == tune_proc)
3932 {
3933 tune_index = i;
3934 break;
3935 }
3936 }
3937
3938 if (cpu_index >= 0)
3939 rs6000_cpu = processor_target_table[cpu_index].processor;
3940 else
3941 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3942
3943 gcc_assert (tune_index >= 0);
3944 rs6000_tune = processor_target_table[tune_index].processor;
3945
3946 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3947 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3948 || rs6000_cpu == PROCESSOR_PPCE5500)
3949 {
3950 if (TARGET_ALTIVEC)
3951 error ("AltiVec not supported in this target");
3952 }
3953
3954	/* If we are optimizing big-endian systems for space, use the load/store
3955	multiple instructions.  */
3956 if (BYTES_BIG_ENDIAN && optimize_size)
3957 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3958
3959	/* Don't allow -mmultiple on little-endian systems unless the cpu is a 750,
3960	because the hardware doesn't support these instructions in little-endian
3961	mode and they cause an alignment trap.  The 750 does not cause an
3962	alignment trap (except when the access is actually unaligned).  */
3963
3964 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3965 {
3966 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3967 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3968 warning (0, "%qs is not supported on little endian systems",
3969 "-mmultiple");
3970 }
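/* The load/store multiple instructions in question are lmw and stmw; in
   little-endian mode most implementations take an alignment interrupt for
   them instead of executing them.  */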
3971
3972 /* If little-endian, default to -mstrict-align on older processors.
3973 Testing for htm matches power8 and later. */
3974 if (!BYTES_BIG_ENDIAN
3975 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3976 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3977
3978 if (!rs6000_fold_gimple)
3979 fprintf (stderr,
3980 "gimple folding of rs6000 builtins has been disabled.\n");
3981
3982 /* Add some warnings for VSX. */
3983 if (TARGET_VSX)
3984 {
3985 const char *msg = NULL;
3986 if (!TARGET_HARD_FLOAT)
3987 {
3988 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3989 msg = N_("-mvsx requires hardware floating point");
3990 else
3991 {
3992 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3993 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3994 }
3995 }
3996 else if (TARGET_AVOID_XFORM > 0)
3997 msg = N_("-mvsx needs indexed addressing");
3998 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3999 & OPTION_MASK_ALTIVEC))
4000 {
4001 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4002 msg = N_("-mvsx and -mno-altivec are incompatible");
4003 else
4004 msg = N_("-mno-altivec disables vsx");
4005 }
4006
4007 if (msg)
4008 {
4009 warning (0, msg);
4010 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4011 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4012 }
4013 }
4014
4015 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4016 the -mcpu setting to enable options that conflict. */
4017 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4018 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4019 | OPTION_MASK_ALTIVEC
4020 | OPTION_MASK_VSX)) != 0)
4021 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4022 | OPTION_MASK_DIRECT_MOVE)
4023 & ~rs6000_isa_flags_explicit);
4024
4025 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4026 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4027
4028 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4029 off all of the options that depend on those flags. */
4030 ignore_masks = rs6000_disable_incompatible_switches ();
4031
4032 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4033 unless the user explicitly used the -mno-<option> to disable the code. */
4034 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4035 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4036 else if (TARGET_P9_MINMAX)
4037 {
4038 if (cpu_index >= 0)
4039 {
4040	if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4041	{
4042	/* Legacy behavior: allow -mcpu=power9 with certain
4043	capabilities explicitly disabled.  */
4044 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4045 }
4046 else
4047 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4048 "for <xxx> less than power9", "-mcpu");
4049 }
4050 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4051 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4052 & rs6000_isa_flags_explicit))
4053 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4054 were explicitly cleared. */
4055 error ("%qs incompatible with explicitly disabled options",
4056 "-mpower9-minmax");
4057 else
4058 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4059 }
4060 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4061 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4062 else if (TARGET_VSX)
4063 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4064 else if (TARGET_POPCNTD)
4065 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4066 else if (TARGET_DFP)
4067 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4068 else if (TARGET_CMPB)
4069 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4070 else if (TARGET_FPRND)
4071 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4072 else if (TARGET_POPCNTB)
4073 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4074 else if (TARGET_ALTIVEC)
4075 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4076
4077 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4078 {
4079 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4080 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4081 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4082 }
4083
4084 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4085 {
4086 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4087 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4088 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4089 }
4090
4091 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4092 {
4093 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4094 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4095 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4096 }
4097
4098 if (TARGET_P8_VECTOR && !TARGET_VSX)
4099 {
4100 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4101 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4102 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4103 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4104 {
4105 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4106 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4107 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4108 }
4109 else
4110 {
4111 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4112 not explicit. */
4113 rs6000_isa_flags |= OPTION_MASK_VSX;
4114 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4115 }
4116 }
4117
4118 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4119 {
4120 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4121 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4122 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4123 }
4124
4125	/* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
4126	silently turn off quad memory mode.  */
4127 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4128 {
4129 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4130 warning (0, N_("-mquad-memory requires 64-bit mode"));
4131
4132 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4133 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4134
4135 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4136 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4137 }
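/* The instructions gated here are the power8 (ISA 2.07) quad-word forms:
   lq/stq for -mquad-memory and lqarx/stqcx. for -mquad-memory-atomic.
   They operate on an even/odd GPR pair, and the ISA only provides them in
   64-bit implementations.  */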
4138
4139	/* Non-atomic quad memory loads/stores are disabled for little endian, since
4140	the words are reversed, but atomic operations can still be done by
4141	swapping the words.  */
4142 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4143 {
4144 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4145 warning (0, N_("-mquad-memory is not available in little endian "
4146 "mode"));
4147
4148 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4149 }
4150
4151	/* Assume that if the user asked for normal quad memory instructions, they
4152	want the atomic versions as well, unless they explicitly told us not to
4153	use quad word atomic instructions.  */
4154 if (TARGET_QUAD_MEMORY
4155 && !TARGET_QUAD_MEMORY_ATOMIC
4156 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4157 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4158
4159 /* If we can shrink-wrap the TOC register save separately, then use
4160 -msave-toc-indirect unless explicitly disabled. */
4161 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4162 && flag_shrink_wrap_separate
4163 && optimize_function_for_speed_p (cfun))
4164 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4165
4166 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4167 generating power8 instructions. Power9 does not optimize power8 fusion
4168 cases. */
4169 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4170 {
4171 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4172 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4173 else
4174 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4175 }
4176
4177 /* Setting additional fusion flags turns on base fusion. */
4178 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4179 {
4180 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4181 {
4182 if (TARGET_P8_FUSION_SIGN)
4183 error ("%qs requires %qs", "-mpower8-fusion-sign",
4184 "-mpower8-fusion");
4185
4186 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4187 }
4188 else
4189 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4190 }
4191
4192	/* Power8 does not fuse sign-extended loads with the addis.  If we are
4193	optimizing at high levels for speed, convert a sign-extended load into a
4194	zero-extending load followed by an explicit sign extension.  */
4195 if (TARGET_P8_FUSION
4196 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4197 && optimize_function_for_speed_p (cfun)
4198 && optimize >= 3)
4199 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
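/* "Fusion" here is power8's ability to execute an addis feeding a
   dependent D-form load (e.g. addis rX,rY,high; lwz rZ,low(rX)) as if the
   pair were a single operation.  Sign-extending loads such as lwa are not
   fused, hence the rewrite above.  */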
4200
4201 /* ISA 3.0 vector instructions include ISA 2.07. */
4202 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4203 {
4204	/* We prefer not to mention undocumented options in
4205	error messages.  However, if users have managed to select
4206	power9-vector without selecting power8-vector, they
4207	already know about undocumented flags.  */
4208	if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4209	&& (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4210 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4211 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4212 {
4213 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4214 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4215 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4216 }
4217 else
4218 {
4219 /* OPTION_MASK_P9_VECTOR is explicit and
4220 OPTION_MASK_P8_VECTOR is not explicit. */
4221 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4222 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4223 }
4224 }
4225
4226	/* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4227	support.  If we only have ISA 2.06 support and the user did not specify
4228	the switch, leave it set to -1 so the movmisalign patterns are enabled,
4229	but the full vectorization support is not.  */
4230 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4231 TARGET_ALLOW_MOVMISALIGN = 1;
4232
4233 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4234 {
4235 if (TARGET_ALLOW_MOVMISALIGN > 0
4236 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4237 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4238
4239 TARGET_ALLOW_MOVMISALIGN = 0;
4240 }
4241
4242 /* Determine when unaligned vector accesses are permitted, and when
4243 they are preferred over masked Altivec loads. Note that if
4244 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4245 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4246 not true. */
4247 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4248 {
4249 if (!TARGET_VSX)
4250 {
4251 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4252 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4253
4254 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4255 }
4256
4257 else if (!TARGET_ALLOW_MOVMISALIGN)
4258 {
4259 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4260	error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4261 "-mallow-movmisalign");
4262
4263 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4264 }
4265 }
4266
4267	/* Use the long double size to select the appropriate long double type.  We
4268	use TYPE_PRECISION to differentiate the 3 different long double types, and
4269	map 128 into the precision used for TFmode.  */
4270 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4271 ? 64
4272 : FLOAT_PRECISION_TFmode);
4273
4274 /* Set long double size before the IEEE 128-bit tests. */
4275 if (!global_options_set.x_rs6000_long_double_type_size)
4276 {
4277 if (main_target_opt != NULL
4278 && (main_target_opt->x_rs6000_long_double_type_size
4279 != default_long_double_size))
4280 error ("target attribute or pragma changes long double size");
4281 else
4282 rs6000_long_double_type_size = default_long_double_size;
4283 }
4284 else if (rs6000_long_double_type_size == 128)
4285 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4286 else if (global_options_set.x_rs6000_ieeequad)
4287 {
4288 if (global_options.x_rs6000_ieeequad)
4289 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4290 else
4291 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4292 }
4293
4294 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4295 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4296 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4297 those systems will not pick up this default. Warn if the user changes the
4298 default unless -Wno-psabi. */
4299 if (!global_options_set.x_rs6000_ieeequad)
4300 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4301
4302 else
4303 {
4304 if (global_options.x_rs6000_ieeequad
4305 && (!TARGET_POPCNTD || !TARGET_VSX))
4306 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4307
4308 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4309 {
4310 static bool warned_change_long_double;
4311 if (!warned_change_long_double)
4312 {
4313 warned_change_long_double = true;
4314 if (TARGET_IEEEQUAD)
4315	warning (OPT_Wpsabi, "using IEEE extended precision long double");
4316	else
4317	warning (OPT_Wpsabi, "using IBM extended precision long double");
4318 }
4319 }
4320 }
4321
4322	/* Enable the default support for IEEE 128-bit floating point on Linux VSX
4323	systems.  In GCC 7, we enabled the IEEE 128-bit floating point
4324	infrastructure (-mfloat128-type) but did not enable the actual __float128
4325	type unless the user used the explicit -mfloat128.  In GCC 8, we enable
4326	both the keyword and the type.  */
4327 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4328
4329 /* IEEE 128-bit floating point requires VSX support. */
4330 if (TARGET_FLOAT128_KEYWORD)
4331 {
4332 if (!TARGET_VSX)
4333 {
4334 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4335 error ("%qs requires VSX support", "-mfloat128");
4336
4337 TARGET_FLOAT128_TYPE = 0;
4338 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4339 | OPTION_MASK_FLOAT128_HW);
4340 }
4341 else if (!TARGET_FLOAT128_TYPE)
4342 {
4343 TARGET_FLOAT128_TYPE = 1;
4344 warning (0, "The -mfloat128 option may not be fully supported");
4345 }
4346 }
4347
4348 /* Enable the __float128 keyword under Linux by default. */
4349 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4350 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4351 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4352
4353	/* If we are supporting the float128 type and have full ISA 3.0 support,
4354	enable -mfloat128-hardware by default.  However, don't enable it if the
4355	hardware support was explicitly turned off.  64-bit mode is needed
4356	because sometimes the compiler wants to put things in an integer
4357	container, and without __int128 support that is impossible.  */
4358 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4359 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4360 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4361 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4362
4363 if (TARGET_FLOAT128_HW
4364 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4365 {
4366 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4367 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4368
4369 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4370 }
4371
4372 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4373 {
4374 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4375 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4376
4377 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4378 }
4379
4380 /* Print the options after updating the defaults. */
4381 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4382 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4383
4384 /* E500mc does "better" if we inline more aggressively. Respect the
4385 user's opinion, though. */
4386 if (rs6000_block_move_inline_limit == 0
4387 && (rs6000_tune == PROCESSOR_PPCE500MC
4388 || rs6000_tune == PROCESSOR_PPCE500MC64
4389 || rs6000_tune == PROCESSOR_PPCE5500
4390 || rs6000_tune == PROCESSOR_PPCE6500))
4391 rs6000_block_move_inline_limit = 128;
4392
4393 /* store_one_arg depends on expand_block_move to handle at least the
4394 size of reg_parm_stack_space. */
4395 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4396 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4397
4398 if (global_init_p)
4399 {
4400 /* If the appropriate debug option is enabled, replace the target hooks
4401	with debug versions that call the real version and then print
4402	debugging information.  */
4403 if (TARGET_DEBUG_COST)
4404 {
4405 targetm.rtx_costs = rs6000_debug_rtx_costs;
4406 targetm.address_cost = rs6000_debug_address_cost;
4407 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4408 }
4409
4410 if (TARGET_DEBUG_ADDR)
4411 {
4412 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4413 targetm.legitimize_address = rs6000_debug_legitimize_address;
4414 rs6000_secondary_reload_class_ptr
4415 = rs6000_debug_secondary_reload_class;
4416 targetm.secondary_memory_needed
4417 = rs6000_debug_secondary_memory_needed;
4418 targetm.can_change_mode_class
4419 = rs6000_debug_can_change_mode_class;
4420 rs6000_preferred_reload_class_ptr
4421 = rs6000_debug_preferred_reload_class;
4422 rs6000_legitimize_reload_address_ptr
4423 = rs6000_debug_legitimize_reload_address;
4424 rs6000_mode_dependent_address_ptr
4425 = rs6000_debug_mode_dependent_address;
4426 }
4427
4428 if (rs6000_veclibabi_name)
4429 {
4430 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4431 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4432 else
4433 {
4434 error ("unknown vectorization library ABI type (%qs) for "
4435 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4436 ret = false;
4437 }
4438 }
4439 }
4440
4441	/* Silently disable VSX and Altivec (and with them the __float128 type) if
4442	the user switched cpus to power7 in a target attribute or pragma which
4443	automatically enables both options, unless the altivec ABI was set.  This
4444	is set by default for 64-bit, but not for 32-bit.  */
4445 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4446 {
4447 TARGET_FLOAT128_TYPE = 0;
4448 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4449 | OPTION_MASK_FLOAT128_KEYWORD)
4450 & ~rs6000_isa_flags_explicit);
4451 }
4452
4453 /* Enable Altivec ABI for AIX -maltivec. */
4454 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4455 {
4456 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4457 error ("target attribute or pragma changes AltiVec ABI");
4458 else
4459 rs6000_altivec_abi = 1;
4460 }
4461
4462 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4463 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4464 be explicitly overridden in either case. */
4465 if (TARGET_ELF)
4466 {
4467 if (!global_options_set.x_rs6000_altivec_abi
4468 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4469 {
4470	if (main_target_opt != NULL
4471	&& !main_target_opt->x_rs6000_altivec_abi)
4472 error ("target attribute or pragma changes AltiVec ABI");
4473 else
4474 rs6000_altivec_abi = 1;
4475 }
4476 }
4477
4478 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4479	So far, the only darwin64 targets are also Mach-O.  */
4480 if (TARGET_MACHO
4481 && DEFAULT_ABI == ABI_DARWIN
4482 && TARGET_64BIT)
4483 {
4484 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4485 error ("target attribute or pragma changes darwin64 ABI");
4486 else
4487 {
4488 rs6000_darwin64_abi = 1;
4489 /* Default to natural alignment, for better performance. */
4490 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4491 }
4492 }
4493
4494	/* Place FP constants in the constant pool instead of the TOC
4495	if section anchors are enabled.  */
4496 if (flag_section_anchors
4497 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4498 TARGET_NO_FP_IN_TOC = 1;
4499
4500 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4501 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4502
4503 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4504 SUBTARGET_OVERRIDE_OPTIONS;
4505 #endif
4506 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4507 SUBSUBTARGET_OVERRIDE_OPTIONS;
4508 #endif
4509 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4510 SUB3TARGET_OVERRIDE_OPTIONS;
4511 #endif
4512
4513 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4514 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4515
4516 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4517 && rs6000_tune != PROCESSOR_POWER5
4518 && rs6000_tune != PROCESSOR_POWER6
4519 && rs6000_tune != PROCESSOR_POWER7
4520 && rs6000_tune != PROCESSOR_POWER8
4521 && rs6000_tune != PROCESSOR_POWER9
4522 && rs6000_tune != PROCESSOR_PPCA2
4523 && rs6000_tune != PROCESSOR_CELL
4524 && rs6000_tune != PROCESSOR_PPC476);
4525 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4526 || rs6000_tune == PROCESSOR_POWER5
4527 || rs6000_tune == PROCESSOR_POWER7
4528 || rs6000_tune == PROCESSOR_POWER8);
4529 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4530 || rs6000_tune == PROCESSOR_POWER5
4531 || rs6000_tune == PROCESSOR_POWER6
4532 || rs6000_tune == PROCESSOR_POWER7
4533 || rs6000_tune == PROCESSOR_POWER8
4534 || rs6000_tune == PROCESSOR_POWER9
4535 || rs6000_tune == PROCESSOR_PPCE500MC
4536 || rs6000_tune == PROCESSOR_PPCE500MC64
4537 || rs6000_tune == PROCESSOR_PPCE5500
4538 || rs6000_tune == PROCESSOR_PPCE6500);
4539
4540 /* Allow debug switches to override the above settings. These are set to -1
4541 in rs6000.opt to indicate the user hasn't directly set the switch. */
4542 if (TARGET_ALWAYS_HINT >= 0)
4543 rs6000_always_hint = TARGET_ALWAYS_HINT;
4544
4545 if (TARGET_SCHED_GROUPS >= 0)
4546 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4547
4548 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4549 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4550
4551 rs6000_sched_restricted_insns_priority
4552 = (rs6000_sched_groups ? 1 : 0);
4553
4554 /* Handle -msched-costly-dep option. */
4555 rs6000_sched_costly_dep
4556 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4557
4558 if (rs6000_sched_costly_dep_str)
4559 {
4560 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4561 rs6000_sched_costly_dep = no_dep_costly;
4562 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4563 rs6000_sched_costly_dep = all_deps_costly;
4564 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4565 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4566 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4567 rs6000_sched_costly_dep = store_to_load_dep_costly;
4568 else
4569 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4570 atoi (rs6000_sched_costly_dep_str));
4571 }
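/* A plain number is also accepted, e.g. -msched-costly-dep=20, and is
   treated as a latency threshold: any dependence at least that costly is
   considered costly (see the atoi conversion above).  */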
4572
4573 /* Handle -minsert-sched-nops option. */
4574 rs6000_sched_insert_nops
4575 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4576
4577 if (rs6000_sched_insert_nops_str)
4578 {
4579 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4580 rs6000_sched_insert_nops = sched_finish_none;
4581 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4582 rs6000_sched_insert_nops = sched_finish_pad_groups;
4583 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4584 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4585 else
4586 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4587 atoi (rs6000_sched_insert_nops_str));
4588 }
4589
4590	/* Handle the stack protector options.  */
4591 if (!global_options_set.x_rs6000_stack_protector_guard)
4592 #ifdef TARGET_THREAD_SSP_OFFSET
4593 rs6000_stack_protector_guard = SSP_TLS;
4594 #else
4595 rs6000_stack_protector_guard = SSP_GLOBAL;
4596 #endif
4597
4598 #ifdef TARGET_THREAD_SSP_OFFSET
4599 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4600 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4601 #endif
4602
4603 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4604 {
4605 char *endp;
4606 const char *str = rs6000_stack_protector_guard_offset_str;
4607
4608 errno = 0;
4609 long offset = strtol (str, &endp, 0);
4610 if (!*str || *endp || errno)
4611 error ("%qs is not a valid number in %qs", str,
4612 "-mstack-protector-guard-offset=");
4613
4614 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4615 || (TARGET_64BIT && (offset & 3)))
4616 error ("%qs is not a valid offset in %qs", str,
4617 "-mstack-protector-guard-offset=");
4618
4619 rs6000_stack_protector_guard_offset = offset;
4620 }
4621
4622 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4623 {
4624 const char *str = rs6000_stack_protector_guard_reg_str;
4625 int reg = decode_reg_name (str);
4626
4627 if (!IN_RANGE (reg, 1, 31))
4628 error ("%qs is not a valid base register in %qs", str,
4629 "-mstack-protector-guard-reg=");
4630
4631 rs6000_stack_protector_guard_reg = reg;
4632 }
4633
4634 if (rs6000_stack_protector_guard == SSP_TLS
4635 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4636 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
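/* As a concrete but illustrative invocation, the Linux kernel builds with
   options along the lines of

       -mstack-protector-guard=tls -mstack-protector-guard-reg=r13
       -mstack-protector-guard-offset=<offset of the canary field>

   on 64-bit Power, overriding the glibc TLS location set up above.  */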
4637
4638 if (global_init_p)
4639 {
4640 #ifdef TARGET_REGNAMES
4641 /* If the user desires alternate register names, copy in the
4642 alternate names now. */
4643 if (TARGET_REGNAMES)
4644 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4645 #endif
4646
4647 /* Set aix_struct_return last, after the ABI is determined.
4648 If -maix-struct-return or -msvr4-struct-return was explicitly
4649 used, don't override with the ABI default. */
4650 if (!global_options_set.x_aix_struct_return)
4651 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4652
4653 #if 0
4654 /* IBM XL compiler defaults to unsigned bitfields. */
4655 if (TARGET_XL_COMPAT)
4656 flag_signed_bitfields = 0;
4657 #endif
4658
4659 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4660 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4661
4662 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4663
4664 /* We can only guarantee the availability of DI pseudo-ops when
4665 assembling for 64-bit targets. */
4666 if (!TARGET_64BIT)
4667 {
4668 targetm.asm_out.aligned_op.di = NULL;
4669 targetm.asm_out.unaligned_op.di = NULL;
4670 }
4671
4672
4673 /* Set branch target alignment, if not optimizing for size. */
4674 if (!optimize_size)
4675 {
4676	/* Cell wants branch targets aligned to 8 bytes for dual issue.  Titan
4677	wants 8-byte alignment to avoid misprediction by the branch predictor.  */
4678 if (rs6000_tune == PROCESSOR_TITAN
4679 || rs6000_tune == PROCESSOR_CELL)
4680 {
4681 if (flag_align_functions && !str_align_functions)
4682 str_align_functions = "8";
4683 if (flag_align_jumps && !str_align_jumps)
4684 str_align_jumps = "8";
4685 if (flag_align_loops && !str_align_loops)
4686 str_align_loops = "8";
4687 }
4688 if (rs6000_align_branch_targets)
4689 {
4690 if (flag_align_functions && !str_align_functions)
4691 str_align_functions = "16";
4692 if (flag_align_jumps && !str_align_jumps)
4693 str_align_jumps = "16";
4694 if (flag_align_loops && !str_align_loops)
4695 {
4696 can_override_loop_align = 1;
4697 str_align_loops = "16";
4698 }
4699 }
4700
4701 if (flag_align_jumps && !str_align_jumps)
4702 str_align_jumps = "16";
4703 if (flag_align_loops && !str_align_loops)
4704 str_align_loops = "16";
4705 }
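/* These string values feed the generic alignment machinery exactly as if
   the user had passed -falign-functions=16, -falign-jumps=16, etc.;
   can_override_loop_align additionally lets the backend's loop alignment
   hook raise the alignment of small hot loops later.  */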
4706
4707 /* Arrange to save and restore machine status around nested functions. */
4708 init_machine_status = rs6000_init_machine_status;
4709
4710 /* We should always be splitting complex arguments, but we can't break
4711 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4712 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4713 targetm.calls.split_complex_arg = NULL;
4714
4715 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4716 if (DEFAULT_ABI == ABI_AIX)
4717 targetm.calls.custom_function_descriptors = 0;
4718 }
4719
4720 /* Initialize rs6000_cost with the appropriate target costs. */
4721 if (optimize_size)
4722 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4723 else
4724 switch (rs6000_tune)
4725 {
4726 case PROCESSOR_RS64A:
4727 rs6000_cost = &rs64a_cost;
4728 break;
4729
4730 case PROCESSOR_MPCCORE:
4731 rs6000_cost = &mpccore_cost;
4732 break;
4733
4734 case PROCESSOR_PPC403:
4735 rs6000_cost = &ppc403_cost;
4736 break;
4737
4738 case PROCESSOR_PPC405:
4739 rs6000_cost = &ppc405_cost;
4740 break;
4741
4742 case PROCESSOR_PPC440:
4743 rs6000_cost = &ppc440_cost;
4744 break;
4745
4746 case PROCESSOR_PPC476:
4747 rs6000_cost = &ppc476_cost;
4748 break;
4749
4750 case PROCESSOR_PPC601:
4751 rs6000_cost = &ppc601_cost;
4752 break;
4753
4754 case PROCESSOR_PPC603:
4755 rs6000_cost = &ppc603_cost;
4756 break;
4757
4758 case PROCESSOR_PPC604:
4759 rs6000_cost = &ppc604_cost;
4760 break;
4761
4762 case PROCESSOR_PPC604e:
4763 rs6000_cost = &ppc604e_cost;
4764 break;
4765
4766 case PROCESSOR_PPC620:
4767 rs6000_cost = &ppc620_cost;
4768 break;
4769
4770 case PROCESSOR_PPC630:
4771 rs6000_cost = &ppc630_cost;
4772 break;
4773
4774 case PROCESSOR_CELL:
4775 rs6000_cost = &ppccell_cost;
4776 break;
4777
4778 case PROCESSOR_PPC750:
4779 case PROCESSOR_PPC7400:
4780 rs6000_cost = &ppc750_cost;
4781 break;
4782
4783 case PROCESSOR_PPC7450:
4784 rs6000_cost = &ppc7450_cost;
4785 break;
4786
4787 case PROCESSOR_PPC8540:
4788 case PROCESSOR_PPC8548:
4789 rs6000_cost = &ppc8540_cost;
4790 break;
4791
4792 case PROCESSOR_PPCE300C2:
4793 case PROCESSOR_PPCE300C3:
4794 rs6000_cost = &ppce300c2c3_cost;
4795 break;
4796
4797 case PROCESSOR_PPCE500MC:
4798 rs6000_cost = &ppce500mc_cost;
4799 break;
4800
4801 case PROCESSOR_PPCE500MC64:
4802 rs6000_cost = &ppce500mc64_cost;
4803 break;
4804
4805 case PROCESSOR_PPCE5500:
4806 rs6000_cost = &ppce5500_cost;
4807 break;
4808
4809 case PROCESSOR_PPCE6500:
4810 rs6000_cost = &ppce6500_cost;
4811 break;
4812
4813 case PROCESSOR_TITAN:
4814 rs6000_cost = &titan_cost;
4815 break;
4816
4817 case PROCESSOR_POWER4:
4818 case PROCESSOR_POWER5:
4819 rs6000_cost = &power4_cost;
4820 break;
4821
4822 case PROCESSOR_POWER6:
4823 rs6000_cost = &power6_cost;
4824 break;
4825
4826 case PROCESSOR_POWER7:
4827 rs6000_cost = &power7_cost;
4828 break;
4829
4830 case PROCESSOR_POWER8:
4831 rs6000_cost = &power8_cost;
4832 break;
4833
4834 case PROCESSOR_POWER9:
4835 rs6000_cost = &power9_cost;
4836 break;
4837
4838 case PROCESSOR_PPCA2:
4839 rs6000_cost = &ppca2_cost;
4840 break;
4841
4842 default:
4843 gcc_unreachable ();
4844 }
4845
4846 if (global_init_p)
4847 {
4848 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4849 rs6000_cost->simultaneous_prefetches,
4850 global_options.x_param_values,
4851 global_options_set.x_param_values);
4852 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4853 global_options.x_param_values,
4854 global_options_set.x_param_values);
4855 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4856 rs6000_cost->cache_line_size,
4857 global_options.x_param_values,
4858 global_options_set.x_param_values);
4859 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4860 global_options.x_param_values,
4861 global_options_set.x_param_values);
4862
4863 /* Increase loop peeling limits based on performance analysis. */
4864 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4865 global_options.x_param_values,
4866 global_options_set.x_param_values);
4867 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4868 global_options.x_param_values,
4869 global_options_set.x_param_values);
4870
4871 /* Use the 'model' -fsched-pressure algorithm by default. */
4872 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4873 SCHED_PRESSURE_MODEL,
4874 global_options.x_param_values,
4875 global_options_set.x_param_values);
4876
4877 /* If using typedef char *va_list, signal that
4878 __builtin_va_start (&ap, 0) can be optimized to
4879 ap = __builtin_next_arg (0). */
4880 if (DEFAULT_ABI != ABI_V4)
4881 targetm.expand_builtin_va_start = NULL;
4882 }
4883
4884 /* If not explicitly specified via option, decide whether to generate indexed
4885 load/store instructions. A value of -1 indicates that the
4886 initial value of this variable has not been overwritten. During
4887 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4888 if (TARGET_AVOID_XFORM == -1)
4889 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4890 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4891 need indexed accesses and the type used is the scalar type of the element
4892 being loaded or stored. */
4893 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4894 && !TARGET_ALTIVEC);
4895
4896 /* Set the -mrecip options. */
4897 if (rs6000_recip_name)
4898 {
4899 char *p = ASTRDUP (rs6000_recip_name);
4900 char *q;
4901 unsigned int mask, i;
4902 bool invert;
4903
4904 while ((q = strtok (p, ",")) != NULL)
4905 {
4906 p = NULL;
4907 if (*q == '!')
4908 {
4909 invert = true;
4910 q++;
4911 }
4912 else
4913 invert = false;
4914
4915 if (!strcmp (q, "default"))
4916 mask = ((TARGET_RECIP_PRECISION)
4917 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4918 else
4919 {
4920 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4921 if (!strcmp (q, recip_options[i].string))
4922 {
4923 mask = recip_options[i].mask;
4924 break;
4925 }
4926
4927 if (i == ARRAY_SIZE (recip_options))
4928 {
4929 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4930 invert = false;
4931 mask = 0;
4932 ret = false;
4933 }
4934 }
4935
4936 if (invert)
4937 rs6000_recip_control &= ~mask;
4938 else
4939 rs6000_recip_control |= mask;
4940 }
4941 }
4942
4943 /* Set the builtin mask from the various options used that could affect which
4944 builtins are enabled. In the past we used target_flags, but we've run out
4945 of bits, and some options are no longer in target_flags. */
4946 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4947 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4948 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4949 rs6000_builtin_mask);
4950
4951 /* Initialize all of the registers. */
4952 rs6000_init_hard_regno_mode_ok (global_init_p);
4953
4954 /* Save the initial options in case the user uses function-specific options. */
4955 if (global_init_p)
4956 target_option_default_node = target_option_current_node
4957 = build_target_option_node (&global_options);
4958
4959 /* If not explicitly specified via option, decide whether to generate the
4960 extra blr's required to preserve the link stack on some cpus (eg, 476). */
4961 if (TARGET_LINK_STACK == -1)
4962 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4963
4964 /* Deprecate use of -mno-speculate-indirect-jumps. */
4965 if (!rs6000_speculate_indirect_jumps)
4966 warning (0, "%qs is deprecated and not recommended in any circumstances",
4967 "-mno-speculate-indirect-jumps");
4968
4969 return ret;
4970 }
4971
4972 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4973 define the target cpu type. */
4974
4975 static void
4976 rs6000_option_override (void)
4977 {
4978 (void) rs6000_option_override_internal (true);
4979 }
4980
4981 \f
4982 /* Implement targetm.vectorize.builtin_mask_for_load. */
4983 static tree
4984 rs6000_builtin_mask_for_load (void)
4985 {
4986 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4987 if ((TARGET_ALTIVEC && !TARGET_VSX)
4988 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4989 return altivec_builtin_mask_for_load;
4990 else
4991 return 0;
4992 }
4993
4994 /* Implement LOOP_ALIGN. */
4995 align_flags
4996 rs6000_loop_align (rtx label)
4997 {
4998 basic_block bb;
4999 int ninsns;
5000
5001 /* Don't override loop alignment if -falign-loops was specified. */
5002 if (!can_override_loop_align)
5003 return align_loops;
5004
5005 bb = BLOCK_FOR_INSN (label);
5006 ninsns = num_loop_insns (bb->loop_father);
5007
5008 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
5009 if (ninsns > 4 && ninsns <= 8
5010 && (rs6000_tune == PROCESSOR_POWER4
5011 || rs6000_tune == PROCESSOR_POWER5
5012 || rs6000_tune == PROCESSOR_POWER6
5013 || rs6000_tune == PROCESSOR_POWER7
5014 || rs6000_tune == PROCESSOR_POWER8))
5015 return align_flags (5);
5016 else
5017 return align_loops;
5018 }
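/* Illustrative note, not from the original source: align_flags (5)
   requests a 1 << 5 == 32 byte boundary, so for the processors listed
   above a 5..8 insn loop gets the assembler directive

   .p2align 5

   placing the loop head at the start of an icache sector, while other
   loops keep the -falign-loops default. */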
5019
5020 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5021 after applying N iterations. This routine does not determine how many
5022 iterations are required to reach the desired alignment. */
5023
5024 static bool
5025 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5026 {
5027 if (is_packed)
5028 return false;
5029
5030 if (TARGET_32BIT)
5031 {
5032 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5033 return true;
5034
5035 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5036 return true;
5037
5038 return false;
5039 }
5040 else
5041 {
5042 if (TARGET_MACHO)
5043 return false;
5044
5045 /* Assume that all other types are naturally aligned. CHECKME! */
5046 return true;
5047 }
5048 }
5049
5050 /* Return true if the vector misalignment factor is supported by the
5051 target. */
5052 static bool
5053 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5054 const_tree type,
5055 int misalignment,
5056 bool is_packed)
5057 {
5058 if (TARGET_VSX)
5059 {
5060 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5061 return true;
5062
5063 /* Return false if the movmisalign pattern is not supported for this mode. */
5064 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5065 return false;
5066
5067 if (misalignment == -1)
5068 {
5069 /* Misalignment factor is unknown at compile time but we know
5070 it's word aligned. */
5071 if (rs6000_vector_alignment_reachable (type, is_packed))
5072 {
5073 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5074
5075 if (element_size == 64 || element_size == 32)
5076 return true;
5077 }
5078
5079 return false;
5080 }
5081
5082 /* VSX supports word-aligned vectors. */
5083 if (misalignment % 4 == 0)
5084 return true;
5085 }
5086 return false;
5087 }
5088
5089 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5090 static int
5091 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5092 tree vectype, int misalign)
5093 {
5094 unsigned elements;
5095 tree elem_type;
5096
5097 switch (type_of_cost)
5098 {
5099 case scalar_stmt:
5100 case scalar_load:
5101 case scalar_store:
5102 case vector_stmt:
5103 case vector_load:
5104 case vector_store:
5105 case vec_to_scalar:
5106 case scalar_to_vec:
5107 case cond_branch_not_taken:
5108 return 1;
5109
5110 case vec_perm:
5111 if (TARGET_VSX)
5112 return 3;
5113 else
5114 return 1;
5115
5116 case vec_promote_demote:
5117 if (TARGET_VSX)
5118 return 4;
5119 else
5120 return 1;
5121
5122 case cond_branch_taken:
5123 return 3;
5124
5125 case unaligned_load:
5126 case vector_gather_load:
5127 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5128 return 1;
5129
5130 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5131 {
5132 elements = TYPE_VECTOR_SUBPARTS (vectype);
5133 if (elements == 2)
5134 /* Double word aligned. */
5135 return 2;
5136
5137 if (elements == 4)
5138 {
5139 switch (misalign)
5140 {
5141 case 8:
5142 /* Double word aligned. */
5143 return 2;
5144
5145 case -1:
5146 /* Unknown misalignment. */
5147 case 4:
5148 case 12:
5149 /* Word aligned. */
5150 return 22;
5151
5152 default:
5153 gcc_unreachable ();
5154 }
5155 }
5156 }
5157
5158 if (TARGET_ALTIVEC)
5159 /* Misaligned loads are not supported. */
5160 gcc_unreachable ();
5161
5162 return 2;
5163
5164 case unaligned_store:
5165 case vector_scatter_store:
5166 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5167 return 1;
5168
5169 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5170 {
5171 elements = TYPE_VECTOR_SUBPARTS (vectype);
5172 if (elements == 2)
5173 /* Double word aligned. */
5174 return 2;
5175
5176 if (elements == 4)
5177 {
5178 switch (misalign)
5179 {
5180 case 8:
5181 /* Double word aligned. */
5182 return 2;
5183
5184 case -1:
5185 /* Unknown misalignment. */
5186 case 4:
5187 case 12:
5188 /* Word aligned. */
5189 return 23;
5190
5191 default:
5192 gcc_unreachable ();
5193 }
5194 }
5195 }
5196
5197 if (TARGET_ALTIVEC)
5198 /* Misaligned stores are not supported. */
5199 gcc_unreachable ();
5200
5201 return 2;
5202
5203 case vec_construct:
5204 /* This is a rough approximation assuming non-constant elements
5205 constructed into a vector via element insertion. FIXME:
5206 vec_construct is not granular enough for uniformly good
5207 decisions. If the initialization is a splat, this is
5208 cheaper than we estimate. Improve this someday. */
5209 elem_type = TREE_TYPE (vectype);
5210 /* 32-bit vectors loaded into registers are stored as double
5211 precision, so we need 2 permutes, 2 converts, and 1 merge
5212 to construct a vector of short floats from them. */
5213 if (SCALAR_FLOAT_TYPE_P (elem_type)
5214 && TYPE_PRECISION (elem_type) == 32)
5215 return 5;
5216 /* On POWER9, integer vector types are built up in GPRs and then
5217 use a direct move (2 cycles). For POWER8 this is even worse,
5218 as we need two direct moves and a merge, and the direct moves
5219 are five cycles. */
5220 else if (INTEGRAL_TYPE_P (elem_type))
5221 {
5222 if (TARGET_P9_VECTOR)
5223 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5224 else
5225 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5226 }
5227 else
5228 /* V2DFmode doesn't need a direct move. */
5229 return 2;
5230
5231 default:
5232 gcc_unreachable ();
5233 }
5234 }
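/* Worked example with the numbers above (illustrative): on a pre-POWER9
   VSX target with -mallow-movmisalign, an unaligned_load of a V4SF vector
   with unknown misalignment (misalign == -1) costs 22, versus 1 for an
   aligned vector_load, strongly biasing the vectorizer toward peeling or
   versioning for alignment. With TARGET_EFFICIENT_UNALIGNED_VSX (POWER9)
   the same load costs 1. */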
5235
5236 /* Implement targetm.vectorize.preferred_simd_mode. */
5237
5238 static machine_mode
5239 rs6000_preferred_simd_mode (scalar_mode mode)
5240 {
5241 if (TARGET_VSX)
5242 switch (mode)
5243 {
5244 case E_DFmode:
5245 return V2DFmode;
5246 default:;
5247 }
5248 if (TARGET_ALTIVEC || TARGET_VSX)
5249 switch (mode)
5250 {
5251 case E_SFmode:
5252 return V4SFmode;
5253 case E_TImode:
5254 return V1TImode;
5255 case E_DImode:
5256 return V2DImode;
5257 case E_SImode:
5258 return V4SImode;
5259 case E_HImode:
5260 return V8HImode;
5261 case E_QImode:
5262 return V16QImode;
5263 default:;
5264 }
5265 return word_mode;
5266 }
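/* Example of the mapping above (illustrative): with VSX enabled, DFmode
   prefers V2DFmode and SImode prefers V4SImode, so a loop adding arrays
   of double is vectorized two lanes per 128-bit register. Without
   AltiVec or VSX, word_mode is returned and no vector mode is preferred. */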
5267
5268 typedef struct _rs6000_cost_data
5269 {
5270 struct loop *loop_info;
5271 unsigned cost[3];
5272 } rs6000_cost_data;
5273
5274 /* Test for likely overcommitment of vector hardware resources. If a
5275 loop iteration is relatively large, and too large a percentage of
5276 instructions in the loop are vectorized, the cost model may not
5277 adequately reflect delays from unavailable vector resources.
5278 Penalize the loop body cost for this case. */
5279
5280 static void
5281 rs6000_density_test (rs6000_cost_data *data)
5282 {
5283 const int DENSITY_PCT_THRESHOLD = 85;
5284 const int DENSITY_SIZE_THRESHOLD = 70;
5285 const int DENSITY_PENALTY = 10;
5286 struct loop *loop = data->loop_info;
5287 basic_block *bbs = get_loop_body (loop);
5288 int nbbs = loop->num_nodes;
5289 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5290 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5291 int i, density_pct;
5292
5293 for (i = 0; i < nbbs; i++)
5294 {
5295 basic_block bb = bbs[i];
5296 gimple_stmt_iterator gsi;
5297
5298 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5299 {
5300 gimple *stmt = gsi_stmt (gsi);
5301 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5302
5303 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5304 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5305 not_vec_cost++;
5306 }
5307 }
5308
5309 free (bbs);
5310 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5311
5312 if (density_pct > DENSITY_PCT_THRESHOLD
5313 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5314 {
5315 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5316 if (dump_enabled_p ())
5317 dump_printf_loc (MSG_NOTE, vect_location,
5318 "density %d%%, cost %d exceeds threshold, penalizing "
5319 "loop body cost by %d%%", density_pct,
5320 vec_cost + not_vec_cost, DENSITY_PENALTY);
5321 }
5322 }
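/* Worked example with made-up costs: if vec_cost == 180 and
   not_vec_cost == 20, then density_pct == 180 * 100 / 200 == 90. Since
   90 > 85 (DENSITY_PCT_THRESHOLD) and 200 > 70 (DENSITY_SIZE_THRESHOLD),
   the body cost is raised to 180 * (100 + 10) / 100 == 198. */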
5323
5324 /* Implement targetm.vectorize.init_cost. */
5325
5326 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5327 instruction is needed by the vectorization. */
5328 static bool rs6000_vect_nonmem;
5329
5330 static void *
5331 rs6000_init_cost (struct loop *loop_info)
5332 {
5333 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5334 data->loop_info = loop_info;
5335 data->cost[vect_prologue] = 0;
5336 data->cost[vect_body] = 0;
5337 data->cost[vect_epilogue] = 0;
5338 rs6000_vect_nonmem = false;
5339 return data;
5340 }
5341
5342 /* Implement targetm.vectorize.add_stmt_cost. */
5343
5344 static unsigned
5345 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5346 struct _stmt_vec_info *stmt_info, int misalign,
5347 enum vect_cost_model_location where)
5348 {
5349 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5350 unsigned retval = 0;
5351
5352 if (flag_vect_cost_model)
5353 {
5354 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5355 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5356 misalign);
5357 /* Statements in an inner loop relative to the loop being
5358 vectorized are weighted more heavily. The value here is
5359 arbitrary and could potentially be improved with analysis. */
5360 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5361 count *= 50; /* FIXME. */
5362
5363 retval = (unsigned) (count * stmt_cost);
5364 cost_data->cost[where] += retval;
5365
5366 /* Check whether we're doing something other than just a copy loop.
5367 Not all such loops may be profitably vectorized; see
5368 rs6000_finish_cost. */
5369 if ((kind == vec_to_scalar || kind == vec_perm
5370 || kind == vec_promote_demote || kind == vec_construct
5371 || kind == scalar_to_vec)
5372 || (where == vect_body && kind == vector_stmt))
5373 rs6000_vect_nonmem = true;
5374 }
5375
5376 return retval;
5377 }
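/* Example (illustrative): a vector_stmt of cost 1 appearing in an inner
   loop relative to the loop being vectorized is charged
   1 * 50 * 1 == 50, which strongly discourages vectorizing outer loops
   whose bodies contain nested loops. */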
5378
5379 /* Implement targetm.vectorize.finish_cost. */
5380
5381 static void
5382 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5383 unsigned *body_cost, unsigned *epilogue_cost)
5384 {
5385 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5386
5387 if (cost_data->loop_info)
5388 rs6000_density_test (cost_data);
5389
5390 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5391 that require versioning for any reason. The vectorization is at
5392 best a wash inside the loop, and the versioning checks make
5393 profitability highly unlikely and potentially quite harmful. */
5394 if (cost_data->loop_info)
5395 {
5396 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5397 if (!rs6000_vect_nonmem
5398 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5399 && LOOP_REQUIRES_VERSIONING (vec_info))
5400 cost_data->cost[vect_body] += 10000;
5401 }
5402
5403 *prologue_cost = cost_data->cost[vect_prologue];
5404 *body_cost = cost_data->cost[vect_body];
5405 *epilogue_cost = cost_data->cost[vect_epilogue];
5406 }
5407
5408 /* Implement targetm.vectorize.destroy_cost_data. */
5409
5410 static void
5411 rs6000_destroy_cost_data (void *data)
5412 {
5413 free (data);
5414 }
5415
5416 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5417 library with vectorized intrinsics. */
5418
5419 static tree
5420 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5421 tree type_in)
5422 {
5423 char name[32];
5424 const char *suffix = NULL;
5425 tree fntype, new_fndecl, bdecl = NULL_TREE;
5426 int n_args = 1;
5427 const char *bname;
5428 machine_mode el_mode, in_mode;
5429 int n, in_n;
5430
5431 /* Libmass is suitable only for unsafe math, as it does not correctly support
5432 parts of IEEE (such as denormals) with the required precision. Only support
5433 it if we have VSX, so we can use the simd d2 or f4 functions.
5434 XXX: Add variable length support. */
5435 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5436 return NULL_TREE;
5437
5438 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5439 n = TYPE_VECTOR_SUBPARTS (type_out);
5440 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5441 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5442 if (el_mode != in_mode
5443 || n != in_n)
5444 return NULL_TREE;
5445
5446 switch (fn)
5447 {
5448 CASE_CFN_ATAN2:
5449 CASE_CFN_HYPOT:
5450 CASE_CFN_POW:
5451 n_args = 2;
5452 gcc_fallthrough ();
5453
5454 CASE_CFN_ACOS:
5455 CASE_CFN_ACOSH:
5456 CASE_CFN_ASIN:
5457 CASE_CFN_ASINH:
5458 CASE_CFN_ATAN:
5459 CASE_CFN_ATANH:
5460 CASE_CFN_CBRT:
5461 CASE_CFN_COS:
5462 CASE_CFN_COSH:
5463 CASE_CFN_ERF:
5464 CASE_CFN_ERFC:
5465 CASE_CFN_EXP2:
5466 CASE_CFN_EXP:
5467 CASE_CFN_EXPM1:
5468 CASE_CFN_LGAMMA:
5469 CASE_CFN_LOG10:
5470 CASE_CFN_LOG1P:
5471 CASE_CFN_LOG2:
5472 CASE_CFN_LOG:
5473 CASE_CFN_SIN:
5474 CASE_CFN_SINH:
5475 CASE_CFN_SQRT:
5476 CASE_CFN_TAN:
5477 CASE_CFN_TANH:
5478 if (el_mode == DFmode && n == 2)
5479 {
5480 bdecl = mathfn_built_in (double_type_node, fn);
5481 suffix = "d2"; /* pow -> powd2 */
5482 }
5483 else if (el_mode == SFmode && n == 4)
5484 {
5485 bdecl = mathfn_built_in (float_type_node, fn);
5486 suffix = "4"; /* powf -> powf4 */
5487 }
5488 else
5489 return NULL_TREE;
5490 if (!bdecl)
5491 return NULL_TREE;
5492 break;
5493
5494 default:
5495 return NULL_TREE;
5496 }
5497
5498 gcc_assert (suffix != NULL);
5499 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5500 if (!bname)
5501 return NULL_TREE;
5502
5503 strcpy (name, bname + sizeof ("__builtin_") - 1);
5504 strcat (name, suffix);
5505
5506 if (n_args == 1)
5507 fntype = build_function_type_list (type_out, type_in, NULL);
5508 else if (n_args == 2)
5509 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5510 else
5511 gcc_unreachable ();
5512
5513 /* Build a function declaration for the vectorized function. */
5514 new_fndecl = build_decl (BUILTINS_LOCATION,
5515 FUNCTION_DECL, get_identifier (name), fntype);
5516 TREE_PUBLIC (new_fndecl) = 1;
5517 DECL_EXTERNAL (new_fndecl) = 1;
5518 DECL_IS_NOVOPS (new_fndecl) = 1;
5519 TREE_READONLY (new_fndecl) = 1;
5520
5521 return new_fndecl;
5522 }
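/* Sketch of the resulting declaration (illustrative; the types follow
   build_function_type_list above): vectorizing pow over V2DF yields

   vector double powd2 (vector double, vector double);

   i.e. "__builtin_pow" with the "__builtin_" prefix stripped and the
   "d2" suffix appended; the SFmode/V4SF case yields powf4. The symbol is
   expected to be resolved by the MASS library at link time. */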
5523
5524 /* Returns a function decl for a vectorized version of the builtin function
5525 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5526 if it is not available. */
5527
5528 static tree
5529 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5530 tree type_in)
5531 {
5532 machine_mode in_mode, out_mode;
5533 int in_n, out_n;
5534
5535 if (TARGET_DEBUG_BUILTIN)
5536 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5537 combined_fn_name (combined_fn (fn)),
5538 GET_MODE_NAME (TYPE_MODE (type_out)),
5539 GET_MODE_NAME (TYPE_MODE (type_in)));
5540
5541 if (TREE_CODE (type_out) != VECTOR_TYPE
5542 || TREE_CODE (type_in) != VECTOR_TYPE)
5543 return NULL_TREE;
5544
5545 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5546 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5547 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5548 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5549
5550 switch (fn)
5551 {
5552 CASE_CFN_COPYSIGN:
5553 if (VECTOR_UNIT_VSX_P (V2DFmode)
5554 && out_mode == DFmode && out_n == 2
5555 && in_mode == DFmode && in_n == 2)
5556 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5557 if (VECTOR_UNIT_VSX_P (V4SFmode)
5558 && out_mode == SFmode && out_n == 4
5559 && in_mode == SFmode && in_n == 4)
5560 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5561 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5562 && out_mode == SFmode && out_n == 4
5563 && in_mode == SFmode && in_n == 4)
5564 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5565 break;
5566 CASE_CFN_CEIL:
5567 if (VECTOR_UNIT_VSX_P (V2DFmode)
5568 && out_mode == DFmode && out_n == 2
5569 && in_mode == DFmode && in_n == 2)
5570 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5571 if (VECTOR_UNIT_VSX_P (V4SFmode)
5572 && out_mode == SFmode && out_n == 4
5573 && in_mode == SFmode && in_n == 4)
5574 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5575 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5576 && out_mode == SFmode && out_n == 4
5577 && in_mode == SFmode && in_n == 4)
5578 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5579 break;
5580 CASE_CFN_FLOOR:
5581 if (VECTOR_UNIT_VSX_P (V2DFmode)
5582 && out_mode == DFmode && out_n == 2
5583 && in_mode == DFmode && in_n == 2)
5584 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5585 if (VECTOR_UNIT_VSX_P (V4SFmode)
5586 && out_mode == SFmode && out_n == 4
5587 && in_mode == SFmode && in_n == 4)
5588 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5589 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5590 && out_mode == SFmode && out_n == 4
5591 && in_mode == SFmode && in_n == 4)
5592 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5593 break;
5594 CASE_CFN_FMA:
5595 if (VECTOR_UNIT_VSX_P (V2DFmode)
5596 && out_mode == DFmode && out_n == 2
5597 && in_mode == DFmode && in_n == 2)
5598 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5599 if (VECTOR_UNIT_VSX_P (V4SFmode)
5600 && out_mode == SFmode && out_n == 4
5601 && in_mode == SFmode && in_n == 4)
5602 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5603 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5604 && out_mode == SFmode && out_n == 4
5605 && in_mode == SFmode && in_n == 4)
5606 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5607 break;
5608 CASE_CFN_TRUNC:
5609 if (VECTOR_UNIT_VSX_P (V2DFmode)
5610 && out_mode == DFmode && out_n == 2
5611 && in_mode == DFmode && in_n == 2)
5612 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5613 if (VECTOR_UNIT_VSX_P (V4SFmode)
5614 && out_mode == SFmode && out_n == 4
5615 && in_mode == SFmode && in_n == 4)
5616 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5617 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5618 && out_mode == SFmode && out_n == 4
5619 && in_mode == SFmode && in_n == 4)
5620 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5621 break;
5622 CASE_CFN_NEARBYINT:
5623 if (VECTOR_UNIT_VSX_P (V2DFmode)
5624 && flag_unsafe_math_optimizations
5625 && out_mode == DFmode && out_n == 2
5626 && in_mode == DFmode && in_n == 2)
5627 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5628 if (VECTOR_UNIT_VSX_P (V4SFmode)
5629 && flag_unsafe_math_optimizations
5630 && out_mode == SFmode && out_n == 4
5631 && in_mode == SFmode && in_n == 4)
5632 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5633 break;
5634 CASE_CFN_RINT:
5635 if (VECTOR_UNIT_VSX_P (V2DFmode)
5636 && !flag_trapping_math
5637 && out_mode == DFmode && out_n == 2
5638 && in_mode == DFmode && in_n == 2)
5639 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5640 if (VECTOR_UNIT_VSX_P (V4SFmode)
5641 && !flag_trapping_math
5642 && out_mode == SFmode && out_n == 4
5643 && in_mode == SFmode && in_n == 4)
5644 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5645 break;
5646 default:
5647 break;
5648 }
5649
5650 /* Generate calls to libmass if appropriate. */
5651 if (rs6000_veclib_handler)
5652 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5653
5654 return NULL_TREE;
5655 }
5656
5657 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5658
5659 static tree
5660 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5661 tree type_in)
5662 {
5663 machine_mode in_mode, out_mode;
5664 int in_n, out_n;
5665
5666 if (TARGET_DEBUG_BUILTIN)
5667 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5668 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5669 GET_MODE_NAME (TYPE_MODE (type_out)),
5670 GET_MODE_NAME (TYPE_MODE (type_in)));
5671
5672 if (TREE_CODE (type_out) != VECTOR_TYPE
5673 || TREE_CODE (type_in) != VECTOR_TYPE)
5674 return NULL_TREE;
5675
5676 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5677 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5678 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5679 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5680
5681 enum rs6000_builtins fn
5682 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5683 switch (fn)
5684 {
5685 case RS6000_BUILTIN_RSQRTF:
5686 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5687 && out_mode == SFmode && out_n == 4
5688 && in_mode == SFmode && in_n == 4)
5689 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5690 break;
5691 case RS6000_BUILTIN_RSQRT:
5692 if (VECTOR_UNIT_VSX_P (V2DFmode)
5693 && out_mode == DFmode && out_n == 2
5694 && in_mode == DFmode && in_n == 2)
5695 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5696 break;
5697 case RS6000_BUILTIN_RECIPF:
5698 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5699 && out_mode == SFmode && out_n == 4
5700 && in_mode == SFmode && in_n == 4)
5701 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5702 break;
5703 case RS6000_BUILTIN_RECIP:
5704 if (VECTOR_UNIT_VSX_P (V2DFmode)
5705 && out_mode == DFmode && out_n == 2
5706 && in_mode == DFmode && in_n == 2)
5707 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5708 break;
5709 default:
5710 break;
5711 }
5712 return NULL_TREE;
5713 }
5714 \f
5715 /* Default CPU string for rs6000*_file_start functions. */
5716 static const char *rs6000_default_cpu;
5717
5718 /* Do anything needed at the start of the asm file. */
5719
5720 static void
5721 rs6000_file_start (void)
5722 {
5723 char buffer[80];
5724 const char *start = buffer;
5725 FILE *file = asm_out_file;
5726
5727 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5728
5729 default_file_start ();
5730
5731 if (flag_verbose_asm)
5732 {
5733 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5734
5735 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5736 {
5737 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5738 start = "";
5739 }
5740
5741 if (global_options_set.x_rs6000_cpu_index)
5742 {
5743 fprintf (file, "%s -mcpu=%s", start,
5744 processor_target_table[rs6000_cpu_index].name);
5745 start = "";
5746 }
5747
5748 if (global_options_set.x_rs6000_tune_index)
5749 {
5750 fprintf (file, "%s -mtune=%s", start,
5751 processor_target_table[rs6000_tune_index].name);
5752 start = "";
5753 }
5754
5755 if (PPC405_ERRATUM77)
5756 {
5757 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5758 start = "";
5759 }
5760
5761 #ifdef USING_ELFOS_H
5762 switch (rs6000_sdata)
5763 {
5764 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5765 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5766 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5767 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5768 }
5769
5770 if (rs6000_sdata && g_switch_value)
5771 {
5772 fprintf (file, "%s -G %d", start,
5773 g_switch_value);
5774 start = "";
5775 }
5776 #endif
5777
5778 if (*start == '\0')
5779 putc ('\n', file);
5780 }
5781
5782 #ifdef USING_ELFOS_H
5783 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5784 && !global_options_set.x_rs6000_cpu_index)
5785 {
5786 fputs ("\t.machine ", asm_out_file);
5787 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5788 fputs ("power9\n", asm_out_file);
5789 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5790 fputs ("power8\n", asm_out_file);
5791 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5792 fputs ("power7\n", asm_out_file);
5793 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5794 fputs ("power6\n", asm_out_file);
5795 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5796 fputs ("power5\n", asm_out_file);
5797 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5798 fputs ("power4\n", asm_out_file);
5799 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5800 fputs ("ppc64\n", asm_out_file);
5801 else
5802 fputs ("ppc\n", asm_out_file);
5803 }
5804 #endif
5805
5806 if (DEFAULT_ABI == ABI_ELFv2)
5807 fprintf (file, "\t.abiversion 2\n");
5808 }
5809
5810 \f
5811 /* Return nonzero if this function is known to have a null epilogue. */
5812
5813 int
5814 direct_return (void)
5815 {
5816 if (reload_completed)
5817 {
5818 rs6000_stack_t *info = rs6000_stack_info ();
5819
5820 if (info->first_gp_reg_save == 32
5821 && info->first_fp_reg_save == 64
5822 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5823 && ! info->lr_save_p
5824 && ! info->cr_save_p
5825 && info->vrsave_size == 0
5826 && ! info->push_p)
5827 return 1;
5828 }
5829
5830 return 0;
5831 }
5832
5833 /* Helper for num_insns_constant. Calculate number of instructions to
5834 load VALUE to a single gpr using combinations of addi, addis, ori,
5835 oris and sldi instructions. */
5836
5837 static int
5838 num_insns_constant_gpr (HOST_WIDE_INT value)
5839 {
5840 /* Signed constant loadable with addi. */
5841 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5842 return 1;
5843
5844 /* Constant loadable with addis. */
5845 else if ((value & 0xffff) == 0
5846 && (value >> 31 == -1 || value >> 31 == 0))
5847 return 1;
5848
5849 else if (TARGET_POWERPC64)
5850 {
5851 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5852 HOST_WIDE_INT high = value >> 31;
5853
5854 if (high == 0 || high == -1)
5855 return 2;
5856
5857 high >>= 1;
5858
5859 if (low == 0)
5860 return num_insns_constant_gpr (high) + 1;
5861 else if (high == 0)
5862 return num_insns_constant_gpr (low) + 1;
5863 else
5864 return (num_insns_constant_gpr (high)
5865 + num_insns_constant_gpr (low) + 1);
5866 }
5867
5868 else
5869 return 2;
5870 }
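/* Worked examples (illustrative): 12 is loadable with one addi
   ("li 3,12"); 0x12340000 with one addis ("lis 3,0x1234"); 0x12345678
   needs addis + ori, i.e. 2. On TARGET_POWERPC64, a full constant such
   as 0x1234567890abcdef splits into a high half (2 insns), one sldi,
   and a low half (2 insns), for a total of 5. */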
5871
5872 /* Helper for num_insns_constant. Allow constants formed by the
5873 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5874 and handle modes that require multiple gprs. */
5875
5876 static int
5877 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5878 {
5879 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5880 int total = 0;
5881 while (nregs-- > 0)
5882 {
5883 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5884 int insns = num_insns_constant_gpr (low);
5885 if (insns > 2
5886 /* We won't get more than 2 from num_insns_constant_gpr
5887 except when TARGET_POWERPC64 and mode is DImode or
5888 wider, so the register mode must be DImode. */
5889 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5890 insns = 2;
5891 total += insns;
5892 value >>= BITS_PER_WORD;
5893 }
5894 return total;
5895 }
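/* Example (illustrative): 0x0000ffffffff0000 would cost 4 insns by the
   addi/addis/ori route, but it is a contiguous-ones DImode and-mask, so
   it can be formed in 2 (e.g. "li 3,-1" then "rldic 3,3,16,16"); the
   rs6000_is_valid_and_mask check above caps such values at 2. */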
5896
5897 /* Return the number of instructions it takes to form a constant in as
5898 many gprs as are needed for MODE. */
5899
5900 int
5901 num_insns_constant (rtx op, machine_mode mode)
5902 {
5903 HOST_WIDE_INT val;
5904
5905 switch (GET_CODE (op))
5906 {
5907 case CONST_INT:
5908 val = INTVAL (op);
5909 break;
5910
5911 case CONST_WIDE_INT:
5912 {
5913 int insns = 0;
5914 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5915 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5916 DImode);
5917 return insns;
5918 }
5919
5920 case CONST_DOUBLE:
5921 {
5922 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5923
5924 if (mode == SFmode || mode == SDmode)
5925 {
5926 long l;
5927
5928 if (mode == SDmode)
5929 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5930 else
5931 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5932 /* See the first define_split in rs6000.md handling a
5933 const_double_operand. */
5934 val = l;
5935 mode = SImode;
5936 }
5937 else if (mode == DFmode || mode == DDmode)
5938 {
5939 long l[2];
5940
5941 if (mode == DDmode)
5942 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5943 else
5944 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5945
5946 /* See the second (32-bit) and third (64-bit) define_split
5947 in rs6000.md handling a const_double_operand. */
5948 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5949 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5950 mode = DImode;
5951 }
5952 else if (mode == TFmode || mode == TDmode
5953 || mode == KFmode || mode == IFmode)
5954 {
5955 long l[4];
5956 int insns;
5957
5958 if (mode == TDmode)
5959 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5960 else
5961 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5962
5963 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5964 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5965 insns = num_insns_constant_multi (val, DImode);
5966 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5967 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5968 insns += num_insns_constant_multi (val, DImode);
5969 return insns;
5970 }
5971 else
5972 gcc_unreachable ();
5973 }
5974 break;
5975
5976 default:
5977 gcc_unreachable ();
5978 }
5979
5980 return num_insns_constant_multi (val, mode);
5981 }
5982
5983 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5984 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5985 corresponding element of the vector, but for V4SFmode, the
5986 corresponding "float" is interpreted as an SImode integer. */
5987
5988 HOST_WIDE_INT
5989 const_vector_elt_as_int (rtx op, unsigned int elt)
5990 {
5991 rtx tmp;
5992
5993 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5994 gcc_assert (GET_MODE (op) != V2DImode
5995 && GET_MODE (op) != V2DFmode);
5996
5997 tmp = CONST_VECTOR_ELT (op, elt);
5998 if (GET_MODE (op) == V4SFmode)
5999 tmp = gen_lowpart (SImode, tmp);
6000 return INTVAL (tmp);
6001 }
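/* Example (illustrative): for a V4SFmode constant with all elements
   1.0f, each call here returns the IEEE-754 single-precision bit pattern
   0x3f800000, which is what the vspltis* recognizers below compare
   against. */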
6002
6003 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6004 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6005 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6006 all items are set to the same value and contain COPIES replicas of the
6007 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6008 operand and the others are set to the value of the operand's msb. */
6009
6010 static bool
6011 vspltis_constant (rtx op, unsigned step, unsigned copies)
6012 {
6013 machine_mode mode = GET_MODE (op);
6014 machine_mode inner = GET_MODE_INNER (mode);
6015
6016 unsigned i;
6017 unsigned nunits;
6018 unsigned bitsize;
6019 unsigned mask;
6020
6021 HOST_WIDE_INT val;
6022 HOST_WIDE_INT splat_val;
6023 HOST_WIDE_INT msb_val;
6024
6025 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6026 return false;
6027
6028 nunits = GET_MODE_NUNITS (mode);
6029 bitsize = GET_MODE_BITSIZE (inner);
6030 mask = GET_MODE_MASK (inner);
6031
6032 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6033 splat_val = val;
6034 msb_val = val >= 0 ? 0 : -1;
6035
6036 /* Construct the value to be splatted, if possible. If not, return false. */
6037 for (i = 2; i <= copies; i *= 2)
6038 {
6039 HOST_WIDE_INT small_val;
6040 bitsize /= 2;
6041 small_val = splat_val >> bitsize;
6042 mask >>= bitsize;
6043 if (splat_val != ((HOST_WIDE_INT)
6044 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6045 | (small_val & mask)))
6046 return false;
6047 splat_val = small_val;
6048 }
6049
6050 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6051 if (EASY_VECTOR_15 (splat_val))
6052 ;
6053
6054 /* Also check if we can splat, and then add the result to itself. Do so if
6055 the value is positive, or if the splat instruction is using OP's mode;
6056 for splat_val < 0, the splat and the add should use the same mode. */
6057 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6058 && (splat_val >= 0 || (step == 1 && copies == 1)))
6059 ;
6060
6061 /* Also check if we are loading up the most significant bit, which can be done
6062 by loading up -1 and shifting the value left by -1. */
6063 else if (EASY_VECTOR_MSB (splat_val, inner))
6064 ;
6065
6066 else
6067 return false;
6068
6069 /* Check if VAL is present in every STEP-th element, and the
6070 other elements are filled with its most significant bit. */
6071 for (i = 1; i < nunits; ++i)
6072 {
6073 HOST_WIDE_INT desired_val;
6074 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6075 if ((i & (step - 1)) == 0)
6076 desired_val = val;
6077 else
6078 desired_val = msb_val;
6079
6080 if (desired_val != const_vector_elt_as_int (op, elt))
6081 return false;
6082 }
6083
6084 return true;
6085 }
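/* Examples (illustrative, big-endian element order): { 5, 5, 5, 5 } in
   V4SImode matches with STEP == 1, COPIES == 1 (a plain vspltisw 5);
   { 0x0101, ..., 0x0101 } in V8HImode matches vspltisb 1 with
   COPIES == 2, each halfword holding two replicas of the byte operand;
   and { 0, 0, 0, 5, 0, 0, 0, 5, ... } in V16QImode matches vspltisw 5
   with STEP == 4, every fourth byte being the operand and the rest its
   msb (zero). */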
6086
6087 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6088 instruction, filling in the bottom elements with 0 or -1.
6089
6090 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6091 for the number of zeroes to shift in, or negative for the number of 0xff
6092 bytes to shift in.
6093
6094 OP is a CONST_VECTOR. */
6095
6096 int
6097 vspltis_shifted (rtx op)
6098 {
6099 machine_mode mode = GET_MODE (op);
6100 machine_mode inner = GET_MODE_INNER (mode);
6101
6102 unsigned i, j;
6103 unsigned nunits;
6104 unsigned mask;
6105
6106 HOST_WIDE_INT val;
6107
6108 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6109 return 0;
6110
6111 /* We need to create pseudo registers to do the shift, so don't recognize
6112 shift vector constants after reload. */
6113 if (!can_create_pseudo_p ())
6114 return 0;
6115
6116 nunits = GET_MODE_NUNITS (mode);
6117 mask = GET_MODE_MASK (inner);
6118
6119 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6120
6121 /* Check if the value can really be the operand of a vspltis[bhw]. */
6122 if (EASY_VECTOR_15 (val))
6123 ;
6124
6125 /* Also check if we are loading up the most significant bit which can be done
6126 by loading up -1 and shifting the value left by -1. */
6127 else if (EASY_VECTOR_MSB (val, inner))
6128 ;
6129
6130 else
6131 return 0;
6132
6133 /* Check if VAL is present in every STEP-th element until we find elements
6134 that are 0 or all 1 bits. */
6135 for (i = 1; i < nunits; ++i)
6136 {
6137 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6138 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6139
6140 /* If the value isn't the splat value, check for the remaining elements
6141 being 0/-1. */
6142 if (val != elt_val)
6143 {
6144 if (elt_val == 0)
6145 {
6146 for (j = i+1; j < nunits; ++j)
6147 {
6148 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6149 if (const_vector_elt_as_int (op, elt2) != 0)
6150 return 0;
6151 }
6152
6153 return (nunits - i) * GET_MODE_SIZE (inner);
6154 }
6155
6156 else if ((elt_val & mask) == mask)
6157 {
6158 for (j = i+1; j < nunits; ++j)
6159 {
6160 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6161 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6162 return 0;
6163 }
6164
6165 return -((nunits - i) * GET_MODE_SIZE (inner));
6166 }
6167
6168 else
6169 return 0;
6170 }
6171 }
6172
6173 /* If all elements are equal, we don't need to do VSLDOI. */
6174 return 0;
6175 }
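/* Examples (illustrative, big-endian): V4SImode { 5, 0, 0, 0 } returns
   12, as it can be built by vspltisw 5 plus a vsldoi shifting in 12 zero
   bytes; { 5, -1, -1, -1 } returns -12, shifting in 12 0xff bytes; and
   { 5, 0, 5, 0 } returns 0, because the nonzero values do not form a
   single shifted splat. */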
6176
6177
6178 /* Return true if OP is of the given MODE and can be synthesized
6179 with a vspltisb, vspltish or vspltisw. */
6180
6181 bool
6182 easy_altivec_constant (rtx op, machine_mode mode)
6183 {
6184 unsigned step, copies;
6185
6186 if (mode == VOIDmode)
6187 mode = GET_MODE (op);
6188 else if (mode != GET_MODE (op))
6189 return false;
6190
6191 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6192 constants. */
6193 if (mode == V2DFmode)
6194 return zero_constant (op, mode);
6195
6196 else if (mode == V2DImode)
6197 {
6198 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6199 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6200 return false;
6201
6202 if (zero_constant (op, mode))
6203 return true;
6204
6205 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6206 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6207 return true;
6208
6209 return false;
6210 }
6211
6212 /* V1TImode is a special container for TImode. Ignore for now. */
6213 else if (mode == V1TImode)
6214 return false;
6215
6216 /* Start with a vspltisw. */
6217 step = GET_MODE_NUNITS (mode) / 4;
6218 copies = 1;
6219
6220 if (vspltis_constant (op, step, copies))
6221 return true;
6222
6223 /* Then try with a vspltish. */
6224 if (step == 1)
6225 copies <<= 1;
6226 else
6227 step >>= 1;
6228
6229 if (vspltis_constant (op, step, copies))
6230 return true;
6231
6232 /* And finally a vspltisb. */
6233 if (step == 1)
6234 copies <<= 1;
6235 else
6236 step >>= 1;
6237
6238 if (vspltis_constant (op, step, copies))
6239 return true;
6240
6241 if (vspltis_shifted (op) != 0)
6242 return true;
6243
6244 return false;
6245 }
6246
6247 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6248 result is OP. Abort if it is not possible. */
6249
6250 rtx
6251 gen_easy_altivec_constant (rtx op)
6252 {
6253 machine_mode mode = GET_MODE (op);
6254 int nunits = GET_MODE_NUNITS (mode);
6255 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6256 unsigned step = nunits / 4;
6257 unsigned copies = 1;
6258
6259 /* Start with a vspltisw. */
6260 if (vspltis_constant (op, step, copies))
6261 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6262
6263 /* Then try with a vspltish. */
6264 if (step == 1)
6265 copies <<= 1;
6266 else
6267 step >>= 1;
6268
6269 if (vspltis_constant (op, step, copies))
6270 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6271
6272 /* And finally a vspltisb. */
6273 if (step == 1)
6274 copies <<= 1;
6275 else
6276 step >>= 1;
6277
6278 if (vspltis_constant (op, step, copies))
6279 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6280
6281 gcc_unreachable ();
6282 }
6283
6284 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6285 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6286
6287 Return the number of instructions needed (1 or 2) via the address pointed
6288 to by NUM_INSNS_PTR.
6289
6290 Return the constant that is being split via CONSTANT_PTR. */
6291
6292 bool
6293 xxspltib_constant_p (rtx op,
6294 machine_mode mode,
6295 int *num_insns_ptr,
6296 int *constant_ptr)
6297 {
6298 size_t nunits = GET_MODE_NUNITS (mode);
6299 size_t i;
6300 HOST_WIDE_INT value;
6301 rtx element;
6302
6303 /* Set the returned values to out of bound values. */
6304 *num_insns_ptr = -1;
6305 *constant_ptr = 256;
6306
6307 if (!TARGET_P9_VECTOR)
6308 return false;
6309
6310 if (mode == VOIDmode)
6311 mode = GET_MODE (op);
6312
6313 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6314 return false;
6315
6316 /* Handle (vec_duplicate <constant>). */
6317 if (GET_CODE (op) == VEC_DUPLICATE)
6318 {
6319 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6320 && mode != V2DImode)
6321 return false;
6322
6323 element = XEXP (op, 0);
6324 if (!CONST_INT_P (element))
6325 return false;
6326
6327 value = INTVAL (element);
6328 if (!IN_RANGE (value, -128, 127))
6329 return false;
6330 }
6331
6332 /* Handle (const_vector [...]). */
6333 else if (GET_CODE (op) == CONST_VECTOR)
6334 {
6335 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6336 && mode != V2DImode)
6337 return false;
6338
6339 element = CONST_VECTOR_ELT (op, 0);
6340 if (!CONST_INT_P (element))
6341 return false;
6342
6343 value = INTVAL (element);
6344 if (!IN_RANGE (value, -128, 127))
6345 return false;
6346
6347 for (i = 1; i < nunits; i++)
6348 {
6349 element = CONST_VECTOR_ELT (op, i);
6350 if (!CONST_INT_P (element))
6351 return false;
6352
6353 if (value != INTVAL (element))
6354 return false;
6355 }
6356 }
6357
6358 /* Handle integer constants being loaded into the upper part of the VSX
6359 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6360 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6361 else if (CONST_INT_P (op))
6362 {
6363 if (!SCALAR_INT_MODE_P (mode))
6364 return false;
6365
6366 value = INTVAL (op);
6367 if (!IN_RANGE (value, -128, 127))
6368 return false;
6369
6370 if (!IN_RANGE (value, -1, 0))
6371 {
6372 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6373 return false;
6374
6375 if (EASY_VECTOR_15 (value))
6376 return false;
6377 }
6378 }
6379
6380 else
6381 return false;
6382
6383 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6384 sign extend. Special case 0/-1 to allow getting any VSX register instead
6385 of an Altivec register. */
6386 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6387 && EASY_VECTOR_15 (value))
6388 return false;
6389
6390 /* Return # of instructions and the constant byte for XXSPLTIB. */
6391 if (mode == V16QImode)
6392 *num_insns_ptr = 1;
6393
6394 else if (IN_RANGE (value, -1, 0))
6395 *num_insns_ptr = 1;
6396
6397 else
6398 *num_insns_ptr = 2;
6399
6400 *constant_ptr = (int) value;
6401 return true;
6402 }
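/* Examples (illustrative): a V16QImode splat of 100 is a single
   xxspltib (*num_insns_ptr == 1, *constant_ptr == 100); a V4SImode
   splat of 100 needs xxspltib plus a vextsb2w sign extension
   (*num_insns_ptr == 2); and a V4SImode splat of 5 returns false, since
   the EASY_VECTOR_15 check above prefers a plain vspltisw 5. */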
6403
6404 const char *
6405 output_vec_const_move (rtx *operands)
6406 {
6407 int shift;
6408 machine_mode mode;
6409 rtx dest, vec;
6410
6411 dest = operands[0];
6412 vec = operands[1];
6413 mode = GET_MODE (dest);
6414
6415 if (TARGET_VSX)
6416 {
6417 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6418 int xxspltib_value = 256;
6419 int num_insns = -1;
6420
6421 if (zero_constant (vec, mode))
6422 {
6423 if (TARGET_P9_VECTOR)
6424 return "xxspltib %x0,0";
6425
6426 else if (dest_vmx_p)
6427 return "vspltisw %0,0";
6428
6429 else
6430 return "xxlxor %x0,%x0,%x0";
6431 }
6432
6433 if (all_ones_constant (vec, mode))
6434 {
6435 if (TARGET_P9_VECTOR)
6436 return "xxspltib %x0,255";
6437
6438 else if (dest_vmx_p)
6439 return "vspltisw %0,-1";
6440
6441 else if (TARGET_P8_VECTOR)
6442 return "xxlorc %x0,%x0,%x0";
6443
6444 else
6445 gcc_unreachable ();
6446 }
6447
6448 if (TARGET_P9_VECTOR
6449 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6450 {
6451 if (num_insns == 1)
6452 {
6453 operands[2] = GEN_INT (xxspltib_value & 0xff);
6454 return "xxspltib %x0,%2";
6455 }
6456
6457 return "#";
6458 }
6459 }
6460
6461 if (TARGET_ALTIVEC)
6462 {
6463 rtx splat_vec;
6464
6465 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6466 if (zero_constant (vec, mode))
6467 return "vspltisw %0,0";
6468
6469 if (all_ones_constant (vec, mode))
6470 return "vspltisw %0,-1";
6471
6472 /* Do we need to construct a value using VSLDOI? */
6473 shift = vspltis_shifted (vec);
6474 if (shift != 0)
6475 return "#";
6476
6477 splat_vec = gen_easy_altivec_constant (vec);
6478 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6479 operands[1] = XEXP (splat_vec, 0);
6480 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6481 return "#";
6482
6483 switch (GET_MODE (splat_vec))
6484 {
6485 case E_V4SImode:
6486 return "vspltisw %0,%1";
6487
6488 case E_V8HImode:
6489 return "vspltish %0,%1";
6490
6491 case E_V16QImode:
6492 return "vspltisb %0,%1";
6493
6494 default:
6495 gcc_unreachable ();
6496 }
6497 }
6498
6499 gcc_unreachable ();
6500 }
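/* Examples of the templates chosen above (illustrative): an all-zero
   vector becomes "xxspltib %x0,0" on POWER9, "vspltisw %0,0" in an
   Altivec register, or "xxlxor %x0,%x0,%x0" otherwise; an all-ones
   vector becomes "vspltisw %0,-1" or "xxlorc %x0,%x0,%x0"; and a shifted
   constant such as { 5, 0, 0, 0 } returns "#" so it is split into
   vspltisw + vsldoi later. */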
6501
6502 /* Initialize vector TARGET to VALS. */
6503
6504 void
6505 rs6000_expand_vector_init (rtx target, rtx vals)
6506 {
6507 machine_mode mode = GET_MODE (target);
6508 machine_mode inner_mode = GET_MODE_INNER (mode);
6509 int n_elts = GET_MODE_NUNITS (mode);
6510 int n_var = 0, one_var = -1;
6511 bool all_same = true, all_const_zero = true;
6512 rtx x, mem;
6513 int i;
6514
6515 for (i = 0; i < n_elts; ++i)
6516 {
6517 x = XVECEXP (vals, 0, i);
6518 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6519 ++n_var, one_var = i;
6520 else if (x != CONST0_RTX (inner_mode))
6521 all_const_zero = false;
6522
6523 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6524 all_same = false;
6525 }
6526
6527 if (n_var == 0)
6528 {
6529 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6530 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6531 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6532 {
6533 /* Zero register. */
6534 emit_move_insn (target, CONST0_RTX (mode));
6535 return;
6536 }
6537 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6538 {
6539 /* Splat immediate. */
6540 emit_insn (gen_rtx_SET (target, const_vec));
6541 return;
6542 }
6543 else
6544 {
6545 /* Load from constant pool. */
6546 emit_move_insn (target, const_vec);
6547 return;
6548 }
6549 }
6550
6551 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6552 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6553 {
6554 rtx op[2];
6555 size_t i;
6556 size_t num_elements = all_same ? 1 : 2;
6557 for (i = 0; i < num_elements; i++)
6558 {
6559 op[i] = XVECEXP (vals, 0, i);
6560 /* Just in case there is a SUBREG with a smaller mode, do a
6561 conversion. */
6562 if (GET_MODE (op[i]) != inner_mode)
6563 {
6564 rtx tmp = gen_reg_rtx (inner_mode);
6565 convert_move (tmp, op[i], 0);
6566 op[i] = tmp;
6567 }
6568 /* Allow load with splat double word. */
6569 else if (MEM_P (op[i]))
6570 {
6571 if (!all_same)
6572 op[i] = force_reg (inner_mode, op[i]);
6573 }
6574 else if (!REG_P (op[i]))
6575 op[i] = force_reg (inner_mode, op[i]);
6576 }
6577
6578 if (all_same)
6579 {
6580 if (mode == V2DFmode)
6581 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6582 else
6583 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6584 }
6585 else
6586 {
6587 if (mode == V2DFmode)
6588 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6589 else
6590 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6591 }
6592 return;
6593 }
6594
6595 /* Special case initializing vector int if we are on 64-bit systems with
6596 direct move or we have the ISA 3.0 instructions. */
6597 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6598 && TARGET_DIRECT_MOVE_64BIT)
6599 {
6600 if (all_same)
6601 {
6602 rtx element0 = XVECEXP (vals, 0, 0);
6603 if (MEM_P (element0))
6604 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6605 else
6606 element0 = force_reg (SImode, element0);
6607
6608 if (TARGET_P9_VECTOR)
6609 emit_insn (gen_vsx_splat_v4si (target, element0));
6610 else
6611 {
6612 rtx tmp = gen_reg_rtx (DImode);
6613 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6614 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6615 }
6616 return;
6617 }
6618 else
6619 {
6620 rtx elements[4];
6621 size_t i;
6622
6623 for (i = 0; i < 4; i++)
6624 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6625
6626 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6627 elements[2], elements[3]));
6628 return;
6629 }
6630 }
6631
6632 /* With single-precision floating point on VSX, we know that internally single
6633 precision is actually represented as a double. Either make two V2DF
6634 vectors and convert those vectors to single precision, or do one
6635 conversion and splat the result to the other elements. */
6636 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6637 {
6638 if (all_same)
6639 {
6640 rtx element0 = XVECEXP (vals, 0, 0);
6641
6642 if (TARGET_P9_VECTOR)
6643 {
6644 if (MEM_P (element0))
6645 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6646
6647 emit_insn (gen_vsx_splat_v4sf (target, element0));
6648 }
6649
6650 else
6651 {
6652 rtx freg = gen_reg_rtx (V4SFmode);
6653 rtx sreg = force_reg (SFmode, element0);
6654 rtx cvt = (TARGET_XSCVDPSPN
6655 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6656 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6657
6658 emit_insn (cvt);
6659 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6660 const0_rtx));
6661 }
6662 }
6663 else
6664 {
6665 rtx dbl_even = gen_reg_rtx (V2DFmode);
6666 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6667 rtx flt_even = gen_reg_rtx (V4SFmode);
6668 rtx flt_odd = gen_reg_rtx (V4SFmode);
6669 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6670 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6671 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6672 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6673
6674 /* Use VMRGEW if we can instead of doing a permute. */
6675 if (TARGET_P8_VECTOR)
6676 {
6677 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6678 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6679 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6680 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6681 if (BYTES_BIG_ENDIAN)
6682 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6683 else
6684 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6685 }
6686 else
6687 {
6688 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6689 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6690 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6691 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6692 rs6000_expand_extract_even (target, flt_even, flt_odd);
6693 }
6694 }
6695 return;
6696 }
6697
6698 /* Special case initializing vector short/char that are splats if we are on
6699 64-bit systems with direct move. */
6700 if (all_same && TARGET_DIRECT_MOVE_64BIT
6701 && (mode == V16QImode || mode == V8HImode))
6702 {
6703 rtx op0 = XVECEXP (vals, 0, 0);
6704 rtx di_tmp = gen_reg_rtx (DImode);
6705
6706 if (!REG_P (op0))
6707 op0 = force_reg (GET_MODE_INNER (mode), op0);
6708
6709 if (mode == V16QImode)
6710 {
6711 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6712 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6713 return;
6714 }
6715
6716 if (mode == V8HImode)
6717 {
6718 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6719 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6720 return;
6721 }
6722 }
6723
6724 /* Store value to stack temp. Load vector element. Splat. However, splat
6725 of 64-bit items is not supported on Altivec. */
6726 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6727 {
6728 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6729 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6730 XVECEXP (vals, 0, 0));
6731 x = gen_rtx_UNSPEC (VOIDmode,
6732 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6733 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6734 gen_rtvec (2,
6735 gen_rtx_SET (target, mem),
6736 x)));
6737 x = gen_rtx_VEC_SELECT (inner_mode, target,
6738 gen_rtx_PARALLEL (VOIDmode,
6739 gen_rtvec (1, const0_rtx)));
6740 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6741 return;
6742 }
6743
6744 /* One field is non-constant. Load constant then overwrite
6745 varying field. */
6746 if (n_var == 1)
6747 {
6748 rtx copy = copy_rtx (vals);
6749
6750 /* Load constant part of vector, substitute neighboring value for
6751 varying element. */
6752 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6753 rs6000_expand_vector_init (target, copy);
6754
6755 /* Insert variable. */
6756 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6757 return;
6758 }
6759
6760 /* Construct the vector in memory one field at a time
6761 and load the whole vector. */
6762 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6763 for (i = 0; i < n_elts; i++)
6764 emit_move_insn (adjust_address_nv (mem, inner_mode,
6765 i * GET_MODE_SIZE (inner_mode)),
6766 XVECEXP (vals, 0, i));
6767 emit_move_insn (target, mem);
6768 }
6769
6770 /* Set field ELT of TARGET to VAL. */
6771
6772 void
6773 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6774 {
6775 machine_mode mode = GET_MODE (target);
6776 machine_mode inner_mode = GET_MODE_INNER (mode);
6777 rtx reg = gen_reg_rtx (mode);
6778 rtx mask, mem, x;
6779 int width = GET_MODE_SIZE (inner_mode);
6780 int i;
6781
6782 val = force_reg (GET_MODE (val), val);
6783
6784 if (VECTOR_MEM_VSX_P (mode))
6785 {
6786 rtx insn = NULL_RTX;
6787 rtx elt_rtx = GEN_INT (elt);
6788
6789 if (mode == V2DFmode)
6790 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6791
6792 else if (mode == V2DImode)
6793 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6794
6795 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6796 {
6797 if (mode == V4SImode)
6798 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6799 else if (mode == V8HImode)
6800 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6801 else if (mode == V16QImode)
6802 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6803 else if (mode == V4SFmode)
6804 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6805 }
6806
6807 if (insn)
6808 {
6809 emit_insn (insn);
6810 return;
6811 }
6812 }
6813
6814 /* Simplify setting single element vectors like V1TImode. */
6815 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6816 {
6817 emit_move_insn (target, gen_lowpart (mode, val));
6818 return;
6819 }
6820
6821 /* Load single variable value. */
6822 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6823 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6824 x = gen_rtx_UNSPEC (VOIDmode,
6825 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6826 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6827 gen_rtvec (2,
6828 gen_rtx_SET (reg, mem),
6829 x)));
6830
6831 /* Linear sequence. */
6832 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6833 for (i = 0; i < 16; ++i)
6834 XVECEXP (mask, 0, i) = GEN_INT (i);
6835
6836 /* Set permute mask to insert element into target. */
6837 for (i = 0; i < width; ++i)
6838 XVECEXP (mask, 0, elt*width + i)
6839 = GEN_INT (i + 0x10);
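     /* For example, on big-endian, inserting element 1 of a V4SImode vector
     yields the selector { 0,1,2,3, 16,17,18,19, 8,9,10,11, 12,13,14,15 };
     indices 0x10 and up select bytes of the new value held in REG.  */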
6840 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6841
6842 if (BYTES_BIG_ENDIAN)
6843 x = gen_rtx_UNSPEC (mode,
6844 gen_rtvec (3, target, reg,
6845 force_reg (V16QImode, x)),
6846 UNSPEC_VPERM);
6847 else
6848 {
6849 if (TARGET_P9_VECTOR)
6850 x = gen_rtx_UNSPEC (mode,
6851 gen_rtvec (3, reg, target,
6852 force_reg (V16QImode, x)),
6853 UNSPEC_VPERMR);
6854 else
6855 {
6856 /* Invert selector. We prefer to generate VNAND on P8 so
6857 that future fusion opportunities can kick in, but must
6858 generate VNOR elsewhere. */
6859 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6860 rtx iorx = (TARGET_P8_VECTOR
6861 ? gen_rtx_IOR (V16QImode, notx, notx)
6862 : gen_rtx_AND (V16QImode, notx, notx));
6863 rtx tmp = gen_reg_rtx (V16QImode);
6864 emit_insn (gen_rtx_SET (tmp, iorx));
6865
6866 /* Permute with operands reversed and adjusted selector. */
6867 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6868 UNSPEC_VPERM);
6869 }
6870 }
6871
6872 emit_insn (gen_rtx_SET (target, x));
6873 }
6874
6875 /* Extract field ELT from VEC into TARGET. */
6876
6877 void
6878 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6879 {
6880 machine_mode mode = GET_MODE (vec);
6881 machine_mode inner_mode = GET_MODE_INNER (mode);
6882 rtx mem;
6883
6884 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6885 {
6886 switch (mode)
6887 {
6888 default:
6889 break;
6890 case E_V1TImode:
6891 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
6892 emit_move_insn (target, gen_lowpart (TImode, vec));
6893 return;
6894 case E_V2DFmode:
6895 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6896 return;
6897 case E_V2DImode:
6898 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6899 return;
6900 case E_V4SFmode:
6901 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6902 return;
6903 case E_V16QImode:
6904 if (TARGET_DIRECT_MOVE_64BIT)
6905 {
6906 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6907 return;
6908 }
6909 else
6910 break;
6911 case E_V8HImode:
6912 if (TARGET_DIRECT_MOVE_64BIT)
6913 {
6914 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6915 return;
6916 }
6917 else
6918 break;
6919 case E_V4SImode:
6920 if (TARGET_DIRECT_MOVE_64BIT)
6921 {
6922 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6923 return;
6924 }
6925 break;
6926 }
6927 }
6928 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6929 && TARGET_DIRECT_MOVE_64BIT)
6930 {
6931 if (GET_MODE (elt) != DImode)
6932 {
6933 rtx tmp = gen_reg_rtx (DImode);
6934 convert_move (tmp, elt, 0);
6935 elt = tmp;
6936 }
6937 else if (!REG_P (elt))
6938 elt = force_reg (DImode, elt);
6939
6940 switch (mode)
6941 {
6942 case E_V2DFmode:
6943 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6944 return;
6945
6946 case E_V2DImode:
6947 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6948 return;
6949
6950 case E_V4SFmode:
6951 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6952 return;
6953
6954 case E_V4SImode:
6955 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6956 return;
6957
6958 case E_V8HImode:
6959 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6960 return;
6961
6962 case E_V16QImode:
6963 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6964 return;
6965
6966 default:
6967 gcc_unreachable ();
6968 }
6969 }
6970
6971 gcc_assert (CONST_INT_P (elt));
6972
6973 /* Allocate mode-sized buffer. */
6974 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6975
6976 emit_move_insn (mem, vec);
6977
6978 /* Add offset to field within buffer matching vector element. */
6979 mem = adjust_address_nv (mem, inner_mode,
6980 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
6981
6982 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6983 }
6984
6985 /* Helper function to return the register number of an RTX. */
6986 static inline int
6987 regno_or_subregno (rtx op)
6988 {
6989 if (REG_P (op))
6990 return REGNO (op);
6991 else if (SUBREG_P (op))
6992 return subreg_regno (op);
6993 else
6994 gcc_unreachable ();
6995 }
6996
6997 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6998 within the vector (ELEMENT) that has mode SCALAR_MODE. Use a base register
6999 temporary (BASE_TMP) to fix up the address. Return a new memory address
7000 that is valid for reads from or writes to the given register (SCALAR_REG). */
7001
7002 rtx
7003 rs6000_adjust_vec_address (rtx scalar_reg,
7004 rtx mem,
7005 rtx element,
7006 rtx base_tmp,
7007 machine_mode scalar_mode)
7008 {
7009 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7010 rtx addr = XEXP (mem, 0);
7011 rtx element_offset;
7012 rtx new_addr;
7013 bool valid_addr_p;
7014
7015 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7016 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7017
7018 /* Calculate what we need to add to the address to get the element
7019 address. */
7020 if (CONST_INT_P (element))
7021 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7022 else
7023 {
7024 int byte_shift = exact_log2 (scalar_size);
7025 gcc_assert (byte_shift >= 0);
7026
7027 if (byte_shift == 0)
7028 element_offset = element;
7029
7030 else
7031 {
7032 if (TARGET_POWERPC64)
7033 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7034 else
7035 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7036
7037 element_offset = base_tmp;
7038 }
7039 }
7040
7041 /* Create the new address pointing to the element within the vector. If we
7042 are adding 0, we don't have to change the address. */
7043 if (element_offset == const0_rtx)
7044 new_addr = addr;
7045
7046 /* A simple indirect address can be converted into a reg + offset
7047 address. */
7048 else if (REG_P (addr) || SUBREG_P (addr))
7049 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7050
7051 /* Optimize D-FORM addresses that have a constant offset and a constant
7052 element number, by folding the element offset into the address directly. */
7053 else if (GET_CODE (addr) == PLUS)
7054 {
7055 rtx op0 = XEXP (addr, 0);
7056 rtx op1 = XEXP (addr, 1);
7057 rtx insn;
7058
7059 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7060 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7061 {
7062 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7063 rtx offset_rtx = GEN_INT (offset);
7064
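     /* D-form addressing takes a 16-bit signed displacement; 8-byte scalars
     use DS-form loads/stores, whose bottom two offset bits must be zero.  */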
7065 if (IN_RANGE (offset, -32768, 32767)
7066 && (scalar_size < 8 || (offset & 0x3) == 0))
7067 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7068 else
7069 {
7070 emit_move_insn (base_tmp, offset_rtx);
7071 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7072 }
7073 }
7074 else
7075 {
7076 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7077 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7078
7079 /* Note, ADDI requires the register being added to be a base
7080 register. If the register was R0, load it up into the temporary
7081 and do the add. */
7082 if (op1_reg_p
7083 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7084 {
7085 insn = gen_add3_insn (base_tmp, op1, element_offset);
7086 gcc_assert (insn != NULL_RTX);
7087 emit_insn (insn);
7088 }
7089
7090 else if (ele_reg_p
7091 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7092 {
7093 insn = gen_add3_insn (base_tmp, element_offset, op1);
7094 gcc_assert (insn != NULL_RTX);
7095 emit_insn (insn);
7096 }
7097
7098 else
7099 {
7100 emit_move_insn (base_tmp, op1);
7101 emit_insn (gen_add2_insn (base_tmp, element_offset));
7102 }
7103
7104 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7105 }
7106 }
7107
7108 else
7109 {
7110 emit_move_insn (base_tmp, addr);
7111 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7112 }
7113
7114 /* If we have a PLUS, we need to see whether the particular register class
7115 allows for D-FORM or X-FORM addressing. */
7116 if (GET_CODE (new_addr) == PLUS)
7117 {
7118 rtx op1 = XEXP (new_addr, 1);
7119 addr_mask_type addr_mask;
7120 int scalar_regno = regno_or_subregno (scalar_reg);
7121
7122 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7123 if (INT_REGNO_P (scalar_regno))
7124 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7125
7126 else if (FP_REGNO_P (scalar_regno))
7127 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7128
7129 else if (ALTIVEC_REGNO_P (scalar_regno))
7130 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7131
7132 else
7133 gcc_unreachable ();
7134
7135 if (REG_P (op1) || SUBREG_P (op1))
7136 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7137 else
7138 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7139 }
7140
7141 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7142 valid_addr_p = true;
7143
7144 else
7145 valid_addr_p = false;
7146
7147 if (!valid_addr_p)
7148 {
7149 emit_move_insn (base_tmp, new_addr);
7150 new_addr = base_tmp;
7151 }
7152
7153 return change_address (mem, scalar_mode, new_addr);
7154 }
7155
7156 /* Split a variable vec_extract operation into the component instructions. */
7157
7158 void
7159 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7160 rtx tmp_altivec)
7161 {
7162 machine_mode mode = GET_MODE (src);
7163 machine_mode scalar_mode = GET_MODE (dest);
7164 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7165 int byte_shift = exact_log2 (scalar_size);
7166
7167 gcc_assert (byte_shift >= 0);
7168
7169 /* If we are given a memory address, optimize to load just the element. We
7170 don't have to adjust the vector element number on little endian
7171 systems. */
7172 if (MEM_P (src))
7173 {
7174 gcc_assert (REG_P (tmp_gpr));
7175 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7176 tmp_gpr, scalar_mode));
7177 return;
7178 }
7179
7180 else if (REG_P (src) || SUBREG_P (src))
7181 {
7182 int bit_shift = byte_shift + 3;
7183 rtx element2;
7184 int dest_regno = regno_or_subregno (dest);
7185 int src_regno = regno_or_subregno (src);
7186 int element_regno = regno_or_subregno (element);
7187
7188 gcc_assert (REG_P (tmp_gpr));
7189
7190 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7191 a general purpose register. */
7192 if (TARGET_P9_VECTOR
7193 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7194 && INT_REGNO_P (dest_regno)
7195 && ALTIVEC_REGNO_P (src_regno)
7196 && INT_REGNO_P (element_regno))
7197 {
7198 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7199 rtx element_si = gen_rtx_REG (SImode, element_regno);
7200
7201 if (mode == V16QImode)
7202 emit_insn (BYTES_BIG_ENDIAN
7203 ? gen_vextublx (dest_si, element_si, src)
7204 : gen_vextubrx (dest_si, element_si, src));
7205
7206 else if (mode == V8HImode)
7207 {
7208 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7209 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7210 emit_insn (BYTES_BIG_ENDIAN
7211 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7212 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7213 }
7214
7216 else
7217 {
7218 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7219 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7220 emit_insn (BYTES_BIG_ENDIAN
7221 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7222 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7223 }
7224
7225 return;
7226 }
7227
7229 gcc_assert (REG_P (tmp_altivec));
7230
7231 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7232 an XOR, otherwise we need to subtract. The shift amount is chosen so
7233 that VSLO will shift the element into the upper position (adding 3
7234 converts a byte shift into a bit shift). */
7235 if (scalar_size == 8)
7236 {
7237 if (!BYTES_BIG_ENDIAN)
7238 {
7239 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7240 element2 = tmp_gpr;
7241 }
7242 else
7243 element2 = element;
7244
7245 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7246 bit. */
7247 emit_insn (gen_rtx_SET (tmp_gpr,
7248 gen_rtx_AND (DImode,
7249 gen_rtx_ASHIFT (DImode,
7250 element2,
7251 GEN_INT (6)),
7252 GEN_INT (64))));
7253 }
7254 else
7255 {
7256 if (!BYTES_BIG_ENDIAN)
7257 {
7258 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7259
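     /* Convert the element number from little-endian to big-endian order,
     e.g. in V4SImode, element 1 becomes 3 - 1 = 2.  */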
7260 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7261 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7262 element2 = tmp_gpr;
7263 }
7264 else
7265 element2 = element;
7266
7267 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7268 }
7269
7270 /* Get the value into the lower byte of the Altivec register where VSLO
7271 expects it. */
7272 if (TARGET_P9_VECTOR)
7273 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7274 else if (can_create_pseudo_p ())
7275 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7276 else
7277 {
7278 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7279 emit_move_insn (tmp_di, tmp_gpr);
7280 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7281 }
7282
7283 /* Do the VSLO to get the value into the final location. */
7284 switch (mode)
7285 {
7286 case E_V2DFmode:
7287 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7288 return;
7289
7290 case E_V2DImode:
7291 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7292 return;
7293
7294 case E_V4SFmode:
7295 {
7296 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7297 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7298 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7299 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7300 tmp_altivec));
7301
7302 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7303 return;
7304 }
7305
7306 case E_V4SImode:
7307 case E_V8HImode:
7308 case E_V16QImode:
7309 {
7310 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7311 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7312 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7313 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7314 tmp_altivec));
7315 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7316 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7317 GEN_INT (64 - (8 * scalar_size))));
7318 return;
7319 }
7320
7321 default:
7322 gcc_unreachable ();
7323 }
7324
7325 return;
7326 }
7327 else
7328 gcc_unreachable ();
7329 }
7330
7331 /* Return the alignment of TYPE. The existing alignment is ALIGN. HOW
7332 selects whether the alignment being applied is the ABI-mandated
7333 alignment, the optional alignment, or both. */
7334
7335 unsigned int
7336 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7337 {
7338 if (how != align_opt)
7339 {
7340 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7341 align = 128;
7342 }
7343
7344 if (how != align_abi)
7345 {
7346 if (TREE_CODE (type) == ARRAY_TYPE
7347 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7348 {
7349 if (align < BITS_PER_WORD)
7350 align = BITS_PER_WORD;
7351 }
7352 }
7353
7354 return align;
7355 }
7356
7357 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7358 instructions simply ignore the low bits; VSX memory instructions
7359 are aligned to 4 or 8 bytes. */
7360
7361 static bool
7362 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7363 {
7364 return (STRICT_ALIGNMENT
7365 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7366 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7367 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7368 && (int) align < VECTOR_ALIGN (mode)))));
7369 }
7370
7371 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7372
7373 bool
7374 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7375 {
7376 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7377 {
7378 if (computed != 128)
7379 {
7380 static bool warned;
7381 if (!warned && warn_psabi)
7382 {
7383 warned = true;
7384 inform (input_location,
7385 "the layout of aggregates containing vectors with"
7386 " %d-byte alignment has changed in GCC 5",
7387 computed / BITS_PER_UNIT);
7388 }
7389 }
7390 /* In current GCC there is no special case. */
7391 return false;
7392 }
7393
7394 return false;
7395 }
7396
7397 /* AIX increases natural record alignment to doubleword if the first
7398 field is an FP double while the FP fields remain word aligned. */
7399
7400 unsigned int
7401 rs6000_special_round_type_align (tree type, unsigned int computed,
7402 unsigned int specified)
7403 {
7404 unsigned int align = MAX (computed, specified);
7405 tree field = TYPE_FIELDS (type);
7406
7407 /* Skip all non-field decls. */
7408 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7409 field = DECL_CHAIN (field);
7410
7411 if (field != NULL && field != type)
7412 {
7413 type = TREE_TYPE (field);
7414 while (TREE_CODE (type) == ARRAY_TYPE)
7415 type = TREE_TYPE (type);
7416
7417 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7418 align = MAX (align, 64);
7419 }
7420
7421 return align;
7422 }
7423
7424 /* Darwin increases record alignment to the natural alignment of
7425 the first field. */
7426
7427 unsigned int
7428 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7429 unsigned int specified)
7430 {
7431 unsigned int align = MAX (computed, specified);
7432
7433 if (TYPE_PACKED (type))
7434 return align;
7435
7436 /* Find the first field, looking down into aggregates. */
7437 do {
7438 tree field = TYPE_FIELDS (type);
7439 /* Skip all non-field decls. */
7440 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7441 field = DECL_CHAIN (field);
7442 if (! field)
7443 break;
7444 /* A packed field does not contribute any extra alignment. */
7445 if (DECL_PACKED (field))
7446 return align;
7447 type = TREE_TYPE (field);
7448 while (TREE_CODE (type) == ARRAY_TYPE)
7449 type = TREE_TYPE (type);
7450 } while (AGGREGATE_TYPE_P (type));
7451
7452 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7453 align = MAX (align, TYPE_ALIGN (type));
7454
7455 return align;
7456 }
7457
7458 /* Return 1 for an operand in small memory on V.4/eabi. */
7459
7460 int
7461 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7462 machine_mode mode ATTRIBUTE_UNUSED)
7463 {
7464 #if TARGET_ELF
7465 rtx sym_ref;
7466
7467 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7468 return 0;
7469
7470 if (DEFAULT_ABI != ABI_V4)
7471 return 0;
7472
7473 if (GET_CODE (op) == SYMBOL_REF)
7474 sym_ref = op;
7475
7476 else if (GET_CODE (op) != CONST
7477 || GET_CODE (XEXP (op, 0)) != PLUS
7478 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7479 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7480 return 0;
7481
7482 else
7483 {
7484 rtx sum = XEXP (op, 0);
7485 HOST_WIDE_INT summand;
7486
7487 /* We have to be careful here, because it is the referenced address
7488 that must be 32k from _SDA_BASE_, not just the symbol. */
7489 summand = INTVAL (XEXP (sum, 1));
7490 if (summand < 0 || summand > g_switch_value)
7491 return 0;
7492
7493 sym_ref = XEXP (sum, 0);
7494 }
7495
7496 return SYMBOL_REF_SMALL_P (sym_ref);
7497 #else
7498 return 0;
7499 #endif
7500 }
7501
7502 /* Return true if either operand is a general purpose register. */
7503
7504 bool
7505 gpr_or_gpr_p (rtx op0, rtx op1)
7506 {
7507 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7508 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7509 }
7510
7511 /* Return true if this is a direct move operation between GPR registers and
7512 floating point/VSX registers. */
7513
7514 bool
7515 direct_move_p (rtx op0, rtx op1)
7516 {
7517 int regno0, regno1;
7518
7519 if (!REG_P (op0) || !REG_P (op1))
7520 return false;
7521
7522 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7523 return false;
7524
7525 regno0 = REGNO (op0);
7526 regno1 = REGNO (op1);
7527 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7528 return false;
7529
7530 if (INT_REGNO_P (regno0))
7531 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7532
7533 else if (INT_REGNO_P (regno1))
7534 {
7535 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7536 return true;
7537
7538 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7539 return true;
7540 }
7541
7542 return false;
7543 }
7544
7545 /* Return true if the OFFSET is valid for the quad address instructions that
7546 use d-form (register + offset) addressing. */
7547
7548 static inline bool
7549 quad_address_offset_p (HOST_WIDE_INT offset)
7550 {
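     /* DQ-form instructions take a 16-bit signed offset whose bottom four
     bits are zero, so e.g. 32 is a valid offset while 24 is not.  */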
7551 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7552 }
7553
7554 /* Return true if ADDR is an acceptable address for a quad memory
7555 operation of mode MODE (either LQ/STQ for general purpose registers, or
7556 LXV/STXV for vector registers under ISA 3.0). STRICT selects strict
7557 rather than lax checking of the base register, as with the other
7558 address predicates. */
7559
7560 bool
7561 quad_address_p (rtx addr, machine_mode mode, bool strict)
7562 {
7563 rtx op0, op1;
7564
7565 if (GET_MODE_SIZE (mode) != 16)
7566 return false;
7567
7568 if (legitimate_indirect_address_p (addr, strict))
7569 return true;
7570
7571 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7572 return false;
7573
7574 if (GET_CODE (addr) != PLUS)
7575 return false;
7576
7577 op0 = XEXP (addr, 0);
7578 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7579 return false;
7580
7581 op1 = XEXP (addr, 1);
7582 if (!CONST_INT_P (op1))
7583 return false;
7584
7585 return quad_address_offset_p (INTVAL (op1));
7586 }
7587
7588 /* Return true if this is a load or store quad operation. This function does
7589 not handle the atomic quad memory instructions. */
7590
7591 bool
7592 quad_load_store_p (rtx op0, rtx op1)
7593 {
7594 bool ret;
7595
7596 if (!TARGET_QUAD_MEMORY)
7597 ret = false;
7598
7599 else if (REG_P (op0) && MEM_P (op1))
7600 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7601 && quad_memory_operand (op1, GET_MODE (op1))
7602 && !reg_overlap_mentioned_p (op0, op1));
7603
7604 else if (MEM_P (op0) && REG_P (op1))
7605 ret = (quad_memory_operand (op0, GET_MODE (op0))
7606 && quad_int_reg_operand (op1, GET_MODE (op1)));
7607
7608 else
7609 ret = false;
7610
7611 if (TARGET_DEBUG_ADDR)
7612 {
7613 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7614 ret ? "true" : "false");
7615 debug_rtx (gen_rtx_SET (op0, op1));
7616 }
7617
7618 return ret;
7619 }
7620
7621 /* Given an address, return a constant offset term if one exists. */
7622
7623 static rtx
7624 address_offset (rtx op)
7625 {
7626 if (GET_CODE (op) == PRE_INC
7627 || GET_CODE (op) == PRE_DEC)
7628 op = XEXP (op, 0);
7629 else if (GET_CODE (op) == PRE_MODIFY
7630 || GET_CODE (op) == LO_SUM)
7631 op = XEXP (op, 1);
7632
7633 if (GET_CODE (op) == CONST)
7634 op = XEXP (op, 0);
7635
7636 if (GET_CODE (op) == PLUS)
7637 op = XEXP (op, 1);
7638
7639 if (CONST_INT_P (op))
7640 return op;
7641
7642 return NULL_RTX;
7643 }
7644
7645 /* Return true if the MEM operand is a memory operand suitable for use
7646 with a (full width, possibly multiple) gpr load/store. On
7647 powerpc64 this means the offset must be divisible by 4.
7648 Implements 'Y' constraint.
7649
7650 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7651 a constraint function we know the operand has satisfied a suitable
7652 memory predicate. Also accept some odd rtl generated by reload
7653 (see rs6000_legitimize_reload_address for various forms). It is
7654 important that reload rtl be accepted by appropriate constraints
7655 but not by the operand predicate.
7656
7657 Offsetting a lo_sum should not be allowed, except where we know by
7658 alignment that a 32k boundary is not crossed, but see the ???
7659 comment in rs6000_legitimize_reload_address. Note that by
7660 "offsetting" here we mean a further offset to access parts of the
7661 MEM. It's fine to have a lo_sum where the inner address is offset
7662 from a sym, since the same sym+offset will appear in the high part
7663 of the address calculation. */
7664
7665 bool
7666 mem_operand_gpr (rtx op, machine_mode mode)
7667 {
7668 unsigned HOST_WIDE_INT offset;
7669 int extra;
7670 rtx addr = XEXP (op, 0);
7671
7672 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7673 if (TARGET_UPDATE
7674 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7675 && mode_supports_pre_incdec_p (mode)
7676 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7677 return true;
7678
7679 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7680 if (!rs6000_offsettable_memref_p (op, mode, false))
7681 return false;
7682
7683 op = address_offset (addr);
7684 if (op == NULL_RTX)
7685 return true;
7686
7687 offset = INTVAL (op);
7688 if (TARGET_POWERPC64 && (offset & 3) != 0)
7689 return false;
7690
7691 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7692 if (extra < 0)
7693 extra = 0;
7694
7695 if (GET_CODE (addr) == LO_SUM)
7696 /* For lo_sum addresses, we must allow any offset except one that
7697 causes a wrap, so test only the low 16 bits. */
7698 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7699
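     /* I.e. -0x8000 <= offset < 0x8000 - extra, computed in unsigned
     arithmetic so a single comparison suffices.  */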
7700 return offset + 0x8000 < 0x10000u - extra;
7701 }
7702
7703 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7704 enforce an offset divisible by 4 even for 32-bit. */
7705
7706 bool
7707 mem_operand_ds_form (rtx op, machine_mode mode)
7708 {
7709 unsigned HOST_WIDE_INT offset;
7710 int extra;
7711 rtx addr = XEXP (op, 0);
7712
7713 if (!offsettable_address_p (false, mode, addr))
7714 return false;
7715
7716 op = address_offset (addr);
7717 if (op == NULL_RTX)
7718 return true;
7719
7720 offset = INTVAL (op);
7721 if ((offset & 3) != 0)
7722 return false;
7723
7724 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7725 if (extra < 0)
7726 extra = 0;
7727
7728 if (GET_CODE (addr) == LO_SUM)
7729 /* For lo_sum addresses, we must allow any offset except one that
7730 causes a wrap, so test only the low 16 bits. */
7731 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7732
7733 return offset + 0x8000 < 0x10000u - extra;
7734 }
7735 \f
7736 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7737
7738 static bool
7739 reg_offset_addressing_ok_p (machine_mode mode)
7740 {
7741 switch (mode)
7742 {
7743 case E_V16QImode:
7744 case E_V8HImode:
7745 case E_V4SFmode:
7746 case E_V4SImode:
7747 case E_V2DFmode:
7748 case E_V2DImode:
7749 case E_V1TImode:
7750 case E_TImode:
7751 case E_TFmode:
7752 case E_KFmode:
7753 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7754 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7755 a vector mode, if we want to use the VSX registers to move it around,
7756 we need to restrict ourselves to reg+reg addressing. Similarly for
7757 IEEE 128-bit floating point that is passed in a single vector
7758 register. */
7759 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7760 return mode_supports_dq_form (mode);
7761 break;
7762
7763 case E_SDmode:
7764 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7765 addressing for the LFIWZX and STFIWX instructions. */
7766 if (TARGET_NO_SDMODE_STACK)
7767 return false;
7768 break;
7769
7770 default:
7771 break;
7772 }
7773
7774 return true;
7775 }
7776
7777 static bool
7778 virtual_stack_registers_memory_p (rtx op)
7779 {
7780 int regnum;
7781
7782 if (GET_CODE (op) == REG)
7783 regnum = REGNO (op);
7784
7785 else if (GET_CODE (op) == PLUS
7786 && GET_CODE (XEXP (op, 0)) == REG
7787 && GET_CODE (XEXP (op, 1)) == CONST_INT)
7788 regnum = REGNO (XEXP (op, 0));
7789
7790 else
7791 return false;
7792
7793 return (regnum >= FIRST_VIRTUAL_REGISTER
7794 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7795 }
7796
7797 /* Return true if a MODE sized memory accesses to OP plus OFFSET
7798 is known to not straddle a 32k boundary. This function is used
7799 to determine whether -mcmodel=medium code can use TOC pointer
7800 relative addressing for OP. This means the alignment of the TOC
7801 pointer must also be taken into account, and unfortunately that is
7802 only 8 bytes. */
7803
7804 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7805 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7806 #endif
7807
7808 static bool
7809 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7810 machine_mode mode)
7811 {
7812 tree decl;
7813 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7814
7815 if (GET_CODE (op) != SYMBOL_REF)
7816 return false;
7817
7818 /* ISA 3.0 vector d-form addressing is restricted, don't allow
7819 SYMBOL_REF. */
7820 if (mode_supports_dq_form (mode))
7821 return false;
7822
7823 dsize = GET_MODE_SIZE (mode);
7824 decl = SYMBOL_REF_DECL (op);
7825 if (!decl)
7826 {
7827 if (dsize == 0)
7828 return false;
7829
7830 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7831 replacing memory addresses with an anchor plus offset. We
7832 could find the decl by rummaging around in the block->objects
7833 VEC for the given offset but that seems like too much work. */
7834 dalign = BITS_PER_UNIT;
7835 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7836 && SYMBOL_REF_ANCHOR_P (op)
7837 && SYMBOL_REF_BLOCK (op) != NULL)
7838 {
7839 struct object_block *block = SYMBOL_REF_BLOCK (op);
7840
7841 dalign = block->alignment;
7842 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7843 }
7844 else if (CONSTANT_POOL_ADDRESS_P (op))
7845 {
7846 /* It would be nice to have get_pool_align()... */
7847 machine_mode cmode = get_pool_mode (op);
7848
7849 dalign = GET_MODE_ALIGNMENT (cmode);
7850 }
7851 }
7852 else if (DECL_P (decl))
7853 {
7854 dalign = DECL_ALIGN (decl);
7855
7856 if (dsize == 0)
7857 {
7858 /* Allow BLKmode when the entire object is known to not
7859 cross a 32k boundary. */
7860 if (!DECL_SIZE_UNIT (decl))
7861 return false;
7862
7863 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7864 return false;
7865
7866 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7867 if (dsize > 32768)
7868 return false;
7869
7870 dalign /= BITS_PER_UNIT;
7871 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7872 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7873 return dalign >= dsize;
7874 }
7875 }
7876 else
7877 gcc_unreachable ();
7878
7879 /* Find how many bits of the alignment we know for this access. */
7880 dalign /= BITS_PER_UNIT;
7881 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7882 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7883 mask = dalign - 1;
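     /* OFFSET & -OFFSET isolates the lowest set bit of the offset; the
     alignment known for the access is the smaller of DALIGN and that bit.  */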
7884 lsb = offset & -offset;
7885 mask &= lsb - 1;
7886 dalign = mask + 1;
7887
7888 return dalign >= dsize;
7889 }
7890
7891 static bool
7892 constant_pool_expr_p (rtx op)
7893 {
7894 rtx base, offset;
7895
7896 split_const (op, &base, &offset);
7897 return (GET_CODE (base) == SYMBOL_REF
7898 && CONSTANT_POOL_ADDRESS_P (base)
7899 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7900 }
7901
7902 /* These are only used to pass through from print_operand/print_operand_address
7903 to rs6000_output_addr_const_extra over the intervening function
7904 output_addr_const which is not target code. */
7905 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7906
7907 /* Return true if OP is a toc pointer relative address (the output
7908 of create_TOC_reference). If STRICT, do not match non-split
7909 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7910 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7911 TOCREL_OFFSET_RET respectively. */
7912
7913 bool
7914 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7915 const_rtx *tocrel_offset_ret)
7916 {
7917 if (!TARGET_TOC)
7918 return false;
7919
7920 if (TARGET_CMODEL != CMODEL_SMALL)
7921 {
7922 /* When STRICT, ensure we have everything tidy. */
7923 if (strict
7924 && !(GET_CODE (op) == LO_SUM
7925 && REG_P (XEXP (op, 0))
7926 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7927 return false;
7928
7929 /* When not strict, allow non-split TOC addresses and also allow
7930 (lo_sum (high ..)) TOC addresses created during reload. */
7931 if (GET_CODE (op) == LO_SUM)
7932 op = XEXP (op, 1);
7933 }
7934
7935 const_rtx tocrel_base = op;
7936 const_rtx tocrel_offset = const0_rtx;
7937
7938 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7939 {
7940 tocrel_base = XEXP (op, 0);
7941 tocrel_offset = XEXP (op, 1);
7942 }
7943
7944 if (tocrel_base_ret)
7945 *tocrel_base_ret = tocrel_base;
7946 if (tocrel_offset_ret)
7947 *tocrel_offset_ret = tocrel_offset;
7948
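     /* The base must be the UNSPEC_TOCREL created by create_TOC_reference,
     with the TOC register as its second operand.  */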
7949 return (GET_CODE (tocrel_base) == UNSPEC
7950 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7951 && REG_P (XVECEXP (tocrel_base, 0, 1))
7952 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7953 }
7954
7955 /* Return true if X is a constant pool address, and also for cmodel=medium
7956 if X is a toc-relative address known to be offsettable within MODE. */
7957
7958 bool
7959 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7960 bool strict)
7961 {
7962 const_rtx tocrel_base, tocrel_offset;
7963 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7964 && (TARGET_CMODEL != CMODEL_MEDIUM
7965 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7966 || mode == QImode
7967 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7968 INTVAL (tocrel_offset), mode)));
7969 }
7970
7971 static bool
7972 legitimate_small_data_p (machine_mode mode, rtx x)
7973 {
7974 return (DEFAULT_ABI == ABI_V4
7975 && !flag_pic && !TARGET_TOC
7976 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
7977 && small_data_operand (x, mode));
7978 }
7979
7980 bool
7981 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7982 bool strict, bool worst_case)
7983 {
7984 unsigned HOST_WIDE_INT offset;
7985 unsigned int extra;
7986
7987 if (GET_CODE (x) != PLUS)
7988 return false;
7989 if (!REG_P (XEXP (x, 0)))
7990 return false;
7991 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7992 return false;
7993 if (mode_supports_dq_form (mode))
7994 return quad_address_p (x, mode, strict);
7995 if (!reg_offset_addressing_ok_p (mode))
7996 return virtual_stack_registers_memory_p (x);
7997 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7998 return true;
7999 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8000 return false;
8001
8002 offset = INTVAL (XEXP (x, 1));
8003 extra = 0;
8004 switch (mode)
8005 {
8006 case E_DFmode:
8007 case E_DDmode:
8008 case E_DImode:
8009 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8010 addressing. */
8011 if (VECTOR_MEM_VSX_P (mode))
8012 return false;
8013
8014 if (!worst_case)
8015 break;
8016 if (!TARGET_POWERPC64)
8017 extra = 4;
8018 else if (offset & 3)
8019 return false;
8020 break;
8021
8022 case E_TFmode:
8023 case E_IFmode:
8024 case E_KFmode:
8025 case E_TDmode:
8026 case E_TImode:
8027 case E_PTImode:
8028 extra = 8;
8029 if (!worst_case)
8030 break;
8031 if (!TARGET_POWERPC64)
8032 extra = 12;
8033 else if (offset & 3)
8034 return false;
8035 break;
8036
8037 default:
8038 break;
8039 }
8040
8041 offset += 0x8000;
8042 return offset < 0x10000 - extra;
8043 }
8044
8045 bool
8046 legitimate_indexed_address_p (rtx x, int strict)
8047 {
8048 rtx op0, op1;
8049
8050 if (GET_CODE (x) != PLUS)
8051 return false;
8052
8053 op0 = XEXP (x, 0);
8054 op1 = XEXP (x, 1);
8055
8056 return (REG_P (op0) && REG_P (op1)
8057 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8058 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8059 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8060 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8061 }
8062
8063 bool
8064 avoiding_indexed_address_p (machine_mode mode)
8065 {
8066 /* Avoid indexed addressing for modes that have non-indexed
8067 load/store instruction forms. */
8068 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8069 }
8070
8071 bool
8072 legitimate_indirect_address_p (rtx x, int strict)
8073 {
8074 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8075 }
8076
8077 bool
8078 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8079 {
8080 if (!TARGET_MACHO || !flag_pic
8081 || mode != SImode || GET_CODE (x) != MEM)
8082 return false;
8083 x = XEXP (x, 0);
8084
8085 if (GET_CODE (x) != LO_SUM)
8086 return false;
8087 if (GET_CODE (XEXP (x, 0)) != REG)
8088 return false;
8089 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8090 return false;
8091 x = XEXP (x, 1);
8092
8093 return CONSTANT_P (x);
8094 }
8095
8096 static bool
8097 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8098 {
8099 if (GET_CODE (x) != LO_SUM)
8100 return false;
8101 if (GET_CODE (XEXP (x, 0)) != REG)
8102 return false;
8103 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8104 return false;
8105 /* Quad-word addresses are restricted, and we can't use LO_SUM. */
8106 if (mode_supports_dq_form (mode))
8107 return false;
8108 x = XEXP (x, 1);
8109
8110 if (TARGET_ELF || TARGET_MACHO)
8111 {
8112 bool large_toc_ok;
8113
8114 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8115 return false;
8116 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as it usually calls
8117 push_reload from the reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8118 recognizes some LO_SUM addresses as valid although this
8119 function says the opposite. In most cases LRA can, through its
8120 various transformations, generate correct code for address
8121 reloads; only some LO_SUM cases escape it. So we need code here,
8122 analogous to that in rs6000_legitimize_reload_address for
8123 LO_SUM, saying that some addresses are still valid. */
8124 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8125 && small_toc_ref (x, VOIDmode));
8126 if (TARGET_TOC && ! large_toc_ok)
8127 return false;
8128 if (GET_MODE_NUNITS (mode) != 1)
8129 return false;
8130 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8131 && !(/* ??? Assume floating point reg based on mode? */
8132 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8133 return false;
8134
8135 return CONSTANT_P (x) || large_toc_ok;
8136 }
8137
8138 return false;
8139 }
8140
8141
8142 /* Try machine-dependent ways of modifying an illegitimate address
8143 to be legitimate. If we find one, return the new, valid address.
8144 This is used from only one place: `memory_address' in explow.c.
8145
8146 OLDX is the address as it was before break_out_memory_refs was
8147 called. In some cases it is useful to look at this to decide what
8148 needs to be done.
8149
8150 It is always safe for this function to do nothing. It exists to
8151 recognize opportunities to optimize the output.
8152
8153 On RS/6000, first check for the sum of a register with a constant
8154 integer that is out of range. If so, generate code to add the
8155 constant with the low-order 16 bits masked to the register and force
8156 this result into another register (this can be done with `cau').
8157 Then generate an address of REG+(CONST&0xffff), allowing for the
8158 possibility of bit 16 being a one.
8159
8160 Then check for the sum of a register and something not constant, try to
8161 load the other things into a register and return the sum. */
8162
8163 static rtx
8164 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8165 machine_mode mode)
8166 {
8167 unsigned int extra;
8168
8169 if (!reg_offset_addressing_ok_p (mode)
8170 || mode_supports_dq_form (mode))
8171 {
8172 if (virtual_stack_registers_memory_p (x))
8173 return x;
8174
8175 /* In theory we should not be seeing addresses of the form reg+0,
8176 but just in case it is generated, optimize it away. */
8177 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8178 return force_reg (Pmode, XEXP (x, 0));
8179
8180 /* For TImode with load/store quad, restrict addresses to just a single
8181 pointer, so it works with both GPRs and VSX registers. */
8182 /* Make sure both operands are registers. */
8183 else if (GET_CODE (x) == PLUS
8184 && (mode != TImode || !TARGET_VSX))
8185 return gen_rtx_PLUS (Pmode,
8186 force_reg (Pmode, XEXP (x, 0)),
8187 force_reg (Pmode, XEXP (x, 1)));
8188 else
8189 return force_reg (Pmode, x);
8190 }
8191 if (GET_CODE (x) == SYMBOL_REF)
8192 {
8193 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8194 if (model != 0)
8195 return rs6000_legitimize_tls_address (x, model);
8196 }
8197
8198 extra = 0;
8199 switch (mode)
8200 {
8201 case E_TFmode:
8202 case E_TDmode:
8203 case E_TImode:
8204 case E_PTImode:
8205 case E_IFmode:
8206 case E_KFmode:
8207 /* As in legitimate_offset_address_p we do not assume
8208 worst-case. The mode here is just a hint as to the registers
8209 used. A TImode is usually in gprs, but may actually be in
8210 fprs. Leave worst-case scenario for reload to handle via
8211 insn constraints. PTImode is only GPRs. */
8212 extra = 8;
8213 break;
8214 default:
8215 break;
8216 }
8217
8218 if (GET_CODE (x) == PLUS
8219 && GET_CODE (XEXP (x, 0)) == REG
8220 && GET_CODE (XEXP (x, 1)) == CONST_INT
8221 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8222 >= 0x10000 - extra))
8223 {
8224 HOST_WIDE_INT high_int, low_int;
8225 rtx sum;
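     /* Split the constant into a high part plus a sign-extended low 16-bit
     part, e.g. 0x12345678 becomes 0x12340000 + 0x5678, so the low part can
     serve as a D-form displacement. If the low part comes too close to the
     32k limit for this mode, keep the whole constant in the high part.  */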
8226 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8227 if (low_int >= 0x8000 - extra)
8228 low_int = 0;
8229 high_int = INTVAL (XEXP (x, 1)) - low_int;
8230 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8231 GEN_INT (high_int)), 0);
8232 return plus_constant (Pmode, sum, low_int);
8233 }
8234 else if (GET_CODE (x) == PLUS
8235 && GET_CODE (XEXP (x, 0)) == REG
8236 && GET_CODE (XEXP (x, 1)) != CONST_INT
8237 && GET_MODE_NUNITS (mode) == 1
8238 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8239 || (/* ??? Assume floating point reg based on mode? */
8240 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8241 && !avoiding_indexed_address_p (mode))
8242 {
8243 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8244 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8245 }
8246 else if ((TARGET_ELF
8247 #if TARGET_MACHO
8248 || !MACHO_DYNAMIC_NO_PIC_P
8249 #endif
8250 )
8251 && TARGET_32BIT
8252 && TARGET_NO_TOC
8253 && ! flag_pic
8254 && GET_CODE (x) != CONST_INT
8255 && GET_CODE (x) != CONST_WIDE_INT
8256 && GET_CODE (x) != CONST_DOUBLE
8257 && CONSTANT_P (x)
8258 && GET_MODE_NUNITS (mode) == 1
8259 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8260 || (/* ??? Assume floating point reg based on mode? */
8261 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8262 {
8263 rtx reg = gen_reg_rtx (Pmode);
8264 if (TARGET_ELF)
8265 emit_insn (gen_elf_high (reg, x));
8266 else
8267 emit_insn (gen_macho_high (reg, x));
8268 return gen_rtx_LO_SUM (Pmode, reg, x);
8269 }
8270 else if (TARGET_TOC
8271 && GET_CODE (x) == SYMBOL_REF
8272 && constant_pool_expr_p (x)
8273 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8274 return create_TOC_reference (x, NULL_RTX);
8275 else
8276 return x;
8277 }
8278
8279 /* Debug version of rs6000_legitimize_address. */
8280 static rtx
8281 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8282 {
8283 rtx ret;
8284 rtx_insn *insns;
8285
8286 start_sequence ();
8287 ret = rs6000_legitimize_address (x, oldx, mode);
8288 insns = get_insns ();
8289 end_sequence ();
8290
8291 if (ret != x)
8292 {
8293 fprintf (stderr,
8294 "\nrs6000_legitimize_address: mode %s, old code %s, "
8295 "new code %s, modified\n",
8296 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8297 GET_RTX_NAME (GET_CODE (ret)));
8298
8299 fprintf (stderr, "Original address:\n");
8300 debug_rtx (x);
8301
8302 fprintf (stderr, "oldx:\n");
8303 debug_rtx (oldx);
8304
8305 fprintf (stderr, "New address:\n");
8306 debug_rtx (ret);
8307
8308 if (insns)
8309 {
8310 fprintf (stderr, "Insns added:\n");
8311 debug_rtx_list (insns, 20);
8312 }
8313 }
8314 else
8315 {
8316 fprintf (stderr,
8317 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8318 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8319
8320 debug_rtx (x);
8321 }
8322
8323 if (insns)
8324 emit_insn (insns);
8325
8326 return ret;
8327 }
8328
8329 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8330 We need to emit DTP-relative relocations. */
8331
8332 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8333 static void
8334 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8335 {
8336 switch (size)
8337 {
8338 case 4:
8339 fputs ("\t.long\t", file);
8340 break;
8341 case 8:
8342 fputs (DOUBLE_INT_ASM_OP, file);
8343 break;
8344 default:
8345 gcc_unreachable ();
8346 }
8347 output_addr_const (file, x);
8348 if (TARGET_ELF)
8349 fputs ("@dtprel+0x8000", file);
8350 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8351 {
8352 switch (SYMBOL_REF_TLS_MODEL (x))
8353 {
8354 case 0:
8355 break;
8356 case TLS_MODEL_LOCAL_EXEC:
8357 fputs ("@le", file);
8358 break;
8359 case TLS_MODEL_INITIAL_EXEC:
8360 fputs ("@ie", file);
8361 break;
8362 case TLS_MODEL_GLOBAL_DYNAMIC:
8363 case TLS_MODEL_LOCAL_DYNAMIC:
8364 fputs ("@m", file);
8365 break;
8366 default:
8367 gcc_unreachable ();
8368 }
8369 }
8370 }
8371
8372 /* Return true if X is a symbol that refers to real (rather than emulated)
8373 TLS. */
8374
8375 static bool
8376 rs6000_real_tls_symbol_ref_p (rtx x)
8377 {
8378 return (GET_CODE (x) == SYMBOL_REF
8379 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8380 }
8381
8382 /* In the name of slightly smaller debug output, and to cater to
8383 general assembler lossage, recognize various UNSPEC sequences
8384 and turn them back into a direct symbol reference. */
8385
8386 static rtx
8387 rs6000_delegitimize_address (rtx orig_x)
8388 {
8389 rtx x, y, offset;
8390
8391 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8392 orig_x = XVECEXP (orig_x, 0, 0);
8393
8394 orig_x = delegitimize_mem_from_attrs (orig_x);
8395
8396 x = orig_x;
8397 if (MEM_P (x))
8398 x = XEXP (x, 0);
8399
8400 y = x;
8401 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8402 y = XEXP (y, 1);
8403
8404 offset = NULL_RTX;
8405 if (GET_CODE (y) == PLUS
8406 && GET_MODE (y) == Pmode
8407 && CONST_INT_P (XEXP (y, 1)))
8408 {
8409 offset = XEXP (y, 1);
8410 y = XEXP (y, 0);
8411 }
8412
8413 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8414 {
8415 y = XVECEXP (y, 0, 0);
8416
8417 #ifdef HAVE_AS_TLS
8418 /* Do not associate thread-local symbols with the original
8419 constant pool symbol. */
8420 if (TARGET_XCOFF
8421 && GET_CODE (y) == SYMBOL_REF
8422 && CONSTANT_POOL_ADDRESS_P (y)
8423 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8424 return orig_x;
8425 #endif
8426
8427 if (offset != NULL_RTX)
8428 y = gen_rtx_PLUS (Pmode, y, offset);
8429 if (!MEM_P (orig_x))
8430 return y;
8431 else
8432 return replace_equiv_address_nv (orig_x, y);
8433 }
8434
8435 if (TARGET_MACHO
8436 && GET_CODE (orig_x) == LO_SUM
8437 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8438 {
8439 y = XEXP (XEXP (orig_x, 1), 0);
8440 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8441 return XVECEXP (y, 0, 0);
8442 }
8443
8444 return orig_x;
8445 }
8446
8447 /* Return true if X shouldn't be emitted into the debug info.
8448 The linker doesn't like .toc section references from
8449 .debug_* sections, so reject .toc section symbols. */
8450
8451 static bool
8452 rs6000_const_not_ok_for_debug_p (rtx x)
8453 {
8454 if (GET_CODE (x) == UNSPEC)
8455 return true;
8456 if (GET_CODE (x) == SYMBOL_REF
8457 && CONSTANT_POOL_ADDRESS_P (x))
8458 {
8459 rtx c = get_pool_constant (x);
8460 machine_mode cmode = get_pool_mode (x);
8461 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8462 return true;
8463 }
8464
8465 return false;
8466 }
8467
8468 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8469
8470 static bool
8471 rs6000_legitimate_combined_insn (rtx_insn *insn)
8472 {
8473 int icode = INSN_CODE (insn);
8474
8475 /* Reject creating doloop insns. Combine should not be allowed
8476 to create these for a number of reasons:
8477 1) In a nested loop, if combine creates one of these in an
8478 outer loop and the register allocator happens to allocate ctr
8479 to the outer loop insn, then the inner loop can't use ctr.
8480 Inner loops ought to be more highly optimized.
8481 2) Combine often wants to create one of these from what was
8482 originally a three insn sequence, first combining the three
8483 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8484 allocated ctr, the splitter takes us back to the three insn
8485 sequence. It's better to stop combine at the two insn
8486 sequence.
8487 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8488 insns, the register allocator sometimes uses floating point
8489 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8490 jump insn and output reloads are not implemented for jumps,
8491 the ctrsi/ctrdi splitters need to handle all possible cases.
8492 That's a pain, and it gets to be seriously difficult when a
8493 splitter that runs after reload needs memory to transfer from
8494 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8495 for the difficult case. It's better to not create problems
8496 in the first place. */
8497 if (icode != CODE_FOR_nothing
8498 && (icode == CODE_FOR_bdz_si
8499 || icode == CODE_FOR_bdz_di
8500 || icode == CODE_FOR_bdnz_si
8501 || icode == CODE_FOR_bdnz_di
8502 || icode == CODE_FOR_bdztf_si
8503 || icode == CODE_FOR_bdztf_di
8504 || icode == CODE_FOR_bdnztf_si
8505 || icode == CODE_FOR_bdnztf_di))
8506 return false;
8507
8508 return true;
8509 }
8510
8511 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8512
8513 static GTY(()) rtx rs6000_tls_symbol;
8514 static rtx
8515 rs6000_tls_get_addr (void)
8516 {
8517 if (!rs6000_tls_symbol)
8518 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8519
8520 return rs6000_tls_symbol;
8521 }
8522
8523 /* Construct the SYMBOL_REF for TLS GOT references. */
8524
8525 static GTY(()) rtx rs6000_got_symbol;
8526 static rtx
8527 rs6000_got_sym (void)
8528 {
8529 if (!rs6000_got_symbol)
8530 {
8531 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8532 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8533 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8534 }
8535
8536 return rs6000_got_symbol;
8537 }
8538
8539 /* AIX Thread-Local Address support. */
8540
8541 static rtx
8542 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8543 {
8544 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8545 const char *name;
8546 char *tlsname;
8547
8548 name = XSTR (addr, 0);
8549 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8550 or the symbol will be in TLS private data section. */
8551 if (name[strlen (name) - 1] != ']'
8552 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8553 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8554 {
8555 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8556 strcpy (tlsname, name);
8557 strcat (tlsname,
8558 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8559 tlsaddr = copy_rtx (addr);
8560 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8561 }
8562 else
8563 tlsaddr = addr;
8564
8565 /* Place addr into TOC constant pool. */
8566 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8567
8568 /* Output the TOC entry and create the MEM referencing the value. */
8569 if (constant_pool_expr_p (XEXP (sym, 0))
8570 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8571 {
8572 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8573 mem = gen_const_mem (Pmode, tocref);
8574 set_mem_alias_set (mem, get_TOC_alias_set ());
8575 }
8576 else
8577 return sym;
8578
8579 /* Use global-dynamic for local-dynamic. */
8580 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8581 || model == TLS_MODEL_LOCAL_DYNAMIC)
8582 {
8583 /* Create new TOC reference for @m symbol. */
8584 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8585 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8586 strcpy (tlsname, "*LCM");
8587 strcat (tlsname, name + 3);
8588 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8589 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8590 tocref = create_TOC_reference (modaddr, NULL_RTX);
8591 rtx modmem = gen_const_mem (Pmode, tocref);
8592 set_mem_alias_set (modmem, get_TOC_alias_set ());
8593
8594 rtx modreg = gen_reg_rtx (Pmode);
8595 emit_insn (gen_rtx_SET (modreg, modmem));
8596
8597 tmpreg = gen_reg_rtx (Pmode);
8598 emit_insn (gen_rtx_SET (tmpreg, mem));
8599
8600 dest = gen_reg_rtx (Pmode);
8601 if (TARGET_32BIT)
8602 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8603 else
8604 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8605 return dest;
8606 }
8607 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8608 else if (TARGET_32BIT)
8609 {
8610 tlsreg = gen_reg_rtx (SImode);
8611 emit_insn (gen_tls_get_tpointer (tlsreg));
8612 }
8613 else
8614 tlsreg = gen_rtx_REG (DImode, 13);
8615
8616 /* Load the TOC value into temporary register. */
8617 tmpreg = gen_reg_rtx (Pmode);
8618 emit_insn (gen_rtx_SET (tmpreg, mem));
8619 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8620 gen_rtx_MINUS (Pmode, addr, tlsreg));
8621
8622 /* Add TOC symbol value to TLS pointer. */
8623 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8624
8625 return dest;
8626 }
8627
8628 /* Mess with a call to make it look like the tls_gdld insns when
8629 !TARGET_TLS_MARKERS. These insns have an extra unspec to
8630 differentiate them from standard calls, because they need to emit
8631 the arg setup insns as well as the actual call. That keeps the
8632 arg setup insns immediately adjacent to the branch and link. */
8633
8634 static void
8635 edit_tls_call_insn (rtx arg)
8636 {
8637 rtx call_insn = last_call_insn ();
8638 if (!TARGET_TLS_MARKERS)
8639 {
8640 rtx patt = PATTERN (call_insn);
8641 gcc_assert (GET_CODE (patt) == PARALLEL);
8642 rtvec orig = XVEC (patt, 0);
8643 rtvec v = rtvec_alloc (GET_NUM_ELEM (orig) + 1);
8644 gcc_assert (GET_NUM_ELEM (orig) > 0);
8645 /* The (set (..) (call (mem ..))). */
8646 RTVEC_ELT (v, 0) = RTVEC_ELT (orig, 0);
8647 /* The extra unspec. */
8648 RTVEC_ELT (v, 1) = arg;
8649 /* All other assorted call pattern pieces. */
8650 for (int i = 1; i < GET_NUM_ELEM (orig); i++)
8651 RTVEC_ELT (v, i + 1) = RTVEC_ELT (orig, i);
8652 XVEC (patt, 0) = v;
8653 }
8654 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8655 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8656 pic_offset_table_rtx);
8657 }
8658
8659 /* Passes the TLS arg value from the global-dynamic and local-dynamic
8660 emit_library_call_value calls in rs6000_legitimize_tls_address to
8661 rs6000_call_aix and rs6000_call_sysv. This is used to emit the
8662 marker relocs put on __tls_get_addr calls. */
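/* A typical 64-bit ELF small-code-model sequence with markers is
     addi 3,2,x@got@tlsgd
     bl __tls_get_addr(x@tlsgd)
     nop
   where the parenthesized operand emits the marker reloc that ties
   the call to its argument setup.  */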
8663 static rtx global_tlsarg;
8664
8665 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8666 this (thread-local) address. */
8667
8668 static rtx
8669 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8670 {
8671 rtx dest, insn;
8672
8673 if (TARGET_XCOFF)
8674 return rs6000_legitimize_tls_address_aix (addr, model);
8675
8676 dest = gen_reg_rtx (Pmode);
8677 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8678 {
8679 rtx tlsreg;
8680
8681 if (TARGET_64BIT)
8682 {
8683 tlsreg = gen_rtx_REG (Pmode, 13);
8684 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8685 }
8686 else
8687 {
8688 tlsreg = gen_rtx_REG (Pmode, 2);
8689 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8690 }
8691 emit_insn (insn);
8692 }
8693 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8694 {
8695 rtx tlsreg, tmp;
8696
8697 tmp = gen_reg_rtx (Pmode);
8698 if (TARGET_64BIT)
8699 {
8700 tlsreg = gen_rtx_REG (Pmode, 13);
8701 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8702 }
8703 else
8704 {
8705 tlsreg = gen_rtx_REG (Pmode, 2);
8706 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8707 }
8708 emit_insn (insn);
8709 if (TARGET_64BIT)
8710 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8711 else
8712 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8713 emit_insn (insn);
8714 }
8715 else
8716 {
8717 rtx got, tga, tmp1, tmp2;
8718
8719 /* We currently use relocations like @got@tlsgd for tls, which
8720 means the linker will handle allocation of tls entries, placing
8721 them in the .got section. So use a pointer to the .got section,
8722 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8723 or to secondary GOT sections used by 32-bit -fPIC. */
8724 if (TARGET_64BIT)
8725 got = gen_rtx_REG (Pmode, 2);
8726 else
8727 {
8728 if (flag_pic == 1)
8729 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8730 else
8731 {
8732 rtx gsym = rs6000_got_sym ();
8733 got = gen_reg_rtx (Pmode);
8734 if (flag_pic == 0)
8735 rs6000_emit_move (got, gsym, Pmode);
8736 else
8737 {
8738 rtx mem, lab;
8739
8740 tmp1 = gen_reg_rtx (Pmode);
8741 tmp2 = gen_reg_rtx (Pmode);
8742 mem = gen_const_mem (Pmode, tmp1);
8743 lab = gen_label_rtx ();
8744 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8745 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8746 if (TARGET_LINK_STACK)
8747 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8748 emit_move_insn (tmp2, mem);
8749 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8750 set_unique_reg_note (last, REG_EQUAL, gsym);
8751 }
8752 }
8753 }
8754
8755 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8756 {
8757 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8758 UNSPEC_TLSGD);
8759 global_tlsarg = arg;
8760 rtx argreg = const0_rtx;
8761 if (TARGET_TLS_MARKERS)
8762 {
8763 argreg = gen_rtx_REG (Pmode, 3);
8764 emit_insn (gen_rtx_SET (argreg, arg));
8765 }
8766
8767 tga = rs6000_tls_get_addr ();
8768 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8769 argreg, Pmode);
8770 global_tlsarg = NULL_RTX;
8771
8772 edit_tls_call_insn (arg);
8773 }
8774 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8775 {
8776 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got),
8777 UNSPEC_TLSLD);
8778 global_tlsarg = arg;
8779 rtx argreg = const0_rtx;
8780 if (TARGET_TLS_MARKERS)
8781 {
8782 argreg = gen_rtx_REG (Pmode, 3);
8783 emit_insn (gen_rtx_SET (argreg, arg));
8784 }
8785
8786 tga = rs6000_tls_get_addr ();
8787 tmp1 = gen_reg_rtx (Pmode);
8788 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8789 argreg, Pmode);
8790 global_tlsarg = NULL_RTX;
8791
8792 edit_tls_call_insn (arg);
8793
8794 if (rs6000_tls_size == 16)
8795 {
8796 if (TARGET_64BIT)
8797 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8798 else
8799 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8800 }
8801 else if (rs6000_tls_size == 32)
8802 {
8803 tmp2 = gen_reg_rtx (Pmode);
8804 if (TARGET_64BIT)
8805 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8806 else
8807 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8808 emit_insn (insn);
8809 if (TARGET_64BIT)
8810 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8811 else
8812 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8813 }
8814 else
8815 {
8816 tmp2 = gen_reg_rtx (Pmode);
8817 if (TARGET_64BIT)
8818 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8819 else
8820 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8821 emit_insn (insn);
8822 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8823 }
8824 emit_insn (insn);
8825 }
8826 else
8827 {
8828 /* IE, or 64-bit offset LE. */
8829 tmp2 = gen_reg_rtx (Pmode);
8830 if (TARGET_64BIT)
8831 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8832 else
8833 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8834 emit_insn (insn);
8835 if (TARGET_64BIT)
8836 insn = gen_tls_tls_64 (dest, tmp2, addr);
8837 else
8838 insn = gen_tls_tls_32 (dest, tmp2, addr);
8839 emit_insn (insn);
8840 }
8841 }
8842
8843 return dest;
8844 }
8845
8846 /* Only create the global variable for the stack protect guard if we are using
8847 the global flavor of that guard. */
8848 static tree
8849 rs6000_init_stack_protect_guard (void)
8850 {
8851 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8852 return default_stack_protect_guard ();
8853
8854 return NULL_TREE;
8855 }
8856
8857 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8858
8859 static bool
8860 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8861 {
8862 if (GET_CODE (x) == HIGH
8863 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8864 return true;
8865
8866 /* A TLS symbol in the TOC cannot contain a sum. */
8867 if (GET_CODE (x) == CONST
8868 && GET_CODE (XEXP (x, 0)) == PLUS
8869 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8870 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8871 return true;
8872
8873 /* Do not place an ELF TLS symbol in the constant pool. */
8874 return TARGET_ELF && tls_referenced_p (x);
8875 }
8876
8877 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8878 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8879 can be addressed relative to the toc pointer. */
8880
8881 static bool
8882 use_toc_relative_ref (rtx sym, machine_mode mode)
8883 {
8884 return ((constant_pool_expr_p (sym)
8885 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8886 get_pool_mode (sym)))
8887 || (TARGET_CMODEL == CMODEL_MEDIUM
8888 && SYMBOL_REF_LOCAL_P (sym)
8889 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8890 }
8891
8892 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8893 replace the input X, or the original X if no replacement is called for.
8894 The output parameter *WIN is 1 if the calling macro should goto WIN,
8895 0 if it should not.
8896
8897 For RS/6000, we wish to handle large displacements off a base
8898 register by splitting the addend across an addi/addis pair and the mem insn.
8899 This cuts the number of extra insns needed from 3 to 1.
8900
8901 On Darwin, we use this to generate code for floating point constants.
8902 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8903 The Darwin code is inside #if TARGET_MACHO because only then are the
8904 machopic_* functions defined. */
8905 static rtx
8906 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8907 int opnum, int type,
8908 int ind_levels ATTRIBUTE_UNUSED, int *win)
8909 {
8910 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8911 bool quad_offset_p = mode_supports_dq_form (mode);
8912
8913 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8914 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8915 if (reg_offset_p
8916 && opnum == 1
8917 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8918 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8919 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8920 && TARGET_P9_VECTOR)
8921 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8922 && TARGET_P9_VECTOR)))
8923 reg_offset_p = false;
8924
8925 /* We must recognize output that we have already generated ourselves. */
8926 if (GET_CODE (x) == PLUS
8927 && GET_CODE (XEXP (x, 0)) == PLUS
8928 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8929 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8930 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8931 {
8932 if (TARGET_DEBUG_ADDR)
8933 {
8934 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8935 debug_rtx (x);
8936 }
8937 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8938 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8939 opnum, (enum reload_type) type);
8940 *win = 1;
8941 return x;
8942 }
8943
8944 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8945 if (GET_CODE (x) == LO_SUM
8946 && GET_CODE (XEXP (x, 0)) == HIGH)
8947 {
8948 if (TARGET_DEBUG_ADDR)
8949 {
8950 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8951 debug_rtx (x);
8952 }
8953 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8954 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8955 opnum, (enum reload_type) type);
8956 *win = 1;
8957 return x;
8958 }
8959
8960 #if TARGET_MACHO
8961 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8962 && GET_CODE (x) == LO_SUM
8963 && GET_CODE (XEXP (x, 0)) == PLUS
8964 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8965 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8966 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8967 && machopic_operand_p (XEXP (x, 1)))
8968 {
8969 /* Result of previous invocation of this function on Darwin
8970 floating point constant. */
8971 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8972 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8973 opnum, (enum reload_type) type);
8974 *win = 1;
8975 return x;
8976 }
8977 #endif
8978
8979 if (TARGET_CMODEL != CMODEL_SMALL
8980 && reg_offset_p
8981 && !quad_offset_p
8982 && small_toc_ref (x, VOIDmode))
8983 {
8984 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8985 x = gen_rtx_LO_SUM (Pmode, hi, x);
8986 if (TARGET_DEBUG_ADDR)
8987 {
8988 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8989 debug_rtx (x);
8990 }
8991 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8992 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8993 opnum, (enum reload_type) type);
8994 *win = 1;
8995 return x;
8996 }
8997
8998 if (GET_CODE (x) == PLUS
8999 && REG_P (XEXP (x, 0))
9000 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9001 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9002 && CONST_INT_P (XEXP (x, 1))
9003 && reg_offset_p
9004 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9005 {
9006 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9007 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9008 HOST_WIDE_INT high
9009 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
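      /* Worked example: val = 0x18000 gives low = -0x8000 (the low 16
	 bits sign-extend) and high = 0x20000, so high + low == val and
	 the addis immediate is high >> 16 == 2.  */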
9010
9011 /* Check for 32-bit overflow or quad addresses with one of the
9012 four least significant bits set. */
9013 if (high + low != val
9014 || (quad_offset_p && (low & 0xf)))
9015 {
9016 *win = 0;
9017 return x;
9018 }
9019
9020 /* Reload the high part into a base reg; leave the low part
9021 in the mem directly. */
9022
9023 x = gen_rtx_PLUS (GET_MODE (x),
9024 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9025 GEN_INT (high)),
9026 GEN_INT (low));
9027
9028 if (TARGET_DEBUG_ADDR)
9029 {
9030 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9031 debug_rtx (x);
9032 }
9033 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9034 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9035 opnum, (enum reload_type) type);
9036 *win = 1;
9037 return x;
9038 }
9039
9040 if (GET_CODE (x) == SYMBOL_REF
9041 && reg_offset_p
9042 && !quad_offset_p
9043 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9044 #if TARGET_MACHO
9045 && DEFAULT_ABI == ABI_DARWIN
9046 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9047 && machopic_symbol_defined_p (x)
9048 #else
9049 && DEFAULT_ABI == ABI_V4
9050 && !flag_pic
9051 #endif
9052 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9053 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9054 without fprs.
9055 ??? Assume floating point reg based on mode? This assumption is
9056 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9057 where reload ends up doing a DFmode load of a constant from
9058 mem using two gprs. Unfortunately, at this point reload
9059 hasn't yet selected regs so poking around in reload data
9060 won't help and even if we could figure out the regs reliably,
9061 we'd still want to allow this transformation when the mem is
9062 naturally aligned. Since we say the address is good here, we
9063 can't disable offsets from LO_SUMs in mem_operand_gpr.
9064 FIXME: Allow offset from lo_sum for other modes too, when
9065 mem is sufficiently aligned.
9066
9067 Also disallow this if the type can go in VMX/Altivec registers, since
9068 those registers do not have d-form (reg+offset) address modes. */
9069 && !reg_addr[mode].scalar_in_vmx_p
9070 && mode != TFmode
9071 && mode != TDmode
9072 && mode != IFmode
9073 && mode != KFmode
9074 && (mode != TImode || !TARGET_VSX)
9075 && mode != PTImode
9076 && (mode != DImode || TARGET_POWERPC64)
9077 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9078 || TARGET_HARD_FLOAT))
9079 {
9080 #if TARGET_MACHO
9081 if (flag_pic)
9082 {
9083 rtx offset = machopic_gen_offset (x);
9084 x = gen_rtx_LO_SUM (GET_MODE (x),
9085 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9086 gen_rtx_HIGH (Pmode, offset)), offset);
9087 }
9088 else
9089 #endif
9090 x = gen_rtx_LO_SUM (GET_MODE (x),
9091 gen_rtx_HIGH (Pmode, x), x);
9092
9093 if (TARGET_DEBUG_ADDR)
9094 {
9095 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9096 debug_rtx (x);
9097 }
9098 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9099 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9100 opnum, (enum reload_type) type);
9101 *win = 1;
9102 return x;
9103 }
9104
9105 /* Reload an offset address wrapped by an AND that represents the
9106 masking of the lower bits. Strip the outer AND and let reload
9107 convert the offset address into an indirect address. For VSX,
9108 force reload to create the address with an AND in a separate
9109 register, because we can't guarantee an altivec register will
9110 be used. */
9111 if (VECTOR_MEM_ALTIVEC_P (mode)
9112 && GET_CODE (x) == AND
9113 && GET_CODE (XEXP (x, 0)) == PLUS
9114 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9115 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9116 && GET_CODE (XEXP (x, 1)) == CONST_INT
9117 && INTVAL (XEXP (x, 1)) == -16)
9118 {
9119 x = XEXP (x, 0);
9120 *win = 1;
9121 return x;
9122 }
9123
9124 if (TARGET_TOC
9125 && reg_offset_p
9126 && !quad_offset_p
9127 && GET_CODE (x) == SYMBOL_REF
9128 && use_toc_relative_ref (x, mode))
9129 {
9130 x = create_TOC_reference (x, NULL_RTX);
9131 if (TARGET_CMODEL != CMODEL_SMALL)
9132 {
9133 if (TARGET_DEBUG_ADDR)
9134 {
9135 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9136 debug_rtx (x);
9137 }
9138 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9139 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9140 opnum, (enum reload_type) type);
9141 }
9142 *win = 1;
9143 return x;
9144 }
9145 *win = 0;
9146 return x;
9147 }
9148
9149 /* Debug version of rs6000_legitimize_reload_address. */
9150 static rtx
9151 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9152 int opnum, int type,
9153 int ind_levels, int *win)
9154 {
9155 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9156 ind_levels, win);
9157 fprintf (stderr,
9158 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9159 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9160 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9161 debug_rtx (x);
9162
9163 if (x == ret)
9164 fprintf (stderr, "Same address returned\n");
9165 else if (!ret)
9166 fprintf (stderr, "NULL returned\n");
9167 else
9168 {
9169 fprintf (stderr, "New address:\n");
9170 debug_rtx (ret);
9171 }
9172
9173 return ret;
9174 }
9175
9176 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9177 that is a valid memory address for an instruction.
9178 The MODE argument is the machine mode for the MEM expression
9179 that wants to use this address.
9180
9181 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9182 refers to a constant pool entry of an address (or the sum of it
9183 plus a constant), a short (16-bit signed) constant plus a register,
9184 the sum of two registers, or a register indirect, possibly with an
9185 auto-increment. For DFmode, DDmode and DImode with a constant plus
9186 register, we must ensure that both words are addressable, or on
9187 PowerPC64 that the offset is word aligned.
9188
9189 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9190 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9191 because adjacent memory cells are accessed by adding word-sized offsets
9192 during assembly output. */
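
/* Illustrative RTL for the four forms (details such as register
   numbers are arbitrary): a TOC-relative (symbol_ref "x");
   (plus (reg 9) (const_int 16)); (plus (reg 9) (reg 10));
   and (reg 9) or (pre_inc (reg 9)) for register indirect.  */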
9193 static bool
9194 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9195 {
9196 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9197 bool quad_offset_p = mode_supports_dq_form (mode);
9198
9199 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9200 if (VECTOR_MEM_ALTIVEC_P (mode)
9201 && GET_CODE (x) == AND
9202 && GET_CODE (XEXP (x, 1)) == CONST_INT
9203 && INTVAL (XEXP (x, 1)) == -16)
9204 x = XEXP (x, 0);
9205
9206 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9207 return 0;
9208 if (legitimate_indirect_address_p (x, reg_ok_strict))
9209 return 1;
9210 if (TARGET_UPDATE
9211 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9212 && mode_supports_pre_incdec_p (mode)
9213 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9214 return 1;
9215 /* Handle restricted vector d-form offsets in ISA 3.0. */
9216 if (quad_offset_p)
9217 {
9218 if (quad_address_p (x, mode, reg_ok_strict))
9219 return 1;
9220 }
9221 else if (virtual_stack_registers_memory_p (x))
9222 return 1;
9223
9224 else if (reg_offset_p)
9225 {
9226 if (legitimate_small_data_p (mode, x))
9227 return 1;
9228 if (legitimate_constant_pool_address_p (x, mode,
9229 reg_ok_strict || lra_in_progress))
9230 return 1;
9231 }
9232
9233 /* For TImode, if we have TImode in VSX registers, only allow register
9234 indirect addresses. This will allow the values to go in either GPRs
9235 or VSX registers without reloading. The vector types would tend to
9236 go into VSX registers, so we allow REG+REG, while TImode seems
9237 somewhat split, in that some uses are GPR based, and some VSX based. */
9238 /* FIXME: We could loosen this by changing the following to
9239 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9240 but currently we cannot allow REG+REG addressing for TImode. See
9241 PR72827 for complete details on how this ends up hoodwinking DSE. */
9242 if (mode == TImode && TARGET_VSX)
9243 return 0;
9244 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9245 if (! reg_ok_strict
9246 && reg_offset_p
9247 && GET_CODE (x) == PLUS
9248 && GET_CODE (XEXP (x, 0)) == REG
9249 && (XEXP (x, 0) == virtual_stack_vars_rtx
9250 || XEXP (x, 0) == arg_pointer_rtx)
9251 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9252 return 1;
9253 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9254 return 1;
9255 if (!FLOAT128_2REG_P (mode)
9256 && (TARGET_HARD_FLOAT
9257 || TARGET_POWERPC64
9258 || (mode != DFmode && mode != DDmode))
9259 && (TARGET_POWERPC64 || mode != DImode)
9260 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9261 && mode != PTImode
9262 && !avoiding_indexed_address_p (mode)
9263 && legitimate_indexed_address_p (x, reg_ok_strict))
9264 return 1;
9265 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9266 && mode_supports_pre_modify_p (mode)
9267 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9268 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9269 reg_ok_strict, false)
9270 || (!avoiding_indexed_address_p (mode)
9271 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9272 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9273 return 1;
9274 if (reg_offset_p && !quad_offset_p
9275 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9276 return 1;
9277 return 0;
9278 }
9279
9280 /* Debug version of rs6000_legitimate_address_p. */
9281 static bool
9282 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9283 bool reg_ok_strict)
9284 {
9285 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9286 fprintf (stderr,
9287 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9288 "strict = %d, reload = %s, code = %s\n",
9289 ret ? "true" : "false",
9290 GET_MODE_NAME (mode),
9291 reg_ok_strict,
9292 (reload_completed ? "after" : "before"),
9293 GET_RTX_NAME (GET_CODE (x)));
9294 debug_rtx (x);
9295
9296 return ret;
9297 }
9298
9299 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9300
9301 static bool
9302 rs6000_mode_dependent_address_p (const_rtx addr,
9303 addr_space_t as ATTRIBUTE_UNUSED)
9304 {
9305 return rs6000_mode_dependent_address_ptr (addr);
9306 }
9307
9308 /* Go to LABEL if ADDR (a legitimate address expression)
9309 has an effect that depends on the machine mode it is used for.
9310
9311 On the RS/6000 this is true of all integral offsets (since AltiVec
9312 and VSX modes don't allow them) or is a pre-increment or decrement.
9313
9314 ??? Except that due to conceptual problems in offsettable_address_p
9315 we can't really report the problems of integral offsets. So leave
9316 this assuming that the adjustable offset must be valid for the
9317 sub-words of a TFmode operand, which is what we had before. */
9318
9319 static bool
9320 rs6000_mode_dependent_address (const_rtx addr)
9321 {
9322 switch (GET_CODE (addr))
9323 {
9324 case PLUS:
9325 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9326 is considered a legitimate address before reload, so there
9327 are no offset restrictions in that case. Note that this
9328 condition is safe in strict mode because any address involving
9329 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9330 been rejected as illegitimate. */
9331 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9332 && XEXP (addr, 0) != arg_pointer_rtx
9333 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9334 {
9335 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9336 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9337 }
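      /* So with TARGET_POWERPC64 any offset in [-0x8000, 0x7ff7] is
	 mode-independent (8 bytes of slack for the trailing sub-words);
	 without it the safe range is [-0x8000, 0x7ff3].  */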
9338 break;
9339
9340 case LO_SUM:
9341 /* Anything in the constant pool is sufficiently aligned that
9342 all bytes have the same high part address. */
9343 return !legitimate_constant_pool_address_p (addr, QImode, false);
9344
9345 /* Auto-increment cases are now treated generically in recog.c. */
9346 case PRE_MODIFY:
9347 return TARGET_UPDATE;
9348
9349 /* AND is only allowed in Altivec loads. */
9350 case AND:
9351 return true;
9352
9353 default:
9354 break;
9355 }
9356
9357 return false;
9358 }
9359
9360 /* Debug version of rs6000_mode_dependent_address. */
9361 static bool
9362 rs6000_debug_mode_dependent_address (const_rtx addr)
9363 {
9364 bool ret = rs6000_mode_dependent_address (addr);
9365
9366 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9367 ret ? "true" : "false");
9368 debug_rtx (addr);
9369
9370 return ret;
9371 }
9372
9373 /* Implement FIND_BASE_TERM. */
9374
9375 rtx
9376 rs6000_find_base_term (rtx op)
9377 {
9378 rtx base;
9379
9380 base = op;
9381 if (GET_CODE (base) == CONST)
9382 base = XEXP (base, 0);
9383 if (GET_CODE (base) == PLUS)
9384 base = XEXP (base, 0);
9385 if (GET_CODE (base) == UNSPEC)
9386 switch (XINT (base, 1))
9387 {
9388 case UNSPEC_TOCREL:
9389 case UNSPEC_MACHOPIC_OFFSET:
9390 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9391 for aliasing purposes. */
9392 return XVECEXP (base, 0, 0);
9393 }
9394
9395 return op;
9396 }
9397
9398 /* More elaborate version of recog's offsettable_memref_p predicate
9399 that works around the ??? note of rs6000_mode_dependent_address.
9400 In particular it accepts
9401
9402 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9403
9404 in 32-bit mode, which the recog predicate rejects. */
9405
9406 static bool
9407 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9408 {
9409 bool worst_case;
9410
9411 if (!MEM_P (op))
9412 return false;
9413
9414 /* First mimic offsettable_memref_p. */
9415 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9416 return true;
9417
9418 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9419 the latter predicate knows nothing about the mode of the memory
9420 reference and, therefore, assumes that it is the largest supported
9421 mode (TFmode). As a consequence, legitimate offsettable memory
9422 references are rejected. rs6000_legitimate_offset_address_p contains
9423 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9424 at least with a little bit of help here given that we know the
9425 actual registers used. */
9426 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9427 || GET_MODE_SIZE (reg_mode) == 4);
9428 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9429 strict, worst_case);
9430 }
9431
9432 /* Determine the reassociation width to be used in reassociate_bb.
9433 This takes into account how many parallel operations we
9434 can actually do of a given type, and also the latency.
9435 P8:
9436 int add/sub 6/cycle
9437 mul 2/cycle
9438 vect add/sub/mul 2/cycle
9439 fp add/sub/mul 2/cycle
9440 dfp 1/cycle
9441 */
9442
9443 static int
9444 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9445 machine_mode mode)
9446 {
9447 switch (rs6000_tune)
9448 {
9449 case PROCESSOR_POWER8:
9450 case PROCESSOR_POWER9:
9451 if (DECIMAL_FLOAT_MODE_P (mode))
9452 return 1;
9453 if (VECTOR_MODE_P (mode))
9454 return 4;
9455 if (INTEGRAL_MODE_P (mode))
9456 return 1;
9457 if (FLOAT_MODE_P (mode))
9458 return 4;
9459 break;
9460 default:
9461 break;
9462 }
9463 return 1;
9464 }
9465
9466 /* Change register usage conditional on target flags. */
9467 static void
9468 rs6000_conditional_register_usage (void)
9469 {
9470 int i;
9471
9472 if (TARGET_DEBUG_TARGET)
9473 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9474
9475 /* Set MQ register fixed (already call_used) so that it will not be
9476 allocated. */
9477 fixed_regs[64] = 1;
9478
9479 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9480 if (TARGET_64BIT)
9481 fixed_regs[13] = call_used_regs[13]
9482 = call_really_used_regs[13] = 1;
9483
9484 /* Conditionally disable FPRs. */
9485 if (TARGET_SOFT_FLOAT)
9486 for (i = 32; i < 64; i++)
9487 fixed_regs[i] = call_used_regs[i]
9488 = call_really_used_regs[i] = 1;
9489
9490 /* The TOC register is not killed across calls in a way that is
9491 visible to the compiler. */
9492 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9493 call_really_used_regs[2] = 0;
9494
9495 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9496 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9497
9498 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9499 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9500 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9501 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9502
9503 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9504 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9505 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9506 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9507
9508 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9509 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9510 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9511
9512 if (!TARGET_ALTIVEC && !TARGET_VSX)
9513 {
9514 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9515 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9516 call_really_used_regs[VRSAVE_REGNO] = 1;
9517 }
9518
9519 if (TARGET_ALTIVEC || TARGET_VSX)
9520 global_regs[VSCR_REGNO] = 1;
9521
9522 if (TARGET_ALTIVEC_ABI)
9523 {
9524 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9525 call_used_regs[i] = call_really_used_regs[i] = 1;
9526
9527 /* AIX reserves VR20:31 in non-extended ABI mode. */
9528 if (TARGET_XCOFF)
9529 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9530 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9531 }
9532 }
9533
9534 \f
9535 /* Output insns to set DEST equal to the constant SOURCE as a series of
9536 lis, ori and shl instructions and return TRUE. */
9537
9538 bool
9539 rs6000_emit_set_const (rtx dest, rtx source)
9540 {
9541 machine_mode mode = GET_MODE (dest);
9542 rtx temp, set;
9543 rtx_insn *insn;
9544 HOST_WIDE_INT c;
9545
9546 gcc_checking_assert (CONST_INT_P (source));
9547 c = INTVAL (source);
9548 switch (mode)
9549 {
9550 case E_QImode:
9551 case E_HImode:
9552 emit_insn (gen_rtx_SET (dest, source));
9553 return true;
9554
9555 case E_SImode:
9556 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9557
9558 emit_insn (gen_rtx_SET (copy_rtx (temp),
9559 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9560 emit_insn (gen_rtx_SET (dest,
9561 gen_rtx_IOR (SImode, copy_rtx (temp),
9562 GEN_INT (c & 0xffff))));
9563 break;
9564
9565 case E_DImode:
9566 if (!TARGET_POWERPC64)
9567 {
9568 rtx hi, lo;
9569
9570 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9571 DImode);
9572 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9573 DImode);
9574 emit_move_insn (hi, GEN_INT (c >> 32));
9575 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9576 emit_move_insn (lo, GEN_INT (c));
9577 }
9578 else
9579 rs6000_emit_set_long_const (dest, c);
9580 break;
9581
9582 default:
9583 gcc_unreachable ();
9584 }
9585
9586 insn = get_last_insn ();
9587 set = single_set (insn);
9588 if (! CONSTANT_P (SET_SRC (set)))
9589 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9590
9591 return true;
9592 }
9593
9594 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9595 Output insns to set DEST equal to the constant C as a series of
9596 lis, ori and shl instructions. */
9597
9598 static void
9599 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9600 {
9601 rtx temp;
9602 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9603
9604 ud1 = c & 0xffff;
9605 c = c >> 16;
9606 ud2 = c & 0xffff;
9607 c = c >> 16;
9608 ud3 = c & 0xffff;
9609 c = c >> 16;
9610 ud4 = c & 0xffff;
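
  /* Example: c = 0xdead0001 splits into ud1 = 0x0001, ud2 = 0xdead,
     ud3 = ud4 = 0, so the ud3 == 0 && ud4 == 0 arm below emits
     lis/ori and then zero-extends to clear the sign bits that lis
     smeared into the upper word.  */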
9611
9612 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9613 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9614 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9615
9616 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9617 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9618 {
9619 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9620
9621 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9622 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9623 if (ud1 != 0)
9624 emit_move_insn (dest,
9625 gen_rtx_IOR (DImode, copy_rtx (temp),
9626 GEN_INT (ud1)));
9627 }
9628 else if (ud3 == 0 && ud4 == 0)
9629 {
9630 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9631
9632 gcc_assert (ud2 & 0x8000);
9633 emit_move_insn (copy_rtx (temp),
9634 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9635 if (ud1 != 0)
9636 emit_move_insn (copy_rtx (temp),
9637 gen_rtx_IOR (DImode, copy_rtx (temp),
9638 GEN_INT (ud1)));
9639 emit_move_insn (dest,
9640 gen_rtx_ZERO_EXTEND (DImode,
9641 gen_lowpart (SImode,
9642 copy_rtx (temp))));
9643 }
9644 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9645 || (ud4 == 0 && ! (ud3 & 0x8000)))
9646 {
9647 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9648
9649 emit_move_insn (copy_rtx (temp),
9650 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9651 if (ud2 != 0)
9652 emit_move_insn (copy_rtx (temp),
9653 gen_rtx_IOR (DImode, copy_rtx (temp),
9654 GEN_INT (ud2)));
9655 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9656 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9657 GEN_INT (16)));
9658 if (ud1 != 0)
9659 emit_move_insn (dest,
9660 gen_rtx_IOR (DImode, copy_rtx (temp),
9661 GEN_INT (ud1)));
9662 }
9663 else
9664 {
9665 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9666
9667 emit_move_insn (copy_rtx (temp),
9668 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9669 if (ud3 != 0)
9670 emit_move_insn (copy_rtx (temp),
9671 gen_rtx_IOR (DImode, copy_rtx (temp),
9672 GEN_INT (ud3)));
9673
9674 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9675 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9676 GEN_INT (32)));
9677 if (ud2 != 0)
9678 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9679 gen_rtx_IOR (DImode, copy_rtx (temp),
9680 GEN_INT (ud2 << 16)));
9681 if (ud1 != 0)
9682 emit_move_insn (dest,
9683 gen_rtx_IOR (DImode, copy_rtx (temp),
9684 GEN_INT (ud1)));
9685 }
9686 }
9687
9688 /* Helper for the following. Get rid of [r+r] memory refs
9689 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9690
9691 static void
9692 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9693 {
9694 if (GET_CODE (operands[0]) == MEM
9695 && GET_CODE (XEXP (operands[0], 0)) != REG
9696 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9697 GET_MODE (operands[0]), false))
9698 operands[0]
9699 = replace_equiv_address (operands[0],
9700 copy_addr_to_reg (XEXP (operands[0], 0)));
9701
9702 if (GET_CODE (operands[1]) == MEM
9703 && GET_CODE (XEXP (operands[1], 0)) != REG
9704 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9705 GET_MODE (operands[1]), false))
9706 operands[1]
9707 = replace_equiv_address (operands[1],
9708 copy_addr_to_reg (XEXP (operands[1], 0)));
9709 }
9710
9711 /* Generate a vector of constants to permute MODE for a little-endian
9712 storage operation by swapping the two halves of a vector. */
9713 static rtvec
9714 rs6000_const_vec (machine_mode mode)
9715 {
9716 int i, subparts;
9717 rtvec v;
9718
9719 switch (mode)
9720 {
9721 case E_V1TImode:
9722 subparts = 1;
9723 break;
9724 case E_V2DFmode:
9725 case E_V2DImode:
9726 subparts = 2;
9727 break;
9728 case E_V4SFmode:
9729 case E_V4SImode:
9730 subparts = 4;
9731 break;
9732 case E_V8HImode:
9733 subparts = 8;
9734 break;
9735 case E_V16QImode:
9736 subparts = 16;
9737 break;
9738 default:
9739 gcc_unreachable ();
9740 }
9741
9742 v = rtvec_alloc (subparts);
9743
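  /* E.g. for V4SImode this builds the selector { 2, 3, 0, 1 }: each
     element moves to the opposite doubleword half.  */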
9744 for (i = 0; i < subparts / 2; ++i)
9745 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9746 for (i = subparts / 2; i < subparts; ++i)
9747 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9748
9749 return v;
9750 }
9751
9752 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9753 store operation. */
9754 void
9755 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9756 {
9757 /* Scalar permutations are easier to express in integer modes than in
9758 floating-point modes, so cast them here. We use V1TImode instead
9759 of TImode to ensure that the values don't go through GPRs. */
9760 if (FLOAT128_VECTOR_P (mode))
9761 {
9762 dest = gen_lowpart (V1TImode, dest);
9763 source = gen_lowpart (V1TImode, source);
9764 mode = V1TImode;
9765 }
9766
9767 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9768 scalar. */
9769 if (mode == TImode || mode == V1TImode)
9770 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9771 GEN_INT (64))));
9772 else
9773 {
9774 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9775 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9776 }
9777 }
9778
9779 /* Emit a little-endian load from vector memory location SOURCE to VSX
9780 register DEST in mode MODE. The load is done with two permuting
9781 insns that represent an lxvd2x and an xxpermdi. */
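/* On little-endian, lxvd2x loads the two doublewords swapped; the
   following xxpermdi (an xxswapd) swaps them back, so DEST ends up
   with its elements in array order.  */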
9782 void
9783 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9784 {
9785 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9786 V1TImode). */
9787 if (mode == TImode || mode == V1TImode)
9788 {
9789 mode = V2DImode;
9790 dest = gen_lowpart (V2DImode, dest);
9791 source = adjust_address (source, V2DImode, 0);
9792 }
9793
9794 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9795 rs6000_emit_le_vsx_permute (tmp, source, mode);
9796 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9797 }
9798
9799 /* Emit a little-endian store to vector memory location DEST from VSX
9800 register SOURCE in mode MODE. The store is done with two permuting
9801 insns that represent an xxpermdi and an stxvd2x. */
9802 void
9803 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9804 {
9805 /* This should never be called during or after LRA, because it does
9806 not re-permute the source register. It is intended only for use
9807 during expand. */
9808 gcc_assert (!lra_in_progress && !reload_completed);
9809
9810 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9811 V1TImode). */
9812 if (mode == TImode || mode == V1TImode)
9813 {
9814 mode = V2DImode;
9815 dest = adjust_address (dest, V2DImode, 0);
9816 source = gen_lowpart (V2DImode, source);
9817 }
9818
9819 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9820 rs6000_emit_le_vsx_permute (tmp, source, mode);
9821 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9822 }
9823
9824 /* Emit a sequence representing a little-endian VSX load or store,
9825 moving data from SOURCE to DEST in mode MODE. This is done
9826 separately from rs6000_emit_move to ensure it is called only
9827 during expand. LE VSX loads and stores introduced later are
9828 handled with a split. The expand-time RTL generation allows
9829 us to optimize away redundant pairs of register-permutes. */
9830 void
9831 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9832 {
9833 gcc_assert (!BYTES_BIG_ENDIAN
9834 && VECTOR_MEM_VSX_P (mode)
9835 && !TARGET_P9_VECTOR
9836 && !gpr_or_gpr_p (dest, source)
9837 && (MEM_P (source) ^ MEM_P (dest)));
9838
9839 if (MEM_P (source))
9840 {
9841 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
9842 rs6000_emit_le_vsx_load (dest, source, mode);
9843 }
9844 else
9845 {
9846 if (!REG_P (source))
9847 source = force_reg (mode, source);
9848 rs6000_emit_le_vsx_store (dest, source, mode);
9849 }
9850 }
9851
9852 /* Return whether an SFmode or SImode move can be done without converting one
9853 mode to another. This arises when we have:
9854
9855 (SUBREG:SF (REG:SI ...))
9856 (SUBREG:SI (REG:SF ...))
9857
9858 and one of the values is in a floating point/vector register, where SFmode
9859 scalars are stored in DFmode format. */
9860
9861 bool
9862 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9863 {
9864 if (TARGET_ALLOW_SF_SUBREG)
9865 return true;
9866
9867 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9868 return true;
9869
9870 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9871 return true;
9872
9873 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9874 if (SUBREG_P (dest))
9875 {
9876 rtx dest_subreg = SUBREG_REG (dest);
9877 rtx src_subreg = SUBREG_REG (src);
9878 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9879 }
9880
9881 return false;
9882 }
9883
9884
9885 /* Helper function to change moves with:
9886
9887 (SUBREG:SF (REG:SI)) and
9888 (SUBREG:SI (REG:SF))
9889
9890 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9891 values are stored as DFmode values in the VSX registers. We need to convert
9892 the bits before we can use a direct move or operate on the bits in the
9893 vector register as an integer type.
9894
9895 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
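
/* E.g. a sketch of the intended effect (register numbers arbitrary):
   (set (reg:SI 3) (subreg:SI (reg:SF 33) 0)) is emitted as a
   movsi_from_sf insn, which converts the DFmode bits held in the
   vector register back to SFmode layout before moving them.  */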
9896
9897 static bool
9898 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9899 {
9900 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
9901 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9902 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9903 {
9904 rtx inner_source = SUBREG_REG (source);
9905 machine_mode inner_mode = GET_MODE (inner_source);
9906
9907 if (mode == SImode && inner_mode == SFmode)
9908 {
9909 emit_insn (gen_movsi_from_sf (dest, inner_source));
9910 return true;
9911 }
9912
9913 if (mode == SFmode && inner_mode == SImode)
9914 {
9915 emit_insn (gen_movsf_from_si (dest, inner_source));
9916 return true;
9917 }
9918 }
9919
9920 return false;
9921 }
9922
9923 /* Emit a move from SOURCE to DEST in mode MODE. */
9924 void
9925 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9926 {
9927 rtx operands[2];
9928 operands[0] = dest;
9929 operands[1] = source;
9930
9931 if (TARGET_DEBUG_ADDR)
9932 {
9933 fprintf (stderr,
9934 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9935 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9936 GET_MODE_NAME (mode),
9937 lra_in_progress,
9938 reload_completed,
9939 can_create_pseudo_p ());
9940 debug_rtx (dest);
9941 fprintf (stderr, "source:\n");
9942 debug_rtx (source);
9943 }
9944
9945 /* Check that we get CONST_WIDE_INT only when we should. */
9946 if (CONST_WIDE_INT_P (operands[1])
9947 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9948 gcc_unreachable ();
9949
9950 #ifdef HAVE_AS_GNU_ATTRIBUTE
9951 /* If we use a long double type, set the flags in .gnu_attribute that say
9952 what the long double type is. This is to allow the linker's warning
9953 message for the wrong long double to be useful, even if the function does
9954 not do a call (for example, doing a 128-bit add on power9 if the long
9955 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
9956 are used when they aren't the default long double type. */
9957 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9958 {
9959 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9960 rs6000_passes_float = rs6000_passes_long_double = true;
9961
9962 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9963 rs6000_passes_float = rs6000_passes_long_double = true;
9964 }
9965 #endif
9966
9967 /* See if we need to special case SImode/SFmode SUBREG moves. */
9968 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9969 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9970 return;
9971
9972 /* Check if GCC is setting up a block move that will end up using FP
9973 registers as temporaries. We must make sure this is acceptable. */
9974 if (GET_CODE (operands[0]) == MEM
9975 && GET_CODE (operands[1]) == MEM
9976 && mode == DImode
9977 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9978 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9979 && ! (rs6000_slow_unaligned_access (SImode,
9980 (MEM_ALIGN (operands[0]) > 32
9981 ? 32 : MEM_ALIGN (operands[0])))
9982 || rs6000_slow_unaligned_access (SImode,
9983 (MEM_ALIGN (operands[1]) > 32
9984 ? 32 : MEM_ALIGN (operands[1]))))
9985 && ! MEM_VOLATILE_P (operands [0])
9986 && ! MEM_VOLATILE_P (operands [1]))
9987 {
9988 emit_move_insn (adjust_address (operands[0], SImode, 0),
9989 adjust_address (operands[1], SImode, 0));
9990 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9991 adjust_address (copy_rtx (operands[1]), SImode, 4));
9992 return;
9993 }
9994
9995 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
9996 && !gpc_reg_operand (operands[1], mode))
9997 operands[1] = force_reg (mode, operands[1]);
9998
9999 /* Recognize the case where operands[1] is a reference to thread-local
10000 data and load its address to a register. */
10001 if (tls_referenced_p (operands[1]))
10002 {
10003 enum tls_model model;
10004 rtx tmp = operands[1];
10005 rtx addend = NULL;
10006
10007 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10008 {
10009 addend = XEXP (XEXP (tmp, 0), 1);
10010 tmp = XEXP (XEXP (tmp, 0), 0);
10011 }
10012
10013 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10014 model = SYMBOL_REF_TLS_MODEL (tmp);
10015 gcc_assert (model != 0);
10016
10017 tmp = rs6000_legitimize_tls_address (tmp, model);
10018 if (addend)
10019 {
10020 tmp = gen_rtx_PLUS (mode, tmp, addend);
10021 tmp = force_operand (tmp, operands[0]);
10022 }
10023 operands[1] = tmp;
10024 }
10025
10026 /* 128-bit constant floating-point values on Darwin should really be loaded
10027 as two parts. However, this premature splitting is a problem when DFmode
10028 values can go into Altivec registers. */
10029 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
10030 && !reg_addr[DFmode].scalar_in_vmx_p)
10031 {
10032 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10033 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10034 DFmode);
10035 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10036 GET_MODE_SIZE (DFmode)),
10037 simplify_gen_subreg (DFmode, operands[1], mode,
10038 GET_MODE_SIZE (DFmode)),
10039 DFmode);
10040 return;
10041 }
10042
10043 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10044 p1:SD) if p1 is not of floating point class and p0 is spilled as
10045 we can have no analogous movsd_store for this. */
10046 if (lra_in_progress && mode == DDmode
10047 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10048 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10049 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10050 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10051 {
10052 enum reg_class cl;
10053 int regno = REGNO (SUBREG_REG (operands[1]));
10054
10055 if (regno >= FIRST_PSEUDO_REGISTER)
10056 {
10057 cl = reg_preferred_class (regno);
10058 regno = reg_renumber[regno];
10059 if (regno < 0)
10060 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10061 }
10062 if (regno >= 0 && ! FP_REGNO_P (regno))
10063 {
10064 mode = SDmode;
10065 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10066 operands[1] = SUBREG_REG (operands[1]);
10067 }
10068 }
10069 if (lra_in_progress
10070 && mode == SDmode
10071 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10072 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10073 && (REG_P (operands[1])
10074 || (GET_CODE (operands[1]) == SUBREG
10075 && REG_P (SUBREG_REG (operands[1])))))
10076 {
10077 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10078 ? SUBREG_REG (operands[1]) : operands[1]);
10079 enum reg_class cl;
10080
10081 if (regno >= FIRST_PSEUDO_REGISTER)
10082 {
10083 cl = reg_preferred_class (regno);
10084 gcc_assert (cl != NO_REGS);
10085 regno = reg_renumber[regno];
10086 if (regno < 0)
10087 regno = ira_class_hard_regs[cl][0];
10088 }
10089 if (FP_REGNO_P (regno))
10090 {
10091 if (GET_MODE (operands[0]) != DDmode)
10092 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10093 emit_insn (gen_movsd_store (operands[0], operands[1]));
10094 }
10095 else if (INT_REGNO_P (regno))
10096 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10097 else
10098 gcc_unreachable ();
10099 return;
10100 }
10101 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10102 p1:DD)) if p0 is not of floating point class and p1 is spilled as
10103 we can have no analogous movsd_load for this. */
10104 if (lra_in_progress && mode == DDmode
10105 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10106 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10107 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10108 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10109 {
10110 enum reg_class cl;
10111 int regno = REGNO (SUBREG_REG (operands[0]));
10112
10113 if (regno >= FIRST_PSEUDO_REGISTER)
10114 {
10115 cl = reg_preferred_class (regno);
10116 regno = reg_renumber[regno];
10117 if (regno < 0)
10118 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10119 }
10120 if (regno >= 0 && ! FP_REGNO_P (regno))
10121 {
10122 mode = SDmode;
10123 operands[0] = SUBREG_REG (operands[0]);
10124 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10125 }
10126 }
10127 if (lra_in_progress
10128 && mode == SDmode
10129 && (REG_P (operands[0])
10130 || (GET_CODE (operands[0]) == SUBREG
10131 && REG_P (SUBREG_REG (operands[0]))))
10132 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10133 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10134 {
10135 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10136 ? SUBREG_REG (operands[0]) : operands[0]);
10137 enum reg_class cl;
10138
10139 if (regno >= FIRST_PSEUDO_REGISTER)
10140 {
10141 cl = reg_preferred_class (regno);
10142 gcc_assert (cl != NO_REGS);
10143 regno = reg_renumber[regno];
10144 if (regno < 0)
10145 regno = ira_class_hard_regs[cl][0];
10146 }
10147 if (FP_REGNO_P (regno))
10148 {
10149 if (GET_MODE (operands[1]) != DDmode)
10150 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10151 emit_insn (gen_movsd_load (operands[0], operands[1]));
10152 }
10153 else if (INT_REGNO_P (regno))
10154 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10155 else
10156 gcc_unreachable ();
10157 return;
10158 }
10159
10160 /* FIXME: In the long term, this switch statement should go away
10161 and be replaced by a sequence of tests based on things like
10162 mode == Pmode. */
10163 switch (mode)
10164 {
10165 case E_HImode:
10166 case E_QImode:
10167 if (CONSTANT_P (operands[1])
10168 && GET_CODE (operands[1]) != CONST_INT)
10169 operands[1] = force_const_mem (mode, operands[1]);
10170 break;
10171
10172 case E_TFmode:
10173 case E_TDmode:
10174 case E_IFmode:
10175 case E_KFmode:
10176 if (FLOAT128_2REG_P (mode))
10177 rs6000_eliminate_indexed_memrefs (operands);
10178 /* fall through */
10179
10180 case E_DFmode:
10181 case E_DDmode:
10182 case E_SFmode:
10183 case E_SDmode:
10184 if (CONSTANT_P (operands[1])
10185 && ! easy_fp_constant (operands[1], mode))
10186 operands[1] = force_const_mem (mode, operands[1]);
10187 break;
10188
10189 case E_V16QImode:
10190 case E_V8HImode:
10191 case E_V4SFmode:
10192 case E_V4SImode:
10193 case E_V2DFmode:
10194 case E_V2DImode:
10195 case E_V1TImode:
10196 if (CONSTANT_P (operands[1])
10197 && !easy_vector_constant (operands[1], mode))
10198 operands[1] = force_const_mem (mode, operands[1]);
10199 break;
10200
10201 case E_SImode:
10202 case E_DImode:
10203 /* Use the default pattern for the address of ELF small data. */
10204 if (TARGET_ELF
10205 && mode == Pmode
10206 && DEFAULT_ABI == ABI_V4
10207 && (GET_CODE (operands[1]) == SYMBOL_REF
10208 || GET_CODE (operands[1]) == CONST)
10209 && small_data_operand (operands[1], mode))
10210 {
10211 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10212 return;
10213 }
10214
10215 if (DEFAULT_ABI == ABI_V4
10216 && mode == Pmode && mode == SImode
10217 && flag_pic == 1 && got_operand (operands[1], mode))
10218 {
10219 emit_insn (gen_movsi_got (operands[0], operands[1]));
10220 return;
10221 }
10222
10223 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10224 && TARGET_NO_TOC
10225 && ! flag_pic
10226 && mode == Pmode
10227 && CONSTANT_P (operands[1])
10228 && GET_CODE (operands[1]) != HIGH
10229 && GET_CODE (operands[1]) != CONST_INT)
10230 {
10231 rtx target = (!can_create_pseudo_p ()
10232 ? operands[0]
10233 : gen_reg_rtx (mode));
10234
10235 /* If this is a function address on -mcall-aixdesc,
10236 convert it to the address of the descriptor. */
10237 if (DEFAULT_ABI == ABI_AIX
10238 && GET_CODE (operands[1]) == SYMBOL_REF
10239 && XSTR (operands[1], 0)[0] == '.')
10240 {
10241 const char *name = XSTR (operands[1], 0);
10242 rtx new_ref;
10243 while (*name == '.')
10244 name++;
10245 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10246 CONSTANT_POOL_ADDRESS_P (new_ref)
10247 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10248 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10249 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10250 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10251 operands[1] = new_ref;
10252 }
10253
10254 if (DEFAULT_ABI == ABI_DARWIN)
10255 {
10256 #if TARGET_MACHO
10257 if (MACHO_DYNAMIC_NO_PIC_P)
10258 {
10259 /* Take care of any required data indirection. */
10260 operands[1] = rs6000_machopic_legitimize_pic_address (
10261 operands[1], mode, operands[0]);
10262 if (operands[0] != operands[1])
10263 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10264 return;
10265 }
10266 #endif
10267 emit_insn (gen_macho_high (target, operands[1]));
10268 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10269 return;
10270 }
10271
10272 emit_insn (gen_elf_high (target, operands[1]));
10273 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10274 return;
10275 }
10276
10277 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10278 and we have put it in the TOC, we just need to make a TOC-relative
10279 reference to it. */
10280 if (TARGET_TOC
10281 && GET_CODE (operands[1]) == SYMBOL_REF
10282 && use_toc_relative_ref (operands[1], mode))
10283 operands[1] = create_TOC_reference (operands[1], operands[0]);
10284 else if (mode == Pmode
10285 && CONSTANT_P (operands[1])
10286 && GET_CODE (operands[1]) != HIGH
10287 && ((REG_P (operands[0])
10288 && FP_REGNO_P (REGNO (operands[0])))
10289 || !CONST_INT_P (operands[1])
10290 || (num_insns_constant (operands[1], mode)
10291 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10292 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10293 && (TARGET_CMODEL == CMODEL_SMALL
10294 || can_create_pseudo_p ()
10295 || (REG_P (operands[0])
10296 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10297 {
10298
10299 #if TARGET_MACHO
10300 /* Darwin uses a special PIC legitimizer. */
10301 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10302 {
10303 operands[1] =
10304 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10305 operands[0]);
10306 if (operands[0] != operands[1])
10307 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10308 return;
10309 }
10310 #endif
10311
10312 /* If we are to limit the number of things we put in the TOC and
10313 this is a symbol plus a constant we can add in one insn,
10314 just put the symbol in the TOC and add the constant. */
10315 if (GET_CODE (operands[1]) == CONST
10316 && TARGET_NO_SUM_IN_TOC
10317 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10318 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10319 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10320 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10321 && ! side_effects_p (operands[0]))
10322 {
10323 rtx sym =
10324 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10325 rtx other = XEXP (XEXP (operands[1], 0), 1);
10326
10327 sym = force_reg (mode, sym);
10328 emit_insn (gen_add3_insn (operands[0], sym, other));
10329 return;
10330 }
10331
10332 operands[1] = force_const_mem (mode, operands[1]);
10333
10334 if (TARGET_TOC
10335 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10336 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10337 {
10338 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10339 operands[0]);
10340 operands[1] = gen_const_mem (mode, tocref);
10341 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10342 }
10343 }
10344 break;
10345
10346 case E_TImode:
10347 if (!VECTOR_MEM_VSX_P (TImode))
10348 rs6000_eliminate_indexed_memrefs (operands);
10349 break;
10350
10351 case E_PTImode:
10352 rs6000_eliminate_indexed_memrefs (operands);
10353 break;
10354
10355 default:
10356 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10357 }
10358
10359 /* Above, we may have called force_const_mem which may have returned
10360 an invalid address. If we can, fix this up; otherwise, reload will
10361 have to deal with it. */
10362 if (GET_CODE (operands[1]) == MEM)
10363 operands[1] = validize_mem (operands[1]);
10364
10365 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10366 }
10367 \f
10368 /* Nonzero if we can use a floating-point register to pass this arg. */
10369 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10370 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10371 && (CUM)->fregno <= FP_ARG_MAX_REG \
10372 && TARGET_HARD_FLOAT)
10373
10374 /* Nonzero if we can use an AltiVec register to pass this arg. */
10375 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10376 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10377 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10378 && TARGET_ALTIVEC_ABI \
10379 && (NAMED))
10380
10381 /* Walk down the type tree of TYPE counting consecutive base elements.
10382 If *MODEP is VOIDmode, then set it to the first valid floating point
10383 or vector type. If a non-floating point or vector type is found, or
10384 if a floating point or vector type that doesn't match a non-VOIDmode
10385 *MODEP is found, then return -1, otherwise return the count in the
10386 sub-tree. */
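
/* For example, starting from *MODEP == VOIDmode:
     struct { double x, y; _Complex double z; }  -> 4  (DFmode elements)
     double [3] (e.g. as a field)                -> 3  (DFmode elements)
     struct { double x; float y; }               -> -1 (mode mismatch)
   Illustrative cases only, derived from the rules above.  */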
10387
10388 static int
10389 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10390 {
10391 machine_mode mode;
10392 HOST_WIDE_INT size;
10393
10394 switch (TREE_CODE (type))
10395 {
10396 case REAL_TYPE:
10397 mode = TYPE_MODE (type);
10398 if (!SCALAR_FLOAT_MODE_P (mode))
10399 return -1;
10400
10401 if (*modep == VOIDmode)
10402 *modep = mode;
10403
10404 if (*modep == mode)
10405 return 1;
10406
10407 break;
10408
10409 case COMPLEX_TYPE:
10410 mode = TYPE_MODE (TREE_TYPE (type));
10411 if (!SCALAR_FLOAT_MODE_P (mode))
10412 return -1;
10413
10414 if (*modep == VOIDmode)
10415 *modep = mode;
10416
10417 if (*modep == mode)
10418 return 2;
10419
10420 break;
10421
10422 case VECTOR_TYPE:
10423 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10424 return -1;
10425
10426 /* Use V4SImode as representative of all 128-bit vector types. */
10427 size = int_size_in_bytes (type);
10428 switch (size)
10429 {
10430 case 16:
10431 mode = V4SImode;
10432 break;
10433 default:
10434 return -1;
10435 }
10436
10437 if (*modep == VOIDmode)
10438 *modep = mode;
10439
10440 /* Vector modes are considered to be opaque: two vectors are
10441 equivalent for the purposes of being homogeneous aggregates
10442 if they are the same size. */
10443 if (*modep == mode)
10444 return 1;
10445
10446 break;
10447
10448 case ARRAY_TYPE:
10449 {
10450 int count;
10451 tree index = TYPE_DOMAIN (type);
10452
10453 /* Can't handle incomplete types or sizes that are not
10454 fixed. */
10455 if (!COMPLETE_TYPE_P (type)
10456 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10457 return -1;
10458
10459 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10460 if (count == -1
10461 || !index
10462 || !TYPE_MAX_VALUE (index)
10463 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10464 || !TYPE_MIN_VALUE (index)
10465 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10466 || count < 0)
10467 return -1;
10468
10469 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10470 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10471
10472 /* There must be no padding. */
10473 if (wi::to_wide (TYPE_SIZE (type))
10474 != count * GET_MODE_BITSIZE (*modep))
10475 return -1;
10476
10477 return count;
10478 }
10479
10480 case RECORD_TYPE:
10481 {
10482 int count = 0;
10483 int sub_count;
10484 tree field;
10485
10486 /* Can't handle incomplete types or sizes that are not
10487 fixed. */
10488 if (!COMPLETE_TYPE_P (type)
10489 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10490 return -1;
10491
10492 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10493 {
10494 if (TREE_CODE (field) != FIELD_DECL)
10495 continue;
10496
10497 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10498 if (sub_count < 0)
10499 return -1;
10500 count += sub_count;
10501 }
10502
10503 /* There must be no padding. */
10504 if (wi::to_wide (TYPE_SIZE (type))
10505 != count * GET_MODE_BITSIZE (*modep))
10506 return -1;
10507
10508 return count;
10509 }
10510
10511 case UNION_TYPE:
10512 case QUAL_UNION_TYPE:
10513 {
10514 /* These aren't very interesting except in a degenerate case. */
10515 int count = 0;
10516 int sub_count;
10517 tree field;
10518
10519 /* Can't handle incomplete types or sizes that are not
10520 fixed. */
10521 if (!COMPLETE_TYPE_P (type)
10522 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10523 return -1;
10524
10525 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10526 {
10527 if (TREE_CODE (field) != FIELD_DECL)
10528 continue;
10529
10530 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10531 if (sub_count < 0)
10532 return -1;
10533 count = count > sub_count ? count : sub_count;
10534 }
10535
10536 /* There must be no padding. */
10537 if (wi::to_wide (TYPE_SIZE (type))
10538 != count * GET_MODE_BITSIZE (*modep))
10539 return -1;
10540
10541 return count;
10542 }
10543
10544 default:
10545 break;
10546 }
10547
10548 return -1;
10549 }
10550
10551 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10552 float or vector aggregate that shall be passed in FP/vector registers
10553 according to the ELFv2 ABI, return the homogeneous element mode in
10554 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10555
10556 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
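
/* A minimal sketch of a qualifying type under ELFv2 (hard float assumed):

     struct hfa { double a, b, c, d; };

   gives *ELT_MODE = DFmode and *N_ELTS = 4, so the value can be passed in
   four FPRs.  An aggregate whose elements would need more than
   AGGR_ARG_NUM_REG registers fails the size check below and is treated as
   an ordinary aggregate.  */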
10557
10558 static bool
10559 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10560 machine_mode *elt_mode,
10561 int *n_elts)
10562 {
10563 /* Note that we do not accept complex types at the top level as
10564 homogeneous aggregates; these types are handled via the
10565 targetm.calls.split_complex_arg mechanism. Complex types
10566 can be elements of homogeneous aggregates, however. */
10567 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10568 && AGGREGATE_TYPE_P (type))
10569 {
10570 machine_mode field_mode = VOIDmode;
10571 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10572
10573 if (field_count > 0)
10574 {
10575 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10576 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10577
10578 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10579 up to AGGR_ARG_NUM_REG registers. */
10580 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10581 {
10582 if (elt_mode)
10583 *elt_mode = field_mode;
10584 if (n_elts)
10585 *n_elts = field_count;
10586 return true;
10587 }
10588 }
10589 }
10590
10591 if (elt_mode)
10592 *elt_mode = mode;
10593 if (n_elts)
10594 *n_elts = 1;
10595 return false;
10596 }
10597
10598 /* Return a nonzero value to say to return the function value in
10599 memory, just as large structures are always returned. TYPE will be
10600 the data type of the value, and FNTYPE will be the type of the
10601 function doing the returning, or NULL for libcalls.
10602
10603 The AIX ABI for the RS/6000 specifies that all structures are
10604 returned in memory. The Darwin ABI does the same.
10605
10606 For the Darwin 64 Bit ABI, a function result can be returned in
10607 registers or in memory, depending on the size of the return data
10608 type. If it is returned in registers, the value occupies the same
10609 registers as it would if it were the first and only function
10610 argument. Otherwise, the function places its result in memory at
10611 the location pointed to by GPR3.
10612
10613 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10614 but a draft put them in memory, and GCC used to implement the draft
10615 instead of the final standard. Therefore, aix_struct_return
10616 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10617 compatibility can change DRAFT_V4_STRUCT_RET to override the
10618 default, and -m switches get the final word. See
10619 rs6000_option_override_internal for more details.
10620
10621 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10622 long double support is enabled. These values are returned in memory.
10623
10624 int_size_in_bytes returns -1 for variable size objects, which go in
10625 memory always. The cast to unsigned makes -1 > 8. */
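
/* For instance, under ELFv2 a 16-byte struct of two longs comes back in
   GPRs (result false), while a 24-byte struct fails the 16-byte check and
   is returned in memory (result true).  Illustrative outcomes of the
   tests below.  */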
10626
10627 static bool
10628 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10629 {
10630 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10631 if (TARGET_MACHO
10632 && rs6000_darwin64_abi
10633 && TREE_CODE (type) == RECORD_TYPE
10634 && int_size_in_bytes (type) > 0)
10635 {
10636 CUMULATIVE_ARGS valcum;
10637 rtx valret;
10638
10639 valcum.words = 0;
10640 valcum.fregno = FP_ARG_MIN_REG;
10641 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10642 /* Do a trial code generation as if this were going to be passed
10643 as an argument; if any part goes in memory, we return NULL. */
10644 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10645 if (valret)
10646 return false;
10647 /* Otherwise fall through to more conventional ABI rules. */
10648 }
10649
10650 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
10651 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10652 NULL, NULL))
10653 return false;
10654
10655 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
10656 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10657 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10658 return false;
10659
10660 if (AGGREGATE_TYPE_P (type)
10661 && (aix_struct_return
10662 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10663 return true;
10664
10665 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10666 modes only exist for GCC vector types if -maltivec. */
10667 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10668 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10669 return false;
10670
10671 /* Return synthetic vectors in memory. */
10672 if (TREE_CODE (type) == VECTOR_TYPE
10673 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10674 {
10675 static bool warned_for_return_big_vectors = false;
10676 if (!warned_for_return_big_vectors)
10677 {
10678 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10679 "non-standard ABI extension with no compatibility "
10680 "guarantee");
10681 warned_for_return_big_vectors = true;
10682 }
10683 return true;
10684 }
10685
10686 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10687 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10688 return true;
10689
10690 return false;
10691 }
10692
10693 /* Specify whether values returned in registers should be at the most
10694 significant end of a register. We want aggregates returned by
10695 value to match the way aggregates are passed to functions. */
10696
10697 static bool
10698 rs6000_return_in_msb (const_tree valtype)
10699 {
10700 return (DEFAULT_ABI == ABI_ELFv2
10701 && BYTES_BIG_ENDIAN
10702 && AGGREGATE_TYPE_P (valtype)
10703 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10704 == PAD_UPWARD));
10705 }
10706
10707 #ifdef HAVE_AS_GNU_ATTRIBUTE
10708 /* Return TRUE if a call to function FNDECL may be one that
10709 potentially affects the function calling ABI of the object file. */
10710
10711 static bool
10712 call_ABI_of_interest (tree fndecl)
10713 {
10714 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10715 {
10716 struct cgraph_node *c_node;
10717
10718 /* Libcalls are always interesting. */
10719 if (fndecl == NULL_TREE)
10720 return true;
10721
10722 /* Any call to an external function is interesting. */
10723 if (DECL_EXTERNAL (fndecl))
10724 return true;
10725
10726 /* Interesting functions that we are emitting in this object file. */
10727 c_node = cgraph_node::get (fndecl);
10728 c_node = c_node->ultimate_alias_target ();
10729 return !c_node->only_called_directly_p ();
10730 }
10731 return false;
10732 }
10733 #endif
10734
10735 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10736 for a call to a function whose data type is FNTYPE.
10737 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
10738
10739 For incoming args we set the number of arguments in the prototype large
10740 so we never return a PARALLEL. */
10741
10742 void
10743 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10744 rtx libname ATTRIBUTE_UNUSED, int incoming,
10745 int libcall, int n_named_args,
10746 tree fndecl,
10747 machine_mode return_mode ATTRIBUTE_UNUSED)
10748 {
10749 static CUMULATIVE_ARGS zero_cumulative;
10750
10751 *cum = zero_cumulative;
10752 cum->words = 0;
10753 cum->fregno = FP_ARG_MIN_REG;
10754 cum->vregno = ALTIVEC_ARG_MIN_REG;
10755 cum->prototype = (fntype && prototype_p (fntype));
10756 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10757 ? CALL_LIBCALL : CALL_NORMAL);
10758 cum->sysv_gregno = GP_ARG_MIN_REG;
10759 cum->stdarg = stdarg_p (fntype);
10760 cum->libcall = libcall;
10761
10762 cum->nargs_prototype = 0;
10763 if (incoming || cum->prototype)
10764 cum->nargs_prototype = n_named_args;
10765
10766 /* Check for a longcall attribute. */
10767 if ((!fntype && rs6000_default_long_calls)
10768 || (fntype
10769 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10770 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10771 cum->call_cookie |= CALL_LONG;
10772 else if (DEFAULT_ABI != ABI_DARWIN)
10773 {
10774 bool is_local = (fndecl
10775 && !DECL_EXTERNAL (fndecl)
10776 && !DECL_WEAK (fndecl)
10777 && (*targetm.binds_local_p) (fndecl));
10778 if (is_local)
10779 ;
10780 else if (flag_plt)
10781 {
10782 if (fntype
10783 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10784 cum->call_cookie |= CALL_LONG;
10785 }
10786 else
10787 {
10788 if (!(fntype
10789 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10790 cum->call_cookie |= CALL_LONG;
10791 }
10792 }
10793
10794 if (TARGET_DEBUG_ARG)
10795 {
10796 fprintf (stderr, "\ninit_cumulative_args:");
10797 if (fntype)
10798 {
10799 tree ret_type = TREE_TYPE (fntype);
10800 fprintf (stderr, " ret code = %s,",
10801 get_tree_code_name (TREE_CODE (ret_type)));
10802 }
10803
10804 if (cum->call_cookie & CALL_LONG)
10805 fprintf (stderr, " longcall,");
10806
10807 fprintf (stderr, " proto = %d, nargs = %d\n",
10808 cum->prototype, cum->nargs_prototype);
10809 }
10810
10811 #ifdef HAVE_AS_GNU_ATTRIBUTE
10812 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10813 {
10814 cum->escapes = call_ABI_of_interest (fndecl);
10815 if (cum->escapes)
10816 {
10817 tree return_type;
10818
10819 if (fntype)
10820 {
10821 return_type = TREE_TYPE (fntype);
10822 return_mode = TYPE_MODE (return_type);
10823 }
10824 else
10825 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10826
10827 if (return_type != NULL)
10828 {
10829 if (TREE_CODE (return_type) == RECORD_TYPE
10830 && TYPE_TRANSPARENT_AGGR (return_type))
10831 {
10832 return_type = TREE_TYPE (first_field (return_type));
10833 return_mode = TYPE_MODE (return_type);
10834 }
10835 if (AGGREGATE_TYPE_P (return_type)
10836 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10837 <= 8))
10838 rs6000_returns_struct = true;
10839 }
10840 if (SCALAR_FLOAT_MODE_P (return_mode))
10841 {
10842 rs6000_passes_float = true;
10843 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10844 && (FLOAT128_IBM_P (return_mode)
10845 || FLOAT128_IEEE_P (return_mode)
10846 || (return_type != NULL
10847 && (TYPE_MAIN_VARIANT (return_type)
10848 == long_double_type_node))))
10849 rs6000_passes_long_double = true;
10850
10851 /* Note if we pass or return an IEEE 128-bit type. We changed
10852 the mangling for these types, and we may need to make an alias
10853 with the old mangling. */
10854 if (FLOAT128_IEEE_P (return_mode))
10855 rs6000_passes_ieee128 = true;
10856 }
10857 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10858 rs6000_passes_vector = true;
10859 }
10860 }
10861 #endif
10862
10863 if (fntype
10864 && !TARGET_ALTIVEC
10865 && TARGET_ALTIVEC_ABI
10866 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10867 {
10868 error ("cannot return value in vector register because"
10869 " altivec instructions are disabled, use %qs"
10870 " to enable them", "-maltivec");
10871 }
10872 }
10873 \f
10874 /* The mode the ABI uses for a word. This is not the same as word_mode
10875 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10876
10877 static scalar_int_mode
10878 rs6000_abi_word_mode (void)
10879 {
10880 return TARGET_32BIT ? SImode : DImode;
10881 }
10882
10883 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10884 static char *
10885 rs6000_offload_options (void)
10886 {
10887 if (TARGET_64BIT)
10888 return xstrdup ("-foffload-abi=lp64");
10889 else
10890 return xstrdup ("-foffload-abi=ilp32");
10891 }
10892
10893 /* On rs6000, function arguments are promoted, as are function return
10894 values. */
10895
10896 static machine_mode
10897 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10898 machine_mode mode,
10899 int *punsignedp ATTRIBUTE_UNUSED,
10900 const_tree, int)
10901 {
10902 PROMOTE_MODE (mode, *punsignedp, type);
10903
10904 return mode;
10905 }
10906
10907 /* Return true if TYPE must be passed on the stack and not in registers. */
10908
10909 static bool
10910 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10911 {
10912 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10913 return must_pass_in_stack_var_size (mode, type);
10914 else
10915 return must_pass_in_stack_var_size_or_pad (mode, type);
10916 }
10917
10918 static inline bool
10919 is_complex_IBM_long_double (machine_mode mode)
10920 {
10921 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10922 }
10923
10924 /* Whether ABI_V4 passes MODE args to a function in floating point
10925 registers. */
10926
10927 static bool
10928 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10929 {
10930 if (!TARGET_HARD_FLOAT)
10931 return false;
10932 if (mode == DFmode)
10933 return true;
10934 if (mode == SFmode && named)
10935 return true;
10936 /* ABI_V4 passes complex IBM long double in 8 gprs.
10937 Stupid, but we can't change the ABI now. */
10938 if (is_complex_IBM_long_double (mode))
10939 return false;
10940 if (FLOAT128_2REG_P (mode))
10941 return true;
10942 if (DECIMAL_FLOAT_MODE_P (mode))
10943 return true;
10944 return false;
10945 }
10946
10947 /* Implement TARGET_FUNCTION_ARG_PADDING.
10948
10949 For the AIX ABI structs are always stored left shifted in their
10950 argument slot. */
10951
10952 static pad_direction
10953 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10954 {
10955 #ifndef AGGREGATE_PADDING_FIXED
10956 #define AGGREGATE_PADDING_FIXED 0
10957 #endif
10958 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10959 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10960 #endif
10961
10962 if (!AGGREGATE_PADDING_FIXED)
10963 {
10964 /* GCC used to pass structures of the same size as integer types as
10965 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING;
10966 i.e. structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10967 passed padded downward, except that -mstrict-align further
10968 muddied the water in that multi-component structures of 2 and 4
10969 bytes in size were passed padded upward.
10970
10971 The following arranges for best compatibility with previous
10972 versions of gcc, but removes the -mstrict-align dependency. */
10973 if (BYTES_BIG_ENDIAN)
10974 {
10975 HOST_WIDE_INT size = 0;
10976
10977 if (mode == BLKmode)
10978 {
10979 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10980 size = int_size_in_bytes (type);
10981 }
10982 else
10983 size = GET_MODE_SIZE (mode);
10984
10985 if (size == 1 || size == 2 || size == 4)
10986 return PAD_DOWNWARD;
10987 }
10988 return PAD_UPWARD;
10989 }
10990
10991 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10992 {
10993 if (type != 0 && AGGREGATE_TYPE_P (type))
10994 return PAD_UPWARD;
10995 }
10996
10997 /* Fall back to the default. */
10998 return default_function_arg_padding (mode, type);
10999 }
11000
11001 /* If defined, a C expression that gives the alignment boundary, in bits,
11002 of an argument with the specified mode and type. If it is not defined,
11003 PARM_BOUNDARY is used for all arguments.
11004
11005 V.4 wants long longs and doubles to be double word aligned. Just
11006 testing the mode size is a boneheaded way to do this as it means
11007 that other types such as complex int are also double word aligned.
11008 However, we're stuck with this because changing the ABI might break
11009 existing library interfaces.
11010
11011 Quadword align Altivec/VSX vectors.
11012 Quadword align large synthetic vector types. */
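
/* Illustrative outcomes of the rules below, assuming default options:
     8-byte scalars under V.4 (long long, double)  -> 64
     Altivec/VSX vectors and IEEE 128-bit floats   -> 128
     other scalar arguments                        -> PARM_BOUNDARY  */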
11013
11014 static unsigned int
11015 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11016 {
11017 machine_mode elt_mode;
11018 int n_elts;
11019
11020 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11021
11022 if (DEFAULT_ABI == ABI_V4
11023 && (GET_MODE_SIZE (mode) == 8
11024 || (TARGET_HARD_FLOAT
11025 && !is_complex_IBM_long_double (mode)
11026 && FLOAT128_2REG_P (mode))))
11027 return 64;
11028 else if (FLOAT128_VECTOR_P (mode))
11029 return 128;
11030 else if (type && TREE_CODE (type) == VECTOR_TYPE
11031 && int_size_in_bytes (type) >= 8
11032 && int_size_in_bytes (type) < 16)
11033 return 64;
11034 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11035 || (type && TREE_CODE (type) == VECTOR_TYPE
11036 && int_size_in_bytes (type) >= 16))
11037 return 128;
11038
11039 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11040 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11041 -mcompat-align-parm is used. */
11042 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11043 || DEFAULT_ABI == ABI_ELFv2)
11044 && type && TYPE_ALIGN (type) > 64)
11045 {
11046 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11047 or homogeneous float/vector aggregates here. We already handled
11048 vector aggregates above, but still need to check for float here. */
11049 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11050 && !SCALAR_FLOAT_MODE_P (elt_mode));
11051
11052 /* We used to check for BLKmode instead of the above aggregate type
11053 check. Warn when this results in any difference to the ABI. */
11054 if (aggregate_p != (mode == BLKmode))
11055 {
11056 static bool warned;
11057 if (!warned && warn_psabi)
11058 {
11059 warned = true;
11060 inform (input_location,
11061 "the ABI of passing aggregates with %d-byte alignment"
11062 " has changed in GCC 5",
11063 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11064 }
11065 }
11066
11067 if (aggregate_p)
11068 return 128;
11069 }
11070
11071 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11072 implement the "aggregate type" check as a BLKmode check here; this
11073 means certain aggregate types are in fact not aligned. */
11074 if (TARGET_MACHO && rs6000_darwin64_abi
11075 && mode == BLKmode
11076 && type && TYPE_ALIGN (type) > 64)
11077 return 128;
11078
11079 return PARM_BOUNDARY;
11080 }
11081
11082 /* The offset in words to the start of the parameter save area. */
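/* (The counts correspond to the fixed linkage area each ABI reserves
   below the parameter save area: 8 bytes on V.4, 32 bytes on ELFv2 and
   48 bytes on AIX/ELFv1, measured in words of the current width.)  */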
11083
11084 static unsigned int
11085 rs6000_parm_offset (void)
11086 {
11087 return (DEFAULT_ABI == ABI_V4 ? 2
11088 : DEFAULT_ABI == ABI_ELFv2 ? 4
11089 : 6);
11090 }
11091
11092 /* For a function parm of MODE and TYPE, return the starting word in
11093 the parameter area. NWORDS of the parameter area are already used. */
11094
11095 static unsigned int
11096 rs6000_parm_start (machine_mode mode, const_tree type,
11097 unsigned int nwords)
11098 {
11099 unsigned int align;
11100
11101 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11102 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11103 }
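
/* Worked example (ELFv2, 64-bit, so PARM_BOUNDARY is 64): for a
   16-byte-aligned argument ALIGN above is 128 / 64 - 1 = 1, so with
   NWORDS = 3 the result is 3 + (-(4 + 3) & 1) = 4, i.e. the argument
   starts on an even doubleword.  */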
11104
11105 /* Compute the size (in words) of a function argument. */
11106
11107 static unsigned long
11108 rs6000_arg_size (machine_mode mode, const_tree type)
11109 {
11110 unsigned long size;
11111
11112 if (mode != BLKmode)
11113 size = GET_MODE_SIZE (mode);
11114 else
11115 size = int_size_in_bytes (type);
11116
11117 if (TARGET_32BIT)
11118 return (size + 3) >> 2;
11119 else
11120 return (size + 7) >> 3;
11121 }
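
/* E.g. a 10-byte BLKmode argument occupies (10 + 3) >> 2 = 3 words on
   32-bit targets and (10 + 7) >> 3 = 2 doublewords on 64-bit targets.  */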
11122 \f
11123 /* Use this to flush pending int fields. */
11124
11125 static void
11126 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11127 HOST_WIDE_INT bitpos, int final)
11128 {
11129 unsigned int startbit, endbit;
11130 int intregs, intoffset;
11131
11132 /* Handle the situations where a float is taking up the first half
11133 of the GPR, and the other half is empty (typically due to
11134 alignment restrictions). We can detect this by an 8-byte-aligned
11135 int field, or by seeing that this is the final flush for this
11136 argument. Count the word and continue on. */
11137 if (cum->floats_in_gpr == 1
11138 && (cum->intoffset % 64 == 0
11139 || (cum->intoffset == -1 && final)))
11140 {
11141 cum->words++;
11142 cum->floats_in_gpr = 0;
11143 }
11144
11145 if (cum->intoffset == -1)
11146 return;
11147
11148 intoffset = cum->intoffset;
11149 cum->intoffset = -1;
11150 cum->floats_in_gpr = 0;
11151
11152 if (intoffset % BITS_PER_WORD != 0)
11153 {
11154 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11155 if (!int_mode_for_size (bits, 0).exists ())
11156 {
11157 /* We couldn't find an appropriate mode, which happens,
11158 e.g., in packed structs when there are 3 bytes to load.
11159 Move intoffset back to the beginning of the word in this
11160 case. */
11161 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11162 }
11163 }
11164
11165 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11166 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11167 intregs = (endbit - startbit) / BITS_PER_WORD;
11168 cum->words += intregs;
11169 /* cum->words should be unsigned, hence the cast; pad if it lags behind. */
11170 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11171 {
11172 int pad = (endbit/BITS_PER_WORD) - cum->words;
11173 cum->words += pad;
11174 }
11175 }
11176
11177 /* The darwin64 ABI calls for us to recurse down through structs,
11178 looking for elements passed in registers. Unfortunately, we have
11179 to track int register count here also because of misalignments
11180 in powerpc alignment mode. */
11181
11182 static void
11183 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11184 const_tree type,
11185 HOST_WIDE_INT startbitpos)
11186 {
11187 tree f;
11188
11189 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11190 if (TREE_CODE (f) == FIELD_DECL)
11191 {
11192 HOST_WIDE_INT bitpos = startbitpos;
11193 tree ftype = TREE_TYPE (f);
11194 machine_mode mode;
11195 if (ftype == error_mark_node)
11196 continue;
11197 mode = TYPE_MODE (ftype);
11198
11199 if (DECL_SIZE (f) != 0
11200 && tree_fits_uhwi_p (bit_position (f)))
11201 bitpos += int_bit_position (f);
11202
11203 /* ??? FIXME: else assume zero offset. */
11204
11205 if (TREE_CODE (ftype) == RECORD_TYPE)
11206 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11207 else if (USE_FP_FOR_ARG_P (cum, mode))
11208 {
11209 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11210 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11211 cum->fregno += n_fpregs;
11212 /* Single-precision floats present a special problem for
11213 us, because they are smaller than an 8-byte GPR, and so
11214 the structure-packing rules combined with the standard
11215 varargs behavior mean that we want to pack float/float
11216 and float/int combinations into a single register's
11217 space. This is complicated by the arg advance flushing,
11218 which works on arbitrarily large groups of int-type
11219 fields. */
11220 if (mode == SFmode)
11221 {
11222 if (cum->floats_in_gpr == 1)
11223 {
11224 /* Two floats in a word; count the word and reset
11225 the float count. */
11226 cum->words++;
11227 cum->floats_in_gpr = 0;
11228 }
11229 else if (bitpos % 64 == 0)
11230 {
11231 /* A float at the beginning of an 8-byte word;
11232 count it and put off adjusting cum->words until
11233 we see if an arg advance flush is going to do it
11234 for us. */
11235 cum->floats_in_gpr++;
11236 }
11237 else
11238 {
11239 /* The float is at the end of a word, preceded
11240 by integer fields, so the arg advance flush
11241 just above has already set cum->words and
11242 everything is taken care of. */
11243 }
11244 }
11245 else
11246 cum->words += n_fpregs;
11247 }
11248 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11249 {
11250 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11251 cum->vregno++;
11252 cum->words += 2;
11253 }
11254 else if (cum->intoffset == -1)
11255 cum->intoffset = bitpos;
11256 }
11257 }
11258
11259 /* Check for an item that needs to be considered specially under the Darwin
11260 64-bit ABI. These are record types where the mode is BLK or the structure
11261 is 8 bytes in size. */
11262 static int
11263 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11264 {
11265 return rs6000_darwin64_abi
11266 && ((mode == BLKmode
11267 && TREE_CODE (type) == RECORD_TYPE
11268 && int_size_in_bytes (type) > 0)
11269 || (type && TREE_CODE (type) == RECORD_TYPE
11270 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11271 }
11272
11273 /* Update the data in CUM to advance over an argument
11274 of mode MODE and data type TYPE.
11275 (TYPE is null for libcalls where that information may not be available.)
11276
11277 Note that for args passed by reference, function_arg will be called
11278 with MODE and TYPE set to that of the pointer to the arg, not the arg
11279 itself. */
11280
11281 static void
11282 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11283 const_tree type, bool named, int depth)
11284 {
11285 machine_mode elt_mode;
11286 int n_elts;
11287
11288 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11289
11290 /* Only tick off an argument if we're not recursing. */
11291 if (depth == 0)
11292 cum->nargs_prototype--;
11293
11294 #ifdef HAVE_AS_GNU_ATTRIBUTE
11295 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11296 && cum->escapes)
11297 {
11298 if (SCALAR_FLOAT_MODE_P (mode))
11299 {
11300 rs6000_passes_float = true;
11301 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11302 && (FLOAT128_IBM_P (mode)
11303 || FLOAT128_IEEE_P (mode)
11304 || (type != NULL
11305 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11306 rs6000_passes_long_double = true;
11307
11308 /* Note if we pass or return an IEEE 128-bit type. We changed the
11309 mangling for these types, and we may need to make an alias with
11310 the old mangling. */
11311 if (FLOAT128_IEEE_P (mode))
11312 rs6000_passes_ieee128 = true;
11313 }
11314 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11315 rs6000_passes_vector = true;
11316 }
11317 #endif
11318
11319 if (TARGET_ALTIVEC_ABI
11320 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11321 || (type && TREE_CODE (type) == VECTOR_TYPE
11322 && int_size_in_bytes (type) == 16)))
11323 {
11324 bool stack = false;
11325
11326 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11327 {
11328 cum->vregno += n_elts;
11329
11330 if (!TARGET_ALTIVEC)
11331 error ("cannot pass argument in vector register because"
11332 " altivec instructions are disabled, use %qs"
11333 " to enable them", "-maltivec");
11334
11335 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11336 even if it is going to be passed in a vector register.
11337 Darwin does the same for variable-argument functions. */
11338 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11339 && TARGET_64BIT)
11340 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11341 stack = true;
11342 }
11343 else
11344 stack = true;
11345
11346 if (stack)
11347 {
11348 int align;
11349
11350 /* Vector parameters must be 16-byte aligned. In 32-bit
11351 mode this means we need to take into account the offset
11352 to the parameter save area. In 64-bit mode, they just
11353 have to start on an even word, since the parameter save
11354 area is 16-byte aligned. */
11355 if (TARGET_32BIT)
11356 align = -(rs6000_parm_offset () + cum->words) & 3;
11357 else
11358 align = cum->words & 1;
11359 cum->words += align + rs6000_arg_size (mode, type);
11360
11361 if (TARGET_DEBUG_ARG)
11362 {
11363 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11364 cum->words, align);
11365 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11366 cum->nargs_prototype, cum->prototype,
11367 GET_MODE_NAME (mode));
11368 }
11369 }
11370 }
11371 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11372 {
11373 int size = int_size_in_bytes (type);
11374 /* Variable sized types have size == -1 and are
11375 treated as if consisting entirely of ints.
11376 Pad to 16 byte boundary if needed. */
11377 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11378 && (cum->words % 2) != 0)
11379 cum->words++;
11380 /* For varargs, we can just go up by the size of the struct. */
11381 if (!named)
11382 cum->words += (size + 7) / 8;
11383 else
11384 {
11385 /* It is tempting to say int register count just goes up by
11386 sizeof(type)/8, but this is wrong in a case such as
11387 { int; double; int; } [powerpc alignment]. We have to
11388 grovel through the fields for these too. */
11389 cum->intoffset = 0;
11390 cum->floats_in_gpr = 0;
11391 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11392 rs6000_darwin64_record_arg_advance_flush (cum,
11393 size * BITS_PER_UNIT, 1);
11394 }
11395 if (TARGET_DEBUG_ARG)
11396 {
11397 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d, ",
11398 cum->words, TYPE_ALIGN (type), size);
11399 fprintf (stderr,
11400 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11401 cum->nargs_prototype, cum->prototype,
11402 GET_MODE_NAME (mode));
11403 }
11404 }
11405 else if (DEFAULT_ABI == ABI_V4)
11406 {
11407 if (abi_v4_pass_in_fpr (mode, named))
11408 {
11409 /* _Decimal128 must use an even/odd register pair. This assumes
11410 that the register number is odd when fregno is odd. */
11411 if (mode == TDmode && (cum->fregno % 2) == 1)
11412 cum->fregno++;
11413
11414 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11415 <= FP_ARG_V4_MAX_REG)
11416 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11417 else
11418 {
11419 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11420 if (mode == DFmode || FLOAT128_IBM_P (mode)
11421 || mode == DDmode || mode == TDmode)
11422 cum->words += cum->words & 1;
11423 cum->words += rs6000_arg_size (mode, type);
11424 }
11425 }
11426 else
11427 {
11428 int n_words = rs6000_arg_size (mode, type);
11429 int gregno = cum->sysv_gregno;
11430
11431 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11432 As does any other 2 word item such as complex int due to a
11433 historical mistake. */
11434 if (n_words == 2)
11435 gregno += (1 - gregno) & 1;
11436
11437 /* Multi-reg args are not split between registers and stack. */
11438 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11439 {
11440 /* Long long is aligned on the stack. So are other 2 word
11441 items such as complex int due to a historical mistake. */
11442 if (n_words == 2)
11443 cum->words += cum->words & 1;
11444 cum->words += n_words;
11445 }
11446
11447 /* Note: we keep accumulating gregno even after we've started
11448 spilling args to the stack; expand_builtin_saveregs relies on
11449 this to tell that spilling has begun. */
11450 cum->sysv_gregno = gregno + n_words;
11451 }
11452
11453 if (TARGET_DEBUG_ARG)
11454 {
11455 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11456 cum->words, cum->fregno);
11457 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11458 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11459 fprintf (stderr, "mode = %4s, named = %d\n",
11460 GET_MODE_NAME (mode), named);
11461 }
11462 }
11463 else
11464 {
11465 int n_words = rs6000_arg_size (mode, type);
11466 int start_words = cum->words;
11467 int align_words = rs6000_parm_start (mode, type, start_words);
11468
11469 cum->words = align_words + n_words;
11470
11471 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11472 {
11473 /* _Decimal128 must be passed in an even/odd float register pair.
11474 This assumes that the register number is odd when fregno is
11475 odd. */
11476 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11477 cum->fregno++;
11478 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11479 }
11480
11481 if (TARGET_DEBUG_ARG)
11482 {
11483 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11484 cum->words, cum->fregno);
11485 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11486 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11487 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11488 named, align_words - start_words, depth);
11489 }
11490 }
11491 }
11492
11493 static void
11494 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11495 const_tree type, bool named)
11496 {
11497 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11498 0);
11499 }
11500
11501 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11502 structure between cum->intoffset and bitpos to integer registers. */
11503
11504 static void
11505 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11506 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11507 {
11508 machine_mode mode;
11509 unsigned int regno;
11510 unsigned int startbit, endbit;
11511 int this_regno, intregs, intoffset;
11512 rtx reg;
11513
11514 if (cum->intoffset == -1)
11515 return;
11516
11517 intoffset = cum->intoffset;
11518 cum->intoffset = -1;
11519
11520 /* If this is the trailing part of a word, try to load only that
11521 much into the register. Otherwise load the whole register. Note
11522 that in the latter case we may pick up unwanted bits. It's not a
11523 problem at the moment, but we may wish to revisit this. */
11524
11525 if (intoffset % BITS_PER_WORD != 0)
11526 {
11527 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11528 if (!int_mode_for_size (bits, 0).exists (&mode))
11529 {
11530 /* We couldn't find an appropriate mode, which happens,
11531 e.g., in packed structs when there are 3 bytes to load.
11532 Move intoffset back to the beginning of the word in this
11533 case. */
11534 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11535 mode = word_mode;
11536 }
11537 }
11538 else
11539 mode = word_mode;
11540
11541 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11542 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11543 intregs = (endbit - startbit) / BITS_PER_WORD;
11544 this_regno = cum->words + intoffset / BITS_PER_WORD;
11545
11546 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11547 cum->use_stack = 1;
11548
11549 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11550 if (intregs <= 0)
11551 return;
11552
11553 intoffset /= BITS_PER_UNIT;
11554 do
11555 {
11556 regno = GP_ARG_MIN_REG + this_regno;
11557 reg = gen_rtx_REG (mode, regno);
11558 rvec[(*k)++] =
11559 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11560
11561 this_regno += 1;
11562 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11563 mode = word_mode;
11564 intregs -= 1;
11565 }
11566 while (intregs > 0);
11567 }
11568
11569 /* Recursive workhorse for the following. */
11570
11571 static void
11572 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11573 HOST_WIDE_INT startbitpos, rtx rvec[],
11574 int *k)
11575 {
11576 tree f;
11577
11578 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11579 if (TREE_CODE (f) == FIELD_DECL)
11580 {
11581 HOST_WIDE_INT bitpos = startbitpos;
11582 tree ftype = TREE_TYPE (f);
11583 machine_mode mode;
11584 if (ftype == error_mark_node)
11585 continue;
11586 mode = TYPE_MODE (ftype);
11587
11588 if (DECL_SIZE (f) != 0
11589 && tree_fits_uhwi_p (bit_position (f)))
11590 bitpos += int_bit_position (f);
11591
11592 /* ??? FIXME: else assume zero offset. */
11593
11594 if (TREE_CODE (ftype) == RECORD_TYPE)
11595 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11596 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11597 {
11598 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11599 #if 0
11600 switch (mode)
11601 {
11602 case E_SCmode: mode = SFmode; break;
11603 case E_DCmode: mode = DFmode; break;
11604 case E_TCmode: mode = TFmode; break;
11605 default: break;
11606 }
11607 #endif
11608 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11609 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11610 {
11611 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11612 && (mode == TFmode || mode == TDmode));
11613 /* Long double or _Decimal128 split over regs and memory. */
11614 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11615 cum->use_stack = 1;
11616 }
11617 rvec[(*k)++]
11618 = gen_rtx_EXPR_LIST (VOIDmode,
11619 gen_rtx_REG (mode, cum->fregno++),
11620 GEN_INT (bitpos / BITS_PER_UNIT));
11621 if (FLOAT128_2REG_P (mode))
11622 cum->fregno++;
11623 }
11624 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11625 {
11626 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11627 rvec[(*k)++]
11628 = gen_rtx_EXPR_LIST (VOIDmode,
11629 gen_rtx_REG (mode, cum->vregno++),
11630 GEN_INT (bitpos / BITS_PER_UNIT));
11631 }
11632 else if (cum->intoffset == -1)
11633 cum->intoffset = bitpos;
11634 }
11635 }
11636
11637 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11638 the register(s) to be used for each field and subfield of a struct
11639 being passed by value, along with the offset of where the
11640 register's value may be found in the block. FP fields go in FP
11641 register, vector fields go in vector registers, and everything
11642 else goes in int registers, packed as in memory.
11643
11644 This code is also used for function return values. RETVAL indicates
11645 whether this is the case.
11646
11647 Much of this is taken from the SPARC V9 port, which has a similar
11648 calling convention. */
11649
11650 static rtx
11651 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11652 bool named, bool retval)
11653 {
11654 rtx rvec[FIRST_PSEUDO_REGISTER];
11655 int k = 1, kbase = 1;
11656 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11657 /* This is a copy; modifications are not visible to our caller. */
11658 CUMULATIVE_ARGS copy_cum = *orig_cum;
11659 CUMULATIVE_ARGS *cum = &copy_cum;
11660
11661 /* Pad to 16 byte boundary if needed. */
11662 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11663 && (cum->words % 2) != 0)
11664 cum->words++;
11665
11666 cum->intoffset = 0;
11667 cum->use_stack = 0;
11668 cum->named = named;
11669
11670 /* Put entries into rvec[] for individual FP and vector fields, and
11671 for the chunks of memory that go in int regs. Note we start at
11672 element 1; 0 is reserved for an indication of using memory, and
11673 may or may not be filled in below. */
11674 rs6000_darwin64_record_arg_recurse (cum, type, /*startbitpos=*/0, rvec, &k);
11675 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11676
11677 /* If any part of the struct went on the stack put all of it there.
11678 This hack is because the generic code for
11679 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11680 parts of the struct are not at the beginning. */
11681 if (cum->use_stack)
11682 {
11683 if (retval)
11684 return NULL_RTX; /* doesn't go in registers at all */
11685 kbase = 0;
11686 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11687 }
11688 if (k > 1 || cum->use_stack)
11689 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11690 else
11691 return NULL_RTX;
11692 }
11693
11694 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11695
11696 static rtx
11697 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11698 int align_words)
11699 {
11700 int n_units;
11701 int i, k;
11702 rtx rvec[GP_ARG_NUM_REG + 1];
11703
11704 if (align_words >= GP_ARG_NUM_REG)
11705 return NULL_RTX;
11706
11707 n_units = rs6000_arg_size (mode, type);
11708
11709 /* Optimize the simple case where the arg fits in one gpr, except in
11710 the case of BLKmode due to assign_parms assuming that registers are
11711 BITS_PER_WORD wide. */
11712 if (n_units == 0
11713 || (n_units == 1 && mode != BLKmode))
11714 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11715
11716 k = 0;
11717 if (align_words + n_units > GP_ARG_NUM_REG)
11718 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11719 using a magic NULL_RTX component.
11720 This is not strictly correct. Only some of the arg belongs in
11721 memory, not all of it. However, the normal scheme using
11722 function_arg_partial_nregs can result in unusual subregs, eg.
11723 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11724 store the whole arg to memory is often more efficient than code
11725 to store pieces, and we know that space is available in the right
11726 place for the whole arg. */
11727 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11728
11729 i = 0;
11730 do
11731 {
11732 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11733 rtx off = GEN_INT (i++ * 4);
11734 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11735 }
11736 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11737
11738 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11739 }
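
/* For instance, an 8-byte BLKmode argument with ALIGN_WORDS == 7 produces
   a PARALLEL of the magic (nil) memory marker followed by (reg:SI 10) at
   offset 0: the first word lands in the last GPR (GP_ARG_MIN_REG is r3,
   so align_words 7 selects r10) and the second word goes to memory.  */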
11740
11741 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11742 but must also be copied into the parameter save area starting at
11743 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11744 to the GPRs and/or memory. Return the number of elements used. */
11745
11746 static int
11747 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11748 int align_words, rtx *rvec)
11749 {
11750 int k = 0;
11751
11752 if (align_words < GP_ARG_NUM_REG)
11753 {
11754 int n_words = rs6000_arg_size (mode, type);
11755
11756 if (align_words + n_words > GP_ARG_NUM_REG
11757 || mode == BLKmode
11758 || (TARGET_32BIT && TARGET_POWERPC64))
11759 {
11760 /* If this is partially on the stack, then we only
11761 include the portion actually in registers here. */
11762 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11763 int i = 0;
11764
11765 if (align_words + n_words > GP_ARG_NUM_REG)
11766 {
11767 /* Not all of the arg fits in gprs. Say that it goes in memory
11768 too, using a magic NULL_RTX component. Also see comment in
11769 rs6000_mixed_function_arg for why the normal
11770 function_arg_partial_nregs scheme doesn't work in this case. */
11771 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11772 }
11773
11774 do
11775 {
11776 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11777 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11778 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11779 }
11780 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11781 }
11782 else
11783 {
11784 /* The whole arg fits in gprs. */
11785 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11786 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11787 }
11788 }
11789 else
11790 {
11791 /* It's entirely in memory. */
11792 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11793 }
11794
11795 return k;
11796 }
11797
11798 /* RVEC is a vector of K components of an argument of mode MODE.
11799 Construct the final function_arg return value from it. */
11800
11801 static rtx
11802 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11803 {
11804 gcc_assert (k >= 1);
11805
11806 /* Avoid returning a PARALLEL in the trivial cases. */
11807 if (k == 1)
11808 {
11809 if (XEXP (rvec[0], 0) == NULL_RTX)
11810 return NULL_RTX;
11811
11812 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11813 return XEXP (rvec[0], 0);
11814 }
11815
11816 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11817 }
11818
11819 /* Determine where to put an argument to a function.
11820 Value is zero to push the argument on the stack,
11821 or a hard register in which to store the argument.
11822
11823 MODE is the argument's machine mode.
11824 TYPE is the data type of the argument (as a tree).
11825 This is null for libcalls where that information may
11826 not be available.
11827 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11828 the preceding args and about the function being called. It is
11829 not modified in this routine.
11830 NAMED is nonzero if this argument is a named parameter
11831 (otherwise it is an extra parameter matching an ellipsis).
11832
11833 On RS/6000 the first eight words of non-FP are normally in registers
11834 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11835 Under V.4, the first 8 FP args are in registers.
11836
11837 If this is floating-point and no prototype is specified, we use
11838 both an FP and integer register (or possibly FP reg and stack). Library
11839 functions (when CALL_LIBCALL is set) always have the proper types for args,
11840 so we can pass the FP value just in one register. emit_library_call
11841 doesn't support PARALLEL anyway.
11842
11843 Note that for args passed by reference, function_arg will be called
11844 with MODE and TYPE set to that of the pointer to the arg, not the arg
11845 itself. */
11846
11847 static rtx
11848 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11849 const_tree type, bool named)
11850 {
11851 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11852 enum rs6000_abi abi = DEFAULT_ABI;
11853 machine_mode elt_mode;
11854 int n_elts;
11855
11856 /* Return a marker to indicate whether CR1 needs to set or clear the
11857 bit that V.4 uses to say fp args were passed in registers.
11858 Assume that we don't need the marker for software floating point,
11859 or compiler generated library calls. */
11860 if (mode == VOIDmode)
11861 {
11862 if (abi == ABI_V4
11863 && (cum->call_cookie & CALL_LIBCALL) == 0
11864 && (cum->stdarg
11865 || (cum->nargs_prototype < 0
11866 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11867 && TARGET_HARD_FLOAT)
11868 return GEN_INT (cum->call_cookie
11869 | ((cum->fregno == FP_ARG_MIN_REG)
11870 ? CALL_V4_SET_FP_ARGS
11871 : CALL_V4_CLEAR_FP_ARGS));
11872
11873 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11874 }
11875
11876 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11877
11878 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11879 {
11880 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11881 if (rslt != NULL_RTX)
11882 return rslt;
11883 /* Else fall through to usual handling. */
11884 }
11885
11886 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11887 {
11888 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11889 rtx r, off;
11890 int i, k = 0;
11891
11892 /* Do we also need to pass this argument in the parameter save area?
11893 Library support functions for IEEE 128-bit are assumed to not need the
11894 value passed both in GPRs and in vector registers. */
11895 if (TARGET_64BIT && !cum->prototype
11896 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11897 {
11898 int align_words = ROUND_UP (cum->words, 2);
11899 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11900 }
11901
11902 /* Describe where this argument goes in the vector registers. */
11903 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11904 {
11905 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11906 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11907 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11908 }
11909
11910 return rs6000_finish_function_arg (mode, rvec, k);
11911 }
11912 else if (TARGET_ALTIVEC_ABI
11913 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11914 || (type && TREE_CODE (type) == VECTOR_TYPE
11915 && int_size_in_bytes (type) == 16)))
11916 {
11917 if (named || abi == ABI_V4)
11918 return NULL_RTX;
11919 else
11920 {
11921 /* Vector parameters to varargs functions under AIX or Darwin
11922 get passed in memory and possibly also in GPRs. */
11923 int align, align_words, n_words;
11924 machine_mode part_mode;
11925
11926 /* Vector parameters must be 16-byte aligned. In 32-bit
11927 mode this means we need to take into account the offset
11928 to the parameter save area. In 64-bit mode, they just
11929 have to start on an even word, since the parameter save
11930 area is 16-byte aligned. */
11931 if (TARGET_32BIT)
11932 align = -(rs6000_parm_offset () + cum->words) & 3;
11933 else
11934 align = cum->words & 1;
11935 align_words = cum->words + align;
11936
11937 /* Out of registers? Memory, then. */
11938 if (align_words >= GP_ARG_NUM_REG)
11939 return NULL_RTX;
11940
11941 if (TARGET_32BIT && TARGET_POWERPC64)
11942 return rs6000_mixed_function_arg (mode, type, align_words);
11943
11944 /* The vector value goes in GPRs. Only the part of the
11945 value in GPRs is reported here. */
11946 part_mode = mode;
11947 n_words = rs6000_arg_size (mode, type);
11948 if (align_words + n_words > GP_ARG_NUM_REG)
11949 /* Fortunately, there are only two possibilities: the value
11950 is either wholly in GPRs or half in GPRs and half not. */
11951 part_mode = DImode;
11952
11953 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11954 }
11955 }
11956
11957 else if (abi == ABI_V4)
11958 {
11959 if (abi_v4_pass_in_fpr (mode, named))
11960 {
11961 /* _Decimal128 must use an even/odd register pair. This assumes
11962 that the register number is odd when fregno is odd. */
11963 if (mode == TDmode && (cum->fregno % 2) == 1)
11964 cum->fregno++;
11965
11966 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11967 <= FP_ARG_V4_MAX_REG)
11968 return gen_rtx_REG (mode, cum->fregno);
11969 else
11970 return NULL_RTX;
11971 }
11972 else
11973 {
11974 int n_words = rs6000_arg_size (mode, type);
11975 int gregno = cum->sysv_gregno;
11976
11977 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11978 As does any other 2 word item such as complex int due to a
11979 historical mistake. */
11980 if (n_words == 2)
11981 gregno += (1 - gregno) & 1;
11982
11983 /* Multi-reg args are not split between registers and stack. */
11984 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11985 return NULL_RTX;
11986
11987 if (TARGET_32BIT && TARGET_POWERPC64)
11988 return rs6000_mixed_function_arg (mode, type,
11989 gregno - GP_ARG_MIN_REG);
11990 return gen_rtx_REG (mode, gregno);
11991 }
11992 }
11993 else
11994 {
11995 int align_words = rs6000_parm_start (mode, type, cum->words);
11996
11997 /* _Decimal128 must be passed in an even/odd float register pair.
11998 This assumes that the register number is odd when fregno is odd. */
11999 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12000 cum->fregno++;
12001
12002 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12003 && !(TARGET_AIX && !TARGET_ELF
12004 && type != NULL && AGGREGATE_TYPE_P (type)))
12005 {
12006 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12007 rtx r, off;
12008 int i, k = 0;
12009 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12010 int fpr_words;
12011
12012 /* Do we also need to pass this argument in the parameter
12013 save area? */
12014 if (type && (cum->nargs_prototype <= 0
12015 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12016 && TARGET_XL_COMPAT
12017 && align_words >= GP_ARG_NUM_REG)))
12018 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12019
12020 /* Describe where this argument goes in the fprs. */
12021 for (i = 0; i < n_elts
12022 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12023 {
12024 /* Check if the argument is split over registers and memory.
12025 This can only ever happen for long double or _Decimal128;
12026 complex types are handled via split_complex_arg. */
12027 machine_mode fmode = elt_mode;
12028 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12029 {
12030 gcc_assert (FLOAT128_2REG_P (fmode));
12031 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12032 }
12033
12034 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12035 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12036 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12037 }
12038
12039 /* If there were not enough FPRs to hold the argument, the rest
12040 usually goes into memory. However, if the current position
12041 is still within the register parameter area, a portion may
12042 actually have to go into GPRs.
12043
12044 Note that it may happen that the portion of the argument
12045 passed in the first "half" of the first GPR was already
12046 passed in the last FPR as well.
12047
12048 For unnamed arguments, we already set up GPRs to cover the
12049 whole argument in rs6000_psave_function_arg, so there is
12050 nothing further to do at this point. */
12051 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12052 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12053 && cum->nargs_prototype > 0)
12054 {
12055 static bool warned;
12056
12057 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12058 int n_words = rs6000_arg_size (mode, type);
12059
12060 align_words += fpr_words;
12061 n_words -= fpr_words;
12062
12063 do
12064 {
12065 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12066 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12067 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12068 }
12069 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12070
12071 if (!warned && warn_psabi)
12072 {
12073 warned = true;
12074 inform (input_location,
12075 "the ABI of passing homogeneous float aggregates"
12076 " has changed in GCC 5");
12077 }
12078 }
12079
12080 return rs6000_finish_function_arg (mode, rvec, k);
12081 }
12082 else if (align_words < GP_ARG_NUM_REG)
12083 {
12084 if (TARGET_32BIT && TARGET_POWERPC64)
12085 return rs6000_mixed_function_arg (mode, type, align_words);
12086
12087 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12088 }
12089 else
12090 return NULL_RTX;
12091 }
12092 }
12093 \f
12094 /* For an arg passed partly in registers and partly in memory, this is
12095 the number of bytes passed in registers. For args passed entirely in
12096 registers or entirely in memory, zero. When an arg is described by a
12097 PARALLEL, perhaps using more than one register type, this function
12098 returns the number of bytes used by the first element of the PARALLEL. */
12099
12100 static int
12101 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12102 tree type, bool named)
12103 {
12104 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12105 bool passed_in_gprs = true;
12106 int ret = 0;
12107 int align_words;
12108 machine_mode elt_mode;
12109 int n_elts;
12110
12111 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12112
12113 if (DEFAULT_ABI == ABI_V4)
12114 return 0;
12115
12116 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12117 {
12118 /* If we are passing this arg in the fixed parameter save area (gprs or
12119 memory) as well as VRs, we do not use the partial bytes mechanism;
12120 instead, rs6000_function_arg will return a PARALLEL including a memory
12121 element as necessary. Library support functions for IEEE 128-bit are
12122 assumed to not need the value passed both in GPRs and in vector
12123 registers. */
12124 if (TARGET_64BIT && !cum->prototype
12125 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12126 return 0;
12127
12128 /* Otherwise, we pass in VRs only. Check for partial copies. */
12129 passed_in_gprs = false;
12130 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12131 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12132 }
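/* Editor's worked example (assuming v2..v13 are the vector argument
   registers, so ALTIVEC_ARG_MAX_REG is v13): a homogeneous aggregate
   of three vectors arriving at cum->vregno == v12 overflows by one
   element, so ret == (13 + 1 - 12) * 16 == 32 bytes are passed in
   v12 and v13, and the third vector goes to memory.  */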
12133
12134 /* In this complicated case we just disable the partial_nregs code. */
12135 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12136 return 0;
12137
12138 align_words = rs6000_parm_start (mode, type, cum->words);
12139
12140 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12141 && !(TARGET_AIX && !TARGET_ELF
12142 && type != NULL && AGGREGATE_TYPE_P (type)))
12143 {
12144 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12145
12146 /* If we are passing this arg in the fixed parameter save area
12147 (gprs or memory) as well as FPRs, we do not use the partial
12148 bytes mechanism; instead, rs6000_function_arg will return a
12149 PARALLEL including a memory element as necessary. */
12150 if (type
12151 && (cum->nargs_prototype <= 0
12152 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12153 && TARGET_XL_COMPAT
12154 && align_words >= GP_ARG_NUM_REG)))
12155 return 0;
12156
12157 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12158 passed_in_gprs = false;
12159 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12160 {
12161 /* Compute number of bytes / words passed in FPRs. If there
12162 is still space available in the register parameter area
12163 *after* that amount, a part of the argument will be passed
12164 in GPRs. In that case, the total amount passed in any
12165 registers is equal to the amount that would have been passed
12166 in GPRs if everything were passed there, so we fall back to
12167 the GPR code below to compute the appropriate value. */
12168 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12169 * MIN (8, GET_MODE_SIZE (elt_mode)));
12170 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12171
12172 if (align_words + fpr_words < GP_ARG_NUM_REG)
12173 passed_in_gprs = true;
12174 else
12175 ret = fpr;
12176 }
12177 }
12178
12179 if (passed_in_gprs
12180 && align_words < GP_ARG_NUM_REG
12181 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12182 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
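/* Editor's worked example of the formula above: on a 64-bit target
   (8-byte words, GP_ARG_NUM_REG == 8), a 32-byte aggregate whose first
   word lands at align_words == 6 only partially fits:
   ret == (8 - 6) * 8 == 16 bytes go in r9 and r10, and the remaining
   16 bytes go to memory.  */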
12183
12184 if (ret != 0 && TARGET_DEBUG_ARG)
12185 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12186
12187 return ret;
12188 }
12189 \f
12190 /* A C expression that indicates when an argument must be passed by
12191 reference. If nonzero for an argument, a copy of that argument is
12192 made in memory and a pointer to the argument is passed instead of
12193 the argument itself. The pointer is passed in whatever way is
12194 appropriate for passing a pointer to that type.
12195
12196 Under V.4, aggregates and long double are passed by reference.
12197
12198 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12199 reference unless the AltiVec vector extension ABI is in force.
12200
12201 As an extension to all ABIs, variable sized types are passed by
12202 reference. */
12203
12204 static bool
12205 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12206 machine_mode mode, const_tree type,
12207 bool named ATTRIBUTE_UNUSED)
12208 {
12209 if (!type)
12210 return 0;
12211
12212 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12213 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12214 {
12215 if (TARGET_DEBUG_ARG)
12216 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12217 return 1;
12218 }
12219
12220 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12221 {
12222 if (TARGET_DEBUG_ARG)
12223 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12224 return 1;
12225 }
12226
12227 if (int_size_in_bytes (type) < 0)
12228 {
12229 if (TARGET_DEBUG_ARG)
12230 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12231 return 1;
12232 }
12233
12234 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12235 modes only exist for GCC vector types if -maltivec. */
12236 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12237 {
12238 if (TARGET_DEBUG_ARG)
12239 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12240 return 1;
12241 }
12242
12243 /* Pass synthetic vectors in memory. */
12244 if (TREE_CODE (type) == VECTOR_TYPE
12245 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12246 {
12247 static bool warned_for_pass_big_vectors = false;
12248 if (TARGET_DEBUG_ARG)
12249 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12250 if (!warned_for_pass_big_vectors)
12251 {
12252 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12253 "non-standard ABI extension with no compatibility "
12254 "guarantee");
12255 warned_for_pass_big_vectors = true;
12256 }
12257 return 1;
12258 }
12259
12260 return 0;
12261 }
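/* Editor's illustration of the V.4 aggregate rule above: given
   `struct s { int a, b; };', a call f (x) is lowered as if it were
   f (&tmp), where tmp is a caller-made copy of x, and the callee
   reads the members through that pointer.  */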
12262
12263 /* Process a parameter of type TYPE after ARGS_SO_FAR parameters have
12264 already been processed. Return true if the parameter must be passed
12265 (fully or partially) on the stack. */
12266
12267 static bool
12268 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12269 {
12270 machine_mode mode;
12271 int unsignedp;
12272 rtx entry_parm;
12273
12274 /* Catch errors. */
12275 if (type == NULL || type == error_mark_node)
12276 return true;
12277
12278 /* Handle types with no storage requirement. */
12279 if (TYPE_MODE (type) == VOIDmode)
12280 return false;
12281
12282 /* Handle complex types. */
12283 if (TREE_CODE (type) == COMPLEX_TYPE)
12284 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12285 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12286
12287 /* Handle transparent aggregates. */
12288 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12289 && TYPE_TRANSPARENT_AGGR (type))
12290 type = TREE_TYPE (first_field (type));
12291
12292 /* See if this arg was passed by invisible reference. */
12293 if (pass_by_reference (get_cumulative_args (args_so_far),
12294 TYPE_MODE (type), type, true))
12295 type = build_pointer_type (type);
12296
12297 /* Find mode as it is passed by the ABI. */
12298 unsignedp = TYPE_UNSIGNED (type);
12299 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12300
12301 /* If we must pass in stack, we need a stack. */
12302 if (rs6000_must_pass_in_stack (mode, type))
12303 return true;
12304
12305 /* If there is no incoming register, we need a stack. */
12306 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12307 if (entry_parm == NULL)
12308 return true;
12309
12310 /* Likewise if we need to pass both in registers and on the stack. */
12311 if (GET_CODE (entry_parm) == PARALLEL
12312 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12313 return true;
12314
12315 /* Also true if we're partially in registers and partially not. */
12316 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12317 return true;
12318
12319 /* Update info on where next arg arrives in registers. */
12320 rs6000_function_arg_advance (args_so_far, mode, type, true);
12321 return false;
12322 }
12323
12324 /* Return true if FUN has no prototype, has a variable argument
12325 list, or passes any parameter in memory. */
12326
12327 static bool
12328 rs6000_function_parms_need_stack (tree fun, bool incoming)
12329 {
12330 tree fntype, result;
12331 CUMULATIVE_ARGS args_so_far_v;
12332 cumulative_args_t args_so_far;
12333
12334 if (!fun)
12335 /* Must be a libcall, all of which only use reg parms. */
12336 return false;
12337
12338 fntype = fun;
12339 if (!TYPE_P (fun))
12340 fntype = TREE_TYPE (fun);
12341
12342 /* Varargs functions need the parameter save area. */
12343 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12344 return true;
12345
12346 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12347 args_so_far = pack_cumulative_args (&args_so_far_v);
12348
12349 /* When incoming, we will have been passed the function decl.
12350 It is necessary to use the decl to handle K&R style functions,
12351 where TYPE_ARG_TYPES may not be available. */
12352 if (incoming)
12353 {
12354 gcc_assert (DECL_P (fun));
12355 result = DECL_RESULT (fun);
12356 }
12357 else
12358 result = TREE_TYPE (fntype);
12359
12360 if (result && aggregate_value_p (result, fntype))
12361 {
12362 if (!TYPE_P (result))
12363 result = TREE_TYPE (result);
12364 result = build_pointer_type (result);
12365 rs6000_parm_needs_stack (args_so_far, result);
12366 }
12367
12368 if (incoming)
12369 {
12370 tree parm;
12371
12372 for (parm = DECL_ARGUMENTS (fun);
12373 parm && parm != void_list_node;
12374 parm = TREE_CHAIN (parm))
12375 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12376 return true;
12377 }
12378 else
12379 {
12380 function_args_iterator args_iter;
12381 tree arg_type;
12382
12383 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12384 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12385 return true;
12386 }
12387
12388 return false;
12389 }
12390
12391 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12392 usually a constant depending on the ABI. However, in the ELFv2 ABI
12393 the register parameter area is optional when calling a function that
12394 has a prototype in scope, has no variable argument list, and passes
12395 all parameters in registers. */
12396
12397 int
12398 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12399 {
12400 int reg_parm_stack_space;
12401
12402 switch (DEFAULT_ABI)
12403 {
12404 default:
12405 reg_parm_stack_space = 0;
12406 break;
12407
12408 case ABI_AIX:
12409 case ABI_DARWIN:
12410 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12411 break;
12412
12413 case ABI_ELFv2:
12414 /* ??? Recomputing this every time is a bit expensive. Is there
12415 a place to cache this information? */
12416 if (rs6000_function_parms_need_stack (fun, incoming))
12417 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12418 else
12419 reg_parm_stack_space = 0;
12420 break;
12421 }
12422
12423 return reg_parm_stack_space;
12424 }
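/* Editor's example of the ELFv2 rule above: a call to `int f (int,
   double);' with the prototype in scope and no varargs needs no
   parameter save area (0 bytes), while a call to the variadic
   `int g (int, ...);' still reserves the full 64 bytes on a 64-bit
   target.  */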
12425
12426 static void
12427 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12428 {
12429 int i;
12430 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12431
12432 if (nregs == 0)
12433 return;
12434
12435 for (i = 0; i < nregs; i++)
12436 {
12437 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12438 if (reload_completed)
12439 {
12440 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12441 tem = NULL_RTX;
12442 else
12443 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12444 i * GET_MODE_SIZE (reg_mode));
12445 }
12446 else
12447 tem = replace_equiv_address (tem, XEXP (tem, 0));
12448
12449 gcc_assert (tem);
12450
12451 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12452 }
12453 }
12454 \f
12455 /* Perform any actions needed for a function that is receiving a
12456 variable number of arguments.
12457
12458 CUM is as above.
12459
12460 MODE and TYPE are the mode and type of the current parameter.
12461
12462 PRETEND_SIZE is a variable that should be set to the amount of stack
12463 that must be pushed by the prolog to pretend that our caller pushed
12464 it.
12465
12466 Normally, this macro will push all remaining incoming registers on the
12467 stack and set PRETEND_SIZE to the length of the registers pushed. */
12468
12469 static void
12470 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12471 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12472 int no_rtl)
12473 {
12474 CUMULATIVE_ARGS next_cum;
12475 int reg_size = TARGET_32BIT ? 4 : 8;
12476 rtx save_area = NULL_RTX, mem;
12477 int first_reg_offset;
12478 alias_set_type set;
12479
12480 /* Skip the last named argument. */
12481 next_cum = *get_cumulative_args (cum);
12482 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12483
12484 if (DEFAULT_ABI == ABI_V4)
12485 {
12486 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12487
12488 if (! no_rtl)
12489 {
12490 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12491 HOST_WIDE_INT offset = 0;
12492
12493 /* Try to optimize the size of the varargs save area.
12494 The ABI requires that ap.reg_save_area be doubleword
12495 aligned, but we don't need to allocate space for all
12496 the bytes, only for those we will actually store
12497 into. */
12498 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12499 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12500 if (TARGET_HARD_FLOAT
12501 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12502 && cfun->va_list_fpr_size)
12503 {
12504 if (gpr_reg_num)
12505 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12506 * UNITS_PER_FP_WORD;
12507 if (cfun->va_list_fpr_size
12508 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12509 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12510 else
12511 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12512 * UNITS_PER_FP_WORD;
12513 }
12514 if (gpr_reg_num)
12515 {
12516 offset = -((first_reg_offset * reg_size) & ~7);
12517 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12518 {
12519 gpr_reg_num = cfun->va_list_gpr_size;
12520 if (reg_size == 4 && (first_reg_offset & 1))
12521 gpr_reg_num++;
12522 }
12523 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12524 }
12525 else if (fpr_size)
12526 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12527 * UNITS_PER_FP_WORD
12528 - (int) (GP_ARG_NUM_REG * reg_size);
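/* Editor's worked example of the sizing above: 32-bit SVR4 with three
   named argument words already in r3..r5 (first_reg_offset == 3) and
   no FPRs used: gpr_reg_num == 8 - 3 == 5, gpr_size == (5*4 + 7) & ~7
   == 24, and offset == -((3*4) & ~7) == -8, biasing the save area down
   so the r6 slot lands at its doubleword-aligned position.  */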
12529
12530 if (gpr_size + fpr_size)
12531 {
12532 rtx reg_save_area
12533 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12534 gcc_assert (GET_CODE (reg_save_area) == MEM);
12535 reg_save_area = XEXP (reg_save_area, 0);
12536 if (GET_CODE (reg_save_area) == PLUS)
12537 {
12538 gcc_assert (XEXP (reg_save_area, 0)
12539 == virtual_stack_vars_rtx);
12540 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12541 offset += INTVAL (XEXP (reg_save_area, 1));
12542 }
12543 else
12544 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12545 }
12546
12547 cfun->machine->varargs_save_offset = offset;
12548 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12549 }
12550 }
12551 else
12552 {
12553 first_reg_offset = next_cum.words;
12554 save_area = crtl->args.internal_arg_pointer;
12555
12556 if (targetm.calls.must_pass_in_stack (mode, type))
12557 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12558 }
12559
12560 set = get_varargs_alias_set ();
12561 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12562 && cfun->va_list_gpr_size)
12563 {
12564 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12565
12566 if (va_list_gpr_counter_field)
12567 /* V4 va_list_gpr_size counts number of registers needed. */
12568 n_gpr = cfun->va_list_gpr_size;
12569 else
12570 /* char * va_list instead counts number of bytes needed. */
12571 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12572
12573 if (nregs > n_gpr)
12574 nregs = n_gpr;
12575
12576 mem = gen_rtx_MEM (BLKmode,
12577 plus_constant (Pmode, save_area,
12578 first_reg_offset * reg_size));
12579 MEM_NOTRAP_P (mem) = 1;
12580 set_mem_alias_set (mem, set);
12581 set_mem_align (mem, BITS_PER_WORD);
12582
12583 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12584 nregs);
12585 }
12586
12587 /* Save FP registers if needed. */
12588 if (DEFAULT_ABI == ABI_V4
12589 && TARGET_HARD_FLOAT
12590 && ! no_rtl
12591 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12592 && cfun->va_list_fpr_size)
12593 {
12594 int fregno = next_cum.fregno, nregs;
12595 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12596 rtx lab = gen_label_rtx ();
12597 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12598 * UNITS_PER_FP_WORD);
12599
12600 emit_jump_insn
12601 (gen_rtx_SET (pc_rtx,
12602 gen_rtx_IF_THEN_ELSE (VOIDmode,
12603 gen_rtx_NE (VOIDmode, cr1,
12604 const0_rtx),
12605 gen_rtx_LABEL_REF (VOIDmode, lab),
12606 pc_rtx)));
12607
12608 for (nregs = 0;
12609 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12610 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12611 {
12612 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12613 plus_constant (Pmode, save_area, off));
12614 MEM_NOTRAP_P (mem) = 1;
12615 set_mem_alias_set (mem, set);
12616 set_mem_align (mem, GET_MODE_ALIGNMENT (
12617 TARGET_HARD_FLOAT ? DFmode : SFmode));
12618 emit_move_insn (mem, gen_rtx_REG (
12619 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12620 }
12621
12622 emit_label (lab);
12623 }
12624 }
12625
12626 /* Create the va_list data type. */
12627
12628 static tree
12629 rs6000_build_builtin_va_list (void)
12630 {
12631 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12632
12633 /* For AIX, prefer 'char *' because that's what the system
12634 header files like. */
12635 if (DEFAULT_ABI != ABI_V4)
12636 return build_pointer_type (char_type_node);
12637
12638 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12639 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12640 get_identifier ("__va_list_tag"), record);
12641
12642 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12643 unsigned_char_type_node);
12644 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12645 unsigned_char_type_node);
12646 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12647 every user file. */
12648 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12649 get_identifier ("reserved"), short_unsigned_type_node);
12650 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12651 get_identifier ("overflow_arg_area"),
12652 ptr_type_node);
12653 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12654 get_identifier ("reg_save_area"),
12655 ptr_type_node);
12656
12657 va_list_gpr_counter_field = f_gpr;
12658 va_list_fpr_counter_field = f_fpr;
12659
12660 DECL_FIELD_CONTEXT (f_gpr) = record;
12661 DECL_FIELD_CONTEXT (f_fpr) = record;
12662 DECL_FIELD_CONTEXT (f_res) = record;
12663 DECL_FIELD_CONTEXT (f_ovf) = record;
12664 DECL_FIELD_CONTEXT (f_sav) = record;
12665
12666 TYPE_STUB_DECL (record) = type_decl;
12667 TYPE_NAME (record) = type_decl;
12668 TYPE_FIELDS (record) = f_gpr;
12669 DECL_CHAIN (f_gpr) = f_fpr;
12670 DECL_CHAIN (f_fpr) = f_res;
12671 DECL_CHAIN (f_res) = f_ovf;
12672 DECL_CHAIN (f_ovf) = f_sav;
12673
12674 layout_type (record);
12675
12676 /* The correct type is an array type of one element. */
12677 return build_array_type (record, build_index_type (size_zero_node));
12678 }
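/* For reference, the record built above corresponds to this C sketch
   (an editor's illustration of the layout, not code the compiler uses):

     typedef struct __va_list_tag
     {
       unsigned char gpr;          // index of next GP arg register
       unsigned char fpr;          // index of next FP arg register
       unsigned short reserved;    // padding, named to silence -Wpadded
       void *overflow_arg_area;    // arguments that spilled to the stack
       void *reg_save_area;        // GPR/FPR save area from the prologue
     } __va_list_tag;

   and the actual va_list type is a one-element array of that record.  */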
12679
12680 /* Implement va_start. */
12681
12682 static void
12683 rs6000_va_start (tree valist, rtx nextarg)
12684 {
12685 HOST_WIDE_INT words, n_gpr, n_fpr;
12686 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12687 tree gpr, fpr, ovf, sav, t;
12688
12689 /* Only SVR4 needs something special. */
12690 if (DEFAULT_ABI != ABI_V4)
12691 {
12692 std_expand_builtin_va_start (valist, nextarg);
12693 return;
12694 }
12695
12696 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12697 f_fpr = DECL_CHAIN (f_gpr);
12698 f_res = DECL_CHAIN (f_fpr);
12699 f_ovf = DECL_CHAIN (f_res);
12700 f_sav = DECL_CHAIN (f_ovf);
12701
12702 valist = build_simple_mem_ref (valist);
12703 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12704 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12705 f_fpr, NULL_TREE);
12706 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12707 f_ovf, NULL_TREE);
12708 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12709 f_sav, NULL_TREE);
12710
12711 /* Count number of gp and fp argument registers used. */
12712 words = crtl->args.info.words;
12713 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12714 GP_ARG_NUM_REG);
12715 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12716 FP_ARG_NUM_REG);
12717
12718 if (TARGET_DEBUG_ARG)
12719 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12720 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12721 words, n_gpr, n_fpr);
12722
12723 if (cfun->va_list_gpr_size)
12724 {
12725 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12726 build_int_cst (NULL_TREE, n_gpr));
12727 TREE_SIDE_EFFECTS (t) = 1;
12728 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12729 }
12730
12731 if (cfun->va_list_fpr_size)
12732 {
12733 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12734 build_int_cst (NULL_TREE, n_fpr));
12735 TREE_SIDE_EFFECTS (t) = 1;
12736 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12737
12738 #ifdef HAVE_AS_GNU_ATTRIBUTE
12739 if (call_ABI_of_interest (cfun->decl))
12740 rs6000_passes_float = true;
12741 #endif
12742 }
12743
12744 /* Find the overflow area. */
12745 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12746 if (words != 0)
12747 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12748 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12749 TREE_SIDE_EFFECTS (t) = 1;
12750 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12751
12752 /* If there were no va_arg invocations, don't set up the register
12753 save area. */
12754 if (!cfun->va_list_gpr_size
12755 && !cfun->va_list_fpr_size
12756 && n_gpr < GP_ARG_NUM_REG
12757 && n_fpr < FP_ARG_V4_MAX_REG)
12758 return;
12759
12760 /* Find the register save area. */
12761 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12762 if (cfun->machine->varargs_save_offset)
12763 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12764 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12765 TREE_SIDE_EFFECTS (t) = 1;
12766 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12767 }
12768
12769 /* Implement va_arg. */
12770
12771 static tree
12772 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12773 gimple_seq *post_p)
12774 {
12775 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12776 tree gpr, fpr, ovf, sav, reg, t, u;
12777 int size, rsize, n_reg, sav_ofs, sav_scale;
12778 tree lab_false, lab_over, addr;
12779 int align;
12780 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12781 int regalign = 0;
12782 gimple *stmt;
12783
12784 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12785 {
12786 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12787 return build_va_arg_indirect_ref (t);
12788 }
12789
12790 /* We need to deal with the fact that the Darwin ppc64 ABI is defined by an
12791 earlier version of GCC, with the property that it always applied alignment
12792 adjustments to the va-args (even for zero-sized types). The cheapest way
12793 to deal with this is to replicate the effect of the part of
12794 std_gimplify_va_arg_expr that carries out the align adjust, for the
12795 relevant case.
12796 We don't need to check for pass-by-reference because of the test above.
12797 We can return a simplified answer, since we know there's no offset to add. */
12798
12799 if (((TARGET_MACHO
12800 && rs6000_darwin64_abi)
12801 || DEFAULT_ABI == ABI_ELFv2
12802 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12803 && integer_zerop (TYPE_SIZE (type)))
12804 {
12805 unsigned HOST_WIDE_INT align, boundary;
12806 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12807 align = PARM_BOUNDARY / BITS_PER_UNIT;
12808 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12809 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12810 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12811 boundary /= BITS_PER_UNIT;
12812 if (boundary > align)
12813 {
12814 tree t;
12815 /* This updates arg ptr by the amount that would be necessary
12816 to align the zero-sized (but not zero-alignment) item. */
12817 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12818 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12819 gimplify_and_add (t, pre_p);
12820
12821 t = fold_convert (sizetype, valist_tmp);
12822 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12823 fold_convert (TREE_TYPE (valist),
12824 fold_build2 (BIT_AND_EXPR, sizetype, t,
12825 size_int (-boundary))));
12826 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12827 gimplify_and_add (t, pre_p);
12828 }
12829 /* Since it is zero-sized there's no increment for the item itself. */
12830 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12831 return build_va_arg_indirect_ref (valist_tmp);
12832 }
12833
12834 if (DEFAULT_ABI != ABI_V4)
12835 {
12836 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12837 {
12838 tree elem_type = TREE_TYPE (type);
12839 machine_mode elem_mode = TYPE_MODE (elem_type);
12840 int elem_size = GET_MODE_SIZE (elem_mode);
12841
12842 if (elem_size < UNITS_PER_WORD)
12843 {
12844 tree real_part, imag_part;
12845 gimple_seq post = NULL;
12846
12847 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12848 &post);
12849 /* Copy the value into a temporary, lest the formal temporary
12850 be reused out from under us. */
12851 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12852 gimple_seq_add_seq (pre_p, post);
12853
12854 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12855 post_p);
12856
12857 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12858 }
12859 }
12860
12861 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12862 }
12863
12864 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12865 f_fpr = DECL_CHAIN (f_gpr);
12866 f_res = DECL_CHAIN (f_fpr);
12867 f_ovf = DECL_CHAIN (f_res);
12868 f_sav = DECL_CHAIN (f_ovf);
12869
12870 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12871 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12872 f_fpr, NULL_TREE);
12873 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12874 f_ovf, NULL_TREE);
12875 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12876 f_sav, NULL_TREE);
12877
12878 size = int_size_in_bytes (type);
12879 rsize = (size + 3) / 4;
12880 int pad = 4 * rsize - size;
12881 align = 1;
12882
12883 machine_mode mode = TYPE_MODE (type);
12884 if (abi_v4_pass_in_fpr (mode, false))
12885 {
12886 /* FP args go in FP registers, if present. */
12887 reg = fpr;
12888 n_reg = (size + 7) / 8;
12889 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12890 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12891 if (mode != SFmode && mode != SDmode)
12892 align = 8;
12893 }
12894 else
12895 {
12896 /* Otherwise into GP registers. */
12897 reg = gpr;
12898 n_reg = rsize;
12899 sav_ofs = 0;
12900 sav_scale = 4;
12901 if (n_reg == 2)
12902 align = 8;
12903 }
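/* Editor's worked example of the two branches above: on 32-bit V.4
   hard-float, a double has size == 8, so reg == fpr, n_reg == 1,
   sav_ofs == 32 (skipping the eight 4-byte GPR slots) and
   sav_scale == 8, giving addr == sav + 32 + fpr * 8.  A plain int
   instead uses reg == gpr, n_reg == 1, sav_ofs == 0 and sav_scale == 4,
   giving addr == sav + gpr * 4.  */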
12904
12905 /* Pull the value out of the saved registers.... */
12906
12907 lab_over = NULL;
12908 addr = create_tmp_var (ptr_type_node, "addr");
12909
12910 /* AltiVec vectors never go in registers when -mabi=altivec. */
12911 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12912 align = 16;
12913 else
12914 {
12915 lab_false = create_artificial_label (input_location);
12916 lab_over = create_artificial_label (input_location);
12917
12918 /* Long long is aligned in the registers, as is any other 2-GPR
12919 item such as complex int, due to a historical mistake. */
12920 u = reg;
12921 if (n_reg == 2 && reg == gpr)
12922 {
12923 regalign = 1;
12924 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12925 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12926 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12927 unshare_expr (reg), u);
12928 }
12929 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12930 reg number is 0 for f1, so we want to make it odd. */
12931 else if (reg == fpr && mode == TDmode)
12932 {
12933 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12934 build_int_cst (TREE_TYPE (reg), 1));
12935 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12936 }
12937
12938 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12939 t = build2 (GE_EXPR, boolean_type_node, u, t);
12940 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12941 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12942 gimplify_and_add (t, pre_p);
12943
12944 t = sav;
12945 if (sav_ofs)
12946 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12947
12948 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12949 build_int_cst (TREE_TYPE (reg), n_reg));
12950 u = fold_convert (sizetype, u);
12951 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12952 t = fold_build_pointer_plus (t, u);
12953
12954 /* _Decimal32 varargs are located in the second word of the 64-bit
12955 FP register for 32-bit binaries. */
12956 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12957 t = fold_build_pointer_plus_hwi (t, size);
12958
12959 /* Args are passed right-aligned. */
12960 if (BYTES_BIG_ENDIAN)
12961 t = fold_build_pointer_plus_hwi (t, pad);
12962
12963 gimplify_assign (addr, t, pre_p);
12964
12965 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12966
12967 stmt = gimple_build_label (lab_false);
12968 gimple_seq_add_stmt (pre_p, stmt);
12969
12970 if ((n_reg == 2 && !regalign) || n_reg > 2)
12971 {
12972 /* Ensure that we don't find any more args in regs.
12973 Alignment has been taken care of for the special cases. */
12974 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12975 }
12976 }
12977
12978 /* ... otherwise out of the overflow area. */
12979
12980 /* Care for on-stack alignment if needed. */
12981 t = ovf;
12982 if (align != 1)
12983 {
12984 t = fold_build_pointer_plus_hwi (t, align - 1);
12985 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12986 build_int_cst (TREE_TYPE (t), -align));
12987 }
12988
12989 /* Args are passed right-aligned. */
12990 if (BYTES_BIG_ENDIAN)
12991 t = fold_build_pointer_plus_hwi (t, pad);
12992
12993 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12994
12995 gimplify_assign (unshare_expr (addr), t, pre_p);
12996
12997 t = fold_build_pointer_plus_hwi (t, size);
12998 gimplify_assign (unshare_expr (ovf), t, pre_p);
12999
13000 if (lab_over)
13001 {
13002 stmt = gimple_build_label (lab_over);
13003 gimple_seq_add_stmt (pre_p, stmt);
13004 }
13005
13006 if (STRICT_ALIGNMENT
13007 && (TYPE_ALIGN (type)
13008 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13009 {
13010 /* The value (of type complex double, for example) may not be
13011 aligned in memory in the saved registers, so copy via a
13012 temporary. (This is the same code as used for SPARC.) */
13013 tree tmp = create_tmp_var (type, "va_arg_tmp");
13014 tree dest_addr = build_fold_addr_expr (tmp);
13015
13016 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13017 3, dest_addr, addr, size_int (rsize * 4));
13018 TREE_ADDRESSABLE (tmp) = 1;
13019
13020 gimplify_and_add (copy, pre_p);
13021 addr = dest_addr;
13022 }
13023
13024 addr = fold_convert (ptrtype, addr);
13025 return build_va_arg_indirect_ref (addr);
13026 }
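/* Editor's sketch of what the V.4 path above emits for a plain int
   (illustrative pseudo-GIMPLE, not authoritative): n_reg == 1 and
   sav_scale == 4, so roughly

     if (gpr >= 8) goto lab_false;
     addr = sav + gpr * 4;  gpr = gpr + 1;  goto lab_over;
   lab_false:
     addr = ovf;  ovf = addr + 4;
   lab_over:
     result = *(int *) addr;  */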
13027
13028 /* Builtins. */
13029
13030 static void
13031 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13032 {
13033 tree t;
13034 unsigned classify = rs6000_builtin_info[(int)code].attr;
13035 const char *attr_string = "";
13036
13037 gcc_assert (name != NULL);
13038 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13039
13040 if (rs6000_builtin_decls[(int)code])
13041 fatal_error (input_location,
13042 "internal error: builtin function %qs already processed",
13043 name);
13044
13045 rs6000_builtin_decls[(int)code] = t =
13046 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13047
13048 /* Set any special attributes. */
13049 if ((classify & RS6000_BTC_CONST) != 0)
13050 {
13051 /* const function, function only depends on the inputs. */
13052 TREE_READONLY (t) = 1;
13053 TREE_NOTHROW (t) = 1;
13054 attr_string = ", const";
13055 }
13056 else if ((classify & RS6000_BTC_PURE) != 0)
13057 {
13058 /* pure function, function can read global memory, but does not set any
13059 external state. */
13060 DECL_PURE_P (t) = 1;
13061 TREE_NOTHROW (t) = 1;
13062 attr_string = ", pure";
13063 }
13064 else if ((classify & RS6000_BTC_FP) != 0)
13065 {
13066 /* Function is a math function. If -frounding-math is in effect, treat
13067 the function as not reading global memory, but allow it arbitrary side
13068 effects. If it is off, assume the function is a const function.
13069 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13070 builtin-attribute.def that is used for the math functions. */
13071 TREE_NOTHROW (t) = 1;
13072 if (flag_rounding_math)
13073 {
13074 DECL_PURE_P (t) = 1;
13075 DECL_IS_NOVOPS (t) = 1;
13076 attr_string = ", fp, pure";
13077 }
13078 else
13079 {
13080 TREE_READONLY (t) = 1;
13081 attr_string = ", fp, const";
13082 }
13083 }
13084 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13085 gcc_unreachable ();
13086
13087 if (TARGET_DEBUG_BUILTIN)
13088 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13089 (int)code, name, attr_string);
13090 }
13091
13092 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13093
13094 #undef RS6000_BUILTIN_0
13095 #undef RS6000_BUILTIN_1
13096 #undef RS6000_BUILTIN_2
13097 #undef RS6000_BUILTIN_3
13098 #undef RS6000_BUILTIN_A
13099 #undef RS6000_BUILTIN_D
13100 #undef RS6000_BUILTIN_H
13101 #undef RS6000_BUILTIN_P
13102 #undef RS6000_BUILTIN_X
13103
13104 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13105 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13106 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13107 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13108 { MASK, ICODE, NAME, ENUM },
13109
13110 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13111 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13112 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13113 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13114 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13115
13116 static const struct builtin_description bdesc_3arg[] =
13117 {
13118 #include "rs6000-builtin.def"
13119 };
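/* The #undef/#define dance above (and repeated for each table below) is
   the classic X-macro technique: rs6000-builtin.def invokes one
   RS6000_BUILTIN_* macro per builtin, and each table picks out its rows
   by defining exactly one of those macros to emit an initializer while
   the rest expand to nothing.  A minimal generic sketch of the same
   idea (editor's illustration, not GCC code):

     #define ITEMS  ITEM_A (foo, 1) ITEM_B (bar, 2)

     #define ITEM_A(NAME, N) { #NAME, N },
     #define ITEM_B(NAME, N)                // filtered out
     static const struct { const char *name; int n; }
       a_items[] = { ITEMS };
     #undef ITEM_A
     #undef ITEM_B
*/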
13120
13121 /* DST operations: void foo (void *, const int, const char). */
13122
13123 #undef RS6000_BUILTIN_0
13124 #undef RS6000_BUILTIN_1
13125 #undef RS6000_BUILTIN_2
13126 #undef RS6000_BUILTIN_3
13127 #undef RS6000_BUILTIN_A
13128 #undef RS6000_BUILTIN_D
13129 #undef RS6000_BUILTIN_H
13130 #undef RS6000_BUILTIN_P
13131 #undef RS6000_BUILTIN_X
13132
13133 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13134 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13135 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13136 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13137 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13138 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13139 { MASK, ICODE, NAME, ENUM },
13140
13141 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13142 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13143 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13144
13145 static const struct builtin_description bdesc_dst[] =
13146 {
13147 #include "rs6000-builtin.def"
13148 };
13149
13150 /* Simple binary operations: VECc = foo (VECa, VECb). */
13151
13152 #undef RS6000_BUILTIN_0
13153 #undef RS6000_BUILTIN_1
13154 #undef RS6000_BUILTIN_2
13155 #undef RS6000_BUILTIN_3
13156 #undef RS6000_BUILTIN_A
13157 #undef RS6000_BUILTIN_D
13158 #undef RS6000_BUILTIN_H
13159 #undef RS6000_BUILTIN_P
13160 #undef RS6000_BUILTIN_X
13161
13162 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13163 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13164 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13165 { MASK, ICODE, NAME, ENUM },
13166
13167 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13168 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13169 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13170 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13171 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13172 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13173
13174 static const struct builtin_description bdesc_2arg[] =
13175 {
13176 #include "rs6000-builtin.def"
13177 };
13178
13179 #undef RS6000_BUILTIN_0
13180 #undef RS6000_BUILTIN_1
13181 #undef RS6000_BUILTIN_2
13182 #undef RS6000_BUILTIN_3
13183 #undef RS6000_BUILTIN_A
13184 #undef RS6000_BUILTIN_D
13185 #undef RS6000_BUILTIN_H
13186 #undef RS6000_BUILTIN_P
13187 #undef RS6000_BUILTIN_X
13188
13189 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13190 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13191 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13192 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13193 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13194 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13195 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13196 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13197 { MASK, ICODE, NAME, ENUM },
13198
13199 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13200
13201 /* AltiVec predicates. */
13202
13203 static const struct builtin_description bdesc_altivec_preds[] =
13204 {
13205 #include "rs6000-builtin.def"
13206 };
13207
13208 /* ABS* operations. */
13209
13210 #undef RS6000_BUILTIN_0
13211 #undef RS6000_BUILTIN_1
13212 #undef RS6000_BUILTIN_2
13213 #undef RS6000_BUILTIN_3
13214 #undef RS6000_BUILTIN_A
13215 #undef RS6000_BUILTIN_D
13216 #undef RS6000_BUILTIN_H
13217 #undef RS6000_BUILTIN_P
13218 #undef RS6000_BUILTIN_X
13219
13220 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13221 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13222 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13223 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13224 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13225 { MASK, ICODE, NAME, ENUM },
13226
13227 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13228 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13229 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13230 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13231
13232 static const struct builtin_description bdesc_abs[] =
13233 {
13234 #include "rs6000-builtin.def"
13235 };
13236
13237 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13238 foo (VECa). */
13239
13240 #undef RS6000_BUILTIN_0
13241 #undef RS6000_BUILTIN_1
13242 #undef RS6000_BUILTIN_2
13243 #undef RS6000_BUILTIN_3
13244 #undef RS6000_BUILTIN_A
13245 #undef RS6000_BUILTIN_D
13246 #undef RS6000_BUILTIN_H
13247 #undef RS6000_BUILTIN_P
13248 #undef RS6000_BUILTIN_X
13249
13250 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13251 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13252 { MASK, ICODE, NAME, ENUM },
13253
13254 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13255 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13256 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13257 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13258 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13259 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13260 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13261
13262 static const struct builtin_description bdesc_1arg[] =
13263 {
13264 #include "rs6000-builtin.def"
13265 };
13266
13267 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13268
13269 #undef RS6000_BUILTIN_0
13270 #undef RS6000_BUILTIN_1
13271 #undef RS6000_BUILTIN_2
13272 #undef RS6000_BUILTIN_3
13273 #undef RS6000_BUILTIN_A
13274 #undef RS6000_BUILTIN_D
13275 #undef RS6000_BUILTIN_H
13276 #undef RS6000_BUILTIN_P
13277 #undef RS6000_BUILTIN_X
13278
13279 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13280 { MASK, ICODE, NAME, ENUM },
13281
13282 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13283 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13284 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13285 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13286 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13287 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13288 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13289 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13290
13291 static const struct builtin_description bdesc_0arg[] =
13292 {
13293 #include "rs6000-builtin.def"
13294 };
13295
13296 /* HTM builtins. */
13297 #undef RS6000_BUILTIN_0
13298 #undef RS6000_BUILTIN_1
13299 #undef RS6000_BUILTIN_2
13300 #undef RS6000_BUILTIN_3
13301 #undef RS6000_BUILTIN_A
13302 #undef RS6000_BUILTIN_D
13303 #undef RS6000_BUILTIN_H
13304 #undef RS6000_BUILTIN_P
13305 #undef RS6000_BUILTIN_X
13306
13307 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13308 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13309 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13310 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13311 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13312 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13313 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13314 { MASK, ICODE, NAME, ENUM },
13315
13316 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13317 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13318
13319 static const struct builtin_description bdesc_htm[] =
13320 {
13321 #include "rs6000-builtin.def"
13322 };
13323
13324 #undef RS6000_BUILTIN_0
13325 #undef RS6000_BUILTIN_1
13326 #undef RS6000_BUILTIN_2
13327 #undef RS6000_BUILTIN_3
13328 #undef RS6000_BUILTIN_A
13329 #undef RS6000_BUILTIN_D
13330 #undef RS6000_BUILTIN_H
13331 #undef RS6000_BUILTIN_P
13332
13333 /* Return true if a builtin function is overloaded. */
13334 bool
13335 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13336 {
13337 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13338 }
13339
13340 const char *
13341 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13342 {
13343 return rs6000_builtin_info[(int)fncode].name;
13344 }
13345
13346 /* Expand a call to a builtin function that takes no arguments. */
13347 static rtx
13348 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13349 {
13350 rtx pat;
13351 machine_mode tmode = insn_data[icode].operand[0].mode;
13352
13353 if (icode == CODE_FOR_nothing)
13354 /* Builtin not supported on this processor. */
13355 return 0;
13356
13357 if (icode == CODE_FOR_rs6000_mffsl
13358 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13359 {
13360 error ("__builtin_mffsl() not supported with -msoft-float");
13361 return const0_rtx;
13362 }
13363
13364 if (target == 0
13365 || GET_MODE (target) != tmode
13366 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13367 target = gen_reg_rtx (tmode);
13368
13369 pat = GEN_FCN (icode) (target);
13370 if (! pat)
13371 return 0;
13372 emit_insn (pat);
13373
13374 return target;
13375 }
13376
13377
13378 static rtx
13379 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13380 {
13381 rtx pat;
13382 tree arg0 = CALL_EXPR_ARG (exp, 0);
13383 tree arg1 = CALL_EXPR_ARG (exp, 1);
13384 rtx op0 = expand_normal (arg0);
13385 rtx op1 = expand_normal (arg1);
13386 machine_mode mode0 = insn_data[icode].operand[0].mode;
13387 machine_mode mode1 = insn_data[icode].operand[1].mode;
13388
13389 if (icode == CODE_FOR_nothing)
13390 /* Builtin not supported on this processor. */
13391 return 0;
13392
13393 /* If we got invalid arguments bail out before generating bad rtl. */
13394 if (arg0 == error_mark_node || arg1 == error_mark_node)
13395 return const0_rtx;
13396
13397 if (GET_CODE (op0) != CONST_INT
13398 || INTVAL (op0) > 255
13399 || INTVAL (op0) < 0)
13400 {
13401 error ("argument 1 must be an 8-bit field value");
13402 return const0_rtx;
13403 }
13404
13405 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13406 op0 = copy_to_mode_reg (mode0, op0);
13407
13408 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13409 op1 = copy_to_mode_reg (mode1, op1);
13410
13411 pat = GEN_FCN (icode) (op0, op1);
13412 if (!pat)
13413 return const0_rtx;
13414 emit_insn (pat);
13415
13416 return NULL_RTX;
13417 }
13418
13419 static rtx
13420 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13421 {
13422 rtx pat;
13423 tree arg0 = CALL_EXPR_ARG (exp, 0);
13424 rtx op0 = expand_normal (arg0);
13425
13426 if (icode == CODE_FOR_nothing)
13427 /* Builtin not supported on this processor. */
13428 return 0;
13429
13430 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13431 {
13432 error ("__builtin_mtfsb0 and __builtin_mtfsb1 not supported with -msoft-float");
13433 return const0_rtx;
13434 }
13435
13436 /* If we got invalid arguments bail out before generating bad rtl. */
13437 if (arg0 == error_mark_node)
13438 return const0_rtx;
13439
13440 /* Only allow bit numbers 0 to 31. */
13441 if (!u5bit_cint_operand (op0, VOIDmode))
13442 {
13443 error ("argument must be a constant between 0 and 31");
13444 return const0_rtx;
13445 }
13446
13447 pat = GEN_FCN (icode) (op0);
13448 if (!pat)
13449 return const0_rtx;
13450 emit_insn (pat);
13451
13452 return NULL_RTX;
13453 }
13454
13455 static rtx
13456 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13457 {
13458 rtx pat;
13459 tree arg0 = CALL_EXPR_ARG (exp, 0);
13460 rtx op0 = expand_normal (arg0);
13461 machine_mode mode0 = insn_data[icode].operand[0].mode;
13462
13463 if (icode == CODE_FOR_nothing)
13464 /* Builtin not supported on this processor. */
13465 return 0;
13466
13467 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13468 {
13469 error ("__builtin_set_fpscr_rn not supported with -msoft-float");
13470 return const0_rtx;
13471 }
13472
13473 /* If we got invalid arguments bail out before generating bad rtl. */
13474 if (arg0 == error_mark_node)
13475 return const0_rtx;
13476
13477 /* If the argument is a constant, check the range. The argument can only
13478 be a 2-bit value. Unfortunately, we can't check the range of the value
13479 at compile time if the argument is a variable. The least significant two
13480 bits of the argument, regardless of type, are used to set the rounding
13481 mode. All other bits are ignored. */
13482 if (GET_CODE (op0) == CONST_INT && !const_0_to_3_operand(op0, VOIDmode))
13483 {
13484 error ("argument must be a value between 0 and 3");
13485 return const0_rtx;
13486 }
13487
13488 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13489 op0 = copy_to_mode_reg (mode0, op0);
13490
13491 pat = GEN_FCN (icode) (op0);
13492 if (!pat)
13493 return const0_rtx;
13494 emit_insn (pat);
13495
13496 return NULL_RTX;
13497 }
13498 static rtx
13499 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13500 {
13501 rtx pat;
13502 tree arg0 = CALL_EXPR_ARG (exp, 0);
13503 rtx op0 = expand_normal (arg0);
13504 machine_mode mode0 = insn_data[icode].operand[0].mode;
13505
13506 if (TARGET_32BIT)
13507 /* Builtin not supported in 32-bit mode. */
13508 fatal_error (input_location,
13509 "__builtin_set_fpscr_drn is not supported in 32-bit mode.");
13510
13511 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13512 {
13513 error ("__builtin_set_fpscr_drn not supported with -msoft-float");
13514 return const0_rtx;
13515 }
13516
13517 if (icode == CODE_FOR_nothing)
13518 /* Builtin not supported on this processor. */
13519 return 0;
13520
13521 /* If we got invalid arguments bail out before generating bad rtl. */
13522 if (arg0 == error_mark_node)
13523 return const0_rtx;
13524
13525 /* If the argument is a constant, check the range. The argument can only
13526 be a 3-bit value. Unfortunately, we can't check the range of the value
13527 at compile time if the argument is a variable. The least significant
13528 three bits of the argument, regardless of type, are used to set the
13529 decimal rounding mode. All other bits are ignored. */
13530 if (GET_CODE (op0) == CONST_INT && !const_0_to_7_operand(op0, VOIDmode))
13531 {
13532 error ("argument must be a value between 0 and 7");
13533 return const0_rtx;
13534 }
13535
13536 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13537 op0 = copy_to_mode_reg (mode0, op0);
13538
13539 pat = GEN_FCN (icode) (op0);
13540 if (! pat)
13541 return const0_rtx;
13542 emit_insn (pat);
13543
13544 return NULL_RTX;
13545 }
13546
13547 static rtx
13548 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13549 {
13550 rtx pat;
13551 tree arg0 = CALL_EXPR_ARG (exp, 0);
13552 rtx op0 = expand_normal (arg0);
13553 machine_mode tmode = insn_data[icode].operand[0].mode;
13554 machine_mode mode0 = insn_data[icode].operand[1].mode;
13555
13556 if (icode == CODE_FOR_nothing)
13557 /* Builtin not supported on this processor. */
13558 return 0;
13559
13560 /* If we got invalid arguments bail out before generating bad rtl. */
13561 if (arg0 == error_mark_node)
13562 return const0_rtx;
13563
13564 if (icode == CODE_FOR_altivec_vspltisb
13565 || icode == CODE_FOR_altivec_vspltish
13566 || icode == CODE_FOR_altivec_vspltisw)
13567 {
13568 /* Only allow 5-bit *signed* literals. */
13569 if (GET_CODE (op0) != CONST_INT
13570 || INTVAL (op0) > 15
13571 || INTVAL (op0) < -16)
13572 {
13573 error ("argument 1 must be a 5-bit signed literal");
13574 return CONST0_RTX (tmode);
13575 }
13576 }
13577
13578 if (target == 0
13579 || GET_MODE (target) != tmode
13580 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13581 target = gen_reg_rtx (tmode);
13582
13583 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13584 op0 = copy_to_mode_reg (mode0, op0);
13585
13586 pat = GEN_FCN (icode) (target, op0);
13587 if (! pat)
13588 return 0;
13589 emit_insn (pat);
13590
13591 return target;
13592 }
13593
13594 static rtx
13595 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13596 {
13597 rtx pat, scratch1, scratch2;
13598 tree arg0 = CALL_EXPR_ARG (exp, 0);
13599 rtx op0 = expand_normal (arg0);
13600 machine_mode tmode = insn_data[icode].operand[0].mode;
13601 machine_mode mode0 = insn_data[icode].operand[1].mode;
13602
13603 /* If we have invalid arguments, bail out before generating bad rtl. */
13604 if (arg0 == error_mark_node)
13605 return const0_rtx;
13606
13607 if (target == 0
13608 || GET_MODE (target) != tmode
13609 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13610 target = gen_reg_rtx (tmode);
13611
13612 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13613 op0 = copy_to_mode_reg (mode0, op0);
13614
13615 scratch1 = gen_reg_rtx (mode0);
13616 scratch2 = gen_reg_rtx (mode0);
13617
13618 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13619 if (! pat)
13620 return 0;
13621 emit_insn (pat);
13622
13623 return target;
13624 }
13625
13626 static rtx
13627 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13628 {
13629 rtx pat;
13630 tree arg0 = CALL_EXPR_ARG (exp, 0);
13631 tree arg1 = CALL_EXPR_ARG (exp, 1);
13632 rtx op0 = expand_normal (arg0);
13633 rtx op1 = expand_normal (arg1);
13634 machine_mode tmode = insn_data[icode].operand[0].mode;
13635 machine_mode mode0 = insn_data[icode].operand[1].mode;
13636 machine_mode mode1 = insn_data[icode].operand[2].mode;
13637
13638 if (icode == CODE_FOR_nothing)
13639 /* Builtin not supported on this processor. */
13640 return 0;
13641
13642 /* If we got invalid arguments bail out before generating bad rtl. */
13643 if (arg0 == error_mark_node || arg1 == error_mark_node)
13644 return const0_rtx;
13645
13646 if (icode == CODE_FOR_unpackv1ti
13647 || icode == CODE_FOR_unpackkf
13648 || icode == CODE_FOR_unpacktf
13649 || icode == CODE_FOR_unpackif
13650 || icode == CODE_FOR_unpacktd)
13651 {
13652 /* Only allow 1-bit unsigned literals. */
13653 STRIP_NOPS (arg1);
13654 if (TREE_CODE (arg1) != INTEGER_CST
13655 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13656 {
13657 error ("argument 2 must be a 1-bit unsigned literal");
13658 return CONST0_RTX (tmode);
13659 }
13660 }
13661 else if (icode == CODE_FOR_altivec_vspltw)
13662 {
13663 /* Only allow 2-bit unsigned literals. */
13664 STRIP_NOPS (arg1);
13665 if (TREE_CODE (arg1) != INTEGER_CST
13666 || TREE_INT_CST_LOW (arg1) & ~3)
13667 {
13668 error ("argument 2 must be a 2-bit unsigned literal");
13669 return CONST0_RTX (tmode);
13670 }
13671 }
13672 else if (icode == CODE_FOR_altivec_vsplth)
13673 {
13674 /* Only allow 3-bit unsigned literals. */
13675 STRIP_NOPS (arg1);
13676 if (TREE_CODE (arg1) != INTEGER_CST
13677 || TREE_INT_CST_LOW (arg1) & ~7)
13678 {
13679 error ("argument 2 must be a 3-bit unsigned literal");
13680 return CONST0_RTX (tmode);
13681 }
13682 }
13683 else if (icode == CODE_FOR_altivec_vspltb)
13684 {
13685 /* Only allow 4-bit unsigned literals. */
13686 STRIP_NOPS (arg1);
13687 if (TREE_CODE (arg1) != INTEGER_CST
13688 || TREE_INT_CST_LOW (arg1) & ~15)
13689 {
13690 error ("argument 2 must be a 4-bit unsigned literal");
13691 return CONST0_RTX (tmode);
13692 }
13693 }
13694 else if (icode == CODE_FOR_altivec_vcfux
13695 || icode == CODE_FOR_altivec_vcfsx
13696 || icode == CODE_FOR_altivec_vctsxs
13697 || icode == CODE_FOR_altivec_vctuxs)
13698 {
13699 /* Only allow 5-bit unsigned literals. */
13700 STRIP_NOPS (arg1);
13701 if (TREE_CODE (arg1) != INTEGER_CST
13702 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13703 {
13704 error ("argument 2 must be a 5-bit unsigned literal");
13705 return CONST0_RTX (tmode);
13706 }
13707 }
13708 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13709 || icode == CODE_FOR_dfptstsfi_lt_dd
13710 || icode == CODE_FOR_dfptstsfi_gt_dd
13711 || icode == CODE_FOR_dfptstsfi_unordered_dd
13712 || icode == CODE_FOR_dfptstsfi_eq_td
13713 || icode == CODE_FOR_dfptstsfi_lt_td
13714 || icode == CODE_FOR_dfptstsfi_gt_td
13715 || icode == CODE_FOR_dfptstsfi_unordered_td)
13716 {
13717 /* Only allow 6-bit unsigned literals. */
13718 STRIP_NOPS (arg0);
13719 if (TREE_CODE (arg0) != INTEGER_CST
13720 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13721 {
13722 error ("argument 1 must be a 6-bit unsigned literal");
13723 return CONST0_RTX (tmode);
13724 }
13725 }
13726 else if (icode == CODE_FOR_xststdcqp_kf
13727 || icode == CODE_FOR_xststdcqp_tf
13728 || icode == CODE_FOR_xststdcdp
13729 || icode == CODE_FOR_xststdcsp
13730 || icode == CODE_FOR_xvtstdcdp
13731 || icode == CODE_FOR_xvtstdcsp)
13732 {
13733 /* Only allow 7-bit unsigned literals. */
13734 STRIP_NOPS (arg1);
13735 if (TREE_CODE (arg1) != INTEGER_CST
13736 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13737 {
13738 error ("argument 2 must be a 7-bit unsigned literal");
13739 return CONST0_RTX (tmode);
13740 }
13741 }
13742
13743 if (target == 0
13744 || GET_MODE (target) != tmode
13745 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13746 target = gen_reg_rtx (tmode);
13747
13748 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13749 op0 = copy_to_mode_reg (mode0, op0);
13750 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13751 op1 = copy_to_mode_reg (mode1, op1);
13752
13753 pat = GEN_FCN (icode) (target, op0, op1);
13754 if (! pat)
13755 return 0;
13756 emit_insn (pat);
13757
13758 return target;
13759 }
13760
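/* For illustration, a hedged sketch of what the 5-bit check above means at
the source level: the scale operand of vec_ctf / vec_cts (which map to
altivec_vcfsx/vcfux and vctsxs/vctuxs) must be a compile-time constant in
0..31. Variable names here are hypothetical:

    vector float f = vec_ctf (vsi, 3);    accepted: 3 fits in 5 bits
    vector float g = vec_ctf (vsi, 42);   rejected: "argument 2 must be
                                          a 5-bit unsigned literal"  */
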
13761 static rtx
13762 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13763 {
13764 rtx pat, scratch;
13765 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13766 tree arg0 = CALL_EXPR_ARG (exp, 1);
13767 tree arg1 = CALL_EXPR_ARG (exp, 2);
13768 rtx op0 = expand_normal (arg0);
13769 rtx op1 = expand_normal (arg1);
13770 machine_mode tmode = SImode;
13771 machine_mode mode0 = insn_data[icode].operand[1].mode;
13772 machine_mode mode1 = insn_data[icode].operand[2].mode;
13773 int cr6_form_int;
13774
13775 if (TREE_CODE (cr6_form) != INTEGER_CST)
13776 {
13777 error ("argument 1 of %qs must be a constant",
13778 "__builtin_altivec_predicate");
13779 return const0_rtx;
13780 }
13781 else
13782 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13783
13784 gcc_assert (mode0 == mode1);
13785
13786 /* If we have invalid arguments, bail out before generating bad rtl. */
13787 if (arg0 == error_mark_node || arg1 == error_mark_node)
13788 return const0_rtx;
13789
13790 if (target == 0
13791 || GET_MODE (target) != tmode
13792 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13793 target = gen_reg_rtx (tmode);
13794
13795 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13796 op0 = copy_to_mode_reg (mode0, op0);
13797 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13798 op1 = copy_to_mode_reg (mode1, op1);
13799
13800 /* Note that for many of the relevant operations (e.g. cmpne or
13801 cmpeq) with float or double operands, it would make more sense for the
13802 allocated scratch register to have an integer vector mode. But the
13803 choice to copy the mode of operand 0 was made long ago, and there are
13804 no plans to change it. */
13805 scratch = gen_reg_rtx (mode0);
13806
13807 pat = GEN_FCN (icode) (scratch, op0, op1);
13808 if (! pat)
13809 return 0;
13810 emit_insn (pat);
13811
13812 /* The vec_any* and vec_all* predicates use the same opcodes for two
13813 different operations, but the bits in CR6 will be different
13814 depending on what information we want. So we have to play tricks
13815 with CR6 to get the right bits out.
13816
13817 If you think this is disgusting, look at the specs for the
13818 AltiVec predicates. */
13819
13820 switch (cr6_form_int)
13821 {
13822 case 0:
13823 emit_insn (gen_cr6_test_for_zero (target));
13824 break;
13825 case 1:
13826 emit_insn (gen_cr6_test_for_zero_reverse (target));
13827 break;
13828 case 2:
13829 emit_insn (gen_cr6_test_for_lt (target));
13830 break;
13831 case 3:
13832 emit_insn (gen_cr6_test_for_lt_reverse (target));
13833 break;
13834 default:
13835 error ("argument 1 of %qs is out of range",
13836 "__builtin_altivec_predicate");
13837 break;
13838 }
13839
13840 return target;
13841 }
13842
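/* For illustration, a hedged sketch of how this is used: a single
comparison opcode serves both predicate flavors, and only the CR6 test
selected by cr6_form differs. With hypothetical vectors a and b:

    vec_all_eq (a, b)   tests the "all elements true" bit of CR6
    vec_any_eq (a, b)   tests the complement of the "all false" bit

Both reach this expander with different cr6_form values (0..3, selecting
among the gen_cr6_test_* emitters above).  */
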
13843 rtx
13844 swap_endian_selector_for_mode (machine_mode mode)
13845 {
13846 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13847 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13848 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13849 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13850
13851 unsigned int *swaparray, i;
13852 rtx perm[16];
13853
13854 switch (mode)
13855 {
13856 case E_V1TImode:
13857 swaparray = swap1;
13858 break;
13859 case E_V2DFmode:
13860 case E_V2DImode:
13861 swaparray = swap2;
13862 break;
13863 case E_V4SFmode:
13864 case E_V4SImode:
13865 swaparray = swap4;
13866 break;
13867 case E_V8HImode:
13868 swaparray = swap8;
13869 break;
13870 default:
13871 gcc_unreachable ();
13872 }
13873
13874 for (i = 0; i < 16; ++i)
13875 perm[i] = GEN_INT (swaparray[i]);
13876
13877 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13878 gen_rtvec_v (16, perm)));
13879 }
13880
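/* For example, for V4SImode the constant built above is the vperm
control vector { 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12 }: permuting a
vector against itself with it reverses the four bytes inside each 32-bit
element, converting between big- and little-endian element layouts.  */
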
13881 static rtx
13882 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13883 {
13884 rtx pat, addr;
13885 tree arg0 = CALL_EXPR_ARG (exp, 0);
13886 tree arg1 = CALL_EXPR_ARG (exp, 1);
13887 machine_mode tmode = insn_data[icode].operand[0].mode;
13888 machine_mode mode0 = Pmode;
13889 machine_mode mode1 = Pmode;
13890 rtx op0 = expand_normal (arg0);
13891 rtx op1 = expand_normal (arg1);
13892
13893 if (icode == CODE_FOR_nothing)
13894 /* Builtin not supported on this processor. */
13895 return 0;
13896
13897 /* If we got invalid arguments, bail out before generating bad rtl. */
13898 if (arg0 == error_mark_node || arg1 == error_mark_node)
13899 return const0_rtx;
13900
13901 if (target == 0
13902 || GET_MODE (target) != tmode
13903 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13904 target = gen_reg_rtx (tmode);
13905
13906 op1 = copy_to_mode_reg (mode1, op1);
13907
13908 /* For LVX, express the RTL accurately by ANDing the address with -16.
13909 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13910 so the raw address is fine. */
13911 if (icode == CODE_FOR_altivec_lvx_v1ti
13912 || icode == CODE_FOR_altivec_lvx_v2df
13913 || icode == CODE_FOR_altivec_lvx_v2di
13914 || icode == CODE_FOR_altivec_lvx_v4sf
13915 || icode == CODE_FOR_altivec_lvx_v4si
13916 || icode == CODE_FOR_altivec_lvx_v8hi
13917 || icode == CODE_FOR_altivec_lvx_v16qi)
13918 {
13919 rtx rawaddr;
13920 if (op0 == const0_rtx)
13921 rawaddr = op1;
13922 else
13923 {
13924 op0 = copy_to_mode_reg (mode0, op0);
13925 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13926 }
13927 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13928 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13929
13930 emit_insn (gen_rtx_SET (target, addr));
13931 }
13932 else
13933 {
13934 if (op0 == const0_rtx)
13935 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13936 else
13937 {
13938 op0 = copy_to_mode_reg (mode0, op0);
13939 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13940 gen_rtx_PLUS (Pmode, op1, op0));
13941 }
13942
13943 pat = GEN_FCN (icode) (target, addr);
13944 if (! pat)
13945 return 0;
13946 emit_insn (pat);
13947 }
13948
13949 return target;
13950 }
13951
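/* For illustration, a hedged sketch of the lvx semantics modeled by the
AND with -16 above: the hardware ignores the low four address bits, so
with a hypothetical pointer p,

    vec_ld (3, p)

loads the 16-byte block at (p + 3) & ~15; a misaligned effective address
is truncated rather than causing an unaligned load.  */
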
13952 static rtx
13953 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13954 {
13955 rtx pat;
13956 tree arg0 = CALL_EXPR_ARG (exp, 0);
13957 tree arg1 = CALL_EXPR_ARG (exp, 1);
13958 tree arg2 = CALL_EXPR_ARG (exp, 2);
13959 rtx op0 = expand_normal (arg0);
13960 rtx op1 = expand_normal (arg1);
13961 rtx op2 = expand_normal (arg2);
13962 machine_mode mode0 = insn_data[icode].operand[0].mode;
13963 machine_mode mode1 = insn_data[icode].operand[1].mode;
13964 machine_mode mode2 = insn_data[icode].operand[2].mode;
13965
13966 if (icode == CODE_FOR_nothing)
13967 /* Builtin not supported on this processor. */
13968 return NULL_RTX;
13969
13970 /* If we got invalid arguments, bail out before generating bad rtl. */
13971 if (arg0 == error_mark_node
13972 || arg1 == error_mark_node
13973 || arg2 == error_mark_node)
13974 return NULL_RTX;
13975
13976 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13977 op0 = copy_to_mode_reg (mode0, op0);
13978 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13979 op1 = copy_to_mode_reg (mode1, op1);
13980 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13981 op2 = copy_to_mode_reg (mode2, op2);
13982
13983 pat = GEN_FCN (icode) (op0, op1, op2);
13984 if (pat)
13985 emit_insn (pat);
13986
13987 return NULL_RTX;
13988 }
13989
13990 static rtx
13991 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13992 {
13993 tree arg0 = CALL_EXPR_ARG (exp, 0);
13994 tree arg1 = CALL_EXPR_ARG (exp, 1);
13995 tree arg2 = CALL_EXPR_ARG (exp, 2);
13996 rtx op0 = expand_normal (arg0);
13997 rtx op1 = expand_normal (arg1);
13998 rtx op2 = expand_normal (arg2);
13999 rtx pat, addr, rawaddr;
14000 machine_mode tmode = insn_data[icode].operand[0].mode;
14001 machine_mode smode = insn_data[icode].operand[1].mode;
14002 machine_mode mode1 = Pmode;
14003 machine_mode mode2 = Pmode;
14004
14005 /* Invalid arguments; bail out before generating bad rtl. */
14006 if (arg0 == error_mark_node
14007 || arg1 == error_mark_node
14008 || arg2 == error_mark_node)
14009 return const0_rtx;
14010
14011 op2 = copy_to_mode_reg (mode2, op2);
14012
14013 /* For STVX, express the RTL accurately by ANDing the address with -16.
14014 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14015 so the raw address is fine. */
14016 if (icode == CODE_FOR_altivec_stvx_v2df
14017 || icode == CODE_FOR_altivec_stvx_v2di
14018 || icode == CODE_FOR_altivec_stvx_v4sf
14019 || icode == CODE_FOR_altivec_stvx_v4si
14020 || icode == CODE_FOR_altivec_stvx_v8hi
14021 || icode == CODE_FOR_altivec_stvx_v16qi)
14022 {
14023 if (op1 == const0_rtx)
14024 rawaddr = op2;
14025 else
14026 {
14027 op1 = copy_to_mode_reg (mode1, op1);
14028 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14029 }
14030
14031 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14032 addr = gen_rtx_MEM (tmode, addr);
14033
14034 op0 = copy_to_mode_reg (tmode, op0);
14035
14036 emit_insn (gen_rtx_SET (addr, op0));
14037 }
14038 else
14039 {
14040 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14041 op0 = copy_to_mode_reg (smode, op0);
14042
14043 if (op1 == const0_rtx)
14044 addr = gen_rtx_MEM (tmode, op2);
14045 else
14046 {
14047 op1 = copy_to_mode_reg (mode1, op1);
14048 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14049 }
14050
14051 pat = GEN_FCN (icode) (addr, op0);
14052 if (pat)
14053 emit_insn (pat);
14054 }
14055
14056 return NULL_RTX;
14057 }
14058
14059 /* Return the appropriate SPR number associated with the given builtin. */
14060 static inline HOST_WIDE_INT
14061 htm_spr_num (enum rs6000_builtins code)
14062 {
14063 if (code == HTM_BUILTIN_GET_TFHAR
14064 || code == HTM_BUILTIN_SET_TFHAR)
14065 return TFHAR_SPR;
14066 else if (code == HTM_BUILTIN_GET_TFIAR
14067 || code == HTM_BUILTIN_SET_TFIAR)
14068 return TFIAR_SPR;
14069 else if (code == HTM_BUILTIN_GET_TEXASR
14070 || code == HTM_BUILTIN_SET_TEXASR)
14071 return TEXASR_SPR;
14072 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14073 || code == HTM_BUILTIN_SET_TEXASRU);
14074 return TEXASRU_SPR;
14075 }
14076
14077 /* Return the appropriate SPR regno associated with the given builtin. */
14078 static inline HOST_WIDE_INT
14079 htm_spr_regno (enum rs6000_builtins code)
14080 {
14081 if (code == HTM_BUILTIN_GET_TFHAR
14082 || code == HTM_BUILTIN_SET_TFHAR)
14083 return TFHAR_REGNO;
14084 else if (code == HTM_BUILTIN_GET_TFIAR
14085 || code == HTM_BUILTIN_SET_TFIAR)
14086 return TFIAR_REGNO;
14087 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14088 || code == HTM_BUILTIN_SET_TEXASR
14089 || code == HTM_BUILTIN_GET_TEXASRU
14090 || code == HTM_BUILTIN_SET_TEXASRU);
14091 return TEXASR_REGNO;
14092 }
14093
14094 /* Return the correct ICODE value depending on whether we are
14095 setting or reading the HTM SPRs. */
14096 static inline enum insn_code
14097 rs6000_htm_spr_icode (bool nonvoid)
14098 {
14099 if (nonvoid)
14100 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14101 else
14102 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14103 }
14104
14105 /* Expand the HTM builtin in EXP and store the result in TARGET.
14106 Store true in *EXPANDEDP if we found a builtin to expand. */
14107 static rtx
14108 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14109 {
14110 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14111 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14112 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14113 const struct builtin_description *d;
14114 size_t i;
14115
14116 *expandedp = true;
14117
14118 if (!TARGET_POWERPC64
14119 && (fcode == HTM_BUILTIN_TABORTDC
14120 || fcode == HTM_BUILTIN_TABORTDCI))
14121 {
14122 size_t uns_fcode = (size_t)fcode;
14123 const char *name = rs6000_builtin_info[uns_fcode].name;
14124 error ("builtin %qs is only valid in 64-bit mode", name);
14125 return const0_rtx;
14126 }
14127
14128 /* Expand the HTM builtins. */
14129 d = bdesc_htm;
14130 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14131 if (d->code == fcode)
14132 {
14133 rtx op[MAX_HTM_OPERANDS], pat;
14134 int nopnds = 0;
14135 tree arg;
14136 call_expr_arg_iterator iter;
14137 unsigned attr = rs6000_builtin_info[fcode].attr;
14138 enum insn_code icode = d->icode;
14139 const struct insn_operand_data *insn_op;
14140 bool uses_spr = (attr & RS6000_BTC_SPR);
14141 rtx cr = NULL_RTX;
14142
14143 if (uses_spr)
14144 icode = rs6000_htm_spr_icode (nonvoid);
14145 insn_op = &insn_data[icode].operand[0];
14146
14147 if (nonvoid)
14148 {
14149 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14150 if (!target
14151 || GET_MODE (target) != tmode
14152 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14153 target = gen_reg_rtx (tmode);
14154 if (uses_spr)
14155 op[nopnds++] = target;
14156 }
14157
14158 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14159 {
14160 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14161 return const0_rtx;
14162
14163 insn_op = &insn_data[icode].operand[nopnds];
14164
14165 op[nopnds] = expand_normal (arg);
14166
14167 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14168 {
14169 if (!strcmp (insn_op->constraint, "n"))
14170 {
14171 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14172 if (!CONST_INT_P (op[nopnds]))
14173 error ("argument %d must be an unsigned literal", arg_num);
14174 else
14175 error ("argument %d is an unsigned literal that is "
14176 "out of range", arg_num);
14177 return const0_rtx;
14178 }
14179 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14180 }
14181
14182 nopnds++;
14183 }
14184
14185 /* Handle the builtins for extended mnemonics. These accept
14186 no arguments, but map to builtins that take arguments. */
14187 switch (fcode)
14188 {
14189 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14190 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14191 op[nopnds++] = GEN_INT (1);
14192 if (flag_checking)
14193 attr |= RS6000_BTC_UNARY;
14194 break;
14195 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14196 op[nopnds++] = GEN_INT (0);
14197 if (flag_checking)
14198 attr |= RS6000_BTC_UNARY;
14199 break;
14200 default:
14201 break;
14202 }
14203
14204 /* If this builtin accesses SPRs, then pass in the appropriate
14205 SPR number and SPR regno as the last two operands. */
14206 if (uses_spr)
14207 {
14208 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14209 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14210 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14211 }
14212 /* If this builtin accesses a CR, then pass in a scratch
14213 CR as the last operand. */
14214 else if (attr & RS6000_BTC_CR)
14215 { cr = gen_reg_rtx (CCmode);
14216 op[nopnds++] = cr;
14217 }
14218
14219 if (flag_checking)
14220 {
14221 int expected_nopnds = 0;
14222 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14223 expected_nopnds = 1;
14224 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14225 expected_nopnds = 2;
14226 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14227 expected_nopnds = 3;
14228 if (!(attr & RS6000_BTC_VOID))
14229 expected_nopnds += 1;
14230 if (uses_spr)
14231 expected_nopnds += 2;
14232
14233 gcc_assert (nopnds == expected_nopnds
14234 && nopnds <= MAX_HTM_OPERANDS);
14235 }
14236
14237 switch (nopnds)
14238 {
14239 case 1:
14240 pat = GEN_FCN (icode) (op[0]);
14241 break;
14242 case 2:
14243 pat = GEN_FCN (icode) (op[0], op[1]);
14244 break;
14245 case 3:
14246 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14247 break;
14248 case 4:
14249 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14250 break;
14251 default:
14252 gcc_unreachable ();
14253 }
14254 if (!pat)
14255 return NULL_RTX;
14256 emit_insn (pat);
14257
14258 if (attr & RS6000_BTC_CR)
14259 {
14260 if (fcode == HTM_BUILTIN_TBEGIN)
14261 {
14262 /* Emit code to set TARGET to true or false depending on
14263 whether the tbegin. instruction succeeded or failed
14264 to start a transaction. We do this by placing the 1's
14265 complement of CR's EQ bit into TARGET. */
14266 rtx scratch = gen_reg_rtx (SImode);
14267 emit_insn (gen_rtx_SET (scratch,
14268 gen_rtx_EQ (SImode, cr,
14269 const0_rtx)));
14270 emit_insn (gen_rtx_SET (target,
14271 gen_rtx_XOR (SImode, scratch,
14272 GEN_INT (1))));
14273 }
14274 else
14275 {
14276 /* Emit code to copy the 4-bit condition register field
14277 CR into the least significant end of register TARGET. */
14278 rtx scratch1 = gen_reg_rtx (SImode);
14279 rtx scratch2 = gen_reg_rtx (SImode);
14280 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14281 emit_insn (gen_movcc (subreg, cr));
14282 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14283 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14284 }
14285 }
14286
14287 if (nonvoid)
14288 return target;
14289 return const0_rtx;
14290 }
14291
14292 *expandedp = false;
14293 return NULL_RTX;
14294 }
14295
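/* For illustration, a hedged sketch of the usual source-level idiom
enabled by the CR handling above (hypothetical user code):

    if (__builtin_tbegin (0))
      {
        ... transactional code ...
        __builtin_tend (0);
      }
    else
      ... failure/fallback path ...

Because the expander stores the complement of the scratch CR's EQ result
into TARGET, __builtin_tbegin returns nonzero exactly when the
transaction started.  */
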
14296 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14297
14298 static rtx
14299 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14300 rtx target)
14301 {
14302 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14303 if (fcode == RS6000_BUILTIN_CPU_INIT)
14304 return const0_rtx;
14305
14306 if (target == 0 || GET_MODE (target) != SImode)
14307 target = gen_reg_rtx (SImode);
14308
14309 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14310 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14311 /* The target_clones attribute creates an ARRAY_REF instead of a STRING_CST;
14312 convert it back to a STRING_CST. */
14313 if (TREE_CODE (arg) == ARRAY_REF
14314 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14315 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14316 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14317 arg = TREE_OPERAND (arg, 0);
14318
14319 if (TREE_CODE (arg) != STRING_CST)
14320 {
14321 error ("builtin %qs only accepts a string argument",
14322 rs6000_builtin_info[(size_t) fcode].name);
14323 return const0_rtx;
14324 }
14325
14326 if (fcode == RS6000_BUILTIN_CPU_IS)
14327 {
14328 const char *cpu = TREE_STRING_POINTER (arg);
14329 rtx cpuid = NULL_RTX;
14330 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14331 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14332 {
14333 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14334 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14335 break;
14336 }
14337 if (cpuid == NULL_RTX)
14338 {
14339 /* Invalid CPU argument. */
14340 error ("cpu %qs is an invalid argument to builtin %qs",
14341 cpu, rs6000_builtin_info[(size_t) fcode].name);
14342 return const0_rtx;
14343 }
14344
14345 rtx platform = gen_reg_rtx (SImode);
14346 rtx tcbmem = gen_const_mem (SImode,
14347 gen_rtx_PLUS (Pmode,
14348 gen_rtx_REG (Pmode, TLS_REGNUM),
14349 GEN_INT (TCB_PLATFORM_OFFSET)));
14350 emit_move_insn (platform, tcbmem);
14351 emit_insn (gen_eqsi3 (target, platform, cpuid));
14352 }
14353 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14354 {
14355 const char *hwcap = TREE_STRING_POINTER (arg);
14356 rtx mask = NULL_RTX;
14357 int hwcap_offset;
14358 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14359 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14360 {
14361 mask = GEN_INT (cpu_supports_info[i].mask);
14362 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14363 break;
14364 }
14365 if (mask == NULL_RTX)
14366 {
14367 /* Invalid HWCAP argument. */
14368 error ("%s %qs is an invalid argument to builtin %qs",
14369 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14370 return const0_rtx;
14371 }
14372
14373 rtx tcb_hwcap = gen_reg_rtx (SImode);
14374 rtx tcbmem = gen_const_mem (SImode,
14375 gen_rtx_PLUS (Pmode,
14376 gen_rtx_REG (Pmode, TLS_REGNUM),
14377 GEN_INT (hwcap_offset)));
14378 emit_move_insn (tcb_hwcap, tcbmem);
14379 rtx scratch1 = gen_reg_rtx (SImode);
14380 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14381 rtx scratch2 = gen_reg_rtx (SImode);
14382 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14383 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14384 }
14385 else
14386 gcc_unreachable ();
14387
14388 /* Record that we have expanded a CPU builtin, so that we can later
14389 emit a reference to the special symbol exported by LIBC to ensure we
14390 do not link against an old LIBC that doesn't support this feature. */
14391 cpu_builtin_p = true;
14392
14393 #else
14394 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14395 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14396
14397 /* For old LIBCs, always return FALSE. */
14398 emit_move_insn (target, GEN_INT (0));
14399 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14400
14401 return target;
14402 }
14403
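/* For illustration (hypothetical user code; "power9" and "vsx" are
existing entries in cpu_is_info[] / cpu_supports_info[]):

    __builtin_cpu_init ();                    expands to nothing
    if (__builtin_cpu_is ("power9")) ...      TCB platform word compare
    if (__builtin_cpu_supports ("vsx")) ...   TCB hwcap mask test

Any string not found in those tables is diagnosed at compile time by the
code above.  */
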
14404 static rtx
14405 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14406 {
14407 rtx pat;
14408 tree arg0 = CALL_EXPR_ARG (exp, 0);
14409 tree arg1 = CALL_EXPR_ARG (exp, 1);
14410 tree arg2 = CALL_EXPR_ARG (exp, 2);
14411 rtx op0 = expand_normal (arg0);
14412 rtx op1 = expand_normal (arg1);
14413 rtx op2 = expand_normal (arg2);
14414 machine_mode tmode = insn_data[icode].operand[0].mode;
14415 machine_mode mode0 = insn_data[icode].operand[1].mode;
14416 machine_mode mode1 = insn_data[icode].operand[2].mode;
14417 machine_mode mode2 = insn_data[icode].operand[3].mode;
14418
14419 if (icode == CODE_FOR_nothing)
14420 /* Builtin not supported on this processor. */
14421 return 0;
14422
14423 /* If we got invalid arguments, bail out before generating bad rtl. */
14424 if (arg0 == error_mark_node
14425 || arg1 == error_mark_node
14426 || arg2 == error_mark_node)
14427 return const0_rtx;
14428
14429 /* Check and prepare the arguments, depending on the instruction code.
14430
14431 Note that a switch statement instead of this sequence of tests
14432 would be incorrect, as many of the CODE_FOR values could be
14433 CODE_FOR_nothing, and that would yield multiple case labels with
14434 identical values. We would never reach here at runtime in that
14435 case anyway. */
14436 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14437 || icode == CODE_FOR_altivec_vsldoi_v2df
14438 || icode == CODE_FOR_altivec_vsldoi_v4si
14439 || icode == CODE_FOR_altivec_vsldoi_v8hi
14440 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14441 {
14442 /* Only allow 4-bit unsigned literals. */
14443 STRIP_NOPS (arg2);
14444 if (TREE_CODE (arg2) != INTEGER_CST
14445 || TREE_INT_CST_LOW (arg2) & ~0xf)
14446 {
14447 error ("argument 3 must be a 4-bit unsigned literal");
14448 return CONST0_RTX (tmode);
14449 }
14450 }
14451 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14452 || icode == CODE_FOR_vsx_xxpermdi_v2di
14453 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14454 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14455 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14456 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14457 || icode == CODE_FOR_vsx_xxpermdi_v4si
14458 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14459 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14460 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14461 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14462 || icode == CODE_FOR_vsx_xxsldwi_v4si
14463 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14464 || icode == CODE_FOR_vsx_xxsldwi_v2di
14465 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14466 {
14467 /* Only allow 2-bit unsigned literals. */
14468 STRIP_NOPS (arg2);
14469 if (TREE_CODE (arg2) != INTEGER_CST
14470 || TREE_INT_CST_LOW (arg2) & ~0x3)
14471 {
14472 error ("argument 3 must be a 2-bit unsigned literal");
14473 return CONST0_RTX (tmode);
14474 }
14475 }
14476 else if (icode == CODE_FOR_vsx_set_v2df
14477 || icode == CODE_FOR_vsx_set_v2di
14478 || icode == CODE_FOR_bcdadd
14479 || icode == CODE_FOR_bcdadd_lt
14480 || icode == CODE_FOR_bcdadd_eq
14481 || icode == CODE_FOR_bcdadd_gt
14482 || icode == CODE_FOR_bcdsub
14483 || icode == CODE_FOR_bcdsub_lt
14484 || icode == CODE_FOR_bcdsub_eq
14485 || icode == CODE_FOR_bcdsub_gt)
14486 {
14487 /* Only allow 1-bit unsigned literals. */
14488 STRIP_NOPS (arg2);
14489 if (TREE_CODE (arg2) != INTEGER_CST
14490 || TREE_INT_CST_LOW (arg2) & ~0x1)
14491 {
14492 error ("argument 3 must be a 1-bit unsigned literal");
14493 return CONST0_RTX (tmode);
14494 }
14495 }
14496 else if (icode == CODE_FOR_dfp_ddedpd_dd
14497 || icode == CODE_FOR_dfp_ddedpd_td)
14498 {
14499 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14500 STRIP_NOPS (arg0);
14501 if (TREE_CODE (arg0) != INTEGER_CST
14502 || TREE_INT_CST_LOW (arg0) & ~0x2)
14503 {
14504 error ("argument 1 must be 0 or 2");
14505 return CONST0_RTX (tmode);
14506 }
14507 }
14508 else if (icode == CODE_FOR_dfp_denbcd_dd
14509 || icode == CODE_FOR_dfp_denbcd_td)
14510 {
14511 /* Only allow 1-bit unsigned literals. */
14512 STRIP_NOPS (arg0);
14513 if (TREE_CODE (arg0) != INTEGER_CST
14514 || TREE_INT_CST_LOW (arg0) & ~0x1)
14515 {
14516 error ("argument 1 must be a 1-bit unsigned literal");
14517 return CONST0_RTX (tmode);
14518 }
14519 }
14520 else if (icode == CODE_FOR_dfp_dscli_dd
14521 || icode == CODE_FOR_dfp_dscli_td
14522 || icode == CODE_FOR_dfp_dscri_dd
14523 || icode == CODE_FOR_dfp_dscri_td)
14524 {
14525 /* Only allow 6-bit unsigned literals. */
14526 STRIP_NOPS (arg1);
14527 if (TREE_CODE (arg1) != INTEGER_CST
14528 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14529 {
14530 error ("argument 2 must be a 6-bit unsigned literal");
14531 return CONST0_RTX (tmode);
14532 }
14533 }
14534 else if (icode == CODE_FOR_crypto_vshasigmaw
14535 || icode == CODE_FOR_crypto_vshasigmad)
14536 {
14537 /* Check whether the 2nd and 3rd arguments are integer constants in
14538 range, and prepare the arguments. */
14539 STRIP_NOPS (arg1);
14540 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14541 {
14542 error ("argument 2 must be 0 or 1");
14543 return CONST0_RTX (tmode);
14544 }
14545
14546 STRIP_NOPS (arg2);
14547 if (TREE_CODE (arg2) != INTEGER_CST
14548 || wi::geu_p (wi::to_wide (arg2), 16))
14549 {
14550 error ("argument 3 must be in the range 0..15");
14551 return CONST0_RTX (tmode);
14552 }
14553 }
14554
14555 if (target == 0
14556 || GET_MODE (target) != tmode
14557 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14558 target = gen_reg_rtx (tmode);
14559
14560 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14561 op0 = copy_to_mode_reg (mode0, op0);
14562 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14563 op1 = copy_to_mode_reg (mode1, op1);
14564 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14565 op2 = copy_to_mode_reg (mode2, op2);
14566
14567 pat = GEN_FCN (icode) (target, op0, op1, op2);
14568 if (! pat)
14569 return 0;
14570 emit_insn (pat);
14571
14572 return target;
14573 }
14574
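/* For illustration, a hedged sketch of the 4-bit vsldoi check above at
the source level (a, b and n are hypothetical):

    vector int r = vec_sld (a, b, 3);   accepted: literal in 0..15
    vector int s = vec_sld (a, b, n);   rejected: "argument 3 must be
                                        a 4-bit unsigned literal"  */
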
14575
14576 /* Expand the dst builtins. */
14577 static rtx
14578 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14579 bool *expandedp)
14580 {
14581 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14582 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14583 tree arg0, arg1, arg2;
14584 machine_mode mode0, mode1;
14585 rtx pat, op0, op1, op2;
14586 const struct builtin_description *d;
14587 size_t i;
14588
14589 *expandedp = false;
14590
14591 /* Handle DST variants. */
14592 d = bdesc_dst;
14593 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14594 if (d->code == fcode)
14595 {
14596 arg0 = CALL_EXPR_ARG (exp, 0);
14597 arg1 = CALL_EXPR_ARG (exp, 1);
14598 arg2 = CALL_EXPR_ARG (exp, 2);
14599 op0 = expand_normal (arg0);
14600 op1 = expand_normal (arg1);
14601 op2 = expand_normal (arg2);
14602 mode0 = insn_data[d->icode].operand[0].mode;
14603 mode1 = insn_data[d->icode].operand[1].mode;
14604
14605 /* Invalid arguments, bail out before generating bad rtl. */
14606 if (arg0 == error_mark_node
14607 || arg1 == error_mark_node
14608 || arg2 == error_mark_node)
14609 return const0_rtx;
14610
14611 *expandedp = true;
14612 STRIP_NOPS (arg2);
14613 if (TREE_CODE (arg2) != INTEGER_CST
14614 || TREE_INT_CST_LOW (arg2) & ~0x3)
14615 {
14616 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14617 return const0_rtx;
14618 }
14619
14620 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14621 op0 = copy_to_mode_reg (Pmode, op0);
14622 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14623 op1 = copy_to_mode_reg (mode1, op1);
14624
14625 pat = GEN_FCN (d->icode) (op0, op1, op2);
14626 if (pat != 0)
14627 emit_insn (pat);
14628
14629 return NULL_RTX;
14630 }
14631
14632 return NULL_RTX;
14633 }
14634
14635 /* Expand vec_init builtin. */
14636 static rtx
14637 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14638 {
14639 machine_mode tmode = TYPE_MODE (type);
14640 machine_mode inner_mode = GET_MODE_INNER (tmode);
14641 int i, n_elt = GET_MODE_NUNITS (tmode);
14642
14643 gcc_assert (VECTOR_MODE_P (tmode));
14644 gcc_assert (n_elt == call_expr_nargs (exp));
14645
14646 if (!target || !register_operand (target, tmode))
14647 target = gen_reg_rtx (tmode);
14648
14649 /* If we have a vector composed of a single element, such as V1TImode, do
14650 the initialization directly. */
14651 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14652 {
14653 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14654 emit_move_insn (target, gen_lowpart (tmode, x));
14655 }
14656 else
14657 {
14658 rtvec v = rtvec_alloc (n_elt);
14659
14660 for (i = 0; i < n_elt; ++i)
14661 {
14662 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14663 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14664 }
14665
14666 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14667 }
14668
14669 return target;
14670 }
14671
14672 /* Return the integer constant in ARG. Constrain it to be in the range
14673 of the subparts of VEC_TYPE; issue an error if not. */
14674
14675 static int
14676 get_element_number (tree vec_type, tree arg)
14677 {
14678 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14679
14680 if (!tree_fits_uhwi_p (arg)
14681 || (elt = tree_to_uhwi (arg), elt > max))
14682 {
14683 error ("selector must be an integer constant in the range 0..%wi", max);
14684 return 0;
14685 }
14686
14687 return elt;
14688 }
14689
14690 /* Expand vec_set builtin. */
14691 static rtx
14692 altivec_expand_vec_set_builtin (tree exp)
14693 {
14694 machine_mode tmode, mode1;
14695 tree arg0, arg1, arg2;
14696 int elt;
14697 rtx op0, op1;
14698
14699 arg0 = CALL_EXPR_ARG (exp, 0);
14700 arg1 = CALL_EXPR_ARG (exp, 1);
14701 arg2 = CALL_EXPR_ARG (exp, 2);
14702
14703 tmode = TYPE_MODE (TREE_TYPE (arg0));
14704 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14705 gcc_assert (VECTOR_MODE_P (tmode));
14706
14707 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14708 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14709 elt = get_element_number (TREE_TYPE (arg0), arg2);
14710
14711 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14712 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14713
14714 op0 = force_reg (tmode, op0);
14715 op1 = force_reg (mode1, op1);
14716
14717 rs6000_expand_vector_set (op0, op1, elt);
14718
14719 return op0;
14720 }
14721
14722 /* Expand vec_ext builtin. */
14723 static rtx
14724 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14725 {
14726 machine_mode tmode, mode0;
14727 tree arg0, arg1;
14728 rtx op0;
14729 rtx op1;
14730
14731 arg0 = CALL_EXPR_ARG (exp, 0);
14732 arg1 = CALL_EXPR_ARG (exp, 1);
14733
14734 op0 = expand_normal (arg0);
14735 op1 = expand_normal (arg1);
14736
14737 /* Call get_element_number to validate arg1 if it is a constant. */
14738 if (TREE_CODE (arg1) == INTEGER_CST)
14739 (void) get_element_number (TREE_TYPE (arg0), arg1);
14740
14741 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14742 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14743 gcc_assert (VECTOR_MODE_P (mode0));
14744
14745 op0 = force_reg (mode0, op0);
14746
14747 if (optimize || !target || !register_operand (target, tmode))
14748 target = gen_reg_rtx (tmode);
14749
14750 rs6000_expand_vector_extract (target, op0, op1);
14751
14752 return target;
14753 }
14754
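/* For illustration (v is a hypothetical 4-element vector and i a
hypothetical int): vec_extract reaches this expander; a constant
selector is range-checked by get_element_number, while a variable
selector is left for rs6000_expand_vector_extract to handle:

    int x = vec_extract (v, 2);   constant, validated against 0..3
    int y = vec_extract (v, i);   variable selector, no static check  */
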
14755 /* Expand the builtin in EXP and store the result in TARGET. Store
14756 true in *EXPANDEDP if we found a builtin to expand. */
14757 static rtx
14758 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14759 {
14760 const struct builtin_description *d;
14761 size_t i;
14762 enum insn_code icode;
14763 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14764 tree arg0, arg1, arg2;
14765 rtx op0, pat;
14766 machine_mode tmode, mode0;
14767 enum rs6000_builtins fcode
14768 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14769
14770 if (rs6000_overloaded_builtin_p (fcode))
14771 {
14772 *expandedp = true;
14773 error ("unresolved overload for Altivec builtin %qF", fndecl);
14774
14775 /* Given it is invalid, just generate a normal call. */
14776 return expand_call (exp, target, false);
14777 }
14778
14779 target = altivec_expand_dst_builtin (exp, target, expandedp);
14780 if (*expandedp)
14781 return target;
14782
14783 *expandedp = true;
14784
14785 switch (fcode)
14786 {
14787 case ALTIVEC_BUILTIN_STVX_V2DF:
14788 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14789 case ALTIVEC_BUILTIN_STVX_V2DI:
14790 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14791 case ALTIVEC_BUILTIN_STVX_V4SF:
14792 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14793 case ALTIVEC_BUILTIN_STVX:
14794 case ALTIVEC_BUILTIN_STVX_V4SI:
14795 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14796 case ALTIVEC_BUILTIN_STVX_V8HI:
14797 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14798 case ALTIVEC_BUILTIN_STVX_V16QI:
14799 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14800 case ALTIVEC_BUILTIN_STVEBX:
14801 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14802 case ALTIVEC_BUILTIN_STVEHX:
14803 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14804 case ALTIVEC_BUILTIN_STVEWX:
14805 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14806 case ALTIVEC_BUILTIN_STVXL_V2DF:
14807 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14808 case ALTIVEC_BUILTIN_STVXL_V2DI:
14809 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14810 case ALTIVEC_BUILTIN_STVXL_V4SF:
14811 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14812 case ALTIVEC_BUILTIN_STVXL:
14813 case ALTIVEC_BUILTIN_STVXL_V4SI:
14814 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14815 case ALTIVEC_BUILTIN_STVXL_V8HI:
14816 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14817 case ALTIVEC_BUILTIN_STVXL_V16QI:
14818 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14819
14820 case ALTIVEC_BUILTIN_STVLX:
14821 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14822 case ALTIVEC_BUILTIN_STVLXL:
14823 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14824 case ALTIVEC_BUILTIN_STVRX:
14825 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14826 case ALTIVEC_BUILTIN_STVRXL:
14827 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14828
14829 case P9V_BUILTIN_STXVL:
14830 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14831
14832 case P9V_BUILTIN_XST_LEN_R:
14833 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14834
14835 case VSX_BUILTIN_STXVD2X_V1TI:
14836 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14837 case VSX_BUILTIN_STXVD2X_V2DF:
14838 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14839 case VSX_BUILTIN_STXVD2X_V2DI:
14840 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14841 case VSX_BUILTIN_STXVW4X_V4SF:
14842 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14843 case VSX_BUILTIN_STXVW4X_V4SI:
14844 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14845 case VSX_BUILTIN_STXVW4X_V8HI:
14846 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14847 case VSX_BUILTIN_STXVW4X_V16QI:
14848 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14849
14850 /* For the following on big-endian targets, it's OK to use any appropriate
14851 unaligned-supporting store, so use a generic expander. For
14852 little-endian, the exact element-reversing instruction must
14853 be used. */
14854 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14855 {
14856 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14857 : CODE_FOR_vsx_st_elemrev_v1ti);
14858 return altivec_expand_stv_builtin (code, exp);
14859 }
14860 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14861 {
14862 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14863 : CODE_FOR_vsx_st_elemrev_v2df);
14864 return altivec_expand_stv_builtin (code, exp);
14865 }
14866 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14867 {
14868 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14869 : CODE_FOR_vsx_st_elemrev_v2di);
14870 return altivec_expand_stv_builtin (code, exp);
14871 }
14872 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14873 {
14874 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14875 : CODE_FOR_vsx_st_elemrev_v4sf);
14876 return altivec_expand_stv_builtin (code, exp);
14877 }
14878 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14879 {
14880 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14881 : CODE_FOR_vsx_st_elemrev_v4si);
14882 return altivec_expand_stv_builtin (code, exp);
14883 }
14884 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14885 {
14886 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14887 : CODE_FOR_vsx_st_elemrev_v8hi);
14888 return altivec_expand_stv_builtin (code, exp);
14889 }
14890 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14891 {
14892 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14893 : CODE_FOR_vsx_st_elemrev_v16qi);
14894 return altivec_expand_stv_builtin (code, exp);
14895 }
14896
14897 case ALTIVEC_BUILTIN_MFVSCR:
14898 icode = CODE_FOR_altivec_mfvscr;
14899 tmode = insn_data[icode].operand[0].mode;
14900
14901 if (target == 0
14902 || GET_MODE (target) != tmode
14903 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14904 target = gen_reg_rtx (tmode);
14905
14906 pat = GEN_FCN (icode) (target);
14907 if (! pat)
14908 return 0;
14909 emit_insn (pat);
14910 return target;
14911
14912 case ALTIVEC_BUILTIN_MTVSCR:
14913 icode = CODE_FOR_altivec_mtvscr;
14914 arg0 = CALL_EXPR_ARG (exp, 0);
14915 op0 = expand_normal (arg0);
14916 mode0 = insn_data[icode].operand[0].mode;
14917
14918 /* If we got invalid arguments, bail out before generating bad rtl. */
14919 if (arg0 == error_mark_node)
14920 return const0_rtx;
14921
14922 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14923 op0 = copy_to_mode_reg (mode0, op0);
14924
14925 pat = GEN_FCN (icode) (op0);
14926 if (pat)
14927 emit_insn (pat);
14928 return NULL_RTX;
14929
14930 case ALTIVEC_BUILTIN_DSSALL:
14931 emit_insn (gen_altivec_dssall ());
14932 return NULL_RTX;
14933
14934 case ALTIVEC_BUILTIN_DSS:
14935 icode = CODE_FOR_altivec_dss;
14936 arg0 = CALL_EXPR_ARG (exp, 0);
14937 STRIP_NOPS (arg0);
14938 op0 = expand_normal (arg0);
14939 mode0 = insn_data[icode].operand[0].mode;
14940
14941 /* If we got invalid arguments, bail out before generating bad rtl. */
14942 if (arg0 == error_mark_node)
14943 return const0_rtx;
14944
14945 if (TREE_CODE (arg0) != INTEGER_CST
14946 || TREE_INT_CST_LOW (arg0) & ~0x3)
14947 {
14948 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14949 return const0_rtx;
14950 }
14951
14952 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14953 op0 = copy_to_mode_reg (mode0, op0);
14954
14955 emit_insn (gen_altivec_dss (op0));
14956 return NULL_RTX;
14957
14958 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14959 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14960 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14961 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14962 case VSX_BUILTIN_VEC_INIT_V2DF:
14963 case VSX_BUILTIN_VEC_INIT_V2DI:
14964 case VSX_BUILTIN_VEC_INIT_V1TI:
14965 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14966
14967 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14968 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14969 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14970 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14971 case VSX_BUILTIN_VEC_SET_V2DF:
14972 case VSX_BUILTIN_VEC_SET_V2DI:
14973 case VSX_BUILTIN_VEC_SET_V1TI:
14974 return altivec_expand_vec_set_builtin (exp);
14975
14976 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14977 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14978 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14979 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14980 case VSX_BUILTIN_VEC_EXT_V2DF:
14981 case VSX_BUILTIN_VEC_EXT_V2DI:
14982 case VSX_BUILTIN_VEC_EXT_V1TI:
14983 return altivec_expand_vec_ext_builtin (exp, target);
14984
14985 case P9V_BUILTIN_VEC_EXTRACT4B:
14986 arg1 = CALL_EXPR_ARG (exp, 1);
14987 STRIP_NOPS (arg1);
14988
14989 /* Generate a normal call if it is invalid. */
14990 if (arg1 == error_mark_node)
14991 return expand_call (exp, target, false);
14992
14993 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14994 {
14995 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14996 return expand_call (exp, target, false);
14997 }
14998 break;
14999
15000 case P9V_BUILTIN_VEC_INSERT4B:
15001 arg2 = CALL_EXPR_ARG (exp, 2);
15002 STRIP_NOPS (arg2);
15003
15004 /* Generate a normal call if it is invalid. */
15005 if (arg2 == error_mark_node)
15006 return expand_call (exp, target, false);
15007
15008 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15009 {
15010 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15011 return expand_call (exp, target, false);
15012 }
15013 break;
15014
15015 default:
15016 break;
15017 /* Fall through to the table-driven expanders below. */
15018 }
15019
15020 /* Expand abs* operations. */
15021 d = bdesc_abs;
15022 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15023 if (d->code == fcode)
15024 return altivec_expand_abs_builtin (d->icode, exp, target);
15025
15026 /* Expand the AltiVec predicates. */
15027 d = bdesc_altivec_preds;
15028 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15029 if (d->code == fcode)
15030 return altivec_expand_predicate_builtin (d->icode, exp, target);
15031
15032 /* The LV* builtins were initialized differently from the others, so expand them separately here. */
15033 switch (fcode)
15034 {
15035 case ALTIVEC_BUILTIN_LVSL:
15036 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15037 exp, target, false);
15038 case ALTIVEC_BUILTIN_LVSR:
15039 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15040 exp, target, false);
15041 case ALTIVEC_BUILTIN_LVEBX:
15042 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15043 exp, target, false);
15044 case ALTIVEC_BUILTIN_LVEHX:
15045 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15046 exp, target, false);
15047 case ALTIVEC_BUILTIN_LVEWX:
15048 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15049 exp, target, false);
15050 case ALTIVEC_BUILTIN_LVXL_V2DF:
15051 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15052 exp, target, false);
15053 case ALTIVEC_BUILTIN_LVXL_V2DI:
15054 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15055 exp, target, false);
15056 case ALTIVEC_BUILTIN_LVXL_V4SF:
15057 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15058 exp, target, false);
15059 case ALTIVEC_BUILTIN_LVXL:
15060 case ALTIVEC_BUILTIN_LVXL_V4SI:
15061 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15062 exp, target, false);
15063 case ALTIVEC_BUILTIN_LVXL_V8HI:
15064 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15065 exp, target, false);
15066 case ALTIVEC_BUILTIN_LVXL_V16QI:
15067 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15068 exp, target, false);
15069 case ALTIVEC_BUILTIN_LVX_V1TI:
15070 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15071 exp, target, false);
15072 case ALTIVEC_BUILTIN_LVX_V2DF:
15073 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15074 exp, target, false);
15075 case ALTIVEC_BUILTIN_LVX_V2DI:
15076 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15077 exp, target, false);
15078 case ALTIVEC_BUILTIN_LVX_V4SF:
15079 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15080 exp, target, false);
15081 case ALTIVEC_BUILTIN_LVX:
15082 case ALTIVEC_BUILTIN_LVX_V4SI:
15083 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15084 exp, target, false);
15085 case ALTIVEC_BUILTIN_LVX_V8HI:
15086 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15087 exp, target, false);
15088 case ALTIVEC_BUILTIN_LVX_V16QI:
15089 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15090 exp, target, false);
15091 case ALTIVEC_BUILTIN_LVLX:
15092 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15093 exp, target, true);
15094 case ALTIVEC_BUILTIN_LVLXL:
15095 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15096 exp, target, true);
15097 case ALTIVEC_BUILTIN_LVRX:
15098 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15099 exp, target, true);
15100 case ALTIVEC_BUILTIN_LVRXL:
15101 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15102 exp, target, true);
15103 case VSX_BUILTIN_LXVD2X_V1TI:
15104 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15105 exp, target, false);
15106 case VSX_BUILTIN_LXVD2X_V2DF:
15107 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15108 exp, target, false);
15109 case VSX_BUILTIN_LXVD2X_V2DI:
15110 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15111 exp, target, false);
15112 case VSX_BUILTIN_LXVW4X_V4SF:
15113 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15114 exp, target, false);
15115 case VSX_BUILTIN_LXVW4X_V4SI:
15116 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15117 exp, target, false);
15118 case VSX_BUILTIN_LXVW4X_V8HI:
15119 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15120 exp, target, false);
15121 case VSX_BUILTIN_LXVW4X_V16QI:
15122 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15123 exp, target, false);
15124 /* For the following on big-endian targets, it's OK to use any appropriate
15125 unaligned-supporting load, so use a generic expander. For
15126 little-endian, the exact element-reversing instruction must
15127 be used. */
15128 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15129 {
15130 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15131 : CODE_FOR_vsx_ld_elemrev_v2df);
15132 return altivec_expand_lv_builtin (code, exp, target, false);
15133 }
15134 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15135 {
15136 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15137 : CODE_FOR_vsx_ld_elemrev_v1ti);
15138 return altivec_expand_lv_builtin (code, exp, target, false);
15139 }
15140 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15141 {
15142 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15143 : CODE_FOR_vsx_ld_elemrev_v2di);
15144 return altivec_expand_lv_builtin (code, exp, target, false);
15145 }
15146 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15147 {
15148 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15149 : CODE_FOR_vsx_ld_elemrev_v4sf);
15150 return altivec_expand_lv_builtin (code, exp, target, false);
15151 }
15152 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15153 {
15154 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15155 : CODE_FOR_vsx_ld_elemrev_v4si);
15156 return altivec_expand_lv_builtin (code, exp, target, false);
15157 }
15158 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15159 {
15160 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15161 : CODE_FOR_vsx_ld_elemrev_v8hi);
15162 return altivec_expand_lv_builtin (code, exp, target, false);
15163 }
15164 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15165 {
15166 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15167 : CODE_FOR_vsx_ld_elemrev_v16qi);
15168 return altivec_expand_lv_builtin (code, exp, target, false);
15169 }
15170 break;
15171 default:
15172 break;
15173 /* Fall through and report that no builtin was expanded. */
15174 }
15175
15176 *expandedp = false;
15177 return NULL_RTX;
15178 }
15179
15180 /* Check whether a builtin function is supported in this target
15181 configuration. */
15182 bool
15183 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15184 {
15185 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
15186 if ((fnmask & rs6000_builtin_mask) != fnmask)
15187 return false;
15188 else
15189 return true;
15190 }
15191
15192 /* Raise an error message for a builtin function that is called without the
15193 appropriate target options being set. */
15194
15195 static void
15196 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15197 {
15198 size_t uns_fncode = (size_t) fncode;
15199 const char *name = rs6000_builtin_info[uns_fncode].name;
15200 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15201
15202 gcc_assert (name != NULL);
15203 if ((fnmask & RS6000_BTM_CELL) != 0)
15204 error ("builtin function %qs is only valid for the cell processor", name);
15205 else if ((fnmask & RS6000_BTM_VSX) != 0)
15206 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15207 else if ((fnmask & RS6000_BTM_HTM) != 0)
15208 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15209 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15210 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15211 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15212 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15213 error ("builtin function %qs requires the %qs and %qs options",
15214 name, "-mhard-dfp", "-mpower8-vector");
15215 else if ((fnmask & RS6000_BTM_DFP) != 0)
15216 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15217 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15218 error ("builtin function %qs requires the %qs option", name,
15219 "-mpower8-vector");
15220 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15221 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15222 error ("builtin function %qs requires the %qs and %qs options",
15223 name, "-mcpu=power9", "-m64");
15224 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15225 error ("builtin function %qs requires the %qs option", name,
15226 "-mcpu=power9");
15227 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15228 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15229 error ("builtin function %qs requires the %qs and %qs options",
15230 name, "-mcpu=power9", "-m64");
15231 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15232 error ("builtin function %qs requires the %qs option", name,
15233 "-mcpu=power9");
15234 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15235 {
15236 if (!TARGET_HARD_FLOAT)
15237 error ("builtin function %qs requires the %qs option", name,
15238 "-mhard-float");
15239 else
15240 error ("builtin function %qs requires the %qs option", name,
15241 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15242 }
15243 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15244 error ("builtin function %qs requires the %qs option", name,
15245 "-mhard-float");
15246 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15247 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15248 name);
15249 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15250 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
15251 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15252 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15253 error ("builtin function %qs requires the %qs (or newer), and "
15254 "%qs or %qs options",
15255 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15256 else
15257 error ("builtin function %qs is not supported with the current options",
15258 name);
15259 }
15260
15261 /* Target hook for early folding of built-ins, shamelessly stolen
15262 from ia64.c. */
15263
15264 static tree
15265 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15266 int n_args ATTRIBUTE_UNUSED,
15267 tree *args ATTRIBUTE_UNUSED,
15268 bool ignore ATTRIBUTE_UNUSED)
15269 {
15270 #ifdef SUBTARGET_FOLD_BUILTIN
15271 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15272 #else
15273 return NULL_TREE;
15274 #endif
15275 }
15276
15277 /* Helper function to sort out which built-ins may be valid without having
15278 a LHS. */
15279 static bool
15280 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15281 {
15282 switch (fn_code)
15283 {
15284 case ALTIVEC_BUILTIN_STVX_V16QI:
15285 case ALTIVEC_BUILTIN_STVX_V8HI:
15286 case ALTIVEC_BUILTIN_STVX_V4SI:
15287 case ALTIVEC_BUILTIN_STVX_V4SF:
15288 case ALTIVEC_BUILTIN_STVX_V2DI:
15289 case ALTIVEC_BUILTIN_STVX_V2DF:
15290 case VSX_BUILTIN_STXVW4X_V16QI:
15291 case VSX_BUILTIN_STXVW4X_V8HI:
15292 case VSX_BUILTIN_STXVW4X_V4SF:
15293 case VSX_BUILTIN_STXVW4X_V4SI:
15294 case VSX_BUILTIN_STXVD2X_V2DF:
15295 case VSX_BUILTIN_STXVD2X_V2DI:
15296 return true;
15297 default:
15298 return false;
15299 }
15300 }
15301
15302 /* Helper function to handle the gimple folding of a vector compare
15303 operation. This sets up true/false vectors, and uses the
15304 VEC_COND_EXPR operation.
15305 CODE indicates which comparison is to be made. (EQ, GT, ...).
15306 TYPE indicates the type of the result. */
15307 static tree
15308 fold_build_vec_cmp (tree_code code, tree type,
15309 tree arg0, tree arg1)
15310 {
15311 tree cmp_type = build_same_sized_truth_vector_type (type);
15312 tree zero_vec = build_zero_cst (type);
15313 tree minus_one_vec = build_minus_one_cst (type);
15314 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15315 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15316 }
15317
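/* For example, for a V4SI comparison the trees built above amount to,
in GIMPLE terms,

    cmp = arg0 == arg1;                          boolean vector
    lhs = cmp ? { -1,-1,-1,-1 } : { 0,0,0,0 };

matching the all-ones/all-zeros per-element results that the AltiVec
compare instructions produce.  */
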
15318 /* Helper function to handle the in-between steps for the
15319 vector compare built-ins. */
15320 static void
15321 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15322 {
15323 tree arg0 = gimple_call_arg (stmt, 0);
15324 tree arg1 = gimple_call_arg (stmt, 1);
15325 tree lhs = gimple_call_lhs (stmt);
15326 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15327 gimple *g = gimple_build_assign (lhs, cmp);
15328 gimple_set_location (g, gimple_location (stmt));
15329 gsi_replace (gsi, g, true);
15330 }
15331
15332 /* Helper function to map V2DF and V4SF types to their
15333 integral equivalents (V2DI and V4SI). */
15334 tree map_to_integral_tree_type (tree input_tree_type)
15335 {
15336 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15337 return input_tree_type;
15338 else
15339 {
15340 if (types_compatible_p (TREE_TYPE (input_tree_type),
15341 TREE_TYPE (V2DF_type_node)))
15342 return V2DI_type_node;
15343 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15344 TREE_TYPE (V4SF_type_node)))
15345 return V4SI_type_node;
15346 else
15347 gcc_unreachable ();
15348 }
15349 }
15350
15351 /* Helper function to handle the vector merge[hl] built-ins. The
15352 implementation difference between the h and l versions is in the values
15353 used when building the permute vector for a high-word versus a
15354 low-word merge. The variance is keyed off the use_high parameter. */
15355 static void
15356 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15357 {
15358 tree arg0 = gimple_call_arg (stmt, 0);
15359 tree arg1 = gimple_call_arg (stmt, 1);
15360 tree lhs = gimple_call_lhs (stmt);
15361 tree lhs_type = TREE_TYPE (lhs);
15362 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15363 int midpoint = n_elts / 2;
15364 int offset = 0;
15365
15366 if (use_high == 1)
15367 offset = midpoint;
15368
15369 /* The permute_type will match the lhs for integral types. For double and
15370 float types, the permute type needs to map to the V2 or V4 type that
15371 matches size. */
15372 tree permute_type;
15373 permute_type = map_to_integral_tree_type (lhs_type);
15374 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15375
15376 for (int i = 0; i < midpoint; i++)
15377 {
15378 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15379 offset + i));
15380 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15381 offset + n_elts + i));
15382 }
15383
15384 tree permute = elts.build ();
15385
15386 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15387 gimple_set_location (g, gimple_location (stmt));
15388 gsi_replace (gsi, g, true);
15389 }
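/* Illustrative example: for a V4SI merge, n_elts is 4 and midpoint is 2,
   so the selector built above is {0, 4, 1, 5} when use_high == 0 and
   {2, 6, 3, 7} when use_high == 1, interleaving one half of each input
   vector via the VEC_PERM_EXPR.  */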
15390
15391 /* Helper function to handle the vector merge[eo] built-ins. */
15392 static void
15393 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15394 {
15395 tree arg0 = gimple_call_arg (stmt, 0);
15396 tree arg1 = gimple_call_arg (stmt, 1);
15397 tree lhs = gimple_call_lhs (stmt);
15398 tree lhs_type = TREE_TYPE (lhs);
15399 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15400
15401 /* The permute_type will match the lhs for integral types. For double and
15402 float types, the permute type needs to map to the V2 or V4 type of
15403 the same size. */
15404 tree permute_type;
15405 permute_type = map_to_integral_tree_type (lhs_type);
15406
15407 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15408
15409 /* Build the permute vector. */
15410 for (int i = 0; i < n_elts / 2; i++)
15411 {
15412 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15413 2*i + use_odd));
15414 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15415 2*i + use_odd + n_elts));
15416 }
15417
15418 tree permute = elts.build ();
15419
15420 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15421 gimple_set_location (g, gimple_location (stmt));
15422 gsi_replace (gsi, g, true);
15423 }
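/* Illustrative example: for a V4SI merge, the selector built above is
   {0, 4, 2, 6} when use_odd == 0 (the even elements of each input) and
   {1, 5, 3, 7} when use_odd == 1 (the odd elements of each input).  */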
15424
15425 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15426 a constant, use rs6000_fold_builtin.) */
15427
15428 bool
15429 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15430 {
15431 gimple *stmt = gsi_stmt (*gsi);
15432 tree fndecl = gimple_call_fndecl (stmt);
15433 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15434 enum rs6000_builtins fn_code
15435 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15436 tree arg0, arg1, lhs, temp;
15437 enum tree_code bcode;
15438 gimple *g;
15439
15440 size_t uns_fncode = (size_t) fn_code;
15441 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15442 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15443 const char *fn_name2 = (icode != CODE_FOR_nothing)
15444 ? get_insn_name ((int) icode)
15445 : "nothing";
15446
15447 if (TARGET_DEBUG_BUILTIN)
15448 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15449 fn_code, fn_name1, fn_name2);
15450
15451 if (!rs6000_fold_gimple)
15452 return false;
15453
15454 /* Prevent gimple folding for code that does not have an LHS, unless it is
15455 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15456 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15457 return false;
15458
15459 /* Don't fold invalid builtins; let rs6000_expand_builtin diagnose them. */
15460 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15461 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15462 if (!func_valid_p)
15463 return false;
15464
15465 switch (fn_code)
15466 {
15467 /* Flavors of vec_add. We deliberately don't expand
15468 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15469 TImode, resulting in much poorer code generation. */
15470 case ALTIVEC_BUILTIN_VADDUBM:
15471 case ALTIVEC_BUILTIN_VADDUHM:
15472 case ALTIVEC_BUILTIN_VADDUWM:
15473 case P8V_BUILTIN_VADDUDM:
15474 case ALTIVEC_BUILTIN_VADDFP:
15475 case VSX_BUILTIN_XVADDDP:
15476 bcode = PLUS_EXPR;
15477 do_binary:
15478 arg0 = gimple_call_arg (stmt, 0);
15479 arg1 = gimple_call_arg (stmt, 1);
15480 lhs = gimple_call_lhs (stmt);
15481 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15482 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15483 {
15484 /* Ensure the binary operation is performed in a type
15485 that wraps if it is an integral type. */
15486 gimple_seq stmts = NULL;
15487 tree type = unsigned_type_for (TREE_TYPE (lhs));
15488 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15489 type, arg0);
15490 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15491 type, arg1);
15492 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15493 type, uarg0, uarg1);
15494 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15495 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15496 build1 (VIEW_CONVERT_EXPR,
15497 TREE_TYPE (lhs), res));
15498 gsi_replace (gsi, g, true);
15499 return true;
15500 }
15501 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15502 gimple_set_location (g, gimple_location (stmt));
15503 gsi_replace (gsi, g, true);
15504 return true;
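/* Illustrative sketch of the do_binary wrapping path above: folding
   vec_add (a, b) on V4SI operands without -fwrapv emits
     u0 = VIEW_CONVERT_EXPR<vector unsigned int>(a);
     u1 = VIEW_CONVERT_EXPR<vector unsigned int>(b);
     t = u0 + u1;
     lhs = VIEW_CONVERT_EXPR<vector signed int>(t);
   so the addition wraps instead of being undefined on signed overflow.  */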
15505 /* Flavors of vec_sub. We deliberately don't expand
15506 P8V_BUILTIN_VSUBUQM. */
15507 case ALTIVEC_BUILTIN_VSUBUBM:
15508 case ALTIVEC_BUILTIN_VSUBUHM:
15509 case ALTIVEC_BUILTIN_VSUBUWM:
15510 case P8V_BUILTIN_VSUBUDM:
15511 case ALTIVEC_BUILTIN_VSUBFP:
15512 case VSX_BUILTIN_XVSUBDP:
15513 bcode = MINUS_EXPR;
15514 goto do_binary;
15515 case VSX_BUILTIN_XVMULSP:
15516 case VSX_BUILTIN_XVMULDP:
15517 arg0 = gimple_call_arg (stmt, 0);
15518 arg1 = gimple_call_arg (stmt, 1);
15519 lhs = gimple_call_lhs (stmt);
15520 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15521 gimple_set_location (g, gimple_location (stmt));
15522 gsi_replace (gsi, g, true);
15523 return true;
15524 /* Even element flavors of vec_mul (signed). */
15525 case ALTIVEC_BUILTIN_VMULESB:
15526 case ALTIVEC_BUILTIN_VMULESH:
15527 case P8V_BUILTIN_VMULESW:
15528 /* Even element flavors of vec_mul (unsigned). */
15529 case ALTIVEC_BUILTIN_VMULEUB:
15530 case ALTIVEC_BUILTIN_VMULEUH:
15531 case P8V_BUILTIN_VMULEUW:
15532 arg0 = gimple_call_arg (stmt, 0);
15533 arg1 = gimple_call_arg (stmt, 1);
15534 lhs = gimple_call_lhs (stmt);
15535 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15536 gimple_set_location (g, gimple_location (stmt));
15537 gsi_replace (gsi, g, true);
15538 return true;
15539 /* Odd element flavors of vec_mul (signed). */
15540 case ALTIVEC_BUILTIN_VMULOSB:
15541 case ALTIVEC_BUILTIN_VMULOSH:
15542 case P8V_BUILTIN_VMULOSW:
15543 /* Odd element flavors of vec_mul (unsigned). */
15544 case ALTIVEC_BUILTIN_VMULOUB:
15545 case ALTIVEC_BUILTIN_VMULOUH:
15546 case P8V_BUILTIN_VMULOUW:
15547 arg0 = gimple_call_arg (stmt, 0);
15548 arg1 = gimple_call_arg (stmt, 1);
15549 lhs = gimple_call_lhs (stmt);
15550 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15551 gimple_set_location (g, gimple_location (stmt));
15552 gsi_replace (gsi, g, true);
15553 return true;
15554 /* Flavors of vec_div (Integer). */
15555 case VSX_BUILTIN_DIV_V2DI:
15556 case VSX_BUILTIN_UDIV_V2DI:
15557 arg0 = gimple_call_arg (stmt, 0);
15558 arg1 = gimple_call_arg (stmt, 1);
15559 lhs = gimple_call_lhs (stmt);
15560 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15561 gimple_set_location (g, gimple_location (stmt));
15562 gsi_replace (gsi, g, true);
15563 return true;
15564 /* Flavors of vec_div (Float). */
15565 case VSX_BUILTIN_XVDIVSP:
15566 case VSX_BUILTIN_XVDIVDP:
15567 arg0 = gimple_call_arg (stmt, 0);
15568 arg1 = gimple_call_arg (stmt, 1);
15569 lhs = gimple_call_lhs (stmt);
15570 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15571 gimple_set_location (g, gimple_location (stmt));
15572 gsi_replace (gsi, g, true);
15573 return true;
15574 /* Flavors of vec_and. */
15575 case ALTIVEC_BUILTIN_VAND:
15576 arg0 = gimple_call_arg (stmt, 0);
15577 arg1 = gimple_call_arg (stmt, 1);
15578 lhs = gimple_call_lhs (stmt);
15579 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15580 gimple_set_location (g, gimple_location (stmt));
15581 gsi_replace (gsi, g, true);
15582 return true;
15583 /* Flavors of vec_andc. */
15584 case ALTIVEC_BUILTIN_VANDC:
15585 arg0 = gimple_call_arg (stmt, 0);
15586 arg1 = gimple_call_arg (stmt, 1);
15587 lhs = gimple_call_lhs (stmt);
15588 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15589 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15590 gimple_set_location (g, gimple_location (stmt));
15591 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15592 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15593 gimple_set_location (g, gimple_location (stmt));
15594 gsi_replace (gsi, g, true);
15595 return true;
15596 /* Flavors of vec_nand. */
15597 case P8V_BUILTIN_VEC_NAND:
15598 case P8V_BUILTIN_NAND_V16QI:
15599 case P8V_BUILTIN_NAND_V8HI:
15600 case P8V_BUILTIN_NAND_V4SI:
15601 case P8V_BUILTIN_NAND_V4SF:
15602 case P8V_BUILTIN_NAND_V2DF:
15603 case P8V_BUILTIN_NAND_V2DI:
15604 arg0 = gimple_call_arg (stmt, 0);
15605 arg1 = gimple_call_arg (stmt, 1);
15606 lhs = gimple_call_lhs (stmt);
15607 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15608 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15609 gimple_set_location (g, gimple_location (stmt));
15610 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15611 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15612 gimple_set_location (g, gimple_location (stmt));
15613 gsi_replace (gsi, g, true);
15614 return true;
15615 /* Flavors of vec_or. */
15616 case ALTIVEC_BUILTIN_VOR:
15617 arg0 = gimple_call_arg (stmt, 0);
15618 arg1 = gimple_call_arg (stmt, 1);
15619 lhs = gimple_call_lhs (stmt);
15620 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15621 gimple_set_location (g, gimple_location (stmt));
15622 gsi_replace (gsi, g, true);
15623 return true;
15624 /* Flavors of vec_orc. */
15625 case P8V_BUILTIN_ORC_V16QI:
15626 case P8V_BUILTIN_ORC_V8HI:
15627 case P8V_BUILTIN_ORC_V4SI:
15628 case P8V_BUILTIN_ORC_V4SF:
15629 case P8V_BUILTIN_ORC_V2DF:
15630 case P8V_BUILTIN_ORC_V2DI:
15631 arg0 = gimple_call_arg (stmt, 0);
15632 arg1 = gimple_call_arg (stmt, 1);
15633 lhs = gimple_call_lhs (stmt);
15634 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15635 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15636 gimple_set_location (g, gimple_location (stmt));
15637 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15638 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15639 gimple_set_location (g, gimple_location (stmt));
15640 gsi_replace (gsi, g, true);
15641 return true;
15642 /* Flavors of vec_xor. */
15643 case ALTIVEC_BUILTIN_VXOR:
15644 arg0 = gimple_call_arg (stmt, 0);
15645 arg1 = gimple_call_arg (stmt, 1);
15646 lhs = gimple_call_lhs (stmt);
15647 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15648 gimple_set_location (g, gimple_location (stmt));
15649 gsi_replace (gsi, g, true);
15650 return true;
15651 /* Flavors of vec_nor. */
15652 case ALTIVEC_BUILTIN_VNOR:
15653 arg0 = gimple_call_arg (stmt, 0);
15654 arg1 = gimple_call_arg (stmt, 1);
15655 lhs = gimple_call_lhs (stmt);
15656 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15657 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15658 gimple_set_location (g, gimple_location (stmt));
15659 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15660 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15661 gimple_set_location (g, gimple_location (stmt));
15662 gsi_replace (gsi, g, true);
15663 return true;
15664 /* Flavors of vec_abs. */
15665 case ALTIVEC_BUILTIN_ABS_V16QI:
15666 case ALTIVEC_BUILTIN_ABS_V8HI:
15667 case ALTIVEC_BUILTIN_ABS_V4SI:
15668 case ALTIVEC_BUILTIN_ABS_V4SF:
15669 case P8V_BUILTIN_ABS_V2DI:
15670 case VSX_BUILTIN_XVABSDP:
15671 arg0 = gimple_call_arg (stmt, 0);
15672 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15673 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15674 return false;
15675 lhs = gimple_call_lhs (stmt);
15676 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15677 gimple_set_location (g, gimple_location (stmt));
15678 gsi_replace (gsi, g, true);
15679 return true;
15680 /* Flavors of vec_min. */
15681 case VSX_BUILTIN_XVMINDP:
15682 case P8V_BUILTIN_VMINSD:
15683 case P8V_BUILTIN_VMINUD:
15684 case ALTIVEC_BUILTIN_VMINSB:
15685 case ALTIVEC_BUILTIN_VMINSH:
15686 case ALTIVEC_BUILTIN_VMINSW:
15687 case ALTIVEC_BUILTIN_VMINUB:
15688 case ALTIVEC_BUILTIN_VMINUH:
15689 case ALTIVEC_BUILTIN_VMINUW:
15690 case ALTIVEC_BUILTIN_VMINFP:
15691 arg0 = gimple_call_arg (stmt, 0);
15692 arg1 = gimple_call_arg (stmt, 1);
15693 lhs = gimple_call_lhs (stmt);
15694 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15695 gimple_set_location (g, gimple_location (stmt));
15696 gsi_replace (gsi, g, true);
15697 return true;
15698 /* Flavors of vec_max. */
15699 case VSX_BUILTIN_XVMAXDP:
15700 case P8V_BUILTIN_VMAXSD:
15701 case P8V_BUILTIN_VMAXUD:
15702 case ALTIVEC_BUILTIN_VMAXSB:
15703 case ALTIVEC_BUILTIN_VMAXSH:
15704 case ALTIVEC_BUILTIN_VMAXSW:
15705 case ALTIVEC_BUILTIN_VMAXUB:
15706 case ALTIVEC_BUILTIN_VMAXUH:
15707 case ALTIVEC_BUILTIN_VMAXUW:
15708 case ALTIVEC_BUILTIN_VMAXFP:
15709 arg0 = gimple_call_arg (stmt, 0);
15710 arg1 = gimple_call_arg (stmt, 1);
15711 lhs = gimple_call_lhs (stmt);
15712 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15713 gimple_set_location (g, gimple_location (stmt));
15714 gsi_replace (gsi, g, true);
15715 return true;
15716 /* Flavors of vec_eqv. */
15717 case P8V_BUILTIN_EQV_V16QI:
15718 case P8V_BUILTIN_EQV_V8HI:
15719 case P8V_BUILTIN_EQV_V4SI:
15720 case P8V_BUILTIN_EQV_V4SF:
15721 case P8V_BUILTIN_EQV_V2DF:
15722 case P8V_BUILTIN_EQV_V2DI:
15723 arg0 = gimple_call_arg (stmt, 0);
15724 arg1 = gimple_call_arg (stmt, 1);
15725 lhs = gimple_call_lhs (stmt);
15726 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15727 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15728 gimple_set_location (g, gimple_location (stmt));
15729 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15730 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15731 gimple_set_location (g, gimple_location (stmt));
15732 gsi_replace (gsi, g, true);
15733 return true;
15734 /* Flavors of vec_rotate_left. */
15735 case ALTIVEC_BUILTIN_VRLB:
15736 case ALTIVEC_BUILTIN_VRLH:
15737 case ALTIVEC_BUILTIN_VRLW:
15738 case P8V_BUILTIN_VRLD:
15739 arg0 = gimple_call_arg (stmt, 0);
15740 arg1 = gimple_call_arg (stmt, 1);
15741 lhs = gimple_call_lhs (stmt);
15742 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15743 gimple_set_location (g, gimple_location (stmt));
15744 gsi_replace (gsi, g, true);
15745 return true;
15746 /* Flavors of vector shift right algebraic.
15747 vec_sra{b,h,w} -> vsra{b,h,w}. */
15748 case ALTIVEC_BUILTIN_VSRAB:
15749 case ALTIVEC_BUILTIN_VSRAH:
15750 case ALTIVEC_BUILTIN_VSRAW:
15751 case P8V_BUILTIN_VSRAD:
15752 arg0 = gimple_call_arg (stmt, 0);
15753 arg1 = gimple_call_arg (stmt, 1);
15754 lhs = gimple_call_lhs (stmt);
15755 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
15756 gimple_set_location (g, gimple_location (stmt));
15757 gsi_replace (gsi, g, true);
15758 return true;
15759 /* Flavors of vector shift left.
15760 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15761 case ALTIVEC_BUILTIN_VSLB:
15762 case ALTIVEC_BUILTIN_VSLH:
15763 case ALTIVEC_BUILTIN_VSLW:
15764 case P8V_BUILTIN_VSLD:
15765 {
15766 location_t loc;
15767 gimple_seq stmts = NULL;
15768 arg0 = gimple_call_arg (stmt, 0);
15769 tree arg0_type = TREE_TYPE (arg0);
15770 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15771 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15772 return false;
15773 arg1 = gimple_call_arg (stmt, 1);
15774 tree arg1_type = TREE_TYPE (arg1);
15775 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15776 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15777 loc = gimple_location (stmt);
15778 lhs = gimple_call_lhs (stmt);
15779 /* Force arg1 into the valid range matching the arg0 type. */
15780 /* Build a vector whose elements are the element size in bits. */
15781 int n_elts = VECTOR_CST_NELTS (arg1);
15782 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15783 * BITS_PER_UNIT;
15784 tree element_size = build_int_cst (unsigned_element_type,
15785 tree_size_in_bits / n_elts);
15786 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15787 for (int i = 0; i < n_elts; i++)
15788 elts.safe_push (element_size);
15789 tree modulo_tree = elts.build ();
15790 /* Modulo the provided shift value against that vector. */
15791 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15792 unsigned_arg1_type, arg1);
15793 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15794 unsigned_arg1_type, unsigned_arg1,
15795 modulo_tree);
15796 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15797 /* And finally, do the shift. */
15798 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15799 gimple_set_location (g, gimple_location (stmt));
15800 gsi_replace (gsi, g, true);
15801 return true;
15802 }
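/* Illustrative example for the vec_sl folding above: for V4SI operands
   the element size is 128 / 4 = 32 bits, so each shift amount is reduced
   with TRUNC_MOD_EXPR against {32, 32, 32, 32} before the LSHIFT_EXPR,
   keeping the shift counts in a well-defined range.  */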
15803 /* Flavors of vector shift right. */
15804 case ALTIVEC_BUILTIN_VSRB:
15805 case ALTIVEC_BUILTIN_VSRH:
15806 case ALTIVEC_BUILTIN_VSRW:
15807 case P8V_BUILTIN_VSRD:
15808 {
15809 arg0 = gimple_call_arg (stmt, 0);
15810 arg1 = gimple_call_arg (stmt, 1);
15811 lhs = gimple_call_lhs (stmt);
15812 gimple_seq stmts = NULL;
15813 /* Convert arg0 to unsigned. */
15814 tree arg0_unsigned
15815 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15816 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15817 tree res
15818 = gimple_build (&stmts, RSHIFT_EXPR,
15819 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
15820 /* Convert result back to the lhs type. */
15821 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15822 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15823 update_call_from_tree (gsi, res);
15824 return true;
15825 }
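/* Illustrative note on the vec_sr folding above: the shift is always
   performed on the unsigned view of arg0, so the RSHIFT_EXPR is a
   logical shift regardless of the signedness of the lhs type, matching
   the vsr{b,h,w,d} instructions.  */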
15826 /* Vector loads. */
15827 case ALTIVEC_BUILTIN_LVX_V16QI:
15828 case ALTIVEC_BUILTIN_LVX_V8HI:
15829 case ALTIVEC_BUILTIN_LVX_V4SI:
15830 case ALTIVEC_BUILTIN_LVX_V4SF:
15831 case ALTIVEC_BUILTIN_LVX_V2DI:
15832 case ALTIVEC_BUILTIN_LVX_V2DF:
15833 case ALTIVEC_BUILTIN_LVX_V1TI:
15834 {
15835 arg0 = gimple_call_arg (stmt, 0); // offset
15836 arg1 = gimple_call_arg (stmt, 1); // address
15837 lhs = gimple_call_lhs (stmt);
15838 location_t loc = gimple_location (stmt);
15839 /* Since arg1 may be cast to a different type, just use ptr_type_node
15840 here instead of trying to enforce TBAA on pointer types. */
15841 tree arg1_type = ptr_type_node;
15842 tree lhs_type = TREE_TYPE (lhs);
15843 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15844 the tree using the value from arg0. The resulting type will match
15845 the type of arg1. */
15846 gimple_seq stmts = NULL;
15847 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15848 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15849 arg1_type, arg1, temp_offset);
15850 /* Mask off any lower bits from the address. */
15851 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15852 arg1_type, temp_addr,
15853 build_int_cst (arg1_type, -16));
15854 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15855 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15856 take an offset, but since we've already incorporated the offset
15857 above, here we just pass in a zero. */
15858 gimple *g
15859 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15860 build_int_cst (arg1_type, 0)));
15861 gimple_set_location (g, loc);
15862 gsi_replace (gsi, g, true);
15863 return true;
15864 }
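/* Illustrative sketch of the lvx folding above: vec_ld (off, ptr)
   becomes roughly
     addr = (char *) ptr + (sizetype) off;
     lhs = MEM[addr & -16];
   mirroring the lvx instruction, which ignores the low four bits of
   the effective address.  */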
15865 /* Vector stores. */
15866 case ALTIVEC_BUILTIN_STVX_V16QI:
15867 case ALTIVEC_BUILTIN_STVX_V8HI:
15868 case ALTIVEC_BUILTIN_STVX_V4SI:
15869 case ALTIVEC_BUILTIN_STVX_V4SF:
15870 case ALTIVEC_BUILTIN_STVX_V2DI:
15871 case ALTIVEC_BUILTIN_STVX_V2DF:
15872 {
15873 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15874 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15875 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15876 location_t loc = gimple_location (stmt);
15877 tree arg0_type = TREE_TYPE (arg0);
15878 /* Use ptr_type_node (no TBAA) for the arg2_type.
15879 FIXME: (Richard) "A proper fix would be to transition this type as
15880 seen from the frontend to GIMPLE, for example in a similar way we
15881 do for MEM_REFs by piggy-backing that on an extra argument, a
15882 constant zero pointer of the alias pointer type to use (which would
15883 also serve as a type indicator of the store itself). I'd use a
15884 target specific internal function for this (not sure if we can have
15885 those target specific, but I guess if it's folded away then that's
15886 fine) and get away with the overload set." */
15887 tree arg2_type = ptr_type_node;
15888 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15889 the tree using the value from arg1. The resulting type will match
15890 the type of arg2. */
15891 gimple_seq stmts = NULL;
15892 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15893 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15894 arg2_type, arg2, temp_offset);
15895 /* Mask off any lower bits from the address. */
15896 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15897 arg2_type, temp_addr,
15898 build_int_cst (arg2_type, -16));
15899 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15900 /* The desired gimple result should be similar to:
15901 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15902 gimple *g
15903 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15904 build_int_cst (arg2_type, 0)), arg0);
15905 gimple_set_location (g, loc);
15906 gsi_replace (gsi, g, true);
15907 return true;
15908 }
15909
15910 /* Unaligned vector loads. */
15911 case VSX_BUILTIN_LXVW4X_V16QI:
15912 case VSX_BUILTIN_LXVW4X_V8HI:
15913 case VSX_BUILTIN_LXVW4X_V4SF:
15914 case VSX_BUILTIN_LXVW4X_V4SI:
15915 case VSX_BUILTIN_LXVD2X_V2DF:
15916 case VSX_BUILTIN_LXVD2X_V2DI:
15917 {
15918 arg0 = gimple_call_arg (stmt, 0); // offset
15919 arg1 = gimple_call_arg (stmt, 1); // address
15920 lhs = gimple_call_lhs (stmt);
15921 location_t loc = gimple_location (stmt);
15922 /* Since arg1 may be cast to a different type, just use ptr_type_node
15923 here instead of trying to enforce TBAA on pointer types. */
15924 tree arg1_type = ptr_type_node;
15925 tree lhs_type = TREE_TYPE (lhs);
15926 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15927 required alignment (power) is 4 bytes regardless of data type. */
15928 tree align_ltype = build_aligned_type (lhs_type, 4);
15929 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15930 the tree using the value from arg0. The resulting type will match
15931 the type of arg1. */
15932 gimple_seq stmts = NULL;
15933 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15934 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15935 arg1_type, arg1, temp_offset);
15936 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15937 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15938 take an offset, but since we've already incorporated the offset
15939 above, here we just pass in a zero. */
15940 gimple *g;
15941 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15942 build_int_cst (arg1_type, 0)));
15943 gimple_set_location (g, loc);
15944 gsi_replace (gsi, g, true);
15945 return true;
15946 }
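/* Illustrative note on the unaligned load folding above: unlike the lvx
   case no address masking is applied; instead the MEM_REF uses a 4-byte
   aligned variant of the lhs type, matching the 4-byte alignment the
   comment above describes for these instructions.  */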
15947
15948 /* Unaligned vector stores. */
15949 case VSX_BUILTIN_STXVW4X_V16QI:
15950 case VSX_BUILTIN_STXVW4X_V8HI:
15951 case VSX_BUILTIN_STXVW4X_V4SF:
15952 case VSX_BUILTIN_STXVW4X_V4SI:
15953 case VSX_BUILTIN_STXVD2X_V2DF:
15954 case VSX_BUILTIN_STXVD2X_V2DI:
15955 {
15956 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15957 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15958 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15959 location_t loc = gimple_location (stmt);
15960 tree arg0_type = TREE_TYPE (arg0);
15961 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15962 tree arg2_type = ptr_type_node;
15963 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15964 required alignment (power) is 4 bytes regardless of data type. */
15965 tree align_stype = build_aligned_type (arg0_type, 4);
15966 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15967 the tree using the value from arg1. */
15968 gimple_seq stmts = NULL;
15969 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15970 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15971 arg2_type, arg2, temp_offset);
15972 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15973 gimple *g;
15974 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15975 build_int_cst (arg2_type, 0)), arg0);
15976 gimple_set_location (g, loc);
15977 gsi_replace (gsi, g, true);
15978 return true;
15979 }
15980
15981 /* Vector fused multiply-add (fma). */
15982 case ALTIVEC_BUILTIN_VMADDFP:
15983 case VSX_BUILTIN_XVMADDDP:
15984 case ALTIVEC_BUILTIN_VMLADDUHM:
15985 {
15986 arg0 = gimple_call_arg (stmt, 0);
15987 arg1 = gimple_call_arg (stmt, 1);
15988 tree arg2 = gimple_call_arg (stmt, 2);
15989 lhs = gimple_call_lhs (stmt);
15990 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15991 gimple_call_set_lhs (g, lhs);
15992 gimple_call_set_nothrow (g, true);
15993 gimple_set_location (g, gimple_location (stmt));
15994 gsi_replace (gsi, g, true);
15995 return true;
15996 }
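/* Illustrative sketch of the fma folding above: vec_madd (a, b, c)
   becomes an internal-function call,
     lhs = .FMA (a, b, c);
   which later expands to the fused multiply-add patterns.  */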
15997
15998 /* Vector compares; EQ, NE, GE, GT, LE. */
15999 case ALTIVEC_BUILTIN_VCMPEQUB:
16000 case ALTIVEC_BUILTIN_VCMPEQUH:
16001 case ALTIVEC_BUILTIN_VCMPEQUW:
16002 case P8V_BUILTIN_VCMPEQUD:
16003 fold_compare_helper (gsi, EQ_EXPR, stmt);
16004 return true;
16005
16006 case P9V_BUILTIN_CMPNEB:
16007 case P9V_BUILTIN_CMPNEH:
16008 case P9V_BUILTIN_CMPNEW:
16009 fold_compare_helper (gsi, NE_EXPR, stmt);
16010 return true;
16011
16012 case VSX_BUILTIN_CMPGE_16QI:
16013 case VSX_BUILTIN_CMPGE_U16QI:
16014 case VSX_BUILTIN_CMPGE_8HI:
16015 case VSX_BUILTIN_CMPGE_U8HI:
16016 case VSX_BUILTIN_CMPGE_4SI:
16017 case VSX_BUILTIN_CMPGE_U4SI:
16018 case VSX_BUILTIN_CMPGE_2DI:
16019 case VSX_BUILTIN_CMPGE_U2DI:
16020 fold_compare_helper (gsi, GE_EXPR, stmt);
16021 return true;
16022
16023 case ALTIVEC_BUILTIN_VCMPGTSB:
16024 case ALTIVEC_BUILTIN_VCMPGTUB:
16025 case ALTIVEC_BUILTIN_VCMPGTSH:
16026 case ALTIVEC_BUILTIN_VCMPGTUH:
16027 case ALTIVEC_BUILTIN_VCMPGTSW:
16028 case ALTIVEC_BUILTIN_VCMPGTUW:
16029 case P8V_BUILTIN_VCMPGTUD:
16030 case P8V_BUILTIN_VCMPGTSD:
16031 fold_compare_helper (gsi, GT_EXPR, stmt);
16032 return true;
16033
16034 case VSX_BUILTIN_CMPLE_16QI:
16035 case VSX_BUILTIN_CMPLE_U16QI:
16036 case VSX_BUILTIN_CMPLE_8HI:
16037 case VSX_BUILTIN_CMPLE_U8HI:
16038 case VSX_BUILTIN_CMPLE_4SI:
16039 case VSX_BUILTIN_CMPLE_U4SI:
16040 case VSX_BUILTIN_CMPLE_2DI:
16041 case VSX_BUILTIN_CMPLE_U2DI:
16042 fold_compare_helper (gsi, LE_EXPR, stmt);
16043 return true;
16044
16045 /* Flavors of vec_splat_[us]{8,16,32}. */
16046 case ALTIVEC_BUILTIN_VSPLTISB:
16047 case ALTIVEC_BUILTIN_VSPLTISH:
16048 case ALTIVEC_BUILTIN_VSPLTISW:
16049 {
16050 int size;
16051 if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
16052 size = 8;
16053 else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
16054 size = 16;
16055 else
16056 size = 32;
16057
16058 arg0 = gimple_call_arg (stmt, 0);
16059 lhs = gimple_call_lhs (stmt);
16060
16061 /* Only fold the vec_splat_*() if the lower bits of arg 0 form a
16062 5-bit signed constant in the range -16 to +15. */
16063 if (TREE_CODE (arg0) != INTEGER_CST
16064 || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
16065 -16, 15))
16066 return false;
16067 gimple_seq stmts = NULL;
16068 location_t loc = gimple_location (stmt);
16069 tree splat_value = gimple_convert (&stmts, loc,
16070 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16071 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16072 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16073 g = gimple_build_assign (lhs, splat_tree);
16074 gimple_set_location (g, gimple_location (stmt));
16075 gsi_replace (gsi, g, true);
16076 return true;
16077 }
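/* Illustrative example for the vec_splat_s* folding above:
   vec_splat_s32 (5) becomes the vector constant {5, 5, 5, 5}, while a
   non-constant or out-of-range argument is left for the expander.  */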
16078
16079 /* Flavors of vec_splat. */
16080 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
16081 case ALTIVEC_BUILTIN_VSPLTB:
16082 case ALTIVEC_BUILTIN_VSPLTH:
16083 case ALTIVEC_BUILTIN_VSPLTW:
16084 case VSX_BUILTIN_XXSPLTD_V2DI:
16085 case VSX_BUILTIN_XXSPLTD_V2DF:
16086 {
16087 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
16088 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
16089 /* Only fold the vec_splat_*() if arg1 is both a constant value
16090 and a valid index into the arg0 vector. */
16091 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
16092 if (TREE_CODE (arg1) != INTEGER_CST
16093 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
16094 return false;
16095 lhs = gimple_call_lhs (stmt);
16096 tree lhs_type = TREE_TYPE (lhs);
16097 tree arg0_type = TREE_TYPE (arg0);
16098 tree splat;
16099 if (TREE_CODE (arg0) == VECTOR_CST)
16100 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
16101 else
16102 {
16103 /* Determine (in bits) the length and start location of the
16104 splat value for a call to the tree_vec_extract helper. */
16105 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
16106 * BITS_PER_UNIT / n_elts;
16107 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
16108 tree len = build_int_cst (bitsizetype, splat_elem_size);
16109 tree start = build_int_cst (bitsizetype, splat_start_bit);
16110 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
16111 len, start);
16112 }
16113 /* And finally, build the new vector. */
16114 tree splat_tree = build_vector_from_val (lhs_type, splat);
16115 g = gimple_build_assign (lhs, splat_tree);
16116 gimple_set_location (g, gimple_location (stmt));
16117 gsi_replace (gsi, g, true);
16118 return true;
16119 }
16120
16121 /* vec_mergel (integral and float). */
16122 case ALTIVEC_BUILTIN_VMRGLH:
16123 case ALTIVEC_BUILTIN_VMRGLW:
16124 case VSX_BUILTIN_XXMRGLW_4SI:
16125 case ALTIVEC_BUILTIN_VMRGLB:
16126 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16127 case VSX_BUILTIN_XXMRGLW_4SF:
16128 case VSX_BUILTIN_VEC_MERGEL_V2DF:
16129 fold_mergehl_helper (gsi, stmt, 1);
16130 return true;
16131 /* vec_mergeh (integral and float). */
16132 case ALTIVEC_BUILTIN_VMRGHH:
16133 case ALTIVEC_BUILTIN_VMRGHW:
16134 case VSX_BUILTIN_XXMRGHW_4SI:
16135 case ALTIVEC_BUILTIN_VMRGHB:
16136 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16137 case VSX_BUILTIN_XXMRGHW_4SF:
16138 case VSX_BUILTIN_VEC_MERGEH_V2DF:
16139 fold_mergehl_helper (gsi, stmt, 0);
16140 return true;
16141
16142 /* Flavors of vec_mergee. */
16143 case P8V_BUILTIN_VMRGEW_V4SI:
16144 case P8V_BUILTIN_VMRGEW_V2DI:
16145 case P8V_BUILTIN_VMRGEW_V4SF:
16146 case P8V_BUILTIN_VMRGEW_V2DF:
16147 fold_mergeeo_helper (gsi, stmt, 0);
16148 return true;
16149 /* Flavors of vec_mergeo. */
16150 case P8V_BUILTIN_VMRGOW_V4SI:
16151 case P8V_BUILTIN_VMRGOW_V2DI:
16152 case P8V_BUILTIN_VMRGOW_V4SF:
16153 case P8V_BUILTIN_VMRGOW_V2DF:
16154 fold_mergeeo_helper (gsi, stmt, 1);
16155 return true;
16156
16157 /* d = vec_pack (a, b) */
16158 case P8V_BUILTIN_VPKUDUM:
16159 case ALTIVEC_BUILTIN_VPKUHUM:
16160 case ALTIVEC_BUILTIN_VPKUWUM:
16161 {
16162 arg0 = gimple_call_arg (stmt, 0);
16163 arg1 = gimple_call_arg (stmt, 1);
16164 lhs = gimple_call_lhs (stmt);
16165 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
16166 gimple_set_location (g, gimple_location (stmt));
16167 gsi_replace (gsi, g, true);
16168 return true;
16169 }
16170
16171 /* d = vec_unpackh (a) */
16172 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
16173 in this code is sensitive to endianness, and needs to be inverted to
16174 handle both LE and BE targets. */
16175 case ALTIVEC_BUILTIN_VUPKHSB:
16176 case ALTIVEC_BUILTIN_VUPKHSH:
16177 case P8V_BUILTIN_VUPKHSW:
16178 {
16179 arg0 = gimple_call_arg (stmt, 0);
16180 lhs = gimple_call_lhs (stmt);
16181 if (BYTES_BIG_ENDIAN)
16182 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16183 else
16184 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16185 gimple_set_location (g, gimple_location (stmt));
16186 gsi_replace (gsi, g, true);
16187 return true;
16188 }
16189 /* d = vec_unpackl (a) */
16190 case ALTIVEC_BUILTIN_VUPKLSB:
16191 case ALTIVEC_BUILTIN_VUPKLSH:
16192 case P8V_BUILTIN_VUPKLSW:
16193 {
16194 arg0 = gimple_call_arg (stmt, 0);
16195 lhs = gimple_call_lhs (stmt);
16196 if (BYTES_BIG_ENDIAN)
16197 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16198 else
16199 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16200 gimple_set_location (g, gimple_location (stmt));
16201 gsi_replace (gsi, g, true);
16202 return true;
16203 }
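/* Illustrative summary of the endian handling above: on a big-endian
   target vec_unpackh maps to VEC_UNPACK_HI_EXPR and vec_unpackl to
   VEC_UNPACK_LO_EXPR; on a little-endian target the two mappings are
   swapped.  */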
16204 /* There is no gimple type corresponding to pixel, so just return. */
16205 case ALTIVEC_BUILTIN_VUPKHPX:
16206 case ALTIVEC_BUILTIN_VUPKLPX:
16207 return false;
16208
16209 /* vec_perm. */
16210 case ALTIVEC_BUILTIN_VPERM_16QI:
16211 case ALTIVEC_BUILTIN_VPERM_8HI:
16212 case ALTIVEC_BUILTIN_VPERM_4SI:
16213 case ALTIVEC_BUILTIN_VPERM_2DI:
16214 case ALTIVEC_BUILTIN_VPERM_4SF:
16215 case ALTIVEC_BUILTIN_VPERM_2DF:
16216 {
16217 arg0 = gimple_call_arg (stmt, 0);
16218 arg1 = gimple_call_arg (stmt, 1);
16219 tree permute = gimple_call_arg (stmt, 2);
16220 lhs = gimple_call_lhs (stmt);
16221 location_t loc = gimple_location (stmt);
16222 gimple_seq stmts = NULL;
16223 // Convert arg0 and arg1 to match the type of the permute
16224 // for the VEC_PERM_EXPR operation.
16225 tree permute_type = (TREE_TYPE (permute));
16226 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
16227 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
16228 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
16229 permute_type, arg0_ptype, arg1_ptype,
16230 permute);
16231 // Convert the result back to the desired lhs type upon completion.
16232 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
16233 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16234 g = gimple_build_assign (lhs, temp);
16235 gimple_set_location (g, loc);
16236 gsi_replace (gsi, g, true);
16237 return true;
16238 }
16239
16240 default:
16241 if (TARGET_DEBUG_BUILTIN)
16242 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16243 fn_code, fn_name1, fn_name2);
16244 break;
16245 }
16246
16247 return false;
16248 }
16249
16250 /* Expand an expression EXP that calls a built-in function,
16251 with result going to TARGET if that's convenient
16252 (and in mode MODE if that's convenient).
16253 SUBTARGET may be used as the target for computing one of EXP's operands.
16254 IGNORE is nonzero if the value is to be ignored. */
16255
16256 static rtx
16257 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16258 machine_mode mode ATTRIBUTE_UNUSED,
16259 int ignore ATTRIBUTE_UNUSED)
16260 {
16261 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16262 enum rs6000_builtins fcode
16263 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16264 size_t uns_fcode = (size_t)fcode;
16265 const struct builtin_description *d;
16266 size_t i;
16267 rtx ret;
16268 bool success;
16269 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16270 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16271 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16272
16273 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16274 floating point type, depending on whether long double is the IBM extended
16275 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16276 we only define one variant of the built-in function, and switch the code
16277 when defining it, rather than defining two built-ins and using the
16278 overload table in rs6000-c.c to switch between the two. If we don't have
16279 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16280 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16281 if (FLOAT128_IEEE_P (TFmode))
16282 switch (icode)
16283 {
16284 default:
16285 break;
16286
16287 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16288 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16289 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16290 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16291 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16292 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16293 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16294 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16295 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16296 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16297 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16298 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16299 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16300 }
16301
16302 if (TARGET_DEBUG_BUILTIN)
16303 {
16304 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16305 const char *name2 = (icode != CODE_FOR_nothing)
16306 ? get_insn_name ((int) icode)
16307 : "nothing";
16308 const char *name3;
16309
16310 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16311 {
16312 default: name3 = "unknown"; break;
16313 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16314 case RS6000_BTC_UNARY: name3 = "unary"; break;
16315 case RS6000_BTC_BINARY: name3 = "binary"; break;
16316 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16317 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16318 case RS6000_BTC_ABS: name3 = "abs"; break;
16319 case RS6000_BTC_DST: name3 = "dst"; break;
16320 }
16321
16322
16323 fprintf (stderr,
16324 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16325 (name1) ? name1 : "---", fcode,
16326 (name2) ? name2 : "---", (int) icode,
16327 name3,
16328 func_valid_p ? "" : ", not valid");
16329 }
16330
16331 if (!func_valid_p)
16332 {
16333 rs6000_invalid_builtin (fcode);
16334
16335 /* Given it is invalid, just generate a normal call. */
16336 return expand_call (exp, target, ignore);
16337 }
16338
16339 switch (fcode)
16340 {
16341 case RS6000_BUILTIN_RECIP:
16342 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16343
16344 case RS6000_BUILTIN_RECIPF:
16345 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16346
16347 case RS6000_BUILTIN_RSQRTF:
16348 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16349
16350 case RS6000_BUILTIN_RSQRT:
16351 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16352
16353 case POWER7_BUILTIN_BPERMD:
16354 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16355 ? CODE_FOR_bpermd_di
16356 : CODE_FOR_bpermd_si), exp, target);
16357
16358 case RS6000_BUILTIN_GET_TB:
16359 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16360 target);
16361
16362 case RS6000_BUILTIN_MFTB:
16363 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16364 ? CODE_FOR_rs6000_mftb_di
16365 : CODE_FOR_rs6000_mftb_si),
16366 target);
16367
16368 case RS6000_BUILTIN_MFFS:
16369 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16370
16371 case RS6000_BUILTIN_MTFSB0:
16372 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16373
16374 case RS6000_BUILTIN_MTFSB1:
16375 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16376
16377 case RS6000_BUILTIN_SET_FPSCR_RN:
16378 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16379 exp);
16380
16381 case RS6000_BUILTIN_SET_FPSCR_DRN:
16382 return
16383 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16384 exp);
16385
16386 case RS6000_BUILTIN_MFFSL:
16387 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16388
16389 case RS6000_BUILTIN_MTFSF:
16390 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16391
16392 case RS6000_BUILTIN_CPU_INIT:
16393 case RS6000_BUILTIN_CPU_IS:
16394 case RS6000_BUILTIN_CPU_SUPPORTS:
16395 return cpu_expand_builtin (fcode, exp, target);
16396
16397 case MISC_BUILTIN_SPEC_BARRIER:
16398 {
16399 emit_insn (gen_speculation_barrier ());
16400 return NULL_RTX;
16401 }
16402
16403 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16404 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16405 {
16406 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16407 : (int) CODE_FOR_altivec_lvsl_direct);
16408 machine_mode tmode = insn_data[icode2].operand[0].mode;
16409 machine_mode mode = insn_data[icode2].operand[1].mode;
16410 tree arg;
16411 rtx op, addr, pat;
16412
16413 gcc_assert (TARGET_ALTIVEC);
16414
16415 arg = CALL_EXPR_ARG (exp, 0);
16416 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16417 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16418 addr = memory_address (mode, op);
16419 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16420 op = addr;
16421 else
16422 {
16423 /* For the load case we need to negate the address. */
16424 op = gen_reg_rtx (GET_MODE (addr));
16425 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16426 }
16427 op = gen_rtx_MEM (mode, op);
16428
16429 if (target == 0
16430 || GET_MODE (target) != tmode
16431 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16432 target = gen_reg_rtx (tmode);
16433
16434 pat = GEN_FCN (icode2) (target, op);
16435 if (!pat)
16436 return 0;
16437 emit_insn (pat);
16438
16439 return target;
16440 }
16441
16442 case ALTIVEC_BUILTIN_VCFUX:
16443 case ALTIVEC_BUILTIN_VCFSX:
16444 case ALTIVEC_BUILTIN_VCTUXS:
16445 case ALTIVEC_BUILTIN_VCTSXS:
16446 /* FIXME: There's got to be a nicer way to handle this case than
16447 constructing a new CALL_EXPR. */
16448 if (call_expr_nargs (exp) == 1)
16449 {
16450 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16451 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16452 }
16453 break;
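/* Illustrative note on the case above: a vec_ctf/vec_cts-style call
   that arrives with a single argument is rebuilt with integer_zero_node
   appended, supplying a default scale operand of zero.  */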
16454
16455 /* For the pack and unpack int128 routines, fix up the builtin so it
16456 uses the correct IBM128 type. */
16457 case MISC_BUILTIN_PACK_IF:
16458 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16459 {
16460 icode = CODE_FOR_packtf;
16461 fcode = MISC_BUILTIN_PACK_TF;
16462 uns_fcode = (size_t)fcode;
16463 }
16464 break;
16465
16466 case MISC_BUILTIN_UNPACK_IF:
16467 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16468 {
16469 icode = CODE_FOR_unpacktf;
16470 fcode = MISC_BUILTIN_UNPACK_TF;
16471 uns_fcode = (size_t)fcode;
16472 }
16473 break;
16474
16475 default:
16476 break;
16477 }
16478
16479 if (TARGET_ALTIVEC)
16480 {
16481 ret = altivec_expand_builtin (exp, target, &success);
16482
16483 if (success)
16484 return ret;
16485 }
16486 if (TARGET_HTM)
16487 {
16488 ret = htm_expand_builtin (exp, target, &success);
16489
16490 if (success)
16491 return ret;
16492 }
16493
16494 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16495 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16496 gcc_assert (attr == RS6000_BTC_UNARY
16497 || attr == RS6000_BTC_BINARY
16498 || attr == RS6000_BTC_TERNARY
16499 || attr == RS6000_BTC_SPECIAL);
16500
16501 /* Handle simple unary operations. */
16502 d = bdesc_1arg;
16503 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16504 if (d->code == fcode)
16505 return rs6000_expand_unop_builtin (icode, exp, target);
16506
16507 /* Handle simple binary operations. */
16508 d = bdesc_2arg;
16509 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16510 if (d->code == fcode)
16511 return rs6000_expand_binop_builtin (icode, exp, target);
16512
16513 /* Handle simple ternary operations. */
16514 d = bdesc_3arg;
16515 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16516 if (d->code == fcode)
16517 return rs6000_expand_ternop_builtin (icode, exp, target);
16518
16519 /* Handle simple no-argument operations. */
16520 d = bdesc_0arg;
16521 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16522 if (d->code == fcode)
16523 return rs6000_expand_zeroop_builtin (icode, target);
16524
16525 gcc_unreachable ();
16526 }
16527
16528 /* Create a builtin vector type with a name, taking care not to give
16529 the canonical type a name. */
16530
16531 static tree
16532 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16533 {
16534 tree result = build_vector_type (elt_type, num_elts);
16535
16536 /* Copy so we don't give the canonical type a name. */
16537 result = build_variant_type_copy (result);
16538
16539 add_builtin_type (name, result);
16540
16541 return result;
16542 }
16543
16544 static void
16545 rs6000_init_builtins (void)
16546 {
16547 tree tdecl;
16548 tree ftype;
16549 machine_mode mode;
16550
16551 if (TARGET_DEBUG_BUILTIN)
16552 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16553 (TARGET_ALTIVEC) ? ", altivec" : "",
16554 (TARGET_VSX) ? ", vsx" : "");
16555
16556 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16557 : "__vector long long",
16558 intDI_type_node, 2);
16559 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16560 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16561 intSI_type_node, 4);
16562 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16563 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16564 intHI_type_node, 8);
16565 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16566 intQI_type_node, 16);
16567
16568 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16569 unsigned_intQI_type_node, 16);
16570 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16571 unsigned_intHI_type_node, 8);
16572 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16573 unsigned_intSI_type_node, 4);
16574 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16575 ? "__vector unsigned long"
16576 : "__vector unsigned long long",
16577 unsigned_intDI_type_node, 2);
16578
16579 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16580
16581 const_str_type_node
16582 = build_pointer_type (build_qualified_type (char_type_node,
16583 TYPE_QUAL_CONST));
16584
16585 /* We use V1TI mode as a special container to hold __int128_t items that
16586 must live in VSX registers. */
16587 if (intTI_type_node)
16588 {
16589 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16590 intTI_type_node, 1);
16591 unsigned_V1TI_type_node
16592 = rs6000_vector_type ("__vector unsigned __int128",
16593 unsigned_intTI_type_node, 1);
16594 }
16595
16596 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16597 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16598 'vector unsigned short'. */
16599
16600 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16601 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16602 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16603 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16604 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16605
16606 long_integer_type_internal_node = long_integer_type_node;
16607 long_unsigned_type_internal_node = long_unsigned_type_node;
16608 long_long_integer_type_internal_node = long_long_integer_type_node;
16609 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16610 intQI_type_internal_node = intQI_type_node;
16611 uintQI_type_internal_node = unsigned_intQI_type_node;
16612 intHI_type_internal_node = intHI_type_node;
16613 uintHI_type_internal_node = unsigned_intHI_type_node;
16614 intSI_type_internal_node = intSI_type_node;
16615 uintSI_type_internal_node = unsigned_intSI_type_node;
16616 intDI_type_internal_node = intDI_type_node;
16617 uintDI_type_internal_node = unsigned_intDI_type_node;
16618 intTI_type_internal_node = intTI_type_node;
16619 uintTI_type_internal_node = unsigned_intTI_type_node;
16620 float_type_internal_node = float_type_node;
16621 double_type_internal_node = double_type_node;
16622 long_double_type_internal_node = long_double_type_node;
16623 dfloat64_type_internal_node = dfloat64_type_node;
16624 dfloat128_type_internal_node = dfloat128_type_node;
16625 void_type_internal_node = void_type_node;
16626
16627 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16628 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16629 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16630 format that uses a pair of doubles, depending on the switches and
16631 defaults.
16632
16633 If we don't support either 128-bit IBM double-double or IEEE 128-bit
16634 floating point, we need to make sure the type is non-zero or else the self-test
16635 fails during bootstrap.
16636
16637 Always create __ibm128 as a separate type, even if the current long double
16638 format is IBM extended double.
16639
16640 For IEEE 128-bit floating point, always create the type __ieee128. If the
16641 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16642 __ieee128. */
16643 if (TARGET_FLOAT128_TYPE)
16644 {
16645 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16646 ibm128_float_type_node = long_double_type_node;
16647 else
16648 {
16649 ibm128_float_type_node = make_node (REAL_TYPE);
16650 TYPE_PRECISION (ibm128_float_type_node) = 128;
16651 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16652 layout_type (ibm128_float_type_node);
16653 }
16654
16655 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16656 "__ibm128");
16657
16658 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16659 ieee128_float_type_node = long_double_type_node;
16660 else
16661 ieee128_float_type_node = float128_type_node;
16662
16663 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16664 "__ieee128");
16665 }
16666
16667 else
16668 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16669
16670 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16671 a tree type node. */
16672 builtin_mode_to_type[QImode][0] = integer_type_node;
16673 builtin_mode_to_type[HImode][0] = integer_type_node;
16674 builtin_mode_to_type[SImode][0] = intSI_type_node;
16675 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16676 builtin_mode_to_type[DImode][0] = intDI_type_node;
16677 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16678 builtin_mode_to_type[TImode][0] = intTI_type_node;
16679 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16680 builtin_mode_to_type[SFmode][0] = float_type_node;
16681 builtin_mode_to_type[DFmode][0] = double_type_node;
16682 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16683 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16684 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16685 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16686 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16687 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16688 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16689 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16690 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16691 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16692 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16693 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16694 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16695 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16696 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16697 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16698 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16699
16700 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16701 TYPE_NAME (bool_char_type_node) = tdecl;
16702
16703 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16704 TYPE_NAME (bool_short_type_node) = tdecl;
16705
16706 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16707 TYPE_NAME (bool_int_type_node) = tdecl;
16708
16709 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16710 TYPE_NAME (pixel_type_node) = tdecl;
16711
16712 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16713 bool_char_type_node, 16);
16714 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16715 bool_short_type_node, 8);
16716 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16717 bool_int_type_node, 4);
16718 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16719 ? "__vector __bool long"
16720 : "__vector __bool long long",
16721 bool_long_long_type_node, 2);
16722 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16723 pixel_type_node, 8);
16724
16725 /* Create Altivec and VSX builtins on machines with at least the
16726 general purpose extensions (970 and newer) to allow the use of
16727 the target attribute. */
16728 if (TARGET_EXTRA_BUILTINS)
16729 altivec_init_builtins ();
16730 if (TARGET_HTM)
16731 htm_init_builtins ();
16732
16733 if (TARGET_EXTRA_BUILTINS)
16734 rs6000_common_init_builtins ();
16735
16736 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16737 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16738 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16739
16740 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16741 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16742 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16743
16744 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16745 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16746 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16747
16748 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16749 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16750 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16751
16752 mode = (TARGET_64BIT) ? DImode : SImode;
16753 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16754 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16755 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16756
16757 ftype = build_function_type_list (unsigned_intDI_type_node,
16758 NULL_TREE);
16759 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16760
16761 if (TARGET_64BIT)
16762 ftype = build_function_type_list (unsigned_intDI_type_node,
16763 NULL_TREE);
16764 else
16765 ftype = build_function_type_list (unsigned_intSI_type_node,
16766 NULL_TREE);
16767 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16768
16769 ftype = build_function_type_list (double_type_node, NULL_TREE);
16770 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16771
16772 ftype = build_function_type_list (double_type_node, NULL_TREE);
16773 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16774
16775 ftype = build_function_type_list (void_type_node,
16776 intSI_type_node,
16777 NULL_TREE);
16778 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16779
16780 ftype = build_function_type_list (void_type_node,
16781 intSI_type_node,
16782 NULL_TREE);
16783 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16784
16785 ftype = build_function_type_list (void_type_node,
16786 intDI_type_node,
16787 NULL_TREE);
16788 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16789
16790 ftype = build_function_type_list (void_type_node,
16791 intDI_type_node,
16792 NULL_TREE);
16793 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16794
16795 ftype = build_function_type_list (void_type_node,
16796 intSI_type_node, double_type_node,
16797 NULL_TREE);
16798 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16799
16800 ftype = build_function_type_list (void_type_node, NULL_TREE);
16801 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16802 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16803 MISC_BUILTIN_SPEC_BARRIER);
16804
16805 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16806 NULL_TREE);
16807 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16808 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16809
16810 /* AIX libm provides clog as __clog. */
16811 if (TARGET_XCOFF
16812 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16813 set_user_assembler_name (tdecl, "__clog");
16814
16815 #ifdef SUBTARGET_INIT_BUILTINS
16816 SUBTARGET_INIT_BUILTINS;
16817 #endif
16818 }
16819
16820 /* Returns the rs6000 builtin decl for CODE. */
16821
16822 static tree
16823 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16824 {
16825 HOST_WIDE_INT fnmask;
16826
16827 if (code >= RS6000_BUILTIN_COUNT)
16828 return error_mark_node;
16829
16830 fnmask = rs6000_builtin_info[code].mask;
16831 if ((fnmask & rs6000_builtin_mask) != fnmask)
16832 {
16833 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16834 return error_mark_node;
16835 }
16836
16837 return rs6000_builtin_decls[code];
16838 }
16839
16840 static void
16841 altivec_init_builtins (void)
16842 {
16843 const struct builtin_description *d;
16844 size_t i;
16845 tree ftype;
16846 tree decl;
16847 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16848
16849 tree pvoid_type_node = build_pointer_type (void_type_node);
16850
16851 tree pcvoid_type_node
16852 = build_pointer_type (build_qualified_type (void_type_node,
16853 TYPE_QUAL_CONST));
16854
16855 tree int_ftype_opaque
16856 = build_function_type_list (integer_type_node,
16857 opaque_V4SI_type_node, NULL_TREE);
16858 tree opaque_ftype_opaque
16859 = build_function_type_list (integer_type_node, NULL_TREE);
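  /* Note: for overloaded built-ins such as vec_splats the type recorded
     here is only a placeholder; the front end rebuilds the call with the
     resolved type during overload resolution (an observation about the
     intent, not a guarantee), which is why integer_type_node suffices.  */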
16860 tree opaque_ftype_opaque_int
16861 = build_function_type_list (opaque_V4SI_type_node,
16862 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16863 tree opaque_ftype_opaque_opaque_int
16864 = build_function_type_list (opaque_V4SI_type_node,
16865 opaque_V4SI_type_node, opaque_V4SI_type_node,
16866 integer_type_node, NULL_TREE);
16867 tree opaque_ftype_opaque_opaque_opaque
16868 = build_function_type_list (opaque_V4SI_type_node,
16869 opaque_V4SI_type_node, opaque_V4SI_type_node,
16870 opaque_V4SI_type_node, NULL_TREE);
16871 tree opaque_ftype_opaque_opaque
16872 = build_function_type_list (opaque_V4SI_type_node,
16873 opaque_V4SI_type_node, opaque_V4SI_type_node,
16874 NULL_TREE);
16875 tree int_ftype_int_opaque_opaque
16876 = build_function_type_list (integer_type_node,
16877 integer_type_node, opaque_V4SI_type_node,
16878 opaque_V4SI_type_node, NULL_TREE);
16879 tree int_ftype_int_v4si_v4si
16880 = build_function_type_list (integer_type_node,
16881 integer_type_node, V4SI_type_node,
16882 V4SI_type_node, NULL_TREE);
16883 tree int_ftype_int_v2di_v2di
16884 = build_function_type_list (integer_type_node,
16885 integer_type_node, V2DI_type_node,
16886 V2DI_type_node, NULL_TREE);
16887 tree void_ftype_v4si
16888 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16889 tree v8hi_ftype_void
16890 = build_function_type_list (V8HI_type_node, NULL_TREE);
16891 tree void_ftype_void
16892 = build_function_type_list (void_type_node, NULL_TREE);
16893 tree void_ftype_int
16894 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16895
16896 tree opaque_ftype_long_pcvoid
16897 = build_function_type_list (opaque_V4SI_type_node,
16898 long_integer_type_node, pcvoid_type_node,
16899 NULL_TREE);
16900 tree v16qi_ftype_long_pcvoid
16901 = build_function_type_list (V16QI_type_node,
16902 long_integer_type_node, pcvoid_type_node,
16903 NULL_TREE);
16904 tree v8hi_ftype_long_pcvoid
16905 = build_function_type_list (V8HI_type_node,
16906 long_integer_type_node, pcvoid_type_node,
16907 NULL_TREE);
16908 tree v4si_ftype_long_pcvoid
16909 = build_function_type_list (V4SI_type_node,
16910 long_integer_type_node, pcvoid_type_node,
16911 NULL_TREE);
16912 tree v4sf_ftype_long_pcvoid
16913 = build_function_type_list (V4SF_type_node,
16914 long_integer_type_node, pcvoid_type_node,
16915 NULL_TREE);
16916 tree v2df_ftype_long_pcvoid
16917 = build_function_type_list (V2DF_type_node,
16918 long_integer_type_node, pcvoid_type_node,
16919 NULL_TREE);
16920 tree v2di_ftype_long_pcvoid
16921 = build_function_type_list (V2DI_type_node,
16922 long_integer_type_node, pcvoid_type_node,
16923 NULL_TREE);
16924 tree v1ti_ftype_long_pcvoid
16925 = build_function_type_list (V1TI_type_node,
16926 long_integer_type_node, pcvoid_type_node,
16927 NULL_TREE);
16928
16929 tree void_ftype_opaque_long_pvoid
16930 = build_function_type_list (void_type_node,
16931 opaque_V4SI_type_node, long_integer_type_node,
16932 pvoid_type_node, NULL_TREE);
16933 tree void_ftype_v4si_long_pvoid
16934 = build_function_type_list (void_type_node,
16935 V4SI_type_node, long_integer_type_node,
16936 pvoid_type_node, NULL_TREE);
16937 tree void_ftype_v16qi_long_pvoid
16938 = build_function_type_list (void_type_node,
16939 V16QI_type_node, long_integer_type_node,
16940 pvoid_type_node, NULL_TREE);
16941
16942 tree void_ftype_v16qi_pvoid_long
16943 = build_function_type_list (void_type_node,
16944 V16QI_type_node, pvoid_type_node,
16945 long_integer_type_node, NULL_TREE);
16946
16947 tree void_ftype_v8hi_long_pvoid
16948 = build_function_type_list (void_type_node,
16949 V8HI_type_node, long_integer_type_node,
16950 pvoid_type_node, NULL_TREE);
16951 tree void_ftype_v4sf_long_pvoid
16952 = build_function_type_list (void_type_node,
16953 V4SF_type_node, long_integer_type_node,
16954 pvoid_type_node, NULL_TREE);
16955 tree void_ftype_v2df_long_pvoid
16956 = build_function_type_list (void_type_node,
16957 V2DF_type_node, long_integer_type_node,
16958 pvoid_type_node, NULL_TREE);
16959 tree void_ftype_v1ti_long_pvoid
16960 = build_function_type_list (void_type_node,
16961 V1TI_type_node, long_integer_type_node,
16962 pvoid_type_node, NULL_TREE);
16963 tree void_ftype_v2di_long_pvoid
16964 = build_function_type_list (void_type_node,
16965 V2DI_type_node, long_integer_type_node,
16966 pvoid_type_node, NULL_TREE);
16967 tree int_ftype_int_v8hi_v8hi
16968 = build_function_type_list (integer_type_node,
16969 integer_type_node, V8HI_type_node,
16970 V8HI_type_node, NULL_TREE);
16971 tree int_ftype_int_v16qi_v16qi
16972 = build_function_type_list (integer_type_node,
16973 integer_type_node, V16QI_type_node,
16974 V16QI_type_node, NULL_TREE);
16975 tree int_ftype_int_v4sf_v4sf
16976 = build_function_type_list (integer_type_node,
16977 integer_type_node, V4SF_type_node,
16978 V4SF_type_node, NULL_TREE);
16979 tree int_ftype_int_v2df_v2df
16980 = build_function_type_list (integer_type_node,
16981 integer_type_node, V2DF_type_node,
16982 V2DF_type_node, NULL_TREE);
16983 tree v2di_ftype_v2di
16984 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16985 tree v4si_ftype_v4si
16986 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16987 tree v8hi_ftype_v8hi
16988 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16989 tree v16qi_ftype_v16qi
16990 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16991 tree v4sf_ftype_v4sf
16992 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16993 tree v2df_ftype_v2df
16994 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16995 tree void_ftype_pcvoid_int_int
16996 = build_function_type_list (void_type_node,
16997 pcvoid_type_node, integer_type_node,
16998 integer_type_node, NULL_TREE);
16999
17000 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17001 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17002 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17003 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17004 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17005 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17006 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17007 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17008 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17009 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17010 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17011 ALTIVEC_BUILTIN_LVXL_V2DF);
17012 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17013 ALTIVEC_BUILTIN_LVXL_V2DI);
17014 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17015 ALTIVEC_BUILTIN_LVXL_V4SF);
17016 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17017 ALTIVEC_BUILTIN_LVXL_V4SI);
17018 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17019 ALTIVEC_BUILTIN_LVXL_V8HI);
17020 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17021 ALTIVEC_BUILTIN_LVXL_V16QI);
17022 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17023 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
17024 ALTIVEC_BUILTIN_LVX_V1TI);
17025 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17026 ALTIVEC_BUILTIN_LVX_V2DF);
17027 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17028 ALTIVEC_BUILTIN_LVX_V2DI);
17029 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17030 ALTIVEC_BUILTIN_LVX_V4SF);
17031 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17032 ALTIVEC_BUILTIN_LVX_V4SI);
17033 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17034 ALTIVEC_BUILTIN_LVX_V8HI);
17035 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17036 ALTIVEC_BUILTIN_LVX_V16QI);
17037 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17038 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17039 ALTIVEC_BUILTIN_STVX_V2DF);
17040 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17041 ALTIVEC_BUILTIN_STVX_V2DI);
17042 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17043 ALTIVEC_BUILTIN_STVX_V4SF);
17044 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17045 ALTIVEC_BUILTIN_STVX_V4SI);
17046 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17047 ALTIVEC_BUILTIN_STVX_V8HI);
17048 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17049 ALTIVEC_BUILTIN_STVX_V16QI);
17050 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17051 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17052 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17053 ALTIVEC_BUILTIN_STVXL_V2DF);
17054 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17055 ALTIVEC_BUILTIN_STVXL_V2DI);
17056 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17057 ALTIVEC_BUILTIN_STVXL_V4SF);
17058 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17059 ALTIVEC_BUILTIN_STVXL_V4SI);
17060 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17061 ALTIVEC_BUILTIN_STVXL_V8HI);
17062 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17063 ALTIVEC_BUILTIN_STVXL_V16QI);
17064 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17065 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17066 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17067 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17068 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17069 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17070 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17071 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17072 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17073 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17074 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17075 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17076 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17077 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17078 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17079 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17080
17081 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17082 VSX_BUILTIN_LXVD2X_V2DF);
17083 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17084 VSX_BUILTIN_LXVD2X_V2DI);
17085 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17086 VSX_BUILTIN_LXVW4X_V4SF);
17087 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17088 VSX_BUILTIN_LXVW4X_V4SI);
17089 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17090 VSX_BUILTIN_LXVW4X_V8HI);
17091 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17092 VSX_BUILTIN_LXVW4X_V16QI);
17093 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17094 VSX_BUILTIN_STXVD2X_V2DF);
17095 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17096 VSX_BUILTIN_STXVD2X_V2DI);
17097 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17098 VSX_BUILTIN_STXVW4X_V4SF);
17099 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17100 VSX_BUILTIN_STXVW4X_V4SI);
17101 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17102 VSX_BUILTIN_STXVW4X_V8HI);
17103 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17104 VSX_BUILTIN_STXVW4X_V16QI);
17105
17106 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17107 VSX_BUILTIN_LD_ELEMREV_V2DF);
17108 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17109 VSX_BUILTIN_LD_ELEMREV_V2DI);
17110 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17111 VSX_BUILTIN_LD_ELEMREV_V4SF);
17112 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17113 VSX_BUILTIN_LD_ELEMREV_V4SI);
17114 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17115 VSX_BUILTIN_LD_ELEMREV_V8HI);
17116 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17117 VSX_BUILTIN_LD_ELEMREV_V16QI);
17118 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17119 VSX_BUILTIN_ST_ELEMREV_V2DF);
17120 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17121 VSX_BUILTIN_ST_ELEMREV_V1TI);
17122 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17123 VSX_BUILTIN_ST_ELEMREV_V2DI);
17124 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17125 VSX_BUILTIN_ST_ELEMREV_V4SF);
17126 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17127 VSX_BUILTIN_ST_ELEMREV_V4SI);
17128 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17129 VSX_BUILTIN_ST_ELEMREV_V8HI);
17130 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17131 VSX_BUILTIN_ST_ELEMREV_V16QI);
17132
17133 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17134 VSX_BUILTIN_VEC_LD);
17135 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17136 VSX_BUILTIN_VEC_ST);
17137 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17138 VSX_BUILTIN_VEC_XL);
17139 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17140 VSX_BUILTIN_VEC_XL_BE);
17141 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17142 VSX_BUILTIN_VEC_XST);
17143 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17144 VSX_BUILTIN_VEC_XST_BE);
17145
17146 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17147 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17148 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17149
17150 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17151 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17152 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17153 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17154 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17155 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17156 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17157 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17158 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17159 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17160 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17161 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17162
17163 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17164 ALTIVEC_BUILTIN_VEC_ADDE);
17165 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17166 ALTIVEC_BUILTIN_VEC_ADDEC);
17167 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17168 ALTIVEC_BUILTIN_VEC_CMPNE);
17169 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17170 ALTIVEC_BUILTIN_VEC_MUL);
17171 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17172 ALTIVEC_BUILTIN_VEC_SUBE);
17173 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17174 ALTIVEC_BUILTIN_VEC_SUBEC);
17175
17176 /* Cell builtins. */
17177 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17178 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17179 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17180 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17181
17182 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17183 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17184 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17185 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17186
17187 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17188 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17189 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17190 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17191
17192 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17193 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17194 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17195 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17196
17197 if (TARGET_P9_VECTOR)
17198 {
17199 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17200 P9V_BUILTIN_STXVL);
17201 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17202 P9V_BUILTIN_XST_LEN_R);
17203 }
17204
17205 /* Add the DST variants. */
17206 d = bdesc_dst;
17207 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17208 {
17209 HOST_WIDE_INT mask = d->mask;
17210
17211 /* It is expected that these dst built-in functions may have
17212 d->icode equal to CODE_FOR_nothing. */
17213 if ((mask & builtin_mask) != mask)
17214 {
17215 if (TARGET_DEBUG_BUILTIN)
17216 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17217 d->name);
17218 continue;
17219 }
17220 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17221 }
17222
17223 /* Initialize the predicates. */
17224 d = bdesc_altivec_preds;
17225 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17226 {
17227 machine_mode mode1;
17228 tree type;
17229 HOST_WIDE_INT mask = d->mask;
17230
17231 if ((mask & builtin_mask) != mask)
17232 {
17233 if (TARGET_DEBUG_BUILTIN)
17234 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17235 d->name);
17236 continue;
17237 }
17238
17239 if (rs6000_overloaded_builtin_p (d->code))
17240 mode1 = VOIDmode;
17241 else
17242 {
17243 /* Cannot define builtin if the instruction is disabled. */
17244 gcc_assert (d->icode != CODE_FOR_nothing);
17245 mode1 = insn_data[d->icode].operand[1].mode;
17246 }
17247
17248 switch (mode1)
17249 {
17250 case E_VOIDmode:
17251 type = int_ftype_int_opaque_opaque;
17252 break;
17253 case E_V2DImode:
17254 type = int_ftype_int_v2di_v2di;
17255 break;
17256 case E_V4SImode:
17257 type = int_ftype_int_v4si_v4si;
17258 break;
17259 case E_V8HImode:
17260 type = int_ftype_int_v8hi_v8hi;
17261 break;
17262 case E_V16QImode:
17263 type = int_ftype_int_v16qi_v16qi;
17264 break;
17265 case E_V4SFmode:
17266 type = int_ftype_int_v4sf_v4sf;
17267 break;
17268 case E_V2DFmode:
17269 type = int_ftype_int_v2df_v2df;
17270 break;
17271 default:
17272 gcc_unreachable ();
17273 }
17274
17275 def_builtin (d->name, type, d->code);
17276 }
17277
17278 /* Initialize the abs* operators. */
17279 d = bdesc_abs;
17280 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17281 {
17282 machine_mode mode0;
17283 tree type;
17284 HOST_WIDE_INT mask = d->mask;
17285
17286 if ((mask & builtin_mask) != mask)
17287 {
17288 if (TARGET_DEBUG_BUILTIN)
17289 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17290 d->name);
17291 continue;
17292 }
17293
17294 /* Cannot define builtin if the instruction is disabled. */
17295 gcc_assert (d->icode != CODE_FOR_nothing);
17296 mode0 = insn_data[d->icode].operand[0].mode;
17297
17298 switch (mode0)
17299 {
17300 case E_V2DImode:
17301 type = v2di_ftype_v2di;
17302 break;
17303 case E_V4SImode:
17304 type = v4si_ftype_v4si;
17305 break;
17306 case E_V8HImode:
17307 type = v8hi_ftype_v8hi;
17308 break;
17309 case E_V16QImode:
17310 type = v16qi_ftype_v16qi;
17311 break;
17312 case E_V4SFmode:
17313 type = v4sf_ftype_v4sf;
17314 break;
17315 case E_V2DFmode:
17316 type = v2df_ftype_v2df;
17317 break;
17318 default:
17319 gcc_unreachable ();
17320 }
17321
17322 def_builtin (d->name, type, d->code);
17323 }
17324
17325 /* Initialize target builtin that implements
17326 targetm.vectorize.builtin_mask_for_load. */
17327
17328 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17329 v16qi_ftype_long_pcvoid,
17330 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17331 BUILT_IN_MD, NULL, NULL_TREE);
17332 TREE_READONLY (decl) = 1;
17333 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17334 altivec_builtin_mask_for_load = decl;
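  /* The autovectorizer reaches this decl through
     targetm.vectorize.builtin_mask_for_load when realigning misaligned
     vector loads; conceptually (a sketch of the intent, not of the exact
     expansion) it yields a permute control vector derived from the
     low-order address bits, in the spirit of lvsl/lvsr:

       vector unsigned char mask
         = __builtin_altivec_mask_for_load (0, addr);  */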
17335
17336 /* Access to the vec_init patterns. */
17337 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17338 integer_type_node, integer_type_node,
17339 integer_type_node, NULL_TREE);
17340 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17341
17342 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17343 short_integer_type_node,
17344 short_integer_type_node,
17345 short_integer_type_node,
17346 short_integer_type_node,
17347 short_integer_type_node,
17348 short_integer_type_node,
17349 short_integer_type_node, NULL_TREE);
17350 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17351
17352 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17353 char_type_node, char_type_node,
17354 char_type_node, char_type_node,
17355 char_type_node, char_type_node,
17356 char_type_node, char_type_node,
17357 char_type_node, char_type_node,
17358 char_type_node, char_type_node,
17359 char_type_node, char_type_node,
17360 char_type_node, NULL_TREE);
17361 def_builtin ("__builtin_vec_init_v16qi", ftype,
17362 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17363
17364 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17365 float_type_node, float_type_node,
17366 float_type_node, NULL_TREE);
17367 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17368
17369 /* VSX builtins. */
17370 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17371 double_type_node, NULL_TREE);
17372 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17373
17374 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17375 intDI_type_node, NULL_TREE);
17376 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17377
17378 /* Access to the vec_set patterns. */
17379 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17380 intSI_type_node,
17381 integer_type_node, NULL_TREE);
17382 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17383
17384 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17385 intHI_type_node,
17386 integer_type_node, NULL_TREE);
17387 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17388
17389 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17390 intQI_type_node,
17391 integer_type_node, NULL_TREE);
17392 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17393
17394 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17395 float_type_node,
17396 integer_type_node, NULL_TREE);
17397 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17398
17399 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17400 double_type_node,
17401 integer_type_node, NULL_TREE);
17402 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17403
17404 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17405 intDI_type_node,
17406 integer_type_node, NULL_TREE);
17407 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17408
17409 /* Access to the vec_extract patterns. */
17410 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17411 integer_type_node, NULL_TREE);
17412 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17413
17414 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17415 integer_type_node, NULL_TREE);
17416 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17417
17418 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17419 integer_type_node, NULL_TREE);
17420 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17421
17422 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17423 integer_type_node, NULL_TREE);
17424 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17425
17426 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17427 integer_type_node, NULL_TREE);
17428 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17429
17430 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17431 integer_type_node, NULL_TREE);
17432 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);

  if (V1TI_type_node)
17436 {
17437 tree v1ti_ftype_long_pcvoid
17438 = build_function_type_list (V1TI_type_node,
17439 long_integer_type_node, pcvoid_type_node,
17440 NULL_TREE);
17441 tree void_ftype_v1ti_long_pvoid
17442 = build_function_type_list (void_type_node,
17443 V1TI_type_node, long_integer_type_node,
17444 pvoid_type_node, NULL_TREE);
17445 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17446 VSX_BUILTIN_LD_ELEMREV_V1TI);
17447 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17448 VSX_BUILTIN_LXVD2X_V1TI);
17449 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17450 VSX_BUILTIN_STXVD2X_V1TI);
      ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
                                        NULL_TREE);
17453 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17454 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17455 intTI_type_node,
17456 integer_type_node, NULL_TREE);
17457 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17458 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17459 integer_type_node, NULL_TREE);
17460 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17461 }
}
17464
17465 static void
17466 htm_init_builtins (void)
17467 {
17468 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17469 const struct builtin_description *d;
17470 size_t i;
17471
17472 d = bdesc_htm;
17473 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17474 {
17475 tree op[MAX_HTM_OPERANDS], type;
17476 HOST_WIDE_INT mask = d->mask;
17477 unsigned attr = rs6000_builtin_info[d->code].attr;
17478 bool void_func = (attr & RS6000_BTC_VOID);
17479 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17480 int nopnds = 0;
17481 tree gpr_type_node;
17482 tree rettype;
17483 tree argtype;
17484
17485 /* It is expected that these htm built-in functions may have
17486 d->icode equal to CODE_FOR_nothing. */
17487
17488 if (TARGET_32BIT && TARGET_POWERPC64)
17489 gpr_type_node = long_long_unsigned_type_node;
17490 else
17491 gpr_type_node = long_unsigned_type_node;
17492
17493 if (attr & RS6000_BTC_SPR)
17494 {
17495 rettype = gpr_type_node;
17496 argtype = gpr_type_node;
17497 }
17498 else if (d->code == HTM_BUILTIN_TABORTDC
17499 || d->code == HTM_BUILTIN_TABORTDCI)
17500 {
17501 rettype = unsigned_type_node;
17502 argtype = gpr_type_node;
17503 }
17504 else
17505 {
17506 rettype = unsigned_type_node;
17507 argtype = unsigned_type_node;
17508 }
17509
17510 if ((mask & builtin_mask) != mask)
17511 {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "htm_builtin, skip %s\n", d->name);
17514 continue;
17515 }
17516
17517 if (d->name == 0)
17518 {
17519 if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
                     (long unsigned) i);
17522 continue;
17523 }
17524
17525 op[nopnds++] = (void_func) ? void_type_node : rettype;
17526
17527 if (attr_args == RS6000_BTC_UNARY)
17528 op[nopnds++] = argtype;
17529 else if (attr_args == RS6000_BTC_BINARY)
17530 {
17531 op[nopnds++] = argtype;
17532 op[nopnds++] = argtype;
17533 }
17534 else if (attr_args == RS6000_BTC_TERNARY)
17535 {
17536 op[nopnds++] = argtype;
17537 op[nopnds++] = argtype;
17538 op[nopnds++] = argtype;
17539 }
17540
17541 switch (nopnds)
17542 {
17543 case 1:
17544 type = build_function_type_list (op[0], NULL_TREE);
17545 break;
17546 case 2:
17547 type = build_function_type_list (op[0], op[1], NULL_TREE);
17548 break;
17549 case 3:
17550 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17551 break;
17552 case 4:
17553 type = build_function_type_list (op[0], op[1], op[2], op[3],
17554 NULL_TREE);
17555 break;
17556 default:
17557 gcc_unreachable ();
17558 }
17559
17560 def_builtin (d->name, type, d->code);
17561 }
17562 }
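
/* A usage sketch for the HTM built-ins created above, following the
   documented PowerPC hardware transactional memory interface (failure
   diagnostics elided):

     if (__builtin_tbegin (0))
       {
         ... transactional region ...
         __builtin_tend (0);
       }
     else
       ... non-transactional fallback path ...  */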
17563
17564 /* Hash function for builtin functions with up to 3 arguments and a return
17565 type. */
17566 hashval_t
17567 builtin_hasher::hash (builtin_hash_struct *bh)
17568 {
17569 unsigned ret = 0;
17570 int i;
17571
17572 for (i = 0; i < 4; i++)
17573 {
17574 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17575 ret = (ret * 2) + bh->uns_p[i];
17576 }
17577
17578 return ret;
17579 }
17580
17581 /* Compare builtin hash entries H1 and H2 for equivalence. */
17582 bool
17583 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17584 {
17585 return ((p1->mode[0] == p2->mode[0])
17586 && (p1->mode[1] == p2->mode[1])
17587 && (p1->mode[2] == p2->mode[2])
17588 && (p1->mode[3] == p2->mode[3])
17589 && (p1->uns_p[0] == p2->uns_p[0])
17590 && (p1->uns_p[1] == p2->uns_p[1])
17591 && (p1->uns_p[2] == p2->uns_p[2])
17592 && (p1->uns_p[3] == p2->uns_p[3]));
17593 }
17594
/* Map types for builtin functions with an explicit return type and up to 3
   arguments.  Functions with fewer than 3 arguments use VOIDmode as the mode
   of each unused argument.  */
17598 static tree
17599 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17600 machine_mode mode_arg1, machine_mode mode_arg2,
17601 enum rs6000_builtins builtin, const char *name)
17602 {
17603 struct builtin_hash_struct h;
17604 struct builtin_hash_struct *h2;
17605 int num_args = 3;
17606 int i;
17607 tree ret_type = NULL_TREE;
17608 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17609
17610 /* Create builtin_hash_table. */
17611 if (builtin_hash_table == NULL)
17612 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17613
17614 h.type = NULL_TREE;
17615 h.mode[0] = mode_ret;
17616 h.mode[1] = mode_arg0;
17617 h.mode[2] = mode_arg1;
17618 h.mode[3] = mode_arg2;
17619 h.uns_p[0] = 0;
17620 h.uns_p[1] = 0;
17621 h.uns_p[2] = 0;
17622 h.uns_p[3] = 0;
17623
  /* If the builtin produces unsigned results or takes unsigned arguments,
     and it is returned as a decl for the vectorizer (such as widening
     multiplies, permute), make sure the arguments and return value
     are type correct.  */
17628 switch (builtin)
17629 {
17630 /* unsigned 1 argument functions. */
17631 case CRYPTO_BUILTIN_VSBOX:
17632 case P8V_BUILTIN_VGBBD:
17633 case MISC_BUILTIN_CDTBCD:
17634 case MISC_BUILTIN_CBCDTD:
17635 h.uns_p[0] = 1;
17636 h.uns_p[1] = 1;
17637 break;
17638
17639 /* unsigned 2 argument functions. */
17640 case ALTIVEC_BUILTIN_VMULEUB:
17641 case ALTIVEC_BUILTIN_VMULEUH:
17642 case P8V_BUILTIN_VMULEUW:
17643 case ALTIVEC_BUILTIN_VMULOUB:
17644 case ALTIVEC_BUILTIN_VMULOUH:
17645 case P8V_BUILTIN_VMULOUW:
17646 case CRYPTO_BUILTIN_VCIPHER:
17647 case CRYPTO_BUILTIN_VCIPHERLAST:
17648 case CRYPTO_BUILTIN_VNCIPHER:
17649 case CRYPTO_BUILTIN_VNCIPHERLAST:
17650 case CRYPTO_BUILTIN_VPMSUMB:
17651 case CRYPTO_BUILTIN_VPMSUMH:
17652 case CRYPTO_BUILTIN_VPMSUMW:
17653 case CRYPTO_BUILTIN_VPMSUMD:
17654 case CRYPTO_BUILTIN_VPMSUM:
17655 case MISC_BUILTIN_ADDG6S:
17656 case MISC_BUILTIN_DIVWEU:
17657 case MISC_BUILTIN_DIVDEU:
17658 case VSX_BUILTIN_UDIV_V2DI:
17659 case ALTIVEC_BUILTIN_VMAXUB:
17660 case ALTIVEC_BUILTIN_VMINUB:
17661 case ALTIVEC_BUILTIN_VMAXUH:
17662 case ALTIVEC_BUILTIN_VMINUH:
17663 case ALTIVEC_BUILTIN_VMAXUW:
17664 case ALTIVEC_BUILTIN_VMINUW:
17665 case P8V_BUILTIN_VMAXUD:
17666 case P8V_BUILTIN_VMINUD:
17667 h.uns_p[0] = 1;
17668 h.uns_p[1] = 1;
17669 h.uns_p[2] = 1;
17670 break;
17671
17672 /* unsigned 3 argument functions. */
17673 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17674 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17675 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17676 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17677 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17678 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17679 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17680 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17681 case VSX_BUILTIN_VPERM_16QI_UNS:
17682 case VSX_BUILTIN_VPERM_8HI_UNS:
17683 case VSX_BUILTIN_VPERM_4SI_UNS:
17684 case VSX_BUILTIN_VPERM_2DI_UNS:
17685 case VSX_BUILTIN_XXSEL_16QI_UNS:
17686 case VSX_BUILTIN_XXSEL_8HI_UNS:
17687 case VSX_BUILTIN_XXSEL_4SI_UNS:
17688 case VSX_BUILTIN_XXSEL_2DI_UNS:
17689 case CRYPTO_BUILTIN_VPERMXOR:
17690 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17691 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17692 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17693 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17694 case CRYPTO_BUILTIN_VSHASIGMAW:
17695 case CRYPTO_BUILTIN_VSHASIGMAD:
17696 case CRYPTO_BUILTIN_VSHASIGMA:
17697 h.uns_p[0] = 1;
17698 h.uns_p[1] = 1;
17699 h.uns_p[2] = 1;
17700 h.uns_p[3] = 1;
17701 break;
17702
17703 /* signed permute functions with unsigned char mask. */
17704 case ALTIVEC_BUILTIN_VPERM_16QI:
17705 case ALTIVEC_BUILTIN_VPERM_8HI:
17706 case ALTIVEC_BUILTIN_VPERM_4SI:
17707 case ALTIVEC_BUILTIN_VPERM_4SF:
17708 case ALTIVEC_BUILTIN_VPERM_2DI:
17709 case ALTIVEC_BUILTIN_VPERM_2DF:
17710 case VSX_BUILTIN_VPERM_16QI:
17711 case VSX_BUILTIN_VPERM_8HI:
17712 case VSX_BUILTIN_VPERM_4SI:
17713 case VSX_BUILTIN_VPERM_4SF:
17714 case VSX_BUILTIN_VPERM_2DI:
17715 case VSX_BUILTIN_VPERM_2DF:
17716 h.uns_p[3] = 1;
17717 break;
17718
17719 /* unsigned args, signed return. */
17720 case VSX_BUILTIN_XVCVUXDSP:
17721 case VSX_BUILTIN_XVCVUXDDP_UNS:
17722 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17723 h.uns_p[1] = 1;
17724 break;
17725
17726 /* signed args, unsigned return. */
17727 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17728 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17729 case MISC_BUILTIN_UNPACK_TD:
17730 case MISC_BUILTIN_UNPACK_V1TI:
17731 h.uns_p[0] = 1;
17732 break;
17733
17734 /* unsigned arguments, bool return (compares). */
17735 case ALTIVEC_BUILTIN_VCMPEQUB:
17736 case ALTIVEC_BUILTIN_VCMPEQUH:
17737 case ALTIVEC_BUILTIN_VCMPEQUW:
17738 case P8V_BUILTIN_VCMPEQUD:
17739 case VSX_BUILTIN_CMPGE_U16QI:
17740 case VSX_BUILTIN_CMPGE_U8HI:
17741 case VSX_BUILTIN_CMPGE_U4SI:
17742 case VSX_BUILTIN_CMPGE_U2DI:
17743 case ALTIVEC_BUILTIN_VCMPGTUB:
17744 case ALTIVEC_BUILTIN_VCMPGTUH:
17745 case ALTIVEC_BUILTIN_VCMPGTUW:
17746 case P8V_BUILTIN_VCMPGTUD:
17747 h.uns_p[1] = 1;
17748 h.uns_p[2] = 1;
17749 break;
17750
17751 /* unsigned arguments for 128-bit pack instructions. */
17752 case MISC_BUILTIN_PACK_TD:
17753 case MISC_BUILTIN_PACK_V1TI:
17754 h.uns_p[1] = 1;
17755 h.uns_p[2] = 1;
17756 break;
17757
17758 /* unsigned second arguments (vector shift right). */
17759 case ALTIVEC_BUILTIN_VSRB:
17760 case ALTIVEC_BUILTIN_VSRH:
17761 case ALTIVEC_BUILTIN_VSRW:
17762 case P8V_BUILTIN_VSRD:
17763 h.uns_p[2] = 1;
17764 break;
17765
17766 default:
17767 break;
17768 }
17769
17770 /* Figure out how many args are present. */
17771 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17772 num_args--;
17773
17774 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17775 if (!ret_type && h.uns_p[0])
17776 ret_type = builtin_mode_to_type[h.mode[0]][0];
17777
17778 if (!ret_type)
17779 fatal_error (input_location,
17780 "internal error: builtin function %qs had an unexpected "
17781 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17782
17783 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17784 arg_type[i] = NULL_TREE;
17785
17786 for (i = 0; i < num_args; i++)
17787 {
17788 int m = (int) h.mode[i+1];
17789 int uns_p = h.uns_p[i+1];
17790
17791 arg_type[i] = builtin_mode_to_type[m][uns_p];
17792 if (!arg_type[i] && uns_p)
17793 arg_type[i] = builtin_mode_to_type[m][0];
17794
17795 if (!arg_type[i])
17796 fatal_error (input_location,
17797 "internal error: builtin function %qs, argument %d "
17798 "had unexpected argument type %qs", name, i,
17799 GET_MODE_NAME (m));
17800 }
17801
17802 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17803 if (*found == NULL)
17804 {
17805 h2 = ggc_alloc<builtin_hash_struct> ();
17806 *h2 = h;
17807 *found = h2;
17808
17809 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17810 arg_type[2], NULL_TREE);
17811 }
17812
17813 return (*found)->type;
17814 }
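
/* For example, the call made earlier in this file,

     builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
                            RS6000_BUILTIN_RSQRT, "__builtin_rsqrt"),

   matches no unsigned special case, trims num_args down to 1, and returns
   (and caches) the function type "double (double)".  */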
17815
17816 static void
17817 rs6000_common_init_builtins (void)
17818 {
17819 const struct builtin_description *d;
17820 size_t i;
17821
17822 tree opaque_ftype_opaque = NULL_TREE;
17823 tree opaque_ftype_opaque_opaque = NULL_TREE;
17824 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17825 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17826
17827 /* Create Altivec and VSX builtins on machines with at least the
17828 general purpose extensions (970 and newer) to allow the use of
17829 the target attribute. */
17830
17831 if (TARGET_EXTRA_BUILTINS)
17832 builtin_mask |= RS6000_BTM_COMMON;
17833
17834 /* Add the ternary operators. */
17835 d = bdesc_3arg;
17836 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17837 {
17838 tree type;
17839 HOST_WIDE_INT mask = d->mask;
17840
17841 if ((mask & builtin_mask) != mask)
17842 {
17843 if (TARGET_DEBUG_BUILTIN)
17844 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17845 continue;
17846 }
17847
17848 if (rs6000_overloaded_builtin_p (d->code))
17849 {
17850 if (! (type = opaque_ftype_opaque_opaque_opaque))
17851 type = opaque_ftype_opaque_opaque_opaque
17852 = build_function_type_list (opaque_V4SI_type_node,
17853 opaque_V4SI_type_node,
17854 opaque_V4SI_type_node,
17855 opaque_V4SI_type_node,
17856 NULL_TREE);
17857 }
17858 else
17859 {
17860 enum insn_code icode = d->icode;
17861 if (d->name == 0)
17862 {
17863 if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
                         (long unsigned) i);
17866
17867 continue;
17868 }
17869
17870 if (icode == CODE_FOR_nothing)
17871 {
17872 if (TARGET_DEBUG_BUILTIN)
17873 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17874 d->name);
17875
17876 continue;
17877 }
17878
17879 type = builtin_function_type (insn_data[icode].operand[0].mode,
17880 insn_data[icode].operand[1].mode,
17881 insn_data[icode].operand[2].mode,
17882 insn_data[icode].operand[3].mode,
17883 d->code, d->name);
17884 }
17885
17886 def_builtin (d->name, type, d->code);
17887 }
17888
17889 /* Add the binary operators. */
17890 d = bdesc_2arg;
17891 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17892 {
17893 machine_mode mode0, mode1, mode2;
17894 tree type;
17895 HOST_WIDE_INT mask = d->mask;
17896
17897 if ((mask & builtin_mask) != mask)
17898 {
17899 if (TARGET_DEBUG_BUILTIN)
17900 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17901 continue;
17902 }
17903
17904 if (rs6000_overloaded_builtin_p (d->code))
17905 {
17906 if (! (type = opaque_ftype_opaque_opaque))
17907 type = opaque_ftype_opaque_opaque
17908 = build_function_type_list (opaque_V4SI_type_node,
17909 opaque_V4SI_type_node,
17910 opaque_V4SI_type_node,
17911 NULL_TREE);
17912 }
17913 else
17914 {
17915 enum insn_code icode = d->icode;
17916 if (d->name == 0)
17917 {
17918 if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
                         (long unsigned) i);
17921
17922 continue;
17923 }
17924
17925 if (icode == CODE_FOR_nothing)
17926 {
17927 if (TARGET_DEBUG_BUILTIN)
17928 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17929 d->name);
17930
17931 continue;
17932 }
17933
17934 mode0 = insn_data[icode].operand[0].mode;
17935 mode1 = insn_data[icode].operand[1].mode;
17936 mode2 = insn_data[icode].operand[2].mode;
17937
17938 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17939 d->code, d->name);
17940 }
17941
17942 def_builtin (d->name, type, d->code);
17943 }
17944
17945 /* Add the simple unary operators. */
17946 d = bdesc_1arg;
17947 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17948 {
17949 machine_mode mode0, mode1;
17950 tree type;
17951 HOST_WIDE_INT mask = d->mask;
17952
17953 if ((mask & builtin_mask) != mask)
17954 {
17955 if (TARGET_DEBUG_BUILTIN)
17956 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17957 continue;
17958 }
17959
17960 if (rs6000_overloaded_builtin_p (d->code))
17961 {
17962 if (! (type = opaque_ftype_opaque))
17963 type = opaque_ftype_opaque
17964 = build_function_type_list (opaque_V4SI_type_node,
17965 opaque_V4SI_type_node,
17966 NULL_TREE);
17967 }
17968 else
17969 {
17970 enum insn_code icode = d->icode;
17971 if (d->name == 0)
17972 {
17973 if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
                         (long unsigned) i);
17976
17977 continue;
17978 }
17979
17980 if (icode == CODE_FOR_nothing)
17981 {
17982 if (TARGET_DEBUG_BUILTIN)
17983 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17984 d->name);
17985
17986 continue;
17987 }
17988
17989 mode0 = insn_data[icode].operand[0].mode;
17990 mode1 = insn_data[icode].operand[1].mode;
17991
17992 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17993 d->code, d->name);
17994 }
17995
17996 def_builtin (d->name, type, d->code);
17997 }
17998
17999 /* Add the simple no-argument operators. */
18000 d = bdesc_0arg;
18001 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18002 {
18003 machine_mode mode0;
18004 tree type;
18005 HOST_WIDE_INT mask = d->mask;
18006
18007 if ((mask & builtin_mask) != mask)
18008 {
18009 if (TARGET_DEBUG_BUILTIN)
18010 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18011 continue;
18012 }
18013 if (rs6000_overloaded_builtin_p (d->code))
18014 {
18015 if (!opaque_ftype_opaque)
18016 opaque_ftype_opaque
18017 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18018 type = opaque_ftype_opaque;
18019 }
18020 else
18021 {
18022 enum insn_code icode = d->icode;
18023 if (d->name == 0)
18024 {
18025 if (TARGET_DEBUG_BUILTIN)
18026 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18027 (long unsigned) i);
18028 continue;
18029 }
18030 if (icode == CODE_FOR_nothing)
18031 {
18032 if (TARGET_DEBUG_BUILTIN)
18033 fprintf (stderr,
18034 "rs6000_builtin, skip no-argument %s (no code)\n",
18035 d->name);
18036 continue;
18037 }
18038 mode0 = insn_data[icode].operand[0].mode;
18039 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18040 d->code, d->name);
18041 }
18042 def_builtin (d->name, type, d->code);
18043 }
18044 }
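
/* Taken together, the loops above make a new simple built-in mostly a
   table entry.  Sketching the shape of a bdesc_2arg entry (field order as
   this file's builtin_description uses it via d->mask, d->icode, d->name
   and d->code):

     { MASK, CODE_FOR_some_insn, "__builtin_name", SOME_BUILTIN_CODE }

   MASK gates availability against rs6000_builtin_mask, and the icode's
   operand modes feed builtin_function_type to derive the C prototype.  */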
18045
18046 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18047 static void
18048 init_float128_ibm (machine_mode mode)
18049 {
18050 if (!TARGET_XL_COMPAT)
18051 {
18052 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18053 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18054 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18055 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18056
18057 if (!TARGET_HARD_FLOAT)
18058 {
18059 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18060 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18061 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18062 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18063 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18064 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18065 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18066 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18067
18068 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18069 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18070 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18071 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18072 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18073 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18074 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18075 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18076 }
18077 }
18078 else
18079 {
18080 set_optab_libfunc (add_optab, mode, "_xlqadd");
18081 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18082 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18083 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18084 }
18085
18086 /* Add various conversions for IFmode to use the traditional TFmode
18087 names. */
18088 if (mode == IFmode)
18089 {
18090 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
18091 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
18092 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
18093 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
18094 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
18095 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
18096
18097 if (TARGET_POWERPC64)
18098 {
18099 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18100 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18101 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18102 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18103 }
18104 }
18105 }
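
/* The practical effect of the add_optab entry above: with IBM
   double-double 'long double' and without -mxl-compat, a plain addition
   lowers to a libgcc call (an illustration, not a guaranteed code
   sequence):

     long double f (long double a, long double b) { return a + b; }
       -> bl __gcc_qadd  */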
18106
/* Create a decl for either complex long double multiply or complex long double
   divide when long double is IEEE 128-bit floating point.  We can't use
   __multc3 and __divtc3 because those names are already taken by the original
   long double format that used IBM extended double.  The complex
   multiply/divide functions are encoded as builtin functions with a complex
   result and 4 scalar inputs.  */
18112
18113 static void
18114 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
18115 {
18116 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
18117 name, NULL_TREE);
18118
18119 set_builtin_decl (fncode, fndecl, true);
18120
18121 if (TARGET_DEBUG_BUILTIN)
18122 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
18125 }
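
/* The decls created here follow the libgcc complex-helper convention;
   e.g. __mulkc3 is given the prototype built by its caller below,

     _Complex long double __mulkc3 (long double a, long double b,
                                    long double c, long double d);

   computing (a + b*i) * (c + d*i).  */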
18126
18127 /* Set up IEEE 128-bit floating point routines. Use different names if the
18128 arguments can be passed in a vector register. The historical PowerPC
18129 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18130 continue to use that if we aren't using vector registers to pass IEEE
18131 128-bit floating point. */
18132
18133 static void
18134 init_float128_ieee (machine_mode mode)
18135 {
18136 if (FLOAT128_VECTOR_P (mode))
18137 {
18138 static bool complex_muldiv_init_p = false;
18139
      /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble.  If
         we have clone or target attributes, this will be called a second
         time.  We want to create the built-in functions only once.  */
18143 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
18144 {
18145 complex_muldiv_init_p = true;
18146 built_in_function fncode_mul =
18147 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
18148 - MIN_MODE_COMPLEX_FLOAT);
18149 built_in_function fncode_div =
18150 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
18151 - MIN_MODE_COMPLEX_FLOAT);
18152
18153 tree fntype = build_function_type_list (complex_long_double_type_node,
18154 long_double_type_node,
18155 long_double_type_node,
18156 long_double_type_node,
18157 long_double_type_node,
18158 NULL_TREE);
18159
18160 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
18161 create_complex_muldiv ("__divkc3", fncode_div, fntype);
18162 }
18163
18164 set_optab_libfunc (add_optab, mode, "__addkf3");
18165 set_optab_libfunc (sub_optab, mode, "__subkf3");
18166 set_optab_libfunc (neg_optab, mode, "__negkf2");
18167 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18168 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18169 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18170 set_optab_libfunc (abs_optab, mode, "__abskf2");
18171 set_optab_libfunc (powi_optab, mode, "__powikf2");
18172
18173 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18174 set_optab_libfunc (ne_optab, mode, "__nekf2");
18175 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18176 set_optab_libfunc (ge_optab, mode, "__gekf2");
18177 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18178 set_optab_libfunc (le_optab, mode, "__lekf2");
18179 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18180
18181 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18182 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18183 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18184 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18185
18186 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
18187 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18188 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
18189
18190 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
18191 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18192 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
18193
18194 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
18195 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
18196 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
18197 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
18198 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
18199 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
18200
18201 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18202 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18203 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18204 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18205
18206 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18207 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18208 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18209 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18210
18211 if (TARGET_POWERPC64)
18212 {
18213 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18214 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18215 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18216 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18217 }
    }
  else
18221 {
18222 set_optab_libfunc (add_optab, mode, "_q_add");
18223 set_optab_libfunc (sub_optab, mode, "_q_sub");
18224 set_optab_libfunc (neg_optab, mode, "_q_neg");
18225 set_optab_libfunc (smul_optab, mode, "_q_mul");
18226 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18227 if (TARGET_PPC_GPOPT)
18228 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18229
18230 set_optab_libfunc (eq_optab, mode, "_q_feq");
18231 set_optab_libfunc (ne_optab, mode, "_q_fne");
18232 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18233 set_optab_libfunc (ge_optab, mode, "_q_fge");
18234 set_optab_libfunc (lt_optab, mode, "_q_flt");
18235 set_optab_libfunc (le_optab, mode, "_q_fle");
18236
18237 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18238 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18239 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18240 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18241 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18242 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18243 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18244 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18245 }
18246 }
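
/* So, for example, when __float128 lives in vector registers an addition
   lowers to __addkf3, while the fallback branch keeps the historical
   _q_add entry point (an illustration of the optab entries above):

     __float128 f (__float128 a, __float128 b) { return a + b; }
       -> bl __addkf3  */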
18247
18248 static void
18249 rs6000_init_libfuncs (void)
18250 {
18251 /* __float128 support. */
18252 if (TARGET_FLOAT128_TYPE)
18253 {
18254 init_float128_ibm (IFmode);
18255 init_float128_ieee (KFmode);
18256 }
18257
18258 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18259 if (TARGET_LONG_DOUBLE_128)
18260 {
      if (!TARGET_IEEEQUAD)
	init_float128_ibm (TFmode);
      else
	/* IEEE 128-bit including 32-bit SVR4 quad floating point routines.  */
	init_float128_ieee (TFmode);
18267 }
18268 }
18269
18270 /* Emit a potentially record-form instruction, setting DST from SRC.
18271 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18272 signed comparison of DST with zero. If DOT is 1, the generated RTL
18273 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18274 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18275 a separate COMPARE. */
18276
18277 void
18278 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18279 {
18280 if (dot == 0)
18281 {
18282 emit_move_insn (dst, src);
18283 return;
18284 }
18285
18286 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18287 {
18288 emit_move_insn (dst, src);
18289 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18290 return;
18291 }
18292
18293 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18294 if (dot == 1)
18295 {
18296 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18297 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18298 }
18299 else
18300 {
18301 rtx set = gen_rtx_SET (dst, src);
18302 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18303 }
18304 }
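
/* For dot == 2 with CR0 as CCREG this emits a single record-form PARALLEL
   (a sketch of the generated RTL):

     (parallel [(set (reg:CC CR0) (compare:CC src (const_int 0)))
                (set dst src)])

   which the "dot" patterns in rs6000.md recognize; dot == 1 replaces the
   second element with (clobber dst).  */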
18305
18306 \f
18307 /* A validation routine: say whether CODE, a condition code, and MODE
18308 match. The other alternatives either don't make sense or should
18309 never be generated. */
18310
18311 void
18312 validate_condition_mode (enum rtx_code code, machine_mode mode)
18313 {
18314 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18315 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18316 && GET_MODE_CLASS (mode) == MODE_CC);
18317
18318 /* These don't make sense. */
18319 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18320 || mode != CCUNSmode);
18321
18322 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18323 || mode == CCUNSmode);
18324
18325 gcc_assert (mode == CCFPmode
18326 || (code != ORDERED && code != UNORDERED
18327 && code != UNEQ && code != LTGT
18328 && code != UNGT && code != UNLT
18329 && code != UNGE && code != UNLE));
18330
18331 /* These should never be generated except for
18332 flag_finite_math_only. */
18333 gcc_assert (mode != CCFPmode
18334 || flag_finite_math_only
18335 || (code != LE && code != GE
18336 && code != UNEQ && code != LTGT
18337 && code != UNGT && code != UNLT));
18338
18339 /* These are invalid; the information is not there. */
18340 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18341 }
18342
18343 \f
18344 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18345 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18346 not zero, store there the bit offset (counted from the right) where
18347 the single stretch of 1 bits begins; and similarly for B, the bit
18348 offset where it ends. */
18349
18350 bool
18351 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18352 {
18353 unsigned HOST_WIDE_INT val = INTVAL (mask);
18354 unsigned HOST_WIDE_INT bit;
18355 int nb, ne;
18356 int n = GET_MODE_PRECISION (mode);
18357
18358 if (mode != DImode && mode != SImode)
18359 return false;
18360
18361 if (INTVAL (mask) >= 0)
18362 {
18363 bit = val & -val;
18364 ne = exact_log2 (bit);
18365 nb = exact_log2 (val + bit);
18366 }
18367 else if (val + 1 == 0)
18368 {
18369 nb = n;
18370 ne = 0;
18371 }
18372 else if (val & 1)
18373 {
18374 val = ~val;
18375 bit = val & -val;
18376 nb = exact_log2 (bit);
18377 ne = exact_log2 (val + bit);
18378 }
18379 else
18380 {
18381 bit = val & -val;
18382 ne = exact_log2 (bit);
18383 if (val + bit == 0)
18384 nb = n;
18385 else
18386 nb = 0;
18387 }
18388
18389 nb--;
18390
18391 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18392 return false;
18393
18394 if (b)
18395 *b = nb;
18396 if (e)
18397 *e = ne;
18398
18399 return true;
18400 }
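
/* Worked examples (by hand, from the code above): in SImode the mask
   0x00ff0000 is a single stretch of ones, so the function returns true
   with *B = 23 and *E = 16.  The wrap-around SImode mask 0xff0000ff is
   also accepted, with *B = 7 and *E = 24 (*E > *B marks the wrap).  A
   mask with two separate stretches, such as 0x00ff00ff, is rejected,
   because val + bit is then not a power of two and exact_log2 fails.  */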
18401
18402 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18403 or rldicr instruction, to implement an AND with it in mode MODE. */
18404
18405 bool
18406 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18407 {
18408 int nb, ne;
18409
18410 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18411 return false;
18412
18413 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18414 does not wrap. */
18415 if (mode == DImode)
18416 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18417
18418 /* For SImode, rlwinm can do everything. */
18419 if (mode == SImode)
18420 return (nb < 32 && ne < 32);
18421
18422 return false;
18423 }
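
/* Examples (worked by hand): in DImode, 0x00000000ffffffff is accepted
   (NE == 0, a single rldicl), 0xffff000000000000 is accepted (NB == 63, a
   single rldicr), and any stretch below bit 32 such as 0x00ff0000 is
   accepted via rlwinm.  0x0000ffff00000000 (NB == 47, NE == 32) is
   rejected: it needs two instructions, see rs6000_is_valid_2insn_and.  */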
18424
18425 /* Return the instruction template for an AND with mask in mode MODE, with
18426 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18427
18428 const char *
18429 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18430 {
18431 int nb, ne;
18432
18433 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18434 gcc_unreachable ();
18435
18436 if (mode == DImode && ne == 0)
18437 {
18438 operands[3] = GEN_INT (63 - nb);
18439 if (dot)
18440 return "rldicl. %0,%1,0,%3";
18441 return "rldicl %0,%1,0,%3";
18442 }
18443
18444 if (mode == DImode && nb == 63)
18445 {
18446 operands[3] = GEN_INT (63 - ne);
18447 if (dot)
18448 return "rldicr. %0,%1,0,%3";
18449 return "rldicr %0,%1,0,%3";
18450 }
18451
18452 if (nb < 32 && ne < 32)
18453 {
18454 operands[3] = GEN_INT (31 - nb);
18455 operands[4] = GEN_INT (31 - ne);
18456 if (dot)
18457 return "rlwinm. %0,%1,0,%3,%4";
18458 return "rlwinm %0,%1,0,%3,%4";
18459 }
18460
18461 gcc_unreachable ();
18462 }
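
/* Sample outputs (worked by hand from the cases above): SImode mask
   0x00ff0000 gives "rlwinm %0,%1,0,8,15"; DImode mask 0x00000000ffffffff
   gives "rldicl %0,%1,0,32"; and DImode mask 0xffff000000000000 gives
   "rldicr %0,%1,0,15".  */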
18463
18464 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18465 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18466 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18467
18468 bool
18469 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18470 {
18471 int nb, ne;
18472
18473 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18474 return false;
18475
18476 int n = GET_MODE_PRECISION (mode);
18477 int sh = -1;
18478
18479 if (CONST_INT_P (XEXP (shift, 1)))
18480 {
18481 sh = INTVAL (XEXP (shift, 1));
18482 if (sh < 0 || sh >= n)
18483 return false;
18484 }
18485
18486 rtx_code code = GET_CODE (shift);
18487
18488 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18489 if (sh == 0)
18490 code = ROTATE;
18491
18492 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18493 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18494 code = ASHIFT;
18495 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18496 {
18497 code = LSHIFTRT;
18498 sh = n - sh;
18499 }
18500
18501 /* DImode rotates need rld*. */
18502 if (mode == DImode && code == ROTATE)
18503 return (nb == 63 || ne == 0 || ne == sh);
18504
18505 /* SImode rotates need rlw*. */
18506 if (mode == SImode && code == ROTATE)
18507 return (nb < 32 && ne < 32 && sh < 32);
18508
18509 /* Wrap-around masks are only okay for rotates. */
18510 if (ne > nb)
18511 return false;
18512
18513 /* Variable shifts are only okay for rotates. */
18514 if (sh < 0)
18515 return false;
18516
18517 /* Don't allow ASHIFT if the mask is wrong for that. */
18518 if (code == ASHIFT && ne < sh)
18519 return false;
18520
18521 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18522 if the mask is wrong for that. */
18523 if (nb < 32 && ne < 32 && sh < 32
18524 && !(code == LSHIFTRT && nb >= 32 - sh))
18525 return true;
18526
18527 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18528 if the mask is wrong for that. */
18529 if (code == LSHIFTRT)
18530 sh = 64 - sh;
18531 if (nb == 63 || ne == 0 || ne == sh)
18532 return !(code == LSHIFTRT && nb >= sh);
18533
18534 return false;
18535 }
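
/* Examples (worked by hand): in SImode, (ashift x 8) under mask 0xffffff00
   is accepted; it fits in a single rlwinm (here equivalent to slwi).  In
   DImode, (rotate x 32) under mask 0x00000000ffffffff is accepted, since
   NE == 0 allows rldicl.  Variable shift amounts are only accepted for
   rotates (rlwnm/rldcl/rldcr); the masked shift forms need an immediate.  */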
18536
18537 /* Return the instruction template for a shift with mask in mode MODE, with
18538 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18539
18540 const char *
18541 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18542 {
18543 int nb, ne;
18544
18545 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18546 gcc_unreachable ();
18547
18548 if (mode == DImode && ne == 0)
18549 {
18550 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18551 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18552 operands[3] = GEN_INT (63 - nb);
18553 if (dot)
18554 return "rld%I2cl. %0,%1,%2,%3";
18555 return "rld%I2cl %0,%1,%2,%3";
18556 }
18557
18558 if (mode == DImode && nb == 63)
18559 {
18560 operands[3] = GEN_INT (63 - ne);
18561 if (dot)
18562 return "rld%I2cr. %0,%1,%2,%3";
18563 return "rld%I2cr %0,%1,%2,%3";
18564 }
18565
18566 if (mode == DImode
18567 && GET_CODE (operands[4]) != LSHIFTRT
18568 && CONST_INT_P (operands[2])
18569 && ne == INTVAL (operands[2]))
18570 {
18571 operands[3] = GEN_INT (63 - nb);
18572 if (dot)
18573 return "rld%I2c. %0,%1,%2,%3";
18574 return "rld%I2c %0,%1,%2,%3";
18575 }
18576
18577 if (nb < 32 && ne < 32)
18578 {
18579 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18580 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18581 operands[3] = GEN_INT (31 - nb);
18582 operands[4] = GEN_INT (31 - ne);
18583 /* This insn can also be a 64-bit rotate with mask that really makes
18584 it just a shift right (with mask); the %h below are to adjust for
18585 that situation (shift count is >= 32 in that case). */
18586 if (dot)
18587 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18588 return "rlw%I2nm %0,%1,%h2,%3,%4";
18589 }
18590
18591 gcc_unreachable ();
18592 }
18593
18594 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18595 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18596 ASHIFT, or LSHIFTRT) in mode MODE. */
18597
18598 bool
18599 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18600 {
18601 int nb, ne;
18602
18603 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18604 return false;
18605
18606 int n = GET_MODE_PRECISION (mode);
18607
18608 int sh = INTVAL (XEXP (shift, 1));
18609 if (sh < 0 || sh >= n)
18610 return false;
18611
18612 rtx_code code = GET_CODE (shift);
18613
18614 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18615 if (sh == 0)
18616 code = ROTATE;
18617
18618 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18619 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18620 code = ASHIFT;
18621 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18622 {
18623 code = LSHIFTRT;
18624 sh = n - sh;
18625 }
18626
18627 /* DImode rotates need rldimi. */
18628 if (mode == DImode && code == ROTATE)
18629 return (ne == sh);
18630
18631 /* SImode rotates need rlwimi. */
18632 if (mode == SImode && code == ROTATE)
18633 return (nb < 32 && ne < 32 && sh < 32);
18634
18635 /* Wrap-around masks are only okay for rotates. */
18636 if (ne > nb)
18637 return false;
18638
18639 /* Don't allow ASHIFT if the mask is wrong for that. */
18640 if (code == ASHIFT && ne < sh)
18641 return false;
18642
18643 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18644 if the mask is wrong for that. */
18645 if (nb < 32 && ne < 32 && sh < 32
18646 && !(code == LSHIFTRT && nb >= 32 - sh))
18647 return true;
18648
18649 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18650 if the mask is wrong for that. */
18651 if (code == LSHIFTRT)
18652 sh = 64 - sh;
18653 if (ne == sh)
18654 return !(code == LSHIFTRT && nb >= sh);
18655
18656 return false;
18657 }
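
/* Example (worked by hand): in DImode, inserting (ashift y 32) under mask
   0xffffffff00000000 is accepted (NE == SH == 32); the template chosen by
   rs6000_insn_for_insert_mask below is "rldimi %0,%1,32,0", which replaces
   the high 32 bits of the destination with the low 32 bits of %1.  */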
18658
18659 /* Return the instruction template for an insert with mask in mode MODE, with
18660 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18661
18662 const char *
18663 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18664 {
18665 int nb, ne;
18666
18667 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18668 gcc_unreachable ();
18669
18670 /* Prefer rldimi because rlwimi is cracked. */
18671 if (TARGET_POWERPC64
18672 && (!dot || mode == DImode)
18673 && GET_CODE (operands[4]) != LSHIFTRT
18674 && ne == INTVAL (operands[2]))
18675 {
18676 operands[3] = GEN_INT (63 - nb);
18677 if (dot)
18678 return "rldimi. %0,%1,%2,%3";
18679 return "rldimi %0,%1,%2,%3";
18680 }
18681
18682 if (nb < 32 && ne < 32)
18683 {
18684 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18685 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18686 operands[3] = GEN_INT (31 - nb);
18687 operands[4] = GEN_INT (31 - ne);
18688 if (dot)
18689 return "rlwimi. %0,%1,%2,%3,%4";
18690 return "rlwimi %0,%1,%2,%3,%4";
18691 }
18692
18693 gcc_unreachable ();
18694 }
18695
18696 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18697 using two machine instructions. */
18698
18699 bool
18700 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18701 {
18702 /* There are two kinds of AND we can handle with two insns:
18703 1) those we can do with two rl* insns;
18704 2) ori[s];xori[s].
18705
18706 We do not handle that last case yet. */
18707
18708 /* If there is just one stretch of ones, we can do it. */
18709 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18710 return true;
18711
18712 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18713 one insn, we can do the whole thing with two. */
18714 unsigned HOST_WIDE_INT val = INTVAL (c);
18715 unsigned HOST_WIDE_INT bit1 = val & -val;
18716 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18717 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18718 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18719 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18720 }
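
/* Worked example of the hole-filling arithmetic above, for the DImode
   value C = 0x000000ff000000ff (two stretches of ones):

     bit1 = 0x1                  lowest set bit
     bit2 = 0x100                lowest bit of the lowest "hole"
     bit3 = 0x100000000          lowest set bit above that hole
     val + bit3 - bit2
          = 0x000000ffffffffff   the hole filled in

   The filled-in value is a single stretch ending at bit 0, i.e. a valid
   rldicl mask, so this AND can be done with two instructions.  */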
18721
18722 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18723 If EXPAND is true, split rotate-and-mask instructions we generate to
18724 their constituent parts as well (this is used during expand); if DOT
18725 is 1, make the last insn a record-form instruction clobbering the
18726 destination GPR and setting the CC reg (from operands[3]); if 2, set
18727 that GPR as well as the CC reg. */
18728
18729 void
18730 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18731 {
18732 gcc_assert (!(expand && dot));
18733
18734 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18735
18736 /* If it is one stretch of ones, it must be DImode (an SImode stretch is
18737 always a single rlwinm); shift left, mask, then shift right. This gives
18738 better code than masking without shifts or shifting right then left. */
18739 int nb, ne;
18740 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18741 {
18742 gcc_assert (mode == DImode);
18743
18744 int shift = 63 - nb;
18745 if (expand)
18746 {
18747 rtx tmp1 = gen_reg_rtx (DImode);
18748 rtx tmp2 = gen_reg_rtx (DImode);
18749 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18750 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18751 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18752 }
18753 else
18754 {
18755 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18756 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18757 emit_move_insn (operands[0], tmp);
18758 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18759 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18760 }
18761 return;
18762 }
18763
18764 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18765 that does the rest. */
18766 unsigned HOST_WIDE_INT bit1 = val & -val;
18767 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18768 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18769 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18770
18771 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18772 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18773
18774 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18775
18776 /* Two "no-rotate"-and-mask instructions, for SImode. */
18777 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18778 {
18779 gcc_assert (mode == SImode);
18780
18781 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18782 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18783 emit_move_insn (reg, tmp);
18784 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18785 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18786 return;
18787 }
18788
18789 gcc_assert (mode == DImode);
18790
18791 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18792 insns; we have to do the first in SImode, because it wraps. */
18793 if (mask2 <= 0xffffffff
18794 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18795 {
18796 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18797 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18798 GEN_INT (mask1));
18799 rtx reg_low = gen_lowpart (SImode, reg);
18800 emit_move_insn (reg_low, tmp);
18801 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18802 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18803 return;
18804 }
18805
18806 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18807 at the top end), rotate back and clear the other hole. */
18808 int right = exact_log2 (bit3);
18809 int left = 64 - right;
18810
18811 /* Rotate the mask too. */
18812 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18813
18814 if (expand)
18815 {
18816 rtx tmp1 = gen_reg_rtx (DImode);
18817 rtx tmp2 = gen_reg_rtx (DImode);
18818 rtx tmp3 = gen_reg_rtx (DImode);
18819 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18820 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18821 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18822 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18823 }
18824 else
18825 {
18826 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18827 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18828 emit_move_insn (operands[0], tmp);
18829 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18830 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18831 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18832 }
18833 }
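
/* Continuing the example from rs6000_is_valid_2insn_and: for DImode
   C = 0x000000ff000000ff the final rld* branch is used with
   LEFT = RIGHT = 32; MASK1 rotates to 0x000000ffffffffff and MASK2 is the
   same value, so the expanded sequence is

     rotldi tmp,x,32   ; AND with 0x000000ffffffffff
     rotldi tmp,tmp,32 ; AND with 0x000000ffffffffff

   which later passes can normally fold into two rotate-and-mask insns
   ("rldicl %0,%1,32,24" twice).  */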
18834 \f
18835 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18836 for lfq and stfq insns, iff the registers are hard registers. */
18837
18838 int
18839 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18840 {
18841 /* We might have been passed a SUBREG. */
18842 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
18843 return 0;
18844
18845 /* We might have been passed non-floating-point registers. */
18846 if (!FP_REGNO_P (REGNO (reg1))
18847 || !FP_REGNO_P (REGNO (reg2)))
18848 return 0;
18849
18850 return (REGNO (reg1) == REGNO (reg2) - 1);
18851 }
18852
18853 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insns.
18854 addr1 and addr2 must be in consecutive memory locations
18855 (addr2 == addr1 + 8). */
18856
18857 int
18858 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18859 {
18860 rtx addr1, addr2;
18861 unsigned int reg1, reg2;
18862 int offset1, offset2;
18863
18864 /* The mems cannot be volatile. */
18865 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18866 return 0;
18867
18868 addr1 = XEXP (mem1, 0);
18869 addr2 = XEXP (mem2, 0);
18870
18871 /* Extract an offset (if used) from the first addr. */
18872 if (GET_CODE (addr1) == PLUS)
18873 {
18874 /* If not a REG, return zero. */
18875 if (GET_CODE (XEXP (addr1, 0)) != REG)
18876 return 0;
18877 else
18878 {
18879 reg1 = REGNO (XEXP (addr1, 0));
18880 /* The offset must be constant! */
18881 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
18882 return 0;
18883 offset1 = INTVAL (XEXP (addr1, 1));
18884 }
18885 }
18886 else if (GET_CODE (addr1) != REG)
18887 return 0;
18888 else
18889 {
18890 reg1 = REGNO (addr1);
18891 /* This was a simple (mem (reg)) expression. Offset is 0. */
18892 offset1 = 0;
18893 }
18894
18895 /* And now for the second addr. */
18896 if (GET_CODE (addr2) == PLUS)
18897 {
18898 /* If not a REG, return zero. */
18899 if (GET_CODE (XEXP (addr2, 0)) != REG)
18900 return 0;
18901 else
18902 {
18903 reg2 = REGNO (XEXP (addr2, 0));
18904 /* The offset must be constant. */
18905 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
18906 return 0;
18907 offset2 = INTVAL (XEXP (addr2, 1));
18908 }
18909 }
18910 else if (GET_CODE (addr2) != REG)
18911 return 0;
18912 else
18913 {
18914 reg2 = REGNO (addr2);
18915 /* This was a simple (mem (reg)) expression. Offset is 0. */
18916 offset2 = 0;
18917 }
18918
18919 /* Both of these must have the same base register. */
18920 if (reg1 != reg2)
18921 return 0;
18922
18923 /* The offset for the second addr must be 8 more than the first addr. */
18924 if (offset2 != offset1 + 8)
18925 return 0;
18926
18927 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18928 instructions. */
18929 return 1;
18930 }
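
/* Example (a sketch): (mem:DF (plus (reg 3) (const_int 8))) paired with
   (mem:DF (plus (reg 3) (const_int 16))) passes: same base register, and
   the second offset is exactly 8 larger.  A different base register, a
   non-constant offset, or a gap other than 8 all return 0.  */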
18931 \f
18932 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18933 need to use DDmode; in all other cases we can use the same mode. */
18934 static machine_mode
18935 rs6000_secondary_memory_needed_mode (machine_mode mode)
18936 {
18937 if (lra_in_progress && mode == SDmode)
18938 return DDmode;
18939 return mode;
18940 }
18941
18942 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18943 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18944 only work on the traditional altivec registers, note if an altivec register
18945 was chosen. */
18946
18947 static enum rs6000_reg_type
18948 register_to_reg_type (rtx reg, bool *is_altivec)
18949 {
18950 HOST_WIDE_INT regno;
18951 enum reg_class rclass;
18952
18953 if (GET_CODE (reg) == SUBREG)
18954 reg = SUBREG_REG (reg);
18955
18956 if (!REG_P (reg))
18957 return NO_REG_TYPE;
18958
18959 regno = REGNO (reg);
18960 if (regno >= FIRST_PSEUDO_REGISTER)
18961 {
18962 if (!lra_in_progress && !reload_completed)
18963 return PSEUDO_REG_TYPE;
18964
18965 regno = true_regnum (reg);
18966 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
18967 return PSEUDO_REG_TYPE;
18968 }
18969
18970 gcc_assert (regno >= 0);
18971
18972 if (is_altivec && ALTIVEC_REGNO_P (regno))
18973 *is_altivec = true;
18974
18975 rclass = rs6000_regno_regclass[regno];
18976 return reg_class_to_reg_type[(int)rclass];
18977 }
18978
18979 /* Helper function to return the cost of adding a TOC entry address. */
18980
18981 static inline int
18982 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18983 {
18984 int ret;
18985
18986 if (TARGET_CMODEL != CMODEL_SMALL)
18987 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18988
18989 else
18990 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18991
18992 return ret;
18993 }
18994
18995 /* Helper function for rs6000_secondary_reload to determine whether the memory
18996 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18997 needs reloading. Return negative if the memory is not handled by the
18998 memory helper functions and a different reload method should be tried, 0
18999 if no additional instructions are needed, and positive to give the extra
19000 cost for the memory. */
19001
19002 static int
19003 rs6000_secondary_reload_memory (rtx addr,
19004 enum reg_class rclass,
19005 machine_mode mode)
19006 {
19007 int extra_cost = 0;
19008 rtx reg, and_arg, plus_arg0, plus_arg1;
19009 addr_mask_type addr_mask;
19010 const char *type = NULL;
19011 const char *fail_msg = NULL;
19012
19013 if (GPR_REG_CLASS_P (rclass))
19014 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19015
19016 else if (rclass == FLOAT_REGS)
19017 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19018
19019 else if (rclass == ALTIVEC_REGS)
19020 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19021
19022 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19023 else if (rclass == VSX_REGS)
19024 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19025 & ~RELOAD_REG_AND_M16);
19026
19027 /* If the register allocator hasn't made up its mind yet on the register
19028 class to use, settle on a sensible default. */
19029 else if (rclass == NO_REGS)
19030 {
19031 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19032 & ~RELOAD_REG_AND_M16);
19033
19034 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19035 addr_mask &= ~(RELOAD_REG_INDEXED
19036 | RELOAD_REG_PRE_INCDEC
19037 | RELOAD_REG_PRE_MODIFY);
19038 }
19039
19040 else
19041 addr_mask = 0;
19042
19043 /* If the mode isn't valid in this register class, just return now. */
19044 if ((addr_mask & RELOAD_REG_VALID) == 0)
19045 {
19046 if (TARGET_DEBUG_ADDR)
19047 {
19048 fprintf (stderr,
19049 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19050 "not valid in class\n",
19051 GET_MODE_NAME (mode), reg_class_names[rclass]);
19052 debug_rtx (addr);
19053 }
19054
19055 return -1;
19056 }
19057
19058 switch (GET_CODE (addr))
19059 {
19060 /* Does the register class support auto update forms for this mode? We
19061 don't need a scratch register, since the powerpc only supports
19062 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19063 case PRE_INC:
19064 case PRE_DEC:
19065 reg = XEXP (addr, 0);
19066 if (!base_reg_operand (addr, GET_MODE (reg)))
19067 {
19068 fail_msg = "no base register #1";
19069 extra_cost = -1;
19070 }
19071
19072 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19073 {
19074 extra_cost = 1;
19075 type = "update";
19076 }
19077 break;
19078
19079 case PRE_MODIFY:
19080 reg = XEXP (addr, 0);
19081 plus_arg1 = XEXP (addr, 1);
19082 if (!base_reg_operand (reg, GET_MODE (reg))
19083 || GET_CODE (plus_arg1) != PLUS
19084 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19085 {
19086 fail_msg = "bad PRE_MODIFY";
19087 extra_cost = -1;
19088 }
19089
19090 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19091 {
19092 extra_cost = 1;
19093 type = "update";
19094 }
19095 break;
19096
19097 /* Do we need to simulate AND -16 to clear the bottom address bits used
19098 in VMX load/stores? Only allow the AND for vector sizes. */
19099 case AND:
19100 and_arg = XEXP (addr, 0);
19101 if (GET_MODE_SIZE (mode) != 16
19102 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19103 || INTVAL (XEXP (addr, 1)) != -16)
19104 {
19105 fail_msg = "bad Altivec AND #1";
19106 extra_cost = -1;
19107 }
19108
19109 if (rclass != ALTIVEC_REGS)
19110 {
19111 if (legitimate_indirect_address_p (and_arg, false))
19112 extra_cost = 1;
19113
19114 else if (legitimate_indexed_address_p (and_arg, false))
19115 extra_cost = 2;
19116
19117 else
19118 {
19119 fail_msg = "bad Altivec AND #2";
19120 extra_cost = -1;
19121 }
19122
19123 type = "and";
19124 }
19125 break;
19126
19127 /* If this is an indirect address, make sure it is a base register. */
19128 case REG:
19129 case SUBREG:
19130 if (!legitimate_indirect_address_p (addr, false))
19131 {
19132 extra_cost = 1;
19133 type = "move";
19134 }
19135 break;
19136
19137 /* If this is an indexed address, make sure the register class can handle
19138 indexed addresses for this mode. */
19139 case PLUS:
19140 plus_arg0 = XEXP (addr, 0);
19141 plus_arg1 = XEXP (addr, 1);
19142
19143 /* (plus (plus (reg) (constant)) (constant)) is generated during
19144 push_reload processing, so handle it now. */
19145 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19146 {
19147 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19148 {
19149 extra_cost = 1;
19150 type = "offset";
19151 }
19152 }
19153
19154 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19155 push_reload processing, so handle it now. */
19156 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19157 {
19158 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19159 {
19160 extra_cost = 1;
19161 type = "indexed #2";
19162 }
19163 }
19164
19165 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19166 {
19167 fail_msg = "no base register #2";
19168 extra_cost = -1;
19169 }
19170
19171 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19172 {
19173 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19174 || !legitimate_indexed_address_p (addr, false))
19175 {
19176 extra_cost = 1;
19177 type = "indexed";
19178 }
19179 }
19180
19181 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19182 && CONST_INT_P (plus_arg1))
19183 {
19184 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19185 {
19186 extra_cost = 1;
19187 type = "vector d-form offset";
19188 }
19189 }
19190
19191 /* Make sure the register class can handle offset addresses. */
19192 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19193 {
19194 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19195 {
19196 extra_cost = 1;
19197 type = "offset #2";
19198 }
19199 }
19200
19201 else
19202 {
19203 fail_msg = "bad PLUS";
19204 extra_cost = -1;
19205 }
19206
19207 break;
19208
19209 case LO_SUM:
19210 /* Quad offsets are restricted and can't handle normal addresses. */
19211 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19212 {
19213 extra_cost = -1;
19214 type = "vector d-form lo_sum";
19215 }
19216
19217 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19218 {
19219 fail_msg = "bad LO_SUM";
19220 extra_cost = -1;
19221 }
19222
19223 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19224 {
19225 extra_cost = 1;
19226 type = "lo_sum";
19227 }
19228 break;
19229
19230 /* Static addresses need to create a TOC entry. */
19231 case CONST:
19232 case SYMBOL_REF:
19233 case LABEL_REF:
19234 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19235 {
19236 extra_cost = -1;
19237 type = "vector d-form lo_sum #2";
19238 }
19239
19240 else
19241 {
19242 type = "address";
19243 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19244 }
19245 break;
19246
19247 /* TOC references look like offsetable memory. */
19248 case UNSPEC:
19249 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19250 {
19251 fail_msg = "bad UNSPEC";
19252 extra_cost = -1;
19253 }
19254
19255 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19256 {
19257 extra_cost = -1;
19258 type = "vector d-form lo_sum #3";
19259 }
19260
19261 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19262 {
19263 extra_cost = 1;
19264 type = "toc reference";
19265 }
19266 break;
19267
19268 default:
19269 {
19270 fail_msg = "bad address";
19271 extra_cost = -1;
19272 }
19273 }
19274
19275 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19276 {
19277 if (extra_cost < 0)
19278 fprintf (stderr,
19279 "rs6000_secondary_reload_memory error: mode = %s, "
19280 "class = %s, addr_mask = '%s', %s\n",
19281 GET_MODE_NAME (mode),
19282 reg_class_names[rclass],
19283 rs6000_debug_addr_mask (addr_mask, false),
19284 (fail_msg != NULL) ? fail_msg : "<bad address>");
19285
19286 else
19287 fprintf (stderr,
19288 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19289 "addr_mask = '%s', extra cost = %d, %s\n",
19290 GET_MODE_NAME (mode),
19291 reg_class_names[rclass],
19292 rs6000_debug_addr_mask (addr_mask, false),
19293 extra_cost,
19294 (type) ? type : "<none>");
19295
19296 debug_rtx (addr);
19297 }
19298
19299 return extra_cost;
19300 }
19301
19302 /* Helper function for rs6000_secondary_reload to return true if a move to a
19303 different register class is really a simple move. */
19304
19305 static bool
19306 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19307 enum rs6000_reg_type from_type,
19308 machine_mode mode)
19309 {
19310 int size = GET_MODE_SIZE (mode);
19311
19312 /* Add support for various direct moves available. In this function, we only
19313 look at cases where we don't need any extra registers, and one or more
19314 simple move insns are issued. Historically, small integers were not
19315 allowed in FPR/VSX registers. Single precision floating point is not a
19316 simple move because we need to convert to its in-memory layout.
19317 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19318 need special direct move handling, which we do not support yet. */
19319 if (TARGET_DIRECT_MOVE
19320 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19321 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19322 {
19323 if (TARGET_POWERPC64)
19324 {
19325 /* ISA 2.07: MTVSRD or MFVSRD. */
19326 if (size == 8)
19327 return true;
19328
19329 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19330 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19331 return true;
19332 }
19333
19334 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19335 if (TARGET_P8_VECTOR)
19336 {
19337 if (mode == SImode)
19338 return true;
19339
19340 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19341 return true;
19342 }
19343
19344 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19345 if (mode == SDmode)
19346 return true;
19347 }
19348
19349 /* Power6+: MFTGPR or MFFGPR. */
19350 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19351 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19352 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19353 return true;
19354
19355 /* Move to/from SPR. */
19356 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19357 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19358 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19359 return true;
19360
19361 return false;
19362 }
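
/* Examples (following the tests above): on a 64-bit ISA 2.07 target, a
   DImode or DFmode GPR/VSX copy is a simple move (one mtvsrd or mfvsrd).
   An SFmode GPR/VSX copy is not: it falls through and returns false,
   because the value must be converted between the register and memory
   formats (see rs6000_secondary_reload_direct_move).  */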
19363
19364 /* Direct move helper function for rs6000_secondary_reload. Handle all of
19365 the special direct moves that involve allocating an extra register.
19366 Return true if there is such a move, recording the helper's insn code
19367 and extra cost in SRI; return false otherwise. */
19368
19369 static bool
19370 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19371 enum rs6000_reg_type from_type,
19372 machine_mode mode,
19373 secondary_reload_info *sri,
19374 bool altivec_p)
19375 {
19376 bool ret = false;
19377 enum insn_code icode = CODE_FOR_nothing;
19378 int cost = 0;
19379 int size = GET_MODE_SIZE (mode);
19380
19381 if (TARGET_POWERPC64 && size == 16)
19382 {
19383 /* Handle moving 128-bit values from GPRs to VSX registers on
19384 ISA 2.07 (power8, power9) when running in 64-bit mode using
19385 XXPERMDI to glue the two 64-bit values back together. */
19386 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19387 {
19388 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19389 icode = reg_addr[mode].reload_vsx_gpr;
19390 }
19391
19392 /* Handle moving 128-bit values from VSX registers to GPRs on
19393 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19394 bottom 64-bit value. */
19395 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19396 {
19397 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19398 icode = reg_addr[mode].reload_gpr_vsx;
19399 }
19400 }
19401
19402 else if (TARGET_POWERPC64 && mode == SFmode)
19403 {
19404 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19405 {
19406 cost = 3; /* xscvdpspn, mfvsrd, and. */
19407 icode = reg_addr[mode].reload_gpr_vsx;
19408 }
19409
19410 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19411 {
19412 cost = 2; /* mtvsrz, xscvspdpn. */
19413 icode = reg_addr[mode].reload_vsx_gpr;
19414 }
19415 }
19416
19417 else if (!TARGET_POWERPC64 && size == 8)
19418 {
19419 /* Handle moving 64-bit values from GPRs to floating point registers on
19420 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19421 32-bit values back together. Altivec register classes must be handled
19422 specially since a different instruction is used, and the secondary
19423 reload support requires a single instruction class in the scratch
19424 register constraint. However, right now TFmode is not allowed in
19425 Altivec registers, so the pattern will never match. */
19426 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19427 {
19428 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19429 icode = reg_addr[mode].reload_fpr_gpr;
19430 }
19431 }
19432
19433 if (icode != CODE_FOR_nothing)
19434 {
19435 ret = true;
19436 if (sri)
19437 {
19438 sri->icode = icode;
19439 sri->extra_cost = cost;
19440 }
19441 }
19442
19443 return ret;
19444 }
19445
19446 /* Return whether a move between two register classes can be done either
19447 directly (simple move) or via a pattern that uses a single extra temporary
19448 (using ISA 2.07's direct move in this case). */
19449
19450 static bool
19451 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19452 enum rs6000_reg_type from_type,
19453 machine_mode mode,
19454 secondary_reload_info *sri,
19455 bool altivec_p)
19456 {
19457 /* Fall back to load/store reloads if either type is not a register. */
19458 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19459 return false;
19460
19461 /* If we haven't allocated registers yet, assume the move can be done for the
19462 standard register types. */
19463 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19464 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19465 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19466 return true;
19467
19468 /* Moves within the same set of registers are simple moves for
19469 non-specialized registers. */
19470 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19471 return true;
19472
19473 /* Check whether a simple move can be done directly. */
19474 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19475 {
19476 if (sri)
19477 {
19478 sri->icode = CODE_FOR_nothing;
19479 sri->extra_cost = 0;
19480 }
19481 return true;
19482 }
19483
19484 /* Now check if we can do it in a few steps. */
19485 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19486 altivec_p);
19487 }
19488
19489 /* Inform reload about cases where moving X with a mode MODE to a register in
19490 RCLASS requires an extra scratch or immediate register. Return the class
19491 needed for the immediate register.
19492
19493 For VSX and Altivec, we may need a register to convert sp+offset into
19494 reg+sp.
19495
19496 For misaligned 64-bit gpr loads and stores we need a register to
19497 convert an offset address to indirect. */
19498
19499 static reg_class_t
19500 rs6000_secondary_reload (bool in_p,
19501 rtx x,
19502 reg_class_t rclass_i,
19503 machine_mode mode,
19504 secondary_reload_info *sri)
19505 {
19506 enum reg_class rclass = (enum reg_class) rclass_i;
19507 reg_class_t ret = ALL_REGS;
19508 enum insn_code icode;
19509 bool default_p = false;
19510 bool done_p = false;
19511
19512 /* Allow subreg of memory before/during reload. */
19513 bool memory_p = (MEM_P (x)
19514 || (!reload_completed && GET_CODE (x) == SUBREG
19515 && MEM_P (SUBREG_REG (x))));
19516
19517 sri->icode = CODE_FOR_nothing;
19518 sri->t_icode = CODE_FOR_nothing;
19519 sri->extra_cost = 0;
19520 icode = ((in_p)
19521 ? reg_addr[mode].reload_load
19522 : reg_addr[mode].reload_store);
19523
19524 if (REG_P (x) || register_operand (x, mode))
19525 {
19526 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19527 bool altivec_p = (rclass == ALTIVEC_REGS);
19528 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19529
19530 if (!in_p)
19531 std::swap (to_type, from_type);
19532
19533 /* Can we do a direct move of some sort? */
19534 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19535 altivec_p))
19536 {
19537 icode = (enum insn_code)sri->icode;
19538 default_p = false;
19539 done_p = true;
19540 ret = NO_REGS;
19541 }
19542 }
19543
19544 /* Make sure 0.0 is not reloaded or forced into memory. */
19545 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19546 {
19547 ret = NO_REGS;
19548 default_p = false;
19549 done_p = true;
19550 }
19551
19552 /* If this is a scalar floating point value and we want to load it into the
19553 traditional Altivec registers, move it via a traditional floating
19554 point register, unless we have D-form addressing. Also make sure that
19555 non-zero constants use an FPR. */
19556 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19557 && !mode_supports_vmx_dform (mode)
19558 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19559 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19560 {
19561 ret = FLOAT_REGS;
19562 default_p = false;
19563 done_p = true;
19564 }
19565
19566 /* Handle reload of load/stores if we have reload helper functions. */
19567 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19568 {
19569 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19570 mode);
19571
19572 if (extra_cost >= 0)
19573 {
19574 done_p = true;
19575 ret = NO_REGS;
19576 if (extra_cost > 0)
19577 {
19578 sri->extra_cost = extra_cost;
19579 sri->icode = icode;
19580 }
19581 }
19582 }
19583
19584 /* Handle unaligned loads and stores of integer registers. */
19585 if (!done_p && TARGET_POWERPC64
19586 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19587 && memory_p
19588 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19589 {
19590 rtx addr = XEXP (x, 0);
19591 rtx off = address_offset (addr);
19592
19593 if (off != NULL_RTX)
19594 {
19595 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19596 unsigned HOST_WIDE_INT offset = INTVAL (off);
19597
19598 /* We need a secondary reload when our legitimate_address_p
19599 says the address is good (as otherwise the entire address
19600 will be reloaded), and the offset is not a multiple of
19601 four or we have an address wrap. Address wrap will only
19602 occur for LO_SUMs since legitimate_offset_address_p
19603 rejects addresses for 16-byte mems that will wrap. */
19604 if (GET_CODE (addr) == LO_SUM
19605 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19606 && ((offset & 3) != 0
19607 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19608 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19609 && (offset & 3) != 0))
19610 {
19611 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19612 if (in_p)
19613 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19614 : CODE_FOR_reload_di_load);
19615 else
19616 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19617 : CODE_FOR_reload_di_store);
19618 sri->extra_cost = 2;
19619 ret = NO_REGS;
19620 done_p = true;
19621 }
19622 else
19623 default_p = true;
19624 }
19625 else
19626 default_p = true;
19627 }
19628
19629 if (!done_p && !TARGET_POWERPC64
19630 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19631 && memory_p
19632 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19633 {
19634 rtx addr = XEXP (x, 0);
19635 rtx off = address_offset (addr);
19636
19637 if (off != NULL_RTX)
19638 {
19639 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19640 unsigned HOST_WIDE_INT offset = INTVAL (off);
19641
19642 /* We need a secondary reload when our legitimate_address_p
19643 says the address is good (as otherwise the entire address
19644 will be reloaded), and we have a wrap.
19645
19646 legitimate_lo_sum_address_p allows LO_SUM addresses to
19647 have any offset so test for wrap in the low 16 bits.
19648
19649 legitimate_offset_address_p checks for the range
19650 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19651 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19652 [0x7ff4,0x7fff] respectively, so test for the
19653 intersection of these ranges, [0x7ffc,0x7fff] and
19654 [0x7ff4,0x7ff7] respectively.
19655
19656 Note that the address we see here may have been
19657 manipulated by legitimize_reload_address. */
19658 if (GET_CODE (addr) == LO_SUM
19659 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19660 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19661 {
19662 if (in_p)
19663 sri->icode = CODE_FOR_reload_si_load;
19664 else
19665 sri->icode = CODE_FOR_reload_si_store;
19666 sri->extra_cost = 2;
19667 ret = NO_REGS;
19668 done_p = true;
19669 }
19670 else
19671 default_p = true;
19672 }
19673 else
19674 default_p = true;
19675 }
19676
19677 if (!done_p)
19678 default_p = true;
19679
19680 if (default_p)
19681 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19682
19683 gcc_assert (ret != ALL_REGS);
19684
19685 if (TARGET_DEBUG_ADDR)
19686 {
19687 fprintf (stderr,
19688 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19689 "mode = %s",
19690 reg_class_names[ret],
19691 in_p ? "true" : "false",
19692 reg_class_names[rclass],
19693 GET_MODE_NAME (mode));
19694
19695 if (reload_completed)
19696 fputs (", after reload", stderr);
19697
19698 if (!done_p)
19699 fputs (", done_p not set", stderr);
19700
19701 if (default_p)
19702 fputs (", default secondary reload", stderr);
19703
19704 if (sri->icode != CODE_FOR_nothing)
19705 fprintf (stderr, ", reload func = %s, extra cost = %d",
19706 insn_data[sri->icode].name, sri->extra_cost);
19707
19708 else if (sri->extra_cost > 0)
19709 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19710
19711 fputs ("\n", stderr);
19712 debug_rtx (x);
19713 }
19714
19715 return ret;
19716 }
19717
19718 /* Better tracing for rs6000_secondary_reload_inner. */
19719
19720 static void
19721 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19722 bool store_p)
19723 {
19724 rtx set, clobber;
19725
19726 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19727
19728 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19729 store_p ? "store" : "load");
19730
19731 if (store_p)
19732 set = gen_rtx_SET (mem, reg);
19733 else
19734 set = gen_rtx_SET (reg, mem);
19735
19736 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19737 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19738 }
19739
19740 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19741 ATTRIBUTE_NORETURN;
19742
19743 static void
19744 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19745 bool store_p)
19746 {
19747 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19748 gcc_unreachable ();
19749 }
19750
19751 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19752 reload helper functions. These were identified in
19753 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19754 reload, it calls the insns:
19755 reload_<RELOAD:mode>_<P:mptrsize>_store
19756 reload_<RELOAD:mode>_<P:mptrsize>_load
19757
19758 which in turn calls this function, to do whatever is necessary to create
19759 valid addresses. */
19760
19761 void
19762 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19763 {
19764 int regno = true_regnum (reg);
19765 machine_mode mode = GET_MODE (reg);
19766 addr_mask_type addr_mask;
19767 rtx addr;
19768 rtx new_addr;
19769 rtx op_reg, op0, op1;
19770 rtx and_op;
19771 rtx cc_clobber;
19772 rtvec rv;
19773
19774 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19775 || !base_reg_operand (scratch, GET_MODE (scratch)))
19776 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19777
19778 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19779 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19780
19781 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19782 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19783
19784 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19785 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19786
19787 else
19788 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19789
19790 /* Make sure the mode is valid in this register class. */
19791 if ((addr_mask & RELOAD_REG_VALID) == 0)
19792 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19793
19794 if (TARGET_DEBUG_ADDR)
19795 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19796
19797 new_addr = addr = XEXP (mem, 0);
19798 switch (GET_CODE (addr))
19799 {
19800 /* Does the register class support auto update forms for this mode? If
19801 not, do the update now. We don't need a scratch register, since the
19802 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19803 case PRE_INC:
19804 case PRE_DEC:
19805 op_reg = XEXP (addr, 0);
19806 if (!base_reg_operand (op_reg, Pmode))
19807 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19808
19809 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19810 {
19811 int delta = GET_MODE_SIZE (mode);
19812 if (GET_CODE (addr) == PRE_DEC)
19813 delta = -delta;
19814 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19815 new_addr = op_reg;
19816 }
19817 break;
19818
19819 case PRE_MODIFY:
19820 op0 = XEXP (addr, 0);
19821 op1 = XEXP (addr, 1);
19822 if (!base_reg_operand (op0, Pmode)
19823 || GET_CODE (op1) != PLUS
19824 || !rtx_equal_p (op0, XEXP (op1, 0)))
19825 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19826
19827 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19828 {
19829 emit_insn (gen_rtx_SET (op0, op1));
19830 new_addr = op0;
19831 }
19832 break;
19833
19834 /* Do we need to simulate AND -16 to clear the bottom address bits used
19835 in VMX load/stores? */
19836 case AND:
19837 op0 = XEXP (addr, 0);
19838 op1 = XEXP (addr, 1);
19839 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19840 {
19841 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
19842 op_reg = op0;
19843
19844 else if (GET_CODE (op1) == PLUS)
19845 {
19846 emit_insn (gen_rtx_SET (scratch, op1));
19847 op_reg = scratch;
19848 }
19849
19850 else
19851 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19852
19853 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19854 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19855 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19856 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19857 new_addr = scratch;
19858 }
19859 break;
19860
19861 /* If this is an indirect address, make sure it is a base register. */
19862 case REG:
19863 case SUBREG:
19864 if (!base_reg_operand (addr, GET_MODE (addr)))
19865 {
19866 emit_insn (gen_rtx_SET (scratch, addr));
19867 new_addr = scratch;
19868 }
19869 break;
19870
19871 /* If this is an indexed address, make sure the register class can handle
19872 indexed addresses for this mode. */
19873 case PLUS:
19874 op0 = XEXP (addr, 0);
19875 op1 = XEXP (addr, 1);
19876 if (!base_reg_operand (op0, Pmode))
19877 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19878
19879 else if (int_reg_operand (op1, Pmode))
19880 {
19881 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19882 {
19883 emit_insn (gen_rtx_SET (scratch, addr));
19884 new_addr = scratch;
19885 }
19886 }
19887
19888 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19889 {
19890 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19891 || !quad_address_p (addr, mode, false))
19892 {
19893 emit_insn (gen_rtx_SET (scratch, addr));
19894 new_addr = scratch;
19895 }
19896 }
19897
19898 /* Make sure the register class can handle offset addresses. */
19899 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19900 {
19901 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19902 {
19903 emit_insn (gen_rtx_SET (scratch, addr));
19904 new_addr = scratch;
19905 }
19906 }
19907
19908 else
19909 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19910
19911 break;
19912
19913 case LO_SUM:
19914 op0 = XEXP (addr, 0);
19915 op1 = XEXP (addr, 1);
19916 if (!base_reg_operand (op0, Pmode))
19917 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19918
19919 else if (int_reg_operand (op1, Pmode))
19920 {
19921 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19922 {
19923 emit_insn (gen_rtx_SET (scratch, addr));
19924 new_addr = scratch;
19925 }
19926 }
19927
19928 /* Quad offsets are restricted and can't handle normal addresses. */
19929 else if (mode_supports_dq_form (mode))
19930 {
19931 emit_insn (gen_rtx_SET (scratch, addr));
19932 new_addr = scratch;
19933 }
19934
19935 /* Make sure the register class can handle offset addresses. */
19936 else if (legitimate_lo_sum_address_p (mode, addr, false))
19937 {
19938 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19939 {
19940 emit_insn (gen_rtx_SET (scratch, addr));
19941 new_addr = scratch;
19942 }
19943 }
19944
19945 else
19946 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19947
19948 break;
19949
19950 case SYMBOL_REF:
19951 case CONST:
19952 case LABEL_REF:
19953 rs6000_emit_move (scratch, addr, Pmode);
19954 new_addr = scratch;
19955 break;
19956
19957 default:
19958 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19959 }
19960
19961 /* Adjust the address if it changed. */
19962 if (addr != new_addr)
19963 {
19964 mem = replace_equiv_address_nv (mem, new_addr);
19965 if (TARGET_DEBUG_ADDR)
19966 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19967 }
19968
19969 /* Now create the move. */
19970 if (store_p)
19971 emit_insn (gen_rtx_SET (mem, reg));
19972 else
19973 emit_insn (gen_rtx_SET (reg, mem));
19974
19975 return;
19976 }
19977
19978 /* Convert reloads involving 64-bit gprs and misaligned offset
19979 addressing, or multiple 32-bit gprs and offsets that are too large,
19980 to use indirect addressing. */
19981
19982 void
19983 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19984 {
19985 int regno = true_regnum (reg);
19986 enum reg_class rclass;
19987 rtx addr;
19988 rtx scratch_or_premodify = scratch;
19989
19990 if (TARGET_DEBUG_ADDR)
19991 {
19992 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19993 store_p ? "store" : "load");
19994 fprintf (stderr, "reg:\n");
19995 debug_rtx (reg);
19996 fprintf (stderr, "mem:\n");
19997 debug_rtx (mem);
19998 fprintf (stderr, "scratch:\n");
19999 debug_rtx (scratch);
20000 }
20001
20002 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20003 gcc_assert (GET_CODE (mem) == MEM);
20004 rclass = REGNO_REG_CLASS (regno);
20005 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20006 addr = XEXP (mem, 0);
20007
20008 if (GET_CODE (addr) == PRE_MODIFY)
20009 {
20010 gcc_assert (REG_P (XEXP (addr, 0))
20011 && GET_CODE (XEXP (addr, 1)) == PLUS
20012 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20013 scratch_or_premodify = XEXP (addr, 0);
20014 addr = XEXP (addr, 1);
20015 }
20016 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20017
20018 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20019
20020 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20021
20022 /* Now create the move. */
20023 if (store_p)
20024 emit_insn (gen_rtx_SET (mem, reg));
20025 else
20026 emit_insn (gen_rtx_SET (reg, mem));
20027
20028 return;
20029 }
20030
20031 /* Given an rtx X being reloaded into a reg required to be
20032 in class CLASS, return the class of reg to actually use.
20033 In general this is just CLASS; but on some machines
20034 in some cases it is preferable to use a more restrictive class.
20035
20036 On the RS/6000, we have to return NO_REGS when we want to reload a
20037 floating-point CONST_DOUBLE to force it to be copied to memory.
20038
20039 We also don't want to reload integer values into floating-point
20040 registers if we can at all help it. In fact, this can
20041 cause reload to die, if it tries to generate a reload of CTR
20042 into a FP register and discovers it doesn't have the memory location
20043 required.
20044
20045 ??? Would it be a good idea to have reload do the converse, that is
20046 try to reload floating modes into FP registers if possible?
20047 */
20048
20049 static enum reg_class
20050 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20051 {
20052 machine_mode mode = GET_MODE (x);
20053 bool is_constant = CONSTANT_P (x);
20054
20055 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20056 reload class for it. */
20057 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20058 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20059 return NO_REGS;
20060
20061 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20062 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20063 return NO_REGS;
20064
20065 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20066 the reloading of address expressions using PLUS into floating point
20067 registers. */
20068 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20069 {
20070 if (is_constant)
20071 {
20072 /* Zero is always allowed in all VSX registers. */
20073 if (x == CONST0_RTX (mode))
20074 return rclass;
20075
20076 /* If this is a vector constant that can be formed with a few Altivec
20077 instructions, we want altivec registers. */
20078 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20079 return ALTIVEC_REGS;
20080
20081 /* If this is an integer constant that can easily be loaded into
20082 vector registers, allow it. */
20083 if (CONST_INT_P (x))
20084 {
20085 HOST_WIDE_INT value = INTVAL (x);
20086
20087 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20088 2.06 can generate it in the Altivec registers with
20089 VSPLTI<x>. */
20090 if (value == -1)
20091 {
20092 if (TARGET_P8_VECTOR)
20093 return rclass;
20094 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20095 return ALTIVEC_REGS;
20096 else
20097 return NO_REGS;
20098 }
20099
20100 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20101 a sign extend in the Altivec registers. */
20102 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20103 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20104 return ALTIVEC_REGS;
20105 }
20106
20107 /* Force constant to memory. */
20108 return NO_REGS;
20109 }
20110
20111 /* D-form addressing can easily reload the value. */
20112 if (mode_supports_vmx_dform (mode)
20113 || mode_supports_dq_form (mode))
20114 return rclass;
20115
20116 /* If this is a scalar floating point value and we don't have D-form
20117 addressing, prefer the traditional floating point registers so that we
20118 can use D-form (register+offset) addressing. */
20119 if (rclass == VSX_REGS
20120 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20121 return FLOAT_REGS;
20122
20123 /* Prefer the Altivec registers if Altivec is handling the vector
20124 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20125 loads. */
20126 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20127 || mode == V1TImode)
20128 return ALTIVEC_REGS;
20129
20130 return rclass;
20131 }
20132
20133 if (is_constant || GET_CODE (x) == PLUS)
20134 {
20135 if (reg_class_subset_p (GENERAL_REGS, rclass))
20136 return GENERAL_REGS;
20137 if (reg_class_subset_p (BASE_REGS, rclass))
20138 return BASE_REGS;
20139 return NO_REGS;
20140 }
20141
20142 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20143 return GENERAL_REGS;
20144
20145 return rclass;
20146 }
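
/* Examples (following the code above): a CONST_VECTOR that
   easy_vector_constant accepts prefers ALTIVEC_REGS; CONST_INT -1 stays in
   the requested VSX class on ISA 2.07 (XXLORC can generate it); an SFmode
   value headed for VSX_REGS without D-form VMX addressing prefers
   FLOAT_REGS, so that register+offset addressing can be used; and other
   constants bound for VSX registers return NO_REGS, forcing them to
   memory.  */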
20147
20148 /* Debug version of rs6000_preferred_reload_class. */
20149 static enum reg_class
20150 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20151 {
20152 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20153
20154 fprintf (stderr,
20155 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20156 "mode = %s, x:\n",
20157 reg_class_names[ret], reg_class_names[rclass],
20158 GET_MODE_NAME (GET_MODE (x)));
20159 debug_rtx (x);
20160
20161 return ret;
20162 }
20163
20164 /* If we are copying between FP or AltiVec registers and anything else, we need
20165 a memory location. The exception is when we are targeting ppc64 and the
20166 direct moves between FPRs and GPRs are available. Also, under VSX, you
20167 can copy vector registers from the FP register set to the Altivec register
20168 set and vice versa. */
20169
20170 static bool
20171 rs6000_secondary_memory_needed (machine_mode mode,
20172 reg_class_t from_class,
20173 reg_class_t to_class)
20174 {
20175 enum rs6000_reg_type from_type, to_type;
20176 bool altivec_p = ((from_class == ALTIVEC_REGS)
20177 || (to_class == ALTIVEC_REGS));
20178
20179 /* If a simple/direct move is available, we don't need secondary memory. */
20180 from_type = reg_class_to_reg_type[(int)from_class];
20181 to_type = reg_class_to_reg_type[(int)to_class];
20182
20183 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20184 (secondary_reload_info *)0, altivec_p))
20185 return false;
20186
20187 /* If we have a floating point or vector register class, we need to use
20188 memory to transfer the data. */
20189 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20190 return true;
20191
20192 return false;
20193 }
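/* For example, on a pre-power8 64-bit target a DFmode copy between
   GENERAL_REGS and FLOAT_REGS must go through a stack slot, so this
   returns true; with -mcpu=power8 the direct move instructions
   (mtvsrd/mfvsrd) satisfy rs6000_secondary_reload_move first and no
   memory is needed.  */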
20194
20195 /* Debug version of rs6000_secondary_memory_needed. */
20196 static bool
20197 rs6000_debug_secondary_memory_needed (machine_mode mode,
20198 reg_class_t from_class,
20199 reg_class_t to_class)
20200 {
20201 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20202
20203 fprintf (stderr,
20204 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20205 "to_class = %s, mode = %s\n",
20206 ret ? "true" : "false",
20207 reg_class_names[from_class],
20208 reg_class_names[to_class],
20209 GET_MODE_NAME (mode));
20210
20211 return ret;
20212 }
20213
20214 /* Return the register class of a scratch register needed to copy IN into
20215 or out of a register in RCLASS in MODE. If it can be done directly,
20216 NO_REGS is returned. */
20217
20218 static enum reg_class
20219 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20220 rtx in)
20221 {
20222 int regno;
20223
20224 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20225 #if TARGET_MACHO
20226 && MACHOPIC_INDIRECT
20227 #endif
20228 ))
20229 {
20230 /* We cannot copy a symbolic operand directly into anything
20231 other than BASE_REGS for TARGET_ELF. So indicate that a
20232 register from BASE_REGS is needed as an intermediate
20233 register.
20234
20235 On Darwin, pic addresses require a load from memory, which
20236 needs a base register. */
20237 if (rclass != BASE_REGS
20238 && (GET_CODE (in) == SYMBOL_REF
20239 || GET_CODE (in) == HIGH
20240 || GET_CODE (in) == LABEL_REF
20241 || GET_CODE (in) == CONST))
20242 return BASE_REGS;
20243 }
20244
20245 if (GET_CODE (in) == REG)
20246 {
20247 regno = REGNO (in);
20248 if (regno >= FIRST_PSEUDO_REGISTER)
20249 {
20250 regno = true_regnum (in);
20251 if (regno >= FIRST_PSEUDO_REGISTER)
20252 regno = -1;
20253 }
20254 }
20255 else if (GET_CODE (in) == SUBREG)
20256 {
20257 regno = true_regnum (in);
20258 if (regno >= FIRST_PSEUDO_REGISTER)
20259 regno = -1;
20260 }
20261 else
20262 regno = -1;
20263
20264 /* If we have VSX register moves, prefer moving scalar values between
20265 Altivec registers and GPR by going via an FPR (and then via memory)
20266 instead of reloading the secondary memory address for Altivec moves. */
20267 if (TARGET_VSX
20268 && GET_MODE_SIZE (mode) < 16
20269 && !mode_supports_vmx_dform (mode)
20270 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20271 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20272 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20273 && (regno >= 0 && INT_REGNO_P (regno)))))
20274 return FLOAT_REGS;
20275
20276 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20277 into anything. */
20278 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20279 || (regno >= 0 && INT_REGNO_P (regno)))
20280 return NO_REGS;
20281
20282 /* Constants, memory, and VSX registers can go into VSX registers (both the
20283 traditional floating point and the altivec registers). */
20284 if (rclass == VSX_REGS
20285 && (regno == -1 || VSX_REGNO_P (regno)))
20286 return NO_REGS;
20287
20288 /* Constants, memory, and FP registers can go into FP registers. */
20289 if ((regno == -1 || FP_REGNO_P (regno))
20290 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20291 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20292
20293 /* Memory, and AltiVec registers can go into AltiVec registers. */
20294 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20295 && rclass == ALTIVEC_REGS)
20296 return NO_REGS;
20297
20298 /* We can copy among the CR registers. */
20299 if ((rclass == CR_REGS || rclass == CR0_REGS)
20300 && regno >= 0 && CR_REGNO_P (regno))
20301 return NO_REGS;
20302
20303 /* Otherwise, we need GENERAL_REGS. */
20304 return GENERAL_REGS;
20305 }
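/* For example, a DImode value sitting in an Altivec register that
   must reach a GPR (with no D-form vector addressing available) is
   answered with FLOAT_REGS above, routing the reload
   Altivec -> FPR -> memory -> GPR rather than reloading an Altivec
   memory address.  Likewise, a SYMBOL_REF headed anywhere but
   BASE_REGS on ELF is answered with BASE_REGS so the address is
   built in a base register first.  */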
20306
20307 /* Debug version of rs6000_secondary_reload_class. */
20308 static enum reg_class
20309 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20310 machine_mode mode, rtx in)
20311 {
20312 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20313 fprintf (stderr,
20314 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20315 "mode = %s, input rtx:\n",
20316 reg_class_names[ret], reg_class_names[rclass],
20317 GET_MODE_NAME (mode));
20318 debug_rtx (in);
20319
20320 return ret;
20321 }
20322
20323 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20324
20325 static bool
20326 rs6000_can_change_mode_class (machine_mode from,
20327 machine_mode to,
20328 reg_class_t rclass)
20329 {
20330 unsigned from_size = GET_MODE_SIZE (from);
20331 unsigned to_size = GET_MODE_SIZE (to);
20332
20333 if (from_size != to_size)
20334 {
20335 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20336
20337 if (reg_classes_intersect_p (xclass, rclass))
20338 {
20339 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20340 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20341 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20342 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20343
20344 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20345 single register under VSX because the scalar part of the register
20346 is in the upper 64-bits, and not the lower 64-bits. Types like
20347 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20348 IEEE floating point can't overlap, and neither can small
20349 values. */
20350
20351 if (to_float128_vector_p && from_float128_vector_p)
20352 return true;
20353
20354 else if (to_float128_vector_p || from_float128_vector_p)
20355 return false;
20356
20357 /* TDmode in floating-mode registers must always go into a register
20358 pair with the most significant word in the even-numbered register
20359 to match ISA requirements. In little-endian mode, this does not
20360 match subreg numbering, so we cannot allow subregs. */
20361 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20362 return false;
20363
20364 if (from_size < 8 || to_size < 8)
20365 return false;
20366
20367 if (from_size == 8 && (8 * to_nregs) != to_size)
20368 return false;
20369
20370 if (to_size == 8 && (8 * from_nregs) != from_size)
20371 return false;
20372
20373 return true;
20374 }
20375 else
20376 return true;
20377 }
20378
20379 /* Since the VSX register set includes traditional floating point registers
20380 and altivec registers, just check for the size being different instead of
20381 trying to check whether the modes are vector modes. Otherwise it won't
20382 allow say DF and DI to change classes. For types like TFmode and TDmode
20383 that take 2 64-bit registers, rather than a single 128-bit register, don't
20384 allow subregs of those types to other 128-bit types. */
20385 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20386 {
20387 unsigned num_regs = (from_size + 15) / 16;
20388 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20389 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20390 return false;
20391
20392 return (from_size == 8 || from_size == 16);
20393 }
20394
20395 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20396 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20397 return false;
20398
20399 return true;
20400 }
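/* Examples of the rules above: DImode <-> DFmode subregs are allowed
   in VSX registers since both are 8 bytes wide, but a DImode subreg
   of a KFmode value is rejected because the scalar lives in the
   upper 64 bits of the 128-bit register.  On little-endian targets,
   size-changing subregs involving TDmode in the FP/VSX classes are
   always rejected because of its even/odd register-pair
   requirement.  */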
20401
20402 /* Debug version of rs6000_can_change_mode_class. */
20403 static bool
20404 rs6000_debug_can_change_mode_class (machine_mode from,
20405 machine_mode to,
20406 reg_class_t rclass)
20407 {
20408 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20409
20410 fprintf (stderr,
20411 "rs6000_can_change_mode_class, return %s, from = %s, "
20412 "to = %s, rclass = %s\n",
20413 ret ? "true" : "false",
20414 GET_MODE_NAME (from), GET_MODE_NAME (to),
20415 reg_class_names[rclass]);
20416
20417 return ret;
20418 }
20419 \f
20420 /* Return a string to do a move operation of 128 bits of data. */
20421
20422 const char *
20423 rs6000_output_move_128bit (rtx operands[])
20424 {
20425 rtx dest = operands[0];
20426 rtx src = operands[1];
20427 machine_mode mode = GET_MODE (dest);
20428 int dest_regno;
20429 int src_regno;
20430 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20431 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20432
20433 if (REG_P (dest))
20434 {
20435 dest_regno = REGNO (dest);
20436 dest_gpr_p = INT_REGNO_P (dest_regno);
20437 dest_fp_p = FP_REGNO_P (dest_regno);
20438 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20439 dest_vsx_p = dest_fp_p | dest_vmx_p;
20440 }
20441 else
20442 {
20443 dest_regno = -1;
20444 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20445 }
20446
20447 if (REG_P (src))
20448 {
20449 src_regno = REGNO (src);
20450 src_gpr_p = INT_REGNO_P (src_regno);
20451 src_fp_p = FP_REGNO_P (src_regno);
20452 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20453 src_vsx_p = src_fp_p | src_vmx_p;
20454 }
20455 else
20456 {
20457 src_regno = -1;
20458 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20459 }
20460
20461 /* Register moves. */
20462 if (dest_regno >= 0 && src_regno >= 0)
20463 {
20464 if (dest_gpr_p)
20465 {
20466 if (src_gpr_p)
20467 return "#";
20468
20469 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20470 return (WORDS_BIG_ENDIAN
20471 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20472 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20473
20474 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20475 return "#";
20476 }
20477
20478 else if (TARGET_VSX && dest_vsx_p)
20479 {
20480 if (src_vsx_p)
20481 return "xxlor %x0,%x1,%x1";
20482
20483 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20484 return (WORDS_BIG_ENDIAN
20485 ? "mtvsrdd %x0,%1,%L1"
20486 : "mtvsrdd %x0,%L1,%1");
20487
20488 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20489 return "#";
20490 }
20491
20492 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20493 return "vor %0,%1,%1";
20494
20495 else if (dest_fp_p && src_fp_p)
20496 return "#";
20497 }
20498
20499 /* Loads. */
20500 else if (dest_regno >= 0 && MEM_P (src))
20501 {
20502 if (dest_gpr_p)
20503 {
20504 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20505 return "lq %0,%1";
20506 else
20507 return "#";
20508 }
20509
20510 else if (TARGET_ALTIVEC && dest_vmx_p
20511 && altivec_indexed_or_indirect_operand (src, mode))
20512 return "lvx %0,%y1";
20513
20514 else if (TARGET_VSX && dest_vsx_p)
20515 {
20516 if (mode_supports_dq_form (mode)
20517 && quad_address_p (XEXP (src, 0), mode, true))
20518 return "lxv %x0,%1";
20519
20520 else if (TARGET_P9_VECTOR)
20521 return "lxvx %x0,%y1";
20522
20523 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20524 return "lxvw4x %x0,%y1";
20525
20526 else
20527 return "lxvd2x %x0,%y1";
20528 }
20529
20530 else if (TARGET_ALTIVEC && dest_vmx_p)
20531 return "lvx %0,%y1";
20532
20533 else if (dest_fp_p)
20534 return "#";
20535 }
20536
20537 /* Stores. */
20538 else if (src_regno >= 0 && MEM_P (dest))
20539 {
20540 if (src_gpr_p)
20541 {
20542 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20543 return "stq %1,%0";
20544 else
20545 return "#";
20546 }
20547
20548 else if (TARGET_ALTIVEC && src_vmx_p
20549 && altivec_indexed_or_indirect_operand (dest, mode))
20550 return "stvx %1,%y0";
20551
20552 else if (TARGET_VSX && src_vsx_p)
20553 {
20554 if (mode_supports_dq_form (mode)
20555 && quad_address_p (XEXP (dest, 0), mode, true))
20556 return "stxv %x1,%0";
20557
20558 else if (TARGET_P9_VECTOR)
20559 return "stxvx %x1,%y0";
20560
20561 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20562 return "stxvw4x %x1,%y0";
20563
20564 else
20565 return "stxvd2x %x1,%y0";
20566 }
20567
20568 else if (TARGET_ALTIVEC && src_vmx_p)
20569 return "stvx %1,%y0";
20570
20571 else if (src_fp_p)
20572 return "#";
20573 }
20574
20575 /* Constants. */
20576 else if (dest_regno >= 0
20577 && (GET_CODE (src) == CONST_INT
20578 || GET_CODE (src) == CONST_WIDE_INT
20579 || GET_CODE (src) == CONST_DOUBLE
20580 || GET_CODE (src) == CONST_VECTOR))
20581 {
20582 if (dest_gpr_p)
20583 return "#";
20584
20585 else if ((dest_vmx_p && TARGET_ALTIVEC)
20586 || (dest_vsx_p && TARGET_VSX))
20587 return output_vec_const_move (operands);
20588 }
20589
20590 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20591 }
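/* For illustration, some of the templates returned above (with
   operand 0 as the destination): a VSX register-to-register copy
   yields "xxlor %x0,%x1,%x1", a TImode GPR load with -mquad-memory
   yields "lq %0,%1", and a pre-ISA-3.0 V4SImode VSX load yields
   "lxvw4x %x0,%y1".  A "#" template means the move is split into
   smaller moves after reload.  */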
20592
20593 /* Validate a 128-bit move. */
20594 bool
20595 rs6000_move_128bit_ok_p (rtx operands[])
20596 {
20597 machine_mode mode = GET_MODE (operands[0]);
20598 return (gpc_reg_operand (operands[0], mode)
20599 || gpc_reg_operand (operands[1], mode));
20600 }
20601
20602 /* Return true if a 128-bit move needs to be split. */
20603 bool
20604 rs6000_split_128bit_ok_p (rtx operands[])
20605 {
20606 if (!reload_completed)
20607 return false;
20608
20609 if (!gpr_or_gpr_p (operands[0], operands[1]))
20610 return false;
20611
20612 if (quad_load_store_p (operands[0], operands[1]))
20613 return false;
20614
20615 return true;
20616 }
20617
20618 \f
20619 /* Given a comparison operation, return the bit number in CCR to test. We
20620 normally expect a valid comparison here.
20621
20622 SCC_P is 1 if this is for an scc. That means that %D will have been
20623 used instead of %C, so the bits will be in different places.
20624
20625 Return -1 if OP isn't a valid comparison for some reason. */
20626
20627 int
20628 ccr_bit (rtx op, int scc_p)
20629 {
20630 enum rtx_code code = GET_CODE (op);
20631 machine_mode cc_mode;
20632 int cc_regnum;
20633 int base_bit;
20634 rtx reg;
20635
20636 if (!COMPARISON_P (op))
20637 return -1;
20638
20639 reg = XEXP (op, 0);
20640
20641 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20642 return -1;
20643
20644 cc_mode = GET_MODE (reg);
20645 cc_regnum = REGNO (reg);
20646 base_bit = 4 * (cc_regnum - CR0_REGNO);
20647
20648 validate_condition_mode (code, cc_mode);
20649
20650 /* When generating a sCOND operation, only positive conditions are
20651 allowed. */
20652 if (scc_p)
20653 switch (code)
20654 {
20655 case EQ:
20656 case GT:
20657 case LT:
20658 case UNORDERED:
20659 case GTU:
20660 case LTU:
20661 break;
20662 default:
20663 return -1;
20664 }
20665
20666 switch (code)
20667 {
20668 case NE:
20669 return scc_p ? base_bit + 3 : base_bit + 2;
20670 case EQ:
20671 return base_bit + 2;
20672 case GT: case GTU: case UNLE:
20673 return base_bit + 1;
20674 case LT: case LTU: case UNGE:
20675 return base_bit;
20676 case ORDERED: case UNORDERED:
20677 return base_bit + 3;
20678
20679 case GE: case GEU:
20680 /* If scc, we will have done a cror to put the bit in the
20681 unordered position. So test that bit. For integer, this is ! LT
20682 unless this is an scc insn. */
20683 return scc_p ? base_bit + 3 : base_bit;
20684
20685 case LE: case LEU:
20686 return scc_p ? base_bit + 3 : base_bit + 1;
20687
20688 default:
20689 return -1;
20690 }
20691 }
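/* Worked example: a GT comparison in CR field 2 gives
   base_bit = 4 * 2 = 8, so the GT bit to test is 8 + 1 = 9.  For an
   scc GE the preceding cror has copied the result into the unordered
   slot, so base_bit + 3 = 11 is returned instead of base_bit.  */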
20692 \f
20693 /* Return the GOT register. */
20694
20695 rtx
20696 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20697 {
20698 /* The second flow pass currently (June 1999) can't update
20699 regs_ever_live without disturbing other parts of the compiler, so
20700 update it here to make the prolog/epilogue code happy. */
20701 if (!can_create_pseudo_p ()
20702 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20703 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20704
20705 crtl->uses_pic_offset_table = 1;
20706
20707 return pic_offset_table_rtx;
20708 }
20709 \f
20710 static rs6000_stack_t stack_info;
20711
20712 /* Function to init struct machine_function.
20713 This will be called, via a pointer variable,
20714 from push_function_context. */
20715
20716 static struct machine_function *
20717 rs6000_init_machine_status (void)
20718 {
20719 stack_info.reload_completed = 0;
20720 return ggc_cleared_alloc<machine_function> ();
20721 }
20722 \f
20723 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20724
20725 /* Write out a function code label. */
20726
20727 void
20728 rs6000_output_function_entry (FILE *file, const char *fname)
20729 {
20730 if (fname[0] != '.')
20731 {
20732 switch (DEFAULT_ABI)
20733 {
20734 default:
20735 gcc_unreachable ();
20736
20737 case ABI_AIX:
20738 if (DOT_SYMBOLS)
20739 putc ('.', file);
20740 else
20741 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20742 break;
20743
20744 case ABI_ELFv2:
20745 case ABI_V4:
20746 case ABI_DARWIN:
20747 break;
20748 }
20749 }
20750
20751 RS6000_OUTPUT_BASENAME (file, fname);
20752 }
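/* For example, on AIX with dot symbols "foo" is emitted as ".foo"
   (the code entry point, as opposed to the function descriptor),
   while the ELFv2, SysV, and Darwin ABIs emit the name unchanged.  */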
20753
20754 /* Print an operand. Recognize special options, documented below. */
20755
20756 #if TARGET_ELF
20757 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20758 only introduced by the linker, when applying the sda21
20759 relocation. */
20760 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20761 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20762 #else
20763 #define SMALL_DATA_RELOC "sda21"
20764 #define SMALL_DATA_REG 0
20765 #endif
20766
20767 void
20768 print_operand (FILE *file, rtx x, int code)
20769 {
20770 int i;
20771 unsigned HOST_WIDE_INT uval;
20772
20773 switch (code)
20774 {
20775 /* %a is output_address. */
20776
20777 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20778 output_operand. */
20779
20780 case 'D':
20781 /* Like 'J' but get to the GT bit only. */
20782 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20783 {
20784 output_operand_lossage ("invalid %%D value");
20785 return;
20786 }
20787
20788 /* Bit 1 is GT bit. */
20789 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20790
20791 /* Add one for shift count in rlinm for scc. */
20792 fprintf (file, "%d", i + 1);
20793 return;
20794
20795 case 'e':
20796 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20797 if (! INT_P (x))
20798 {
20799 output_operand_lossage ("invalid %%e value");
20800 return;
20801 }
20802
20803 uval = INTVAL (x);
20804 if ((uval & 0xffff) == 0 && uval != 0)
20805 putc ('s', file);
20806 return;
20807
20808 case 'E':
20809 /* X is a CR register. Print the number of the EQ bit of the CR. */
20810 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20811 output_operand_lossage ("invalid %%E value");
20812 else
20813 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20814 return;
20815
20816 case 'f':
20817 /* X is a CR register. Print the shift count needed to move it
20818 to the high-order four bits. */
20819 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20820 output_operand_lossage ("invalid %%f value");
20821 else
20822 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20823 return;
20824
20825 case 'F':
20826 /* Similar, but print the count for the rotate in the opposite
20827 direction. */
20828 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20829 output_operand_lossage ("invalid %%F value");
20830 else
20831 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20832 return;
20833
20834 case 'G':
20835 /* X is a constant integer. If it is negative, print "m",
20836 otherwise print "z". This is to make an aze or ame insn. */
20837 if (GET_CODE (x) != CONST_INT)
20838 output_operand_lossage ("invalid %%G value");
20839 else if (INTVAL (x) >= 0)
20840 putc ('z', file);
20841 else
20842 putc ('m', file);
20843 return;
20844
20845 case 'h':
20846 /* If constant, output low-order five bits. Otherwise, write
20847 normally. */
20848 if (INT_P (x))
20849 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20850 else
20851 print_operand (file, x, 0);
20852 return;
20853
20854 case 'H':
20855 /* If constant, output low-order six bits. Otherwise, write
20856 normally. */
20857 if (INT_P (x))
20858 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20859 else
20860 print_operand (file, x, 0);
20861 return;
20862
20863 case 'I':
20864 /* Print `i' if this is a constant, else nothing. */
20865 if (INT_P (x))
20866 putc ('i', file);
20867 return;
20868
20869 case 'j':
20870 /* Write the bit number in CCR for jump. */
20871 i = ccr_bit (x, 0);
20872 if (i == -1)
20873 output_operand_lossage ("invalid %%j code");
20874 else
20875 fprintf (file, "%d", i);
20876 return;
20877
20878 case 'J':
20879 /* Similar, but add one for shift count in rlinm for scc and pass
20880 scc flag to `ccr_bit'. */
20881 i = ccr_bit (x, 1);
20882 if (i == -1)
20883 output_operand_lossage ("invalid %%J code");
20884 else
20885 /* If we want bit 31, write a shift count of zero, not 32. */
20886 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20887 return;
20888
20889 case 'k':
20890 /* X must be a constant. Write the 1's complement of the
20891 constant. */
20892 if (! INT_P (x))
20893 output_operand_lossage ("invalid %%k value");
20894 else
20895 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20896 return;
20897
20898 case 'K':
20899 /* X must be a symbolic constant on ELF. Write an
20900 expression suitable for an 'addi' that adds in the low 16
20901 bits of the address. */
20902 if (GET_CODE (x) == CONST)
20903 {
20904 if (GET_CODE (XEXP (x, 0)) != PLUS
20905 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
20906 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20907 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
20908 output_operand_lossage ("invalid %%K value");
20909 }
20910 print_operand_address (file, x);
20911 fputs ("@l", file);
20912 return;
20913
20914 /* %l is output_asm_label. */
20915
20916 case 'L':
20917 /* Write second word of DImode or DFmode reference. Works on register
20918 or non-indexed memory only. */
20919 if (REG_P (x))
20920 fputs (reg_names[REGNO (x) + 1], file);
20921 else if (MEM_P (x))
20922 {
20923 machine_mode mode = GET_MODE (x);
20924 /* Handle possible auto-increment. Since it is pre-increment and
20925 we have already done it, we can just use an offset of word. */
20926 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20927 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20928 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20929 UNITS_PER_WORD));
20930 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20931 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20932 UNITS_PER_WORD));
20933 else
20934 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20935 UNITS_PER_WORD),
20936 0));
20937
20938 if (small_data_operand (x, GET_MODE (x)))
20939 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20940 reg_names[SMALL_DATA_REG]);
20941 }
20942 return;
20943
20944 case 'N': /* Unused */
20945 /* Write the number of elements in the vector times 4. */
20946 if (GET_CODE (x) != PARALLEL)
20947 output_operand_lossage ("invalid %%N value");
20948 else
20949 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20950 return;
20951
20952 case 'O': /* Unused */
20953 /* Similar, but subtract 1 first. */
20954 if (GET_CODE (x) != PARALLEL)
20955 output_operand_lossage ("invalid %%O value");
20956 else
20957 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20958 return;
20959
20960 case 'p':
20961 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20962 if (! INT_P (x)
20963 || INTVAL (x) < 0
20964 || (i = exact_log2 (INTVAL (x))) < 0)
20965 output_operand_lossage ("invalid %%p value");
20966 else
20967 fprintf (file, "%d", i);
20968 return;
20969
20970 case 'P':
20971 /* The operand must be an indirect memory reference. The result
20972 is the register name. */
20973 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
20974 || REGNO (XEXP (x, 0)) >= 32)
20975 output_operand_lossage ("invalid %%P value");
20976 else
20977 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20978 return;
20979
20980 case 'q':
20981 /* This outputs the logical code corresponding to a boolean
20982 expression. The expression may have one or both operands
20983 negated (if one, only the first one). For condition register
20984 logical operations, it will also treat the negated
20985 CR codes as NOTs, but not handle NOTs of them. */
20986 {
20987 const char *const *t = 0;
20988 const char *s;
20989 enum rtx_code code = GET_CODE (x);
20990 static const char * const tbl[3][3] = {
20991 { "and", "andc", "nor" },
20992 { "or", "orc", "nand" },
20993 { "xor", "eqv", "xor" } };
20994
20995 if (code == AND)
20996 t = tbl[0];
20997 else if (code == IOR)
20998 t = tbl[1];
20999 else if (code == XOR)
21000 t = tbl[2];
21001 else
21002 output_operand_lossage ("invalid %%q value");
21003
21004 if (GET_CODE (XEXP (x, 0)) != NOT)
21005 s = t[0];
21006 else
21007 {
21008 if (GET_CODE (XEXP (x, 1)) == NOT)
21009 s = t[2];
21010 else
21011 s = t[1];
21012 }
21013
21014 fputs (s, file);
21015 }
21016 return;
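    /* For example, (and (not (reg A)) (reg B)) selects "andc" from the
       table above, and (and (not (reg A)) (not (reg B))) selects "nor";
       only the xor row maps the doubly-negated form back to the plain
       mnemonic, since ~a ^ ~b == a ^ b.  */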
21017
21018 case 'Q':
21019 if (! TARGET_MFCRF)
21020 return;
21021 fputc (',', file);
21022 /* FALLTHRU */
21023
21024 case 'R':
21025 /* X is a CR register. Print the mask for `mtcrf'. */
21026 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21027 output_operand_lossage ("invalid %%R value");
21028 else
21029 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21030 return;
21031
21032 case 's':
21033 /* Low 5 bits of 32 - value. */
21034 if (! INT_P (x))
21035 output_operand_lossage ("invalid %%s value");
21036 else
21037 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21038 return;
21039
21040 case 't':
21041 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21042 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21043 {
21044 output_operand_lossage ("invalid %%t value");
21045 return;
21046 }
21047
21048 /* Bit 3 is OV bit. */
21049 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21050
21051 /* If we want bit 31, write a shift count of zero, not 32. */
21052 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21053 return;
21054
21055 case 'T':
21056 /* Print the symbolic name of a branch target register. */
21057 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21058 x = XVECEXP (x, 0, 0);
21059 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21060 && REGNO (x) != CTR_REGNO))
21061 output_operand_lossage ("invalid %%T value");
21062 else if (REGNO (x) == LR_REGNO)
21063 fputs ("lr", file);
21064 else
21065 fputs ("ctr", file);
21066 return;
21067
21068 case 'u':
21069 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21070 for use in unsigned operand. */
21071 if (! INT_P (x))
21072 {
21073 output_operand_lossage ("invalid %%u value");
21074 return;
21075 }
21076
21077 uval = INTVAL (x);
21078 if ((uval & 0xffff) == 0)
21079 uval >>= 16;
21080
21081 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21082 return;
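    /* E.g. %u on (const_int 0x12340000) prints 0x1234 (the high half,
       because the low half is zero), while %u on (const_int 0x1234)
       prints 0x1234 directly.  */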
21083
21084 case 'v':
21085 /* High-order 16 bits of constant for use in signed operand. */
21086 if (! INT_P (x))
21087 output_operand_lossage ("invalid %%v value");
21088 else
21089 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21090 (INTVAL (x) >> 16) & 0xffff);
21091 return;
21092
21093 case 'U':
21094 /* Print `u' if this has an auto-increment or auto-decrement. */
21095 if (MEM_P (x)
21096 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21097 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21098 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21099 putc ('u', file);
21100 return;
21101
21102 case 'V':
21103 /* Print the trap code for this operand. */
21104 switch (GET_CODE (x))
21105 {
21106 case EQ:
21107 fputs ("eq", file); /* 4 */
21108 break;
21109 case NE:
21110 fputs ("ne", file); /* 24 */
21111 break;
21112 case LT:
21113 fputs ("lt", file); /* 16 */
21114 break;
21115 case LE:
21116 fputs ("le", file); /* 20 */
21117 break;
21118 case GT:
21119 fputs ("gt", file); /* 8 */
21120 break;
21121 case GE:
21122 fputs ("ge", file); /* 12 */
21123 break;
21124 case LTU:
21125 fputs ("llt", file); /* 2 */
21126 break;
21127 case LEU:
21128 fputs ("lle", file); /* 6 */
21129 break;
21130 case GTU:
21131 fputs ("lgt", file); /* 1 */
21132 break;
21133 case GEU:
21134 fputs ("lge", file); /* 5 */
21135 break;
21136 default:
21137 output_operand_lossage ("invalid %%V value");
21138 }
21139 break;
21140
21141 case 'w':
21142 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21143 normally. */
21144 if (INT_P (x))
21145 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21146 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21147 else
21148 print_operand (file, x, 0);
21149 return;
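    /* The %w arithmetic sign-extends the low halfword: for 0xffff,
       ((0xffff & 0xffff) ^ 0x8000) - 0x8000 yields -1, while for 0x7fff
       the same expression yields 0x7fff unchanged.  */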
21150
21151 case 'x':
21152 /* X is a FPR or Altivec register used in a VSX context. */
21153 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21154 output_operand_lossage ("invalid %%x value");
21155 else
21156 {
21157 int reg = REGNO (x);
21158 int vsx_reg = (FP_REGNO_P (reg)
21159 ? reg - 32
21160 : reg - FIRST_ALTIVEC_REGNO + 32);
21161
21162 #ifdef TARGET_REGNAMES
21163 if (TARGET_REGNAMES)
21164 fprintf (file, "%%vs%d", vsx_reg);
21165 else
21166 #endif
21167 fprintf (file, "%d", vsx_reg);
21168 }
21169 return;
21170
21171 case 'X':
21172 if (MEM_P (x)
21173 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21174 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21175 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21176 putc ('x', file);
21177 return;
21178
21179 case 'Y':
21180 /* Like 'L', for third word of TImode/PTImode. */
21181 if (REG_P (x))
21182 fputs (reg_names[REGNO (x) + 2], file);
21183 else if (MEM_P (x))
21184 {
21185 machine_mode mode = GET_MODE (x);
21186 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21187 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21188 output_address (mode, plus_constant (Pmode,
21189 XEXP (XEXP (x, 0), 0), 8));
21190 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21191 output_address (mode, plus_constant (Pmode,
21192 XEXP (XEXP (x, 0), 0), 8));
21193 else
21194 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21195 if (small_data_operand (x, GET_MODE (x)))
21196 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21197 reg_names[SMALL_DATA_REG]);
21198 }
21199 return;
21200
21201 case 'z':
21202 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21203 x = XVECEXP (x, 0, 1);
21204 /* X is a SYMBOL_REF. Write out the name preceded by a
21205 period and without any trailing data in brackets. Used for function
21206 names. If we are configured for System V (or the embedded ABI) on
21207 the PowerPC, do not emit the period, since those systems do not use
21208 TOCs and the like. */
21209 if (!SYMBOL_REF_P (x))
21210 {
21211 output_operand_lossage ("invalid %%z value");
21212 return;
21213 }
21214
21215 /* For macho, check to see if we need a stub. */
21216 if (TARGET_MACHO)
21217 {
21218 const char *name = XSTR (x, 0);
21219 #if TARGET_MACHO
21220 if (darwin_emit_branch_islands
21221 && MACHOPIC_INDIRECT
21222 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21223 name = machopic_indirection_name (x, /*stub_p=*/true);
21224 #endif
21225 assemble_name (file, name);
21226 }
21227 else if (!DOT_SYMBOLS)
21228 assemble_name (file, XSTR (x, 0));
21229 else
21230 rs6000_output_function_entry (file, XSTR (x, 0));
21231 return;
21232
21233 case 'Z':
21234 /* Like 'L', for last word of TImode/PTImode. */
21235 if (REG_P (x))
21236 fputs (reg_names[REGNO (x) + 3], file);
21237 else if (MEM_P (x))
21238 {
21239 machine_mode mode = GET_MODE (x);
21240 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21241 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21242 output_address (mode, plus_constant (Pmode,
21243 XEXP (XEXP (x, 0), 0), 12));
21244 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21245 output_address (mode, plus_constant (Pmode,
21246 XEXP (XEXP (x, 0), 0), 12));
21247 else
21248 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21249 if (small_data_operand (x, GET_MODE (x)))
21250 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21251 reg_names[SMALL_DATA_REG]);
21252 }
21253 return;
21254
21255 /* Print AltiVec memory operand. */
21256 case 'y':
21257 {
21258 rtx tmp;
21259
21260 gcc_assert (MEM_P (x));
21261
21262 tmp = XEXP (x, 0);
21263
21264 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21265 && GET_CODE (tmp) == AND
21266 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21267 && INTVAL (XEXP (tmp, 1)) == -16)
21268 tmp = XEXP (tmp, 0);
21269 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21270 && GET_CODE (tmp) == PRE_MODIFY)
21271 tmp = XEXP (tmp, 1);
21272 if (REG_P (tmp))
21273 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21274 else
21275 {
21276 if (GET_CODE (tmp) != PLUS
21277 || !REG_P (XEXP (tmp, 0))
21278 || !REG_P (XEXP (tmp, 1)))
21279 {
21280 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21281 break;
21282 }
21283
21284 if (REGNO (XEXP (tmp, 0)) == 0)
21285 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21286 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21287 else
21288 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21289 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21290 }
21291 break;
21292 }
21293
21294 case 0:
21295 if (REG_P (x))
21296 fprintf (file, "%s", reg_names[REGNO (x)]);
21297 else if (MEM_P (x))
21298 {
21299 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21300 know the width from the mode. */
21301 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21302 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21303 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21304 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21305 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21306 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21307 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21308 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21309 else
21310 output_address (GET_MODE (x), XEXP (x, 0));
21311 }
21312 else if (toc_relative_expr_p (x, false,
21313 &tocrel_base_oac, &tocrel_offset_oac))
21314 /* This hack along with a corresponding hack in
21315 rs6000_output_addr_const_extra arranges to output addends
21316 where the assembler expects to find them. E.g.
21317 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21318 without this hack would be output as "x@toc+4". We
21319 want "x+4@toc". */
21320 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21321 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21322 output_addr_const (file, XVECEXP (x, 0, 0));
21323 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21324 output_addr_const (file, XVECEXP (x, 0, 1));
21325 else
21326 output_addr_const (file, x);
21327 return;
21328
21329 case '&':
21330 if (const char *name = get_some_local_dynamic_name ())
21331 assemble_name (file, name);
21332 else
21333 output_operand_lossage ("'%%&' used without any "
21334 "local dynamic TLS references");
21335 return;
21336
21337 default:
21338 output_operand_lossage ("invalid %%xn code");
21339 }
21340 }
21341 \f
21342 /* Print the address of an operand. */
21343
21344 void
21345 print_operand_address (FILE *file, rtx x)
21346 {
21347 if (REG_P (x))
21348 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21349 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21350 || GET_CODE (x) == LABEL_REF)
21351 {
21352 output_addr_const (file, x);
21353 if (small_data_operand (x, GET_MODE (x)))
21354 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21355 reg_names[SMALL_DATA_REG]);
21356 else
21357 gcc_assert (!TARGET_TOC);
21358 }
21359 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21360 && REG_P (XEXP (x, 1)))
21361 {
21362 if (REGNO (XEXP (x, 0)) == 0)
21363 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21364 reg_names[ REGNO (XEXP (x, 0)) ]);
21365 else
21366 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21367 reg_names[ REGNO (XEXP (x, 1)) ]);
21368 }
21369 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21370 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21371 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21372 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21373 #if TARGET_MACHO
21374 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21375 && CONSTANT_P (XEXP (x, 1)))
21376 {
21377 fprintf (file, "lo16(");
21378 output_addr_const (file, XEXP (x, 1));
21379 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21380 }
21381 #endif
21382 #if TARGET_ELF
21383 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21384 && CONSTANT_P (XEXP (x, 1)))
21385 {
21386 output_addr_const (file, XEXP (x, 1));
21387 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21388 }
21389 #endif
21390 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21391 {
21392 /* This hack along with a corresponding hack in
21393 rs6000_output_addr_const_extra arranges to output addends
21394 where the assembler expects to find them. E.g.
21395 (lo_sum (reg 9)
21396 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21397 without this hack would be output as "x@toc+8@l(9)". We
21398 want "x+8@toc@l(9)". */
21399 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21400 if (GET_CODE (x) == LO_SUM)
21401 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21402 else
21403 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21404 }
21405 else
21406 output_addr_const (file, x);
21407 }
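/* For illustration, with the default (numeric) register names:
   (reg 9) prints as "0(9)", (plus (reg 9) (const_int 16)) prints as
   "16(9)", and an indexed (plus (reg 9) (reg 10)) prints as "9,10",
   with the operands swapped when the first register is r0, since r0
   in the base slot means "no base register".  */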
21408 \f
21409 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21410
21411 static bool
21412 rs6000_output_addr_const_extra (FILE *file, rtx x)
21413 {
21414 if (GET_CODE (x) == UNSPEC)
21415 switch (XINT (x, 1))
21416 {
21417 case UNSPEC_TOCREL:
21418 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21419 && REG_P (XVECEXP (x, 0, 1))
21420 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21421 output_addr_const (file, XVECEXP (x, 0, 0));
21422 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21423 {
21424 if (INTVAL (tocrel_offset_oac) >= 0)
21425 fprintf (file, "+");
21426 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21427 }
21428 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21429 {
21430 putc ('-', file);
21431 assemble_name (file, toc_label_name);
21432 need_toc_init = 1;
21433 }
21434 else if (TARGET_ELF)
21435 fputs ("@toc", file);
21436 return true;
21437
21438 #if TARGET_MACHO
21439 case UNSPEC_MACHOPIC_OFFSET:
21440 output_addr_const (file, XVECEXP (x, 0, 0));
21441 putc ('-', file);
21442 machopic_output_function_base_name (file);
21443 return true;
21444 #endif
21445 }
21446 return false;
21447 }
21448 \f
21449 /* Target hook for assembling integer objects. The PowerPC version has
21450 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21451 is defined. It also needs to handle DI-mode objects on 64-bit
21452 targets. */
21453
21454 static bool
21455 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21456 {
21457 #ifdef RELOCATABLE_NEEDS_FIXUP
21458 /* Special handling for SI values. */
21459 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21460 {
21461 static int recurse = 0;
21462
21463 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21464 the .fixup section. Since the TOC section is already relocated, we
21465 don't need to mark it here. We used to skip the text section, but it
21466 should never be valid for relocated addresses to be placed in the text
21467 section. */
21468 if (DEFAULT_ABI == ABI_V4
21469 && (TARGET_RELOCATABLE || flag_pic > 1)
21470 && in_section != toc_section
21471 && !recurse
21472 && !CONST_SCALAR_INT_P (x)
21473 && CONSTANT_P (x))
21474 {
21475 char buf[256];
21476
21477 recurse = 1;
21478 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21479 fixuplabelno++;
21480 ASM_OUTPUT_LABEL (asm_out_file, buf);
21481 fprintf (asm_out_file, "\t.long\t(");
21482 output_addr_const (asm_out_file, x);
21483 fprintf (asm_out_file, ")@fixup\n");
21484 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21485 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21486 fprintf (asm_out_file, "\t.long\t");
21487 assemble_name (asm_out_file, buf);
21488 fprintf (asm_out_file, "\n\t.previous\n");
21489 recurse = 0;
21490 return true;
21491 }
21492 /* Remove initial .'s to turn a -mcall-aixdesc function
21493 address into the address of the descriptor, not the function
21494 itself. */
21495 else if (GET_CODE (x) == SYMBOL_REF
21496 && XSTR (x, 0)[0] == '.'
21497 && DEFAULT_ABI == ABI_AIX)
21498 {
21499 const char *name = XSTR (x, 0);
21500 while (*name == '.')
21501 name++;
21502
21503 fprintf (asm_out_file, "\t.long\t%s\n", name);
21504 return true;
21505 }
21506 }
21507 #endif /* RELOCATABLE_NEEDS_FIXUP */
21508 return default_assemble_integer (x, size, aligned_p);
21509 }
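/* With -mrelocatable, the fixup path above emits roughly the
   following for an address constant "x":
	.LCP0:
		.long	(x)@fixup
		.section	".fixup","aw"
		.align	2
		.long	.LCP0
		.previous
   recording the word's own address so startup code can relocate
   it.  */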
21510
21511 /* Return a template string for assembly to emit when making an
21512 external call. FUNOP is the call mem argument operand number. */
21513
21514 static const char *
21515 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21516 {
21517 /* -Wformat-overflow workaround, without which gcc thinks that %u
21518 might produce 10 digits. */
21519 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21520
21521 char arg[12];
21522 arg[0] = 0;
21523 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21524 {
21525 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21526 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21527 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21528 sprintf (arg, "(%%&@tlsld)");
21529 else
21530 gcc_unreachable ();
21531 }
21532
21533 /* The magic 32768 offset here corresponds to the offset of
21534 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21535 char z[11];
21536 sprintf (z, "%%z%u%s", funop,
21537 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21538 ? "+32768" : ""));
21539
21540 static char str[32]; /* 2 spare */
21541 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21542 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21543 sibcall ? "" : "\n\tnop");
21544 else if (DEFAULT_ABI == ABI_V4)
21545 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21546 flag_pic ? "@plt" : "");
21547 #if TARGET_MACHO
21548 /* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case. */
21549 else if (DEFAULT_ABI == ABI_DARWIN)
21550 {
21551 /* The cookie is in operand funop + 2. */
21552 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21553 int cookie = INTVAL (operands[funop + 2]);
21554 if (cookie & CALL_LONG)
21555 {
21556 tree funname = get_identifier (XSTR (operands[funop], 0));
21557 tree labelname = get_prev_label (funname);
21558 gcc_checking_assert (labelname && !sibcall);
21559
21560 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21561 instruction will reach 'foo', otherwise link as 'bl L42'".
21562 "L42" should be a 'branch island', that will do a far jump to
21563 'foo'. Branch islands are generated in
21564 macho_branch_islands(). */
21565 sprintf (str, "jbsr %%z%u,%.10s", funop,
21566 IDENTIFIER_POINTER (labelname));
21567 }
21568 else
21569 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21570 after the call. */
21571 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21572 }
21573 #endif
21574 else
21575 gcc_unreachable ();
21576 return str;
21577 }
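/* Samples of the templates built above, assuming FUNOP is 0 and no
   TLS marker is present: AIX and ELFv2 produce "bl %z0\n\tnop" (the
   nop fills the TOC restore slot), SysV with -fPIC produces
   "bl %z0@plt", and a sibcall drops both the "l" and the nop.  */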
21578
21579 const char *
21580 rs6000_call_template (rtx *operands, unsigned int funop)
21581 {
21582 return rs6000_call_template_1 (operands, funop, false);
21583 }
21584
21585 const char *
21586 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21587 {
21588 return rs6000_call_template_1 (operands, funop, true);
21589 }
21590
21591 /* As above, for indirect calls. */
21592
21593 static const char *
21594 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21595 bool sibcall)
21596 {
21597 /* -Wformat-overflow workaround, without which gcc thinks that %u
21598 might produce 10 digits. */
21599 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21600
21601 static char str[144]; /* 1 spare */
21602 char *s = str;
21603 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21604
21605 if (DEFAULT_ABI == ABI_AIX)
21606 s += sprintf (s,
21607 "l%s 2,%%%u\n\t",
21608 ptrload, funop + 2);
21609
21610 /* We don't need the extra code to stop indirect call speculation if
21611 calling via LR. */
21612 bool speculate = (TARGET_MACHO
21613 || rs6000_speculate_indirect_jumps
21614 || (REG_P (operands[funop])
21615 && REGNO (operands[funop]) == LR_REGNO));
21616
21617 if (!TARGET_MACHO && HAVE_AS_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21618 {
21619 const char *rel64 = TARGET_64BIT ? "64" : "";
21620 char tls[29];
21621 tls[0] = 0;
21622 if (GET_CODE (operands[funop + 1]) == UNSPEC)
21623 {
21624 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21625 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21626 rel64, funop + 1);
21627 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21628 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21629 rel64);
21630 else
21631 gcc_unreachable ();
21632 }
21633
21634 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21635 && flag_pic == 2 ? "+32768" : "");
21636 if (!speculate)
21637 {
21638 s += sprintf (s,
21639 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21640 tls, rel64, funop, addend);
21641 s += sprintf (s, "crset 2\n\t");
21642 }
21643 s += sprintf (s,
21644 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21645 tls, rel64, funop, addend);
21646 }
21647 else if (!speculate)
21648 s += sprintf (s, "crset 2\n\t");
21649
21650 if (DEFAULT_ABI == ABI_AIX)
21651 {
21652 if (speculate)
21653 sprintf (s,
21654 "b%%T%ul\n\t"
21655 "l%s 2,%%%u(1)",
21656 funop, ptrload, funop + 3);
21657 else
21658 sprintf (s,
21659 "beq%%T%ul-\n\t"
21660 "l%s 2,%%%u(1)",
21661 funop, ptrload, funop + 3);
21662 }
21663 else if (DEFAULT_ABI == ABI_ELFv2)
21664 {
21665 if (speculate)
21666 sprintf (s,
21667 "b%%T%ul\n\t"
21668 "l%s 2,%%%u(1)",
21669 funop, ptrload, funop + 2);
21670 else
21671 sprintf (s,
21672 "beq%%T%ul-\n\t"
21673 "l%s 2,%%%u(1)",
21674 funop, ptrload, funop + 2);
21675 }
21676 else
21677 {
21678 if (speculate)
21679 sprintf (s,
21680 "b%%T%u%s",
21681 funop, sibcall ? "" : "l");
21682 else
21683 sprintf (s,
21684 "beq%%T%u%s-%s",
21685 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21686 }
21687 return str;
21688 }
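/* For example, an ELFv2 indirect call through CTR with speculation
   barriers enabled (the !speculate path) comes out roughly as
	crset 2
	beqctrl-
	ld 2,24(1)
   i.e. a conditional branch-and-link gated on the crset, followed by
   the TOC restore from the stack save slot.  */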
21689
21690 const char *
21691 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21692 {
21693 return rs6000_indirect_call_template_1 (operands, funop, false);
21694 }
21695
21696 const char *
21697 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21698 {
21699 return rs6000_indirect_call_template_1 (operands, funop, true);
21700 }
21701
21702 #if HAVE_AS_PLTSEQ
21703 /* Output indirect call insns.
21704 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
21705 const char *
21706 rs6000_pltseq_template (rtx *operands, int which)
21707 {
21708 const char *rel64 = TARGET_64BIT ? "64" : "";
21709 char tls[28];
21710 tls[0] = 0;
21711 if (GET_CODE (operands[3]) == UNSPEC)
21712 {
21713 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21714 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21715 rel64);
21716 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21717 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21718 rel64);
21719 else
21720 gcc_unreachable ();
21721 }
21722
21723 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21724 static char str[96]; /* 15 spare */
21725 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
21726 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21727 && flag_pic == 2 ? "+32768" : "");
21728 switch (which)
21729 {
21730 case 0:
21731 sprintf (str,
21732 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21733 "st%s",
21734 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21735 break;
21736 case 1:
21737 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21738 sprintf (str,
21739 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21740 "lis %%0,0",
21741 tls, off, rel64);
21742 else
21743 sprintf (str,
21744 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21745 "addis %%0,%%1,0",
21746 tls, off, rel64, addend);
21747 break;
21748 case 2:
21749 sprintf (str,
21750 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21751 "l%s %%0,0(%%1)",
21752 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21753 TARGET_64BIT ? "d" : "wz");
21754 break;
21755 case 3:
21756 sprintf (str,
21757 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21758 "mtctr %%1",
21759 tls, rel64, addend);
21760 break;
21761 default:
21762 gcc_unreachable ();
21763 }
21764 return str;
21765 }
21766 #endif
21767
21768 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21769 /* Emit an assembler directive to set symbol visibility for DECL to
21770 VISIBILITY_TYPE. */
21771
21772 static void
21773 rs6000_assemble_visibility (tree decl, int vis)
21774 {
21775 if (TARGET_XCOFF)
21776 return;
21777
21778 /* Functions need to have their entry point symbol visibility set as
21779 well as their descriptor symbol visibility. */
21780 if (DEFAULT_ABI == ABI_AIX
21781 && DOT_SYMBOLS
21782 && TREE_CODE (decl) == FUNCTION_DECL)
21783 {
21784 static const char * const visibility_types[] = {
21785 NULL, "protected", "hidden", "internal"
21786 };
21787
21788 const char *name, *type;
21789
21790 name = ((* targetm.strip_name_encoding)
21791 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21792 type = visibility_types[vis];
21793
21794 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21795 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21796 }
21797 else
21798 default_assemble_visibility (decl, vis);
21799 }
21800 #endif
21801 \f
21802 enum rtx_code
21803 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21804 {
21805 /* Reversal of FP compares takes special care -- an ordered compare
21806 becomes an unordered compare and vice versa. */
21807 if (mode == CCFPmode
21808 && (!flag_finite_math_only
21809 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21810 || code == UNEQ || code == LTGT))
21811 return reverse_condition_maybe_unordered (code);
21812 else
21813 return reverse_condition (code);
21814 }
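/* E.g. reversing LT in CCFPmode without -ffinite-math-only yields
   UNGE, so NaN operands still take the correct arm of the branch;
   with finite math (and no explicitly unordered code) a plain GE is
   produced instead.  */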
21815
21816 /* Generate a compare for CODE. Return a brand-new rtx that
21817 represents the result of the compare. */
21818
21819 static rtx
21820 rs6000_generate_compare (rtx cmp, machine_mode mode)
21821 {
21822 machine_mode comp_mode;
21823 rtx compare_result;
21824 enum rtx_code code = GET_CODE (cmp);
21825 rtx op0 = XEXP (cmp, 0);
21826 rtx op1 = XEXP (cmp, 1);
21827
21828 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21829 comp_mode = CCmode;
21830 else if (FLOAT_MODE_P (mode))
21831 comp_mode = CCFPmode;
21832 else if (code == GTU || code == LTU
21833 || code == GEU || code == LEU)
21834 comp_mode = CCUNSmode;
21835 else if ((code == EQ || code == NE)
21836 && unsigned_reg_p (op0)
21837 && (unsigned_reg_p (op1)
21838 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21839 /* These are unsigned values; perhaps there will be a later
21840 ordering compare that can be shared with this one. */
21841 comp_mode = CCUNSmode;
21842 else
21843 comp_mode = CCmode;
21844
21845 /* If we have an unsigned compare, make sure we don't have a signed value as
21846 an immediate. */
21847 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21848 && INTVAL (op1) < 0)
21849 {
21850 op0 = copy_rtx_if_shared (op0);
21851 op1 = force_reg (GET_MODE (op0), op1);
21852 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21853 }
21854
21855 /* First, the compare. */
21856 compare_result = gen_reg_rtx (comp_mode);
21857
21858 /* IEEE 128-bit support in VSX registers when we do not have hardware
21859 support. */
21860 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21861 {
21862 rtx libfunc = NULL_RTX;
21863 bool check_nan = false;
21864 rtx dest;
21865
21866 switch (code)
21867 {
21868 case EQ:
21869 case NE:
21870 libfunc = optab_libfunc (eq_optab, mode);
21871 break;
21872
21873 case GT:
21874 case GE:
21875 libfunc = optab_libfunc (ge_optab, mode);
21876 break;
21877
21878 case LT:
21879 case LE:
21880 libfunc = optab_libfunc (le_optab, mode);
21881 break;
21882
21883 case UNORDERED:
21884 case ORDERED:
21885 libfunc = optab_libfunc (unord_optab, mode);
21886 code = (code == UNORDERED) ? NE : EQ;
21887 break;
21888
21889 case UNGE:
21890 case UNGT:
21891 check_nan = true;
21892 libfunc = optab_libfunc (ge_optab, mode);
21893 code = (code == UNGE) ? GE : GT;
21894 break;
21895
21896 case UNLE:
21897 case UNLT:
21898 check_nan = true;
21899 libfunc = optab_libfunc (le_optab, mode);
21900 code = (code == UNLE) ? LE : LT;
21901 break;
21902
21903 case UNEQ:
21904 case LTGT:
21905 check_nan = true;
21906 libfunc = optab_libfunc (eq_optab, mode);
21907 code = (code == UNEQ) ? EQ : NE;
21908 break;
21909
21910 default:
21911 gcc_unreachable ();
21912 }
21913
21914 gcc_assert (libfunc);
21915
21916 if (!check_nan)
21917 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21918 SImode, op0, mode, op1, mode);
21919
21920 /* The library signals an exception for signalling NaNs, so we need to
21921 handle isgreater, etc. by first checking isordered. */
21922 else
21923 {
21924 rtx ne_rtx, normal_dest, unord_dest;
21925 rtx unord_func = optab_libfunc (unord_optab, mode);
21926 rtx join_label = gen_label_rtx ();
21927 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21928 rtx unord_cmp = gen_reg_rtx (comp_mode);
21929
21930
21931 /* Test for either value being a NaN. */
21932 gcc_assert (unord_func);
21933 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21934 SImode, op0, mode, op1, mode);
21935
21936 /* Set value (1) if either value is a NaN, and jump to the join
21937 label. */
21938 dest = gen_reg_rtx (SImode);
21939 emit_move_insn (dest, const1_rtx);
21940 emit_insn (gen_rtx_SET (unord_cmp,
21941 gen_rtx_COMPARE (comp_mode, unord_dest,
21942 const0_rtx)));
21943
21944 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21945 emit_jump_insn (gen_rtx_SET (pc_rtx,
21946 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21947 join_ref,
21948 pc_rtx)));
21949
21950 /* Do the normal comparison, knowing that the values are not
21951 NaNs. */
21952 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21953 SImode, op0, mode, op1, mode);
21954
21955 emit_insn (gen_cstoresi4 (dest,
21956 gen_rtx_fmt_ee (code, SImode, normal_dest,
21957 const0_rtx),
21958 normal_dest, const0_rtx));
21959
21960 /* Join NaN and non-NaN paths. Compare dest against 0. */
21961 emit_label (join_label);
21962 code = NE;
21963 }
21964
21965 emit_insn (gen_rtx_SET (compare_result,
21966 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21967 }
21968
21969 else
21970 {
21971 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21972 CLOBBERs to match cmptf_internal2 pattern. */
21973 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21974 && FLOAT128_IBM_P (GET_MODE (op0))
21975 && TARGET_HARD_FLOAT)
21976 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21977 gen_rtvec (10,
21978 gen_rtx_SET (compare_result,
21979 gen_rtx_COMPARE (comp_mode, op0, op1)),
21980 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21981 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21982 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21983 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21984 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21985 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21986 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21987 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21988 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21989 else if (GET_CODE (op1) == UNSPEC
21990 && XINT (op1, 1) == UNSPEC_SP_TEST)
21991 {
21992 rtx op1b = XVECEXP (op1, 0, 0);
21993 comp_mode = CCEQmode;
21994 compare_result = gen_reg_rtx (CCEQmode);
21995 if (TARGET_64BIT)
21996 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21997 else
21998 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21999 }
22000 else
22001 emit_insn (gen_rtx_SET (compare_result,
22002 gen_rtx_COMPARE (comp_mode, op0, op1)));
22003 }
22004
22005 /* Some kinds of FP comparisons need an OR operation;
22006 under flag_finite_math_only we don't bother. */
22007 if (FLOAT_MODE_P (mode)
22008 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22009 && !flag_finite_math_only
22010 && (code == LE || code == GE
22011 || code == UNEQ || code == LTGT
22012 || code == UNGT || code == UNLT))
22013 {
22014 enum rtx_code or1, or2;
22015 rtx or1_rtx, or2_rtx, compare2_rtx;
22016 rtx or_result = gen_reg_rtx (CCEQmode);
22017
22018 switch (code)
22019 {
22020 case LE: or1 = LT; or2 = EQ; break;
22021 case GE: or1 = GT; or2 = EQ; break;
22022 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22023 case LTGT: or1 = LT; or2 = GT; break;
22024 case UNGT: or1 = UNORDERED; or2 = GT; break;
22025 case UNLT: or1 = UNORDERED; or2 = LT; break;
22026 default: gcc_unreachable ();
22027 }
22028 validate_condition_mode (or1, comp_mode);
22029 validate_condition_mode (or2, comp_mode);
22030 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22031 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22032 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22033 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22034 const_true_rtx);
22035 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22036
22037 compare_result = or_result;
22038 code = EQ;
22039 }
22040
22041 validate_condition_mode (code, GET_MODE (compare_result));
22042
22043 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
22044 }
22045
22046 \f
22047 /* Return the diagnostic message string if the binary operation OP is
22048 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22049
22050 static const char*
22051 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22052 const_tree type1,
22053 const_tree type2)
22054 {
22055 machine_mode mode1 = TYPE_MODE (type1);
22056 machine_mode mode2 = TYPE_MODE (type2);
22057
22058 /* For complex modes, use the inner type. */
22059 if (COMPLEX_MODE_P (mode1))
22060 mode1 = GET_MODE_INNER (mode1);
22061
22062 if (COMPLEX_MODE_P (mode2))
22063 mode2 = GET_MODE_INNER (mode2);
22064
22065 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22066 double to intermix unless -mfloat128-convert. */
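/* For example, with the default options this rejects

	__float128 f (__float128 a, __ibm128 b) { return a + b; }

   while -mfloat128-convert allows the implicit conversion. */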
22067 if (mode1 == mode2)
22068 return NULL;
22069
22070 if (!TARGET_FLOAT128_CVT)
22071 {
22072 if ((mode1 == KFmode && mode2 == IFmode)
22073 || (mode1 == IFmode && mode2 == KFmode))
22074 return N_("__float128 and __ibm128 cannot be used in the same "
22075 "expression");
22076
22077 if (TARGET_IEEEQUAD
22078 && ((mode1 == IFmode && mode2 == TFmode)
22079 || (mode1 == TFmode && mode2 == IFmode)))
22080 return N_("__ibm128 and long double cannot be used in the same "
22081 "expression");
22082
22083 if (!TARGET_IEEEQUAD
22084 && ((mode1 == KFmode && mode2 == TFmode)
22085 || (mode1 == TFmode && mode2 == KFmode)))
22086 return N_("__float128 and long double cannot be used in the same "
22087 "expression");
22088 }
22089
22090 return NULL;
22091 }
22092
22093 \f
22094 /* Expand floating point conversion to/from __float128 and __ibm128. */
22095
22096 void
22097 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22098 {
22099 machine_mode dest_mode = GET_MODE (dest);
22100 machine_mode src_mode = GET_MODE (src);
22101 convert_optab cvt = unknown_optab;
22102 bool do_move = false;
22103 rtx libfunc = NULL_RTX;
22104 rtx dest2;
22105 typedef rtx (*rtx_2func_t) (rtx, rtx);
22106 rtx_2func_t hw_convert = (rtx_2func_t)0;
22107 size_t kf_or_tf;
22108
22109 struct hw_conv_t {
22110 rtx_2func_t from_df;
22111 rtx_2func_t from_sf;
22112 rtx_2func_t from_si_sign;
22113 rtx_2func_t from_si_uns;
22114 rtx_2func_t from_di_sign;
22115 rtx_2func_t from_di_uns;
22116 rtx_2func_t to_df;
22117 rtx_2func_t to_sf;
22118 rtx_2func_t to_si_sign;
22119 rtx_2func_t to_si_uns;
22120 rtx_2func_t to_di_sign;
22121 rtx_2func_t to_di_uns;
22122 } hw_conversions[2] = {
22123 /* conversions to/from KFmode */
22124 {
22125 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22126 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22127 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22128 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22129 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22130 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22131 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22132 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22133 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22134 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22135 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22136 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22137 },
22138
22139 /* conversions to/from TFmode */
22140 {
22141 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22142 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22143 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22144 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22145 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22146 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22147 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22148 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22149 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22150 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22151 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22152 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22153 },
22154 };
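/* KF_OR_TF selects the row (0 = KFmode, 1 = TFmode) and the field selects
   the direction; e.g. hw_conversions[0].from_di_sign is gen_float_kfdi2_hw,
   used when TARGET_FLOAT128_HW. Without hardware support, the libcall
   path below is used instead (e.g. a __floatdikf-style routine for
   DImode -> KFmode; the routine name here is illustrative). */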
22155
22156 if (dest_mode == src_mode)
22157 gcc_unreachable ();
22158
22159 /* Eliminate memory operations. */
22160 if (MEM_P (src))
22161 src = force_reg (src_mode, src);
22162
22163 if (MEM_P (dest))
22164 {
22165 rtx tmp = gen_reg_rtx (dest_mode);
22166 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22167 rs6000_emit_move (dest, tmp, dest_mode);
22168 return;
22169 }
22170
22171 /* Convert to IEEE 128-bit floating point. */
22172 if (FLOAT128_IEEE_P (dest_mode))
22173 {
22174 if (dest_mode == KFmode)
22175 kf_or_tf = 0;
22176 else if (dest_mode == TFmode)
22177 kf_or_tf = 1;
22178 else
22179 gcc_unreachable ();
22180
22181 switch (src_mode)
22182 {
22183 case E_DFmode:
22184 cvt = sext_optab;
22185 hw_convert = hw_conversions[kf_or_tf].from_df;
22186 break;
22187
22188 case E_SFmode:
22189 cvt = sext_optab;
22190 hw_convert = hw_conversions[kf_or_tf].from_sf;
22191 break;
22192
22193 case E_KFmode:
22194 case E_IFmode:
22195 case E_TFmode:
22196 if (FLOAT128_IBM_P (src_mode))
22197 cvt = sext_optab;
22198 else
22199 do_move = true;
22200 break;
22201
22202 case E_SImode:
22203 if (unsigned_p)
22204 {
22205 cvt = ufloat_optab;
22206 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22207 }
22208 else
22209 {
22210 cvt = sfloat_optab;
22211 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22212 }
22213 break;
22214
22215 case E_DImode:
22216 if (unsigned_p)
22217 {
22218 cvt = ufloat_optab;
22219 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22220 }
22221 else
22222 {
22223 cvt = sfloat_optab;
22224 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22225 }
22226 break;
22227
22228 default:
22229 gcc_unreachable ();
22230 }
22231 }
22232
22233 /* Convert from IEEE 128-bit floating point. */
22234 else if (FLOAT128_IEEE_P (src_mode))
22235 {
22236 if (src_mode == KFmode)
22237 kf_or_tf = 0;
22238 else if (src_mode == TFmode)
22239 kf_or_tf = 1;
22240 else
22241 gcc_unreachable ();
22242
22243 switch (dest_mode)
22244 {
22245 case E_DFmode:
22246 cvt = trunc_optab;
22247 hw_convert = hw_conversions[kf_or_tf].to_df;
22248 break;
22249
22250 case E_SFmode:
22251 cvt = trunc_optab;
22252 hw_convert = hw_conversions[kf_or_tf].to_sf;
22253 break;
22254
22255 case E_KFmode:
22256 case E_IFmode:
22257 case E_TFmode:
22258 if (FLOAT128_IBM_P (dest_mode))
22259 cvt = trunc_optab;
22260 else
22261 do_move = true;
22262 break;
22263
22264 case E_SImode:
22265 if (unsigned_p)
22266 {
22267 cvt = ufix_optab;
22268 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22269 }
22270 else
22271 {
22272 cvt = sfix_optab;
22273 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22274 }
22275 break;
22276
22277 case E_DImode:
22278 if (unsigned_p)
22279 {
22280 cvt = ufix_optab;
22281 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22282 }
22283 else
22284 {
22285 cvt = sfix_optab;
22286 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22287 }
22288 break;
22289
22290 default:
22291 gcc_unreachable ();
22292 }
22293 }
22294
22295 /* Both IBM format. */
22296 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22297 do_move = true;
22298
22299 else
22300 gcc_unreachable ();
22301
22302 /* Handle conversion between TFmode/KFmode/IFmode. */
22303 if (do_move)
22304 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22305
22306 /* Handle conversion if we have hardware support. */
22307 else if (TARGET_FLOAT128_HW && hw_convert)
22308 emit_insn ((hw_convert) (dest, src));
22309
22310 /* Call an external function to do the conversion. */
22311 else if (cvt != unknown_optab)
22312 {
22313 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22314 gcc_assert (libfunc != NULL_RTX);
22315
22316 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22317 src, src_mode);
22318
22319 gcc_assert (dest2 != NULL_RTX);
22320 if (!rtx_equal_p (dest, dest2))
22321 emit_move_insn (dest, dest2);
22322 }
22323
22324 else
22325 gcc_unreachable ();
22326
22327 return;
22328 }
22329
22330 \f
22331 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22332 can be used as that dest register. Return the dest register. */
22333
22334 rtx
22335 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22336 {
22337 if (op2 == const0_rtx)
22338 return op1;
22339
22340 if (GET_CODE (scratch) == SCRATCH)
22341 scratch = gen_reg_rtx (mode);
22342
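/* E.g. if OP2 is 5 (a logical operand) this emits "xori SCRATCH,OP1,5";
   if OP2 is 0x12345 it emits SCRATCH = OP1 + (-0x12345). Either way
   SCRATCH is zero exactly when OP1 == OP2. */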
22343 if (logical_operand (op2, mode))
22344 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22345 else
22346 emit_insn (gen_rtx_SET (scratch,
22347 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22348
22349 return scratch;
22350 }
22351
22352 void
22353 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22354 {
22355 rtx condition_rtx;
22356 machine_mode op_mode;
22357 enum rtx_code cond_code;
22358 rtx result = operands[0];
22359
22360 condition_rtx = rs6000_generate_compare (operands[1], mode);
22361 cond_code = GET_CODE (condition_rtx);
22362
22363 if (cond_code == NE
22364 || cond_code == GE || cond_code == LE
22365 || cond_code == GEU || cond_code == LEU
22366 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22367 {
22368 rtx not_result = gen_reg_rtx (CCEQmode);
22369 rtx not_op, rev_cond_rtx;
22370 machine_mode cc_mode;
22371
22372 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22373
22374 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22375 SImode, XEXP (condition_rtx, 0), const0_rtx);
22376 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22377 emit_insn (gen_rtx_SET (not_result, not_op));
22378 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22379 }
22380
22381 op_mode = GET_MODE (XEXP (operands[1], 0));
22382 if (op_mode == VOIDmode)
22383 op_mode = GET_MODE (XEXP (operands[1], 1));
22384
22385 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22386 {
22387 PUT_MODE (condition_rtx, DImode);
22388 convert_move (result, condition_rtx, 0);
22389 }
22390 else
22391 {
22392 PUT_MODE (condition_rtx, SImode);
22393 emit_insn (gen_rtx_SET (result, condition_rtx));
22394 }
22395 }
22396
22397 /* Emit a conditional branch to the label in OPERANDS[3], testing the comparison OPERANDS[0] in mode MODE. */
22398
22399 void
22400 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22401 {
22402 rtx condition_rtx, loc_ref;
22403
22404 condition_rtx = rs6000_generate_compare (operands[0], mode);
22405 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22406 emit_jump_insn (gen_rtx_SET (pc_rtx,
22407 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22408 loc_ref, pc_rtx)));
22409 }
22410
22411 /* Return the string to output a conditional branch to LABEL, which is
22412 the operand template of the label, or NULL if the branch is really a
22413 conditional return.
22414
22415 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22416 condition code register and its mode specifies what kind of
22417 comparison we made.
22418
22419 REVERSED is nonzero if we should reverse the sense of the comparison.
22420
22421 INSN is the insn. */
22422
22423 char *
22424 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22425 {
22426 static char string[64];
22427 enum rtx_code code = GET_CODE (op);
22428 rtx cc_reg = XEXP (op, 0);
22429 machine_mode mode = GET_MODE (cc_reg);
22430 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22431 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22432 int really_reversed = reversed ^ need_longbranch;
22433 char *s = string;
22434 const char *ccode;
22435 const char *pred;
22436 rtx note;
22437
22438 validate_condition_mode (code, mode);
22439
22440 /* Work out which way this really branches. We could use
22441 reverse_condition_maybe_unordered here always but this
22442 makes the resulting assembler clearer. */
22443 if (really_reversed)
22444 {
22445 /* Reversal of FP compares takes care -- an ordered compare
22446 becomes an unordered compare and vice versa. */
22447 if (mode == CCFPmode)
22448 code = reverse_condition_maybe_unordered (code);
22449 else
22450 code = reverse_condition (code);
22451 }
22452
22453 switch (code)
22454 {
22455 /* Not all of these are actually distinct opcodes, but
22456 we distinguish them for clarity of the resulting assembler. */
22457 case NE: case LTGT:
22458 ccode = "ne"; break;
22459 case EQ: case UNEQ:
22460 ccode = "eq"; break;
22461 case GE: case GEU:
22462 ccode = "ge"; break;
22463 case GT: case GTU: case UNGT:
22464 ccode = "gt"; break;
22465 case LE: case LEU:
22466 ccode = "le"; break;
22467 case LT: case LTU: case UNLT:
22468 ccode = "lt"; break;
22469 case UNORDERED: ccode = "un"; break;
22470 case ORDERED: ccode = "nu"; break;
22471 case UNGE: ccode = "nl"; break;
22472 case UNLE: ccode = "ng"; break;
22473 default:
22474 gcc_unreachable ();
22475 }
22476
22477 /* Maybe we have a guess as to how likely the branch is. */
22478 pred = "";
22479 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22480 if (note != NULL_RTX)
22481 {
22482 /* PROB is the difference from 50%. */
22483 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22484 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22485
22486 /* Only hint for highly probable/improbable branches on newer cpus when
22487 we have real profile data, as static prediction overrides processor
22488 dynamic prediction. For older cpus we may as well always hint, but
22489 assume not taken for branches that are very close to 50% as a
22490 mispredicted taken branch is more expensive than a
22491 mispredicted not-taken branch. */
22492 if (rs6000_always_hint
22493 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22494 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22495 && br_prob_note_reliable_p (note)))
22496 {
22497 if (abs (prob) > REG_BR_PROB_BASE / 20
22498 && ((prob > 0) ^ need_longbranch))
22499 pred = "+";
22500 else
22501 pred = "-";
22502 }
22503 }
22504
22505 if (label == NULL)
22506 s += sprintf (s, "b%slr%s ", ccode, pred);
22507 else
22508 s += sprintf (s, "b%s%s ", ccode, pred);
22509
22510 /* We need to escape any '%' characters in the reg_names string.
22511 Assume they'd only be the first character.... */
22512 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22513 *s++ = '%';
22514 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22515
22516 if (label != NULL)
22517 {
22518 /* If the branch distance was too far, we may have to use an
22519 unconditional branch to go the distance. */
22520 if (need_longbranch)
22521 s += sprintf (s, ",$+8\n\tb %s", label);
22522 else
22523 s += sprintf (s, ",%s", label);
22524 }
22525
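/* The string now looks like, e.g., "beq CR,LABEL" for a short branch,
   "bne- CR,$+8\n\tb LABEL" when the target is out of range, or "beqlr"
   for a conditional return (CR and LABEL are placeholders here). */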
22526 return string;
22527 }
22528
22529 /* Return insn for VSX or Altivec comparisons. */
22530
22531 static rtx
22532 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22533 {
22534 rtx mask;
22535 machine_mode mode = GET_MODE (op0);
22536
22537 switch (code)
22538 {
22539 default:
22540 break;
22541
22542 case GE:
22543 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22544 return NULL_RTX;
22545 /* FALLTHRU */
22546
22547 case EQ:
22548 case GT:
22549 case GTU:
22550 case ORDERED:
22551 case UNORDERED:
22552 case UNEQ:
22553 case LTGT:
22554 mask = gen_reg_rtx (mode);
22555 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22556 return mask;
22557 }
22558
22559 return NULL_RTX;
22560 }
22561
22562 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22563 DMODE is expected destination mode. This is a recursive function. */
22564
22565 static rtx
22566 rs6000_emit_vector_compare (enum rtx_code rcode,
22567 rtx op0, rtx op1,
22568 machine_mode dmode)
22569 {
22570 rtx mask;
22571 bool swap_operands = false;
22572 bool try_again = false;
22573
22574 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22575 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22576
22577 /* See if the comparison works as is. */
22578 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22579 if (mask)
22580 return mask;
22581
22582 switch (rcode)
22583 {
22584 case LT:
22585 rcode = GT;
22586 swap_operands = true;
22587 try_again = true;
22588 break;
22589 case LTU:
22590 rcode = GTU;
22591 swap_operands = true;
22592 try_again = true;
22593 break;
22594 case NE:
22595 case UNLE:
22596 case UNLT:
22597 case UNGE:
22598 case UNGT:
22599 /* Invert condition and try again.
22600 e.g., A != B becomes ~(A==B). */
22601 {
22602 enum rtx_code rev_code;
22603 enum insn_code nor_code;
22604 rtx mask2;
22605
22606 rev_code = reverse_condition_maybe_unordered (rcode);
22607 if (rev_code == UNKNOWN)
22608 return NULL_RTX;
22609
22610 nor_code = optab_handler (one_cmpl_optab, dmode);
22611 if (nor_code == CODE_FOR_nothing)
22612 return NULL_RTX;
22613
22614 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22615 if (!mask2)
22616 return NULL_RTX;
22617
22618 mask = gen_reg_rtx (dmode);
22619 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22620 return mask;
22621 }
22622 break;
22623 case GE:
22624 case GEU:
22625 case LE:
22626 case LEU:
22627 /* Try GT/GTU/LT/LTU OR EQ */
22628 {
22629 rtx c_rtx, eq_rtx;
22630 enum insn_code ior_code;
22631 enum rtx_code new_code;
22632
22633 switch (rcode)
22634 {
22635 case GE:
22636 new_code = GT;
22637 break;
22638
22639 case GEU:
22640 new_code = GTU;
22641 break;
22642
22643 case LE:
22644 new_code = LT;
22645 break;
22646
22647 case LEU:
22648 new_code = LTU;
22649 break;
22650
22651 default:
22652 gcc_unreachable ();
22653 }
22654
22655 ior_code = optab_handler (ior_optab, dmode);
22656 if (ior_code == CODE_FOR_nothing)
22657 return NULL_RTX;
22658
22659 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22660 if (!c_rtx)
22661 return NULL_RTX;
22662
22663 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22664 if (!eq_rtx)
22665 return NULL_RTX;
22666
22667 mask = gen_reg_rtx (dmode);
22668 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22669 return mask;
22670 }
22671 break;
22672 default:
22673 return NULL_RTX;
22674 }
22675
22676 if (try_again)
22677 {
22678 if (swap_operands)
22679 std::swap (op0, op1);
22680
22681 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22682 if (mask)
22683 return mask;
22684 }
22685
22686 /* You only get two chances. */
22687 return NULL_RTX;
22688 }
22689
22690 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22691 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22692 operands for the relation operation COND. */
22693
22694 int
22695 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22696 rtx cond, rtx cc_op0, rtx cc_op1)
22697 {
22698 machine_mode dest_mode = GET_MODE (dest);
22699 machine_mode mask_mode = GET_MODE (cc_op0);
22700 enum rtx_code rcode = GET_CODE (cond);
22701 machine_mode cc_mode = CCmode;
22702 rtx mask;
22703 rtx cond2;
22704 bool invert_move = false;
22705
22706 if (VECTOR_UNIT_NONE_P (dest_mode))
22707 return 0;
22708
22709 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22710 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22711
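/* As a sketch, a V4SI "dest = (cc_op0 > cc_op1) ? op_true : op_false"
   becomes a vcmpgtsw that yields an all-ones/all-zeros mask per element,
   followed by a vsel/xxsel picking op_true or op_false elements. */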
22712 switch (rcode)
22713 {
22714 /* Swap operands if we can; otherwise fall back to doing the operation
22715 as specified and using a NOR to invert the test. */
22716 case NE:
22717 case UNLE:
22718 case UNLT:
22719 case UNGE:
22720 case UNGT:
22721 /* Invert condition and try again.
22722 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22723 invert_move = true;
22724 rcode = reverse_condition_maybe_unordered (rcode);
22725 if (rcode == UNKNOWN)
22726 return 0;
22727 break;
22728
22729 case GE:
22730 case LE:
22731 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22732 {
22733 /* Invert condition to avoid compound test. */
22734 invert_move = true;
22735 rcode = reverse_condition (rcode);
22736 }
22737 break;
22738
22739 case GTU:
22740 case GEU:
22741 case LTU:
22742 case LEU:
22743 /* Mark unsigned tests with CCUNSmode. */
22744 cc_mode = CCUNSmode;
22745
22746 /* Invert condition to avoid compound test if necessary. */
22747 if (rcode == GEU || rcode == LEU)
22748 {
22749 invert_move = true;
22750 rcode = reverse_condition (rcode);
22751 }
22752 break;
22753
22754 default:
22755 break;
22756 }
22757
22758 /* Get the vector mask for the given relational operations. */
22759 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22760
22761 if (!mask)
22762 return 0;
22763
22764 if (invert_move)
22765 std::swap (op_true, op_false);
22766
22767 /* The comparison mask is -1/0 per element; optimize constant -1/0 arms. */
22768 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22769 && (GET_CODE (op_true) == CONST_VECTOR
22770 || GET_CODE (op_false) == CONST_VECTOR))
22771 {
22772 rtx constant_0 = CONST0_RTX (dest_mode);
22773 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22774
22775 if (op_true == constant_m1 && op_false == constant_0)
22776 {
22777 emit_move_insn (dest, mask);
22778 return 1;
22779 }
22780
22781 else if (op_true == constant_0 && op_false == constant_m1)
22782 {
22783 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22784 return 1;
22785 }
22786
22787 /* If we can't use the vector comparison directly, perhaps we can use
22788 the mask for the true or false fields, instead of loading up a
22789 constant. */
22790 if (op_true == constant_m1)
22791 op_true = mask;
22792
22793 if (op_false == constant_0)
22794 op_false = mask;
22795 }
22796
22797 if (!REG_P (op_true) && !SUBREG_P (op_true))
22798 op_true = force_reg (dest_mode, op_true);
22799
22800 if (!REG_P (op_false) && !SUBREG_P (op_false))
22801 op_false = force_reg (dest_mode, op_false);
22802
22803 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22804 CONST0_RTX (dest_mode));
22805 emit_insn (gen_rtx_SET (dest,
22806 gen_rtx_IF_THEN_ELSE (dest_mode,
22807 cond2,
22808 op_true,
22809 op_false)));
22810 return 1;
22811 }
22812
22813 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22814 for SF/DF scalars. Move TRUE_COND to DEST if the comparison OP on its
22815 operands is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22816 hardware has no such operation. */
22817
22818 static int
22819 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22820 {
22821 enum rtx_code code = GET_CODE (op);
22822 rtx op0 = XEXP (op, 0);
22823 rtx op1 = XEXP (op, 1);
22824 machine_mode compare_mode = GET_MODE (op0);
22825 machine_mode result_mode = GET_MODE (dest);
22826 bool max_p = false;
22827
22828 if (result_mode != compare_mode)
22829 return 0;
22830
22831 if (code == GE || code == GT)
22832 max_p = true;
22833 else if (code == LE || code == LT)
22834 max_p = false;
22835 else
22836 return 0;
22837
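/* E.g. "(a >= b) ? a : b" maps to SMAX (xsmaxcdp), while the swapped
   form "(a >= b) ? b : a" flips MAX_P and maps to SMIN (xsmincdp). */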
22838 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22839 ;
22840
22841 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22842 max_p = !max_p;
22843
22844 else
22845 return 0;
22846
22847 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22848 return 1;
22849 }
22850
22851 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22852 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if the
22853 comparison OP on its operands is nonzero/true, FALSE_COND if it is
22854 zero/false. Return 0 if the hardware has no such operation. */
22855
22856 static int
22857 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22858 {
22859 enum rtx_code code = GET_CODE (op);
22860 rtx op0 = XEXP (op, 0);
22861 rtx op1 = XEXP (op, 1);
22862 machine_mode result_mode = GET_MODE (dest);
22863 rtx compare_rtx;
22864 rtx cmove_rtx;
22865 rtx clobber_rtx;
22866
22867 if (!can_create_pseudo_p ())
22868 return 0;
22869
22870 switch (code)
22871 {
22872 case EQ:
22873 case GE:
22874 case GT:
22875 break;
22876
22877 case NE:
22878 case LT:
22879 case LE:
22880 code = swap_condition (code);
22881 std::swap (op0, op1);
22882 break;
22883
22884 default:
22885 return 0;
22886 }
22887
22888 /* Generate: [(parallel [(set (dest)
22889 (if_then_else (op (cmp1) (cmp2))
22890 (true)
22891 (false)))
22892 (clobber (scratch))])]. */
22893
22894 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22895 cmove_rtx = gen_rtx_SET (dest,
22896 gen_rtx_IF_THEN_ELSE (result_mode,
22897 compare_rtx,
22898 true_cond,
22899 false_cond));
22900
22901 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22902 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22903 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22904
22905 return 1;
22906 }
22907
22908 /* Emit a conditional move: move TRUE_COND to DEST if the comparison
22909 OP on its operands is nonzero/true, FALSE_COND if it
22910 is zero/false. Return 0 if the hardware has no such operation. */
22911
22912 int
22913 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22914 {
22915 enum rtx_code code = GET_CODE (op);
22916 rtx op0 = XEXP (op, 0);
22917 rtx op1 = XEXP (op, 1);
22918 machine_mode compare_mode = GET_MODE (op0);
22919 machine_mode result_mode = GET_MODE (dest);
22920 rtx temp;
22921 bool is_against_zero;
22922
22923 /* These modes should always match. */
22924 if (GET_MODE (op1) != compare_mode
22925 /* In the isel case however, we can use a compare immediate, so
22926 op1 may be a small constant. */
22927 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22928 return 0;
22929 if (GET_MODE (true_cond) != result_mode)
22930 return 0;
22931 if (GET_MODE (false_cond) != result_mode)
22932 return 0;
22933
22934 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22935 if (TARGET_P9_MINMAX
22936 && (compare_mode == SFmode || compare_mode == DFmode)
22937 && (result_mode == SFmode || result_mode == DFmode))
22938 {
22939 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22940 return 1;
22941
22942 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22943 return 1;
22944 }
22945
22946 /* Don't allow using floating point comparisons for integer results for
22947 now. */
22948 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22949 return 0;
22950
22951 /* First, work out if the hardware can do this at all, or
22952 if it's too slow.... */
22953 if (!FLOAT_MODE_P (compare_mode))
22954 {
22955 if (TARGET_ISEL)
22956 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22957 return 0;
22958 }
22959
22960 is_against_zero = op1 == CONST0_RTX (compare_mode);
22961
22962 /* A floating-point subtract might overflow, underflow, or produce
22963 an inexact result, thus changing the floating-point flags, so it
22964 can't be generated if we care about that. It's safe if one side
22965 of the construct is zero, since then no subtract will be
22966 generated. */
22967 if (SCALAR_FLOAT_MODE_P (compare_mode)
22968 && flag_trapping_math && ! is_against_zero)
22969 return 0;
22970
22971 /* Eliminate half of the comparisons by switching operands, this
22972 makes the remaining code simpler. */
22973 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22974 || code == LTGT || code == LT || code == UNLE)
22975 {
22976 code = reverse_condition_maybe_unordered (code);
22977 temp = true_cond;
22978 true_cond = false_cond;
22979 false_cond = temp;
22980 }
22981
22982 /* UNEQ and LTGT take four instructions for a comparison with zero,
22983 so it'll probably be faster to use a branch here too. */
22984 if (code == UNEQ && HONOR_NANS (compare_mode))
22985 return 0;
22986
22987 /* We're going to try to implement comparisons by performing
22988 a subtract, then comparing against zero. Unfortunately,
22989 Inf - Inf is NaN which is not zero, and so if we don't
22990 know that the operand is finite and the comparison
22991 would treat EQ differently from UNORDERED, we can't do it. */
22992 if (HONOR_INFINITIES (compare_mode)
22993 && code != GT && code != UNGE
22994 && (GET_CODE (op1) != CONST_DOUBLE
22995 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22996 /* Constructs of the form (a OP b ? a : b) are safe. */
22997 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22998 || (! rtx_equal_p (op0, true_cond)
22999 && ! rtx_equal_p (op1, true_cond))))
23000 return 0;
23001
23002 /* At this point we know we can use fsel. */
23003
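/* fsel implements "dest = (op0 >= 0.0) ? true_cond : false_cond", so the
   code below rewrites the comparison into a GE against zero. */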
23004 /* Reduce the comparison to a comparison against zero. */
23005 if (! is_against_zero)
23006 {
23007 temp = gen_reg_rtx (compare_mode);
23008 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23009 op0 = temp;
23010 op1 = CONST0_RTX (compare_mode);
23011 }
23012
23013 /* If we don't care about NaNs we can reduce some of the comparisons
23014 down to faster ones. */
23015 if (! HONOR_NANS (compare_mode))
23016 switch (code)
23017 {
23018 case GT:
23019 code = LE;
23020 temp = true_cond;
23021 true_cond = false_cond;
23022 false_cond = temp;
23023 break;
23024 case UNGE:
23025 code = GE;
23026 break;
23027 case UNEQ:
23028 code = EQ;
23029 break;
23030 default:
23031 break;
23032 }
23033
23034 /* Now, reduce everything down to a GE. */
23035 switch (code)
23036 {
23037 case GE:
23038 break;
23039
23040 case LE:
23041 temp = gen_reg_rtx (compare_mode);
23042 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23043 op0 = temp;
23044 break;
23045
23046 case ORDERED:
23047 temp = gen_reg_rtx (compare_mode);
23048 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23049 op0 = temp;
23050 break;
23051
23052 case EQ:
23053 temp = gen_reg_rtx (compare_mode);
23054 emit_insn (gen_rtx_SET (temp,
23055 gen_rtx_NEG (compare_mode,
23056 gen_rtx_ABS (compare_mode, op0))));
23057 op0 = temp;
23058 break;
23059
23060 case UNGE:
23061 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23062 temp = gen_reg_rtx (result_mode);
23063 emit_insn (gen_rtx_SET (temp,
23064 gen_rtx_IF_THEN_ELSE (result_mode,
23065 gen_rtx_GE (VOIDmode,
23066 op0, op1),
23067 true_cond, false_cond)));
23068 false_cond = true_cond;
23069 true_cond = temp;
23070
23071 temp = gen_reg_rtx (compare_mode);
23072 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23073 op0 = temp;
23074 break;
23075
23076 case GT:
23077 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23078 temp = gen_reg_rtx (result_mode);
23079 emit_insn (gen_rtx_SET (temp,
23080 gen_rtx_IF_THEN_ELSE (result_mode,
23081 gen_rtx_GE (VOIDmode,
23082 op0, op1),
23083 true_cond, false_cond)));
23084 true_cond = false_cond;
23085 false_cond = temp;
23086
23087 temp = gen_reg_rtx (compare_mode);
23088 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23089 op0 = temp;
23090 break;
23091
23092 default:
23093 gcc_unreachable ();
23094 }
23095
23096 emit_insn (gen_rtx_SET (dest,
23097 gen_rtx_IF_THEN_ELSE (result_mode,
23098 gen_rtx_GE (VOIDmode,
23099 op0, op1),
23100 true_cond, false_cond)));
23101 return 1;
23102 }
23103
23104 /* Same as above, but for ints (isel). */
23105
23106 int
23107 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23108 {
23109 rtx condition_rtx, cr;
23110 machine_mode mode = GET_MODE (dest);
23111 enum rtx_code cond_code;
23112 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23113 bool signedp;
23114
23115 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23116 return 0;
23117
23118 /* We still have to do the compare, because isel doesn't do a
23119 compare, it just looks at the CRx bits set by a previous compare
23120 instruction. */
23121 condition_rtx = rs6000_generate_compare (op, mode);
23122 cond_code = GET_CODE (condition_rtx);
23123 cr = XEXP (condition_rtx, 0);
23124 signedp = GET_MODE (cr) == CCmode;
23125
23126 isel_func = (mode == SImode
23127 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23128 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23129
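/* Roughly: a "cmpw/cmpd crN,a,b" (or the unsigned/immediate form)
   followed by "isel dest,true,false,bit", where BIT is the crN bit for
   COND_CODE; isel selects between two registers without branching. */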
23130 switch (cond_code)
23131 {
23132 case LT: case GT: case LTU: case GTU: case EQ:
23133 /* isel handles these directly. */
23134 break;
23135
23136 default:
23137 /* We need to swap the sense of the comparison. */
23138 {
23139 std::swap (false_cond, true_cond);
23140 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23141 }
23142 break;
23143 }
23144
23145 false_cond = force_reg (mode, false_cond);
23146 if (true_cond != const0_rtx)
23147 true_cond = force_reg (mode, true_cond);
23148
23149 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23150
23151 return 1;
23152 }
23153
23154 void
23155 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23156 {
23157 machine_mode mode = GET_MODE (op0);
23158 enum rtx_code c;
23159 rtx target;
23160
23161 /* VSX/altivec have direct min/max insns. */
23162 if ((code == SMAX || code == SMIN)
23163 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23164 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23165 {
23166 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23167 return;
23168 }
23169
23170 if (code == SMAX || code == SMIN)
23171 c = GE;
23172 else
23173 c = GEU;
23174
23175 if (code == SMAX || code == UMAX)
23176 target = emit_conditional_move (dest, c, op0, op1, mode,
23177 op0, op1, mode, 0);
23178 else
23179 target = emit_conditional_move (dest, c, op0, op1, mode,
23180 op1, op0, mode, 0);
23181 gcc_assert (target);
23182 if (target != dest)
23183 emit_move_insn (dest, target);
23184 }
23185
23186 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23187 COND is true. Mark the jump as unlikely to be taken. */
23188
23189 static void
23190 emit_unlikely_jump (rtx cond, rtx label)
23191 {
23192 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23193 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23194 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23195 }
23196
23197 /* A subroutine of the atomic operation splitters. Emit a load-locked
23198 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23199 the zero_extend operation. */
23200
23201 static void
23202 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23203 {
23204 rtx (*fn) (rtx, rtx) = NULL;
23205
23206 switch (mode)
23207 {
23208 case E_QImode:
23209 fn = gen_load_lockedqi;
23210 break;
23211 case E_HImode:
23212 fn = gen_load_lockedhi;
23213 break;
23214 case E_SImode:
23215 if (GET_MODE (mem) == QImode)
23216 fn = gen_load_lockedqi_si;
23217 else if (GET_MODE (mem) == HImode)
23218 fn = gen_load_lockedhi_si;
23219 else
23220 fn = gen_load_lockedsi;
23221 break;
23222 case E_DImode:
23223 fn = gen_load_lockeddi;
23224 break;
23225 case E_TImode:
23226 fn = gen_load_lockedti;
23227 break;
23228 default:
23229 gcc_unreachable ();
23230 }
23231 emit_insn (fn (reg, mem));
23232 }
23233
23234 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23235 instruction in MODE. */
23236
23237 static void
23238 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23239 {
23240 rtx (*fn) (rtx, rtx, rtx) = NULL;
23241
23242 switch (mode)
23243 {
23244 case E_QImode:
23245 fn = gen_store_conditionalqi;
23246 break;
23247 case E_HImode:
23248 fn = gen_store_conditionalhi;
23249 break;
23250 case E_SImode:
23251 fn = gen_store_conditionalsi;
23252 break;
23253 case E_DImode:
23254 fn = gen_store_conditionaldi;
23255 break;
23256 case E_TImode:
23257 fn = gen_store_conditionalti;
23258 break;
23259 default:
23260 gcc_unreachable ();
23261 }
23262
23263 /* Emit sync before stwcx. to address PPC405 Erratum. */
23264 if (PPC405_ERRATUM77)
23265 emit_insn (gen_hwsync ());
23266
23267 emit_insn (fn (res, mem, val));
23268 }
23269
23270 /* Expand barriers before and after a load_locked/store_cond sequence. */
23271
23272 static rtx
23273 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23274 {
23275 rtx addr = XEXP (mem, 0);
23276
23277 if (!legitimate_indirect_address_p (addr, reload_completed)
23278 && !legitimate_indexed_address_p (addr, reload_completed))
23279 {
23280 addr = force_reg (Pmode, addr);
23281 mem = replace_equiv_address_nv (mem, addr);
23282 }
23283
23284 switch (model)
23285 {
23286 case MEMMODEL_RELAXED:
23287 case MEMMODEL_CONSUME:
23288 case MEMMODEL_ACQUIRE:
23289 break;
23290 case MEMMODEL_RELEASE:
23291 case MEMMODEL_ACQ_REL:
23292 emit_insn (gen_lwsync ());
23293 break;
23294 case MEMMODEL_SEQ_CST:
23295 emit_insn (gen_hwsync ());
23296 break;
23297 default:
23298 gcc_unreachable ();
23299 }
23300 return mem;
23301 }
23302
23303 static void
23304 rs6000_post_atomic_barrier (enum memmodel model)
23305 {
23306 switch (model)
23307 {
23308 case MEMMODEL_RELAXED:
23309 case MEMMODEL_CONSUME:
23310 case MEMMODEL_RELEASE:
23311 break;
23312 case MEMMODEL_ACQUIRE:
23313 case MEMMODEL_ACQ_REL:
23314 case MEMMODEL_SEQ_CST:
23315 emit_insn (gen_isync ());
23316 break;
23317 default:
23318 gcc_unreachable ();
23319 }
23320 }
23321
23322 /* A subroutine of the various atomic expanders. For sub-word operations,
23323 we must adjust things to operate on SImode. Given the original MEM,
23324 return a new aligned memory. Also build and return the quantities by
23325 which to shift and mask. */
23326
23327 static rtx
23328 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23329 {
23330 rtx addr, align, shift, mask, mem;
23331 HOST_WIDE_INT shift_mask;
23332 machine_mode mode = GET_MODE (orig_mem);
23333
23334 /* For smaller modes, we have to implement this via SImode. */
23335 shift_mask = (mode == QImode ? 0x18 : 0x10);
23336
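/* E.g. a QImode access at byte offset 3 within its word on a
   little-endian target gets SHIFT = 24 and MASK = 0xff << 24 below. */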
23337 addr = XEXP (orig_mem, 0);
23338 addr = force_reg (GET_MODE (addr), addr);
23339
23340 /* Aligned memory containing subword. Generate a new memory. We
23341 do not want any of the existing MEM_ATTR data, as we're now
23342 accessing memory outside the original object. */
23343 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23344 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23345 mem = gen_rtx_MEM (SImode, align);
23346 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23347 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23348 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23349
23350 /* Shift amount for subword relative to aligned word. */
23351 shift = gen_reg_rtx (SImode);
23352 addr = gen_lowpart (SImode, addr);
23353 rtx tmp = gen_reg_rtx (SImode);
23354 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23355 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23356 if (BYTES_BIG_ENDIAN)
23357 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23358 shift, 1, OPTAB_LIB_WIDEN);
23359 *pshift = shift;
23360
23361 /* Mask for insertion. */
23362 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23363 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23364 *pmask = mask;
23365
23366 return mem;
23367 }
23368
23369 /* A subroutine of the various atomic expanders. For sub-word operands,
23370 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23371
23372 static rtx
23373 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23374 {
23375 rtx x;
23376
23377 x = gen_reg_rtx (SImode);
23378 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23379 gen_rtx_NOT (SImode, mask),
23380 oldval)));
23381
23382 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23383
23384 return x;
23385 }
23386
23387 /* A subroutine of the various atomic expanders. For sub-word operands,
23388 extract WIDE to NARROW via SHIFT. */
23389
23390 static void
23391 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23392 {
23393 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23394 wide, 1, OPTAB_LIB_WIDEN);
23395 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23396 }
23397
23398 /* Expand an atomic compare and swap operation. */
23399
23400 void
23401 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23402 {
23403 rtx boolval, retval, mem, oldval, newval, cond;
23404 rtx label1, label2, x, mask, shift;
23405 machine_mode mode, orig_mode;
23406 enum memmodel mod_s, mod_f;
23407 bool is_weak;
23408
23409 boolval = operands[0];
23410 retval = operands[1];
23411 mem = operands[2];
23412 oldval = operands[3];
23413 newval = operands[4];
23414 is_weak = (INTVAL (operands[5]) != 0);
23415 mod_s = memmodel_base (INTVAL (operands[6]));
23416 mod_f = memmodel_base (INTVAL (operands[7]));
23417 orig_mode = mode = GET_MODE (mem);
23418
23419 mask = shift = NULL_RTX;
23420 if (mode == QImode || mode == HImode)
23421 {
23422 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23423 lwarx and shift/mask operations. With power8, we need to do the
23424 comparison in SImode, but the store is still done in QI/HImode. */
23425 oldval = convert_modes (SImode, mode, oldval, 1);
23426
23427 if (!TARGET_SYNC_HI_QI)
23428 {
23429 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23430
23431 /* Shift and mask OLDVAL into position within the word. */
23432 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23433 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23434
23435 /* Shift and mask NEWVAL into position within the word. */
23436 newval = convert_modes (SImode, mode, newval, 1);
23437 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23438 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23439 }
23440
23441 /* Prepare to adjust the return value. */
23442 retval = gen_reg_rtx (SImode);
23443 mode = SImode;
23444 }
23445 else if (reg_overlap_mentioned_p (retval, oldval))
23446 oldval = copy_to_reg (oldval);
23447
23448 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23449 oldval = copy_to_mode_reg (mode, oldval);
23450
23451 if (reg_overlap_mentioned_p (retval, newval))
23452 newval = copy_to_reg (newval);
23453
23454 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23455
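/* A sketch of the emitted sequence for a word-sized MODE (the weak
   variant omits the backward branch and its label):

      loop: lwarx  retval,0,mem
	    cmpw   cr0,retval,oldval
	    bne-   cr0,fail
	    stwcx. newval,0,mem
	    bne-   cr0,loop
      fail:				*/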
23456 label1 = NULL_RTX;
23457 if (!is_weak)
23458 {
23459 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23460 emit_label (XEXP (label1, 0));
23461 }
23462 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23463
23464 emit_load_locked (mode, retval, mem);
23465
23466 x = retval;
23467 if (mask)
23468 x = expand_simple_binop (SImode, AND, retval, mask,
23469 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23470
23471 cond = gen_reg_rtx (CCmode);
23472 /* If we have TImode, synthesize a comparison. */
23473 if (mode != TImode)
23474 x = gen_rtx_COMPARE (CCmode, x, oldval);
23475 else
23476 {
23477 rtx xor1_result = gen_reg_rtx (DImode);
23478 rtx xor2_result = gen_reg_rtx (DImode);
23479 rtx or_result = gen_reg_rtx (DImode);
23480 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23481 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23482 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23483 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23484
23485 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23486 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23487 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23488 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23489 }
23490
23491 emit_insn (gen_rtx_SET (cond, x));
23492
23493 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23494 emit_unlikely_jump (x, label2);
23495
23496 x = newval;
23497 if (mask)
23498 x = rs6000_mask_atomic_subword (retval, newval, mask);
23499
23500 emit_store_conditional (orig_mode, cond, mem, x);
23501
23502 if (!is_weak)
23503 {
23504 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23505 emit_unlikely_jump (x, label1);
23506 }
23507
23508 if (!is_mm_relaxed (mod_f))
23509 emit_label (XEXP (label2, 0));
23510
23511 rs6000_post_atomic_barrier (mod_s);
23512
23513 if (is_mm_relaxed (mod_f))
23514 emit_label (XEXP (label2, 0));
23515
23516 if (shift)
23517 rs6000_finish_atomic_subword (operands[1], retval, shift);
23518 else if (mode != GET_MODE (operands[1]))
23519 convert_move (operands[1], retval, 1);
23520
23521 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23522 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23523 emit_insn (gen_rtx_SET (boolval, x));
23524 }
23525
23526 /* Expand an atomic exchange operation. */
23527
23528 void
23529 rs6000_expand_atomic_exchange (rtx operands[])
23530 {
23531 rtx retval, mem, val, cond;
23532 machine_mode mode;
23533 enum memmodel model;
23534 rtx label, x, mask, shift;
23535
23536 retval = operands[0];
23537 mem = operands[1];
23538 val = operands[2];
23539 model = memmodel_base (INTVAL (operands[3]));
23540 mode = GET_MODE (mem);
23541
23542 mask = shift = NULL_RTX;
23543 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23544 {
23545 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23546
23547 /* Shift and mask VAL into position within the word. */
23548 val = convert_modes (SImode, mode, val, 1);
23549 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23550 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23551
23552 /* Prepare to adjust the return value. */
23553 retval = gen_reg_rtx (SImode);
23554 mode = SImode;
23555 }
23556
23557 mem = rs6000_pre_atomic_barrier (mem, model);
23558
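/* Sketch of the loop emitted below for SImode:

      loop: lwarx  retval,0,mem
	    stwcx. val,0,mem
	    bne-   cr0,loop  */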
23559 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23560 emit_label (XEXP (label, 0));
23561
23562 emit_load_locked (mode, retval, mem);
23563
23564 x = val;
23565 if (mask)
23566 x = rs6000_mask_atomic_subword (retval, val, mask);
23567
23568 cond = gen_reg_rtx (CCmode);
23569 emit_store_conditional (mode, cond, mem, x);
23570
23571 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23572 emit_unlikely_jump (x, label);
23573
23574 rs6000_post_atomic_barrier (model);
23575
23576 if (shift)
23577 rs6000_finish_atomic_subword (operands[0], retval, shift);
23578 }
23579
23580 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23581 to perform. MEM is the memory on which to operate. VAL is the second
23582 operand of the binary operator. BEFORE and AFTER are optional locations to
23583 return the value of MEM either before or after the operation. MODEL_RTX
23584 is a CONST_INT containing the memory model to use. */
23585
23586 void
23587 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23588 rtx orig_before, rtx orig_after, rtx model_rtx)
23589 {
23590 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23591 machine_mode mode = GET_MODE (mem);
23592 machine_mode store_mode = mode;
23593 rtx label, x, cond, mask, shift;
23594 rtx before = orig_before, after = orig_after;
23595
23596 mask = shift = NULL_RTX;
23597 /* On power8, we want to use SImode for the operation. On previous systems,
23598 use the operation in a subword and shift/mask to get the proper byte or
23599 halfword. */
23600 if (mode == QImode || mode == HImode)
23601 {
23602 if (TARGET_SYNC_HI_QI)
23603 {
23604 val = convert_modes (SImode, mode, val, 1);
23605
23606 /* Prepare to adjust the return value. */
23607 before = gen_reg_rtx (SImode);
23608 if (after)
23609 after = gen_reg_rtx (SImode);
23610 mode = SImode;
23611 }
23612 else
23613 {
23614 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23615
23616 /* Shift and mask VAL into position within the word. */
23617 val = convert_modes (SImode, mode, val, 1);
23618 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23619 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23620
23621 switch (code)
23622 {
23623 case IOR:
23624 case XOR:
23625 /* We've already zero-extended VAL. That is sufficient to
23626 make certain that it does not affect other bits. */
23627 mask = NULL;
23628 break;
23629
23630 case AND:
23631 /* If we make certain that all of the other bits in VAL are
23632 set, that will be sufficient to not affect other bits. */
23633 x = gen_rtx_NOT (SImode, mask);
23634 x = gen_rtx_IOR (SImode, x, val);
23635 emit_insn (gen_rtx_SET (val, x));
23636 mask = NULL;
23637 break;
23638
23639 case NOT:
23640 case PLUS:
23641 case MINUS:
23642 /* These will all affect bits outside the field and need
23643 adjustment via MASK within the loop. */
23644 break;
23645
23646 default:
23647 gcc_unreachable ();
23648 }
23649
23650 /* Prepare to adjust the return value. */
23651 before = gen_reg_rtx (SImode);
23652 if (after)
23653 after = gen_reg_rtx (SImode);
23654 store_mode = mode = SImode;
23655 }
23656 }
23657
23658 mem = rs6000_pre_atomic_barrier (mem, model);
23659
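/* For a full-word PLUS the loop below is roughly:

      loop: lwarx  before,0,mem
	    add    after,before,val
	    stwcx. after,0,mem
	    bne-   cr0,loop  */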
23660 label = gen_label_rtx ();
23661 emit_label (label);
23662 label = gen_rtx_LABEL_REF (VOIDmode, label);
23663
23664 if (before == NULL_RTX)
23665 before = gen_reg_rtx (mode);
23666
23667 emit_load_locked (mode, before, mem);
23668
23669 if (code == NOT)
23670 {
23671 x = expand_simple_binop (mode, AND, before, val,
23672 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23673 after = expand_simple_unop (mode, NOT, x, after, 1);
23674 }
23675 else
23676 {
23677 after = expand_simple_binop (mode, code, before, val,
23678 after, 1, OPTAB_LIB_WIDEN);
23679 }
23680
23681 x = after;
23682 if (mask)
23683 {
23684 x = expand_simple_binop (SImode, AND, after, mask,
23685 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23686 x = rs6000_mask_atomic_subword (before, x, mask);
23687 }
23688 else if (store_mode != mode)
23689 x = convert_modes (store_mode, mode, x, 1);
23690
23691 cond = gen_reg_rtx (CCmode);
23692 emit_store_conditional (store_mode, cond, mem, x);
23693
23694 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23695 emit_unlikely_jump (x, label);
23696
23697 rs6000_post_atomic_barrier (model);
23698
23699 if (shift)
23700 {
23701 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23702 then do the calculations in a SImode register. */
23703 if (orig_before)
23704 rs6000_finish_atomic_subword (orig_before, before, shift);
23705 if (orig_after)
23706 rs6000_finish_atomic_subword (orig_after, after, shift);
23707 }
23708 else if (store_mode != mode)
23709 {
23710 /* QImode/HImode on machines with lbarx/lharx where we do the native
23711 operation and then do the calculations in a SImode register. */
23712 if (orig_before)
23713 convert_move (orig_before, before, 1);
23714 if (orig_after)
23715 convert_move (orig_after, after, 1);
23716 }
23717 else if (orig_after && after != orig_after)
23718 emit_move_insn (orig_after, after);
23719 }
23720
23721 /* Emit instructions to move SRC to DST. Called by splitters for
23722 multi-register moves. It will emit at most one instruction for
23723 each register that is accessed; that is, it won't emit li/lis pairs
23724 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23725 register. */
23726
23727 void
23728 rs6000_split_multireg_move (rtx dst, rtx src)
23729 {
23730 /* The register number of the first register being moved. */
23731 int reg;
23732 /* The mode that is to be moved. */
23733 machine_mode mode;
23734 /* The mode that the move is being done in, and its size. */
23735 machine_mode reg_mode;
23736 int reg_mode_size;
23737 /* The number of registers that will be moved. */
23738 int nregs;
23739
23740 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23741 mode = GET_MODE (dst);
23742 nregs = hard_regno_nregs (reg, mode);
23743 if (FP_REGNO_P (reg))
23744 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23745 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23746 else if (ALTIVEC_REGNO_P (reg))
23747 reg_mode = V16QImode;
23748 else
23749 reg_mode = word_mode;
23750 reg_mode_size = GET_MODE_SIZE (reg_mode);
23751
23752 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23753
23754 /* TDmode residing in FP registers is special, since the ISA requires that
23755 the lower-numbered word of a register pair is always the most significant
23756 word, even in little-endian mode. This does not match the usual subreg
23757 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23758 the appropriate constituent registers "by hand" in little-endian mode.
23759
23760 Note we do not need to check for destructive overlap here since TDmode
23761 can only reside in even/odd register pairs. */
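/* For example, moving a TDmode value from f12:f13 to f10:f11 on a
   little-endian target copies f13 to f11 first, then f12 to f10
   (register numbers illustrative). */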
23762 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23763 {
23764 rtx p_src, p_dst;
23765 int i;
23766
23767 for (i = 0; i < nregs; i++)
23768 {
23769 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23770 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23771 else
23772 p_src = simplify_gen_subreg (reg_mode, src, mode,
23773 i * reg_mode_size);
23774
23775 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23776 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23777 else
23778 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23779 i * reg_mode_size);
23780
23781 emit_insn (gen_rtx_SET (p_dst, p_src));
23782 }
23783
23784 return;
23785 }
23786
23787 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23788 {
23789 /* Move register range backwards, if we might have destructive
23790 overlap. */
23791 int i;
23792 for (i = nregs - 1; i >= 0; i--)
23793 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23794 i * reg_mode_size),
23795 simplify_gen_subreg (reg_mode, src, mode,
23796 i * reg_mode_size)));
23797 }
23798 else
23799 {
23800 int i;
23801 int j = -1;
23802 bool used_update = false;
23803 rtx restore_basereg = NULL_RTX;
23804
23805 if (MEM_P (src) && INT_REGNO_P (reg))
23806 {
23807 rtx breg;
23808
23809 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23810 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23811 {
23812 rtx delta_rtx;
23813 breg = XEXP (XEXP (src, 0), 0);
23814 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23815 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23816 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23817 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23818 src = replace_equiv_address (src, breg);
23819 }
23820 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23821 {
23822 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23823 {
23824 rtx basereg = XEXP (XEXP (src, 0), 0);
23825 if (TARGET_UPDATE)
23826 {
23827 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23828 emit_insn (gen_rtx_SET (ndst,
23829 gen_rtx_MEM (reg_mode,
23830 XEXP (src, 0))));
23831 used_update = true;
23832 }
23833 else
23834 emit_insn (gen_rtx_SET (basereg,
23835 XEXP (XEXP (src, 0), 1)));
23836 src = replace_equiv_address (src, basereg);
23837 }
23838 else
23839 {
23840 rtx basereg = gen_rtx_REG (Pmode, reg);
23841 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23842 src = replace_equiv_address (src, basereg);
23843 }
23844 }
23845
23846 breg = XEXP (src, 0);
23847 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23848 breg = XEXP (breg, 0);
23849
23850 /* If the base register we are using to address memory is
23851 also a destination reg, then change that register last. */
23852 if (REG_P (breg)
23853 && REGNO (breg) >= REGNO (dst)
23854 && REGNO (breg) < REGNO (dst) + nregs)
23855 j = REGNO (breg) - REGNO (dst);
23856 }
23857 else if (MEM_P (dst) && INT_REGNO_P (reg))
23858 {
23859 rtx breg;
23860
23861 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23862 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23863 {
23864 rtx delta_rtx;
23865 breg = XEXP (XEXP (dst, 0), 0);
23866 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23867 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23868 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23869
23870 /* We have to update the breg before doing the store.
23871 Use store with update, if available. */
23872
23873 if (TARGET_UPDATE)
23874 {
23875 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23876 emit_insn (TARGET_32BIT
23877 ? (TARGET_POWERPC64
23878 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23879 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23880 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23881 used_update = true;
23882 }
23883 else
23884 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23885 dst = replace_equiv_address (dst, breg);
23886 }
23887 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23888 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23889 {
23890 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23891 {
23892 rtx basereg = XEXP (XEXP (dst, 0), 0);
23893 if (TARGET_UPDATE)
23894 {
23895 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23896 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23897 XEXP (dst, 0)),
23898 nsrc));
23899 used_update = true;
23900 }
23901 else
23902 emit_insn (gen_rtx_SET (basereg,
23903 XEXP (XEXP (dst, 0), 1)));
23904 dst = replace_equiv_address (dst, basereg);
23905 }
23906 else
23907 {
23908 rtx basereg = XEXP (XEXP (dst, 0), 0);
23909 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23910 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23911 && REG_P (basereg)
23912 && REG_P (offsetreg)
23913 && REGNO (basereg) != REGNO (offsetreg));
23914 if (REGNO (basereg) == 0)
23915 {
23916 rtx tmp = offsetreg;
23917 offsetreg = basereg;
23918 basereg = tmp;
23919 }
23920 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23921 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23922 dst = replace_equiv_address (dst, basereg);
23923 }
23924 }
23925 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23926 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23927 }
23928
23929 for (i = 0; i < nregs; i++)
23930 {
23931 /* Calculate index to next subword. */
23932 ++j;
23933 if (j == nregs)
23934 j = 0;
23935
23936 	  /* If the compiler already emitted the move of the first word by
23937 	     a store with update, there is nothing more to do.  */
23938 if (j == 0 && used_update)
23939 continue;
23940
23941 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23942 j * reg_mode_size),
23943 simplify_gen_subreg (reg_mode, src, mode,
23944 j * reg_mode_size)));
23945 }
23946 if (restore_basereg != NULL_RTX)
23947 emit_insn (restore_basereg);
23948 }
23949 }
23950
23951 \f
23952 /* This page contains routines that are used to determine what the
23953    function prologue and epilogue code will do, and to write that code out.  */
23954
23955 /* Determine whether the REG is really used. */
23956
23957 static bool
23958 save_reg_p (int reg)
23959 {
23960   /* We need to mark the PIC offset register live under the same conditions
23961      as it is set up; otherwise it won't be saved before we clobber it.  */
23962
23963 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23964 {
23965 /* When calling eh_return, we must return true for all the cases
23966 where conditional_register_usage marks the PIC offset reg
23967 call used. */
23968 if (TARGET_TOC && TARGET_MINIMAL_TOC
23969 && (crtl->calls_eh_return
23970 || df_regs_ever_live_p (reg)
23971 || !constant_pool_empty_p ()))
23972 return true;
23973
23974 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
23975 && flag_pic && crtl->uses_pic_offset_table)
23976 return true;
23977 }
23978
23979 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23980 }
23981
23982 /* Return the first fixed-point register that is required to be
23983 saved. 32 if none. */
23984
23985 int
23986 first_reg_to_save (void)
23987 {
23988 int first_reg;
23989
23990 /* Find lowest numbered live register. */
23991 for (first_reg = 13; first_reg <= 31; first_reg++)
23992 if (save_reg_p (first_reg))
23993 break;
23994
23995 return first_reg;
23996 }
23997
23998 /* Similar, for FP regs. */
23999
24000 int
24001 first_fp_reg_to_save (void)
24002 {
24003 int first_reg;
24004
24005 /* Find lowest numbered live register. */
24006 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24007 if (save_reg_p (first_reg))
24008 break;
24009
24010 return first_reg;
24011 }
24012
24013 /* Similar, for AltiVec regs. */
24014
24015 static int
24016 first_altivec_reg_to_save (void)
24017 {
24018 int i;
24019
24020 /* Stack frame remains as is unless we are in AltiVec ABI. */
24021 if (! TARGET_ALTIVEC_ABI)
24022 return LAST_ALTIVEC_REGNO + 1;
24023
24024 /* On Darwin, the unwind routines are compiled without
24025 TARGET_ALTIVEC, and use save_world to save/restore the
24026 altivec registers when necessary. */
24027 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24028 && ! TARGET_ALTIVEC)
24029 return FIRST_ALTIVEC_REGNO + 20;
24030
24031 /* Find lowest numbered live register. */
24032 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24033 if (save_reg_p (i))
24034 break;
24035
24036 return i;
24037 }
24038
24039 /* Return a 32-bit mask of the AltiVec registers we need to set in
24040    VRSAVE.  Bit n of the return value is 1 if Vn is live; following the
24041    PowerPC convention, bit 0 is the MSB of the 32-bit word.  */
24042
24043 static unsigned int
24044 compute_vrsave_mask (void)
24045 {
24046 unsigned int i, mask = 0;
24047
24048 /* On Darwin, the unwind routines are compiled without
24049 TARGET_ALTIVEC, and use save_world to save/restore the
24050 call-saved altivec registers when necessary. */
24051 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24052 && ! TARGET_ALTIVEC)
24053 mask |= 0xFFF;
24054
24055 /* First, find out if we use _any_ altivec registers. */
24056 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24057 if (df_regs_ever_live_p (i))
24058 mask |= ALTIVEC_REG_BIT (i);
24059
24060 if (mask == 0)
24061 return mask;
24062
24063 /* Next, remove the argument registers from the set. These must
24064 be in the VRSAVE mask set by the caller, so we don't need to add
24065 them in again. More importantly, the mask we compute here is
24066 used to generate CLOBBERs in the set_vrsave insn, and we do not
24067 wish the argument registers to die. */
24068 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24069 mask &= ~ALTIVEC_REG_BIT (i);
24070
24071 /* Similarly, remove the return value from the set. */
24072 {
24073 bool yes = false;
24074 diddle_return_value (is_altivec_return_reg, &yes);
24075 if (yes)
24076 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24077 }
24078
24079 return mask;
24080 }
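
/* As a worked example (a sketch, assuming ALTIVEC_REG_BIT (R) expands to
   0x80000000 >> (R - FIRST_ALTIVEC_REGNO) as defined in rs6000.h): if only
   V20 and V31 are live, and neither is an argument or return register,
   compute_vrsave_mask yields

     mask = (0x80000000 >> 20) | (0x80000000 >> 31) = 0x00000801

   consistent with the MSB-is-bit-0 numbering described above.  */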
24081
24082 /* For a very restricted set of circumstances, we can cut down the
24083 size of prologues/epilogues by calling our own save/restore-the-world
24084 routines. */
24085
24086 static void
24087 compute_save_world_info (rs6000_stack_t *info)
24088 {
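  /* Note on the two-step assignment below: WORLD_SAVE_P (info) examines
     info->world_save_p itself (an assumption based on its Darwin
     definition), so it must be seeded with 1 for the conjunction that
     follows to be meaningful.  */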
24089 info->world_save_p = 1;
24090 info->world_save_p
24091 = (WORLD_SAVE_P (info)
24092 && DEFAULT_ABI == ABI_DARWIN
24093 && !cfun->has_nonlocal_label
24094 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24095 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24096 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24097 && info->cr_save_p);
24098
24099 /* This will not work in conjunction with sibcalls. Make sure there
24100 are none. (This check is expensive, but seldom executed.) */
24101 if (WORLD_SAVE_P (info))
24102 {
24103 rtx_insn *insn;
24104 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24105 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24106 {
24107 info->world_save_p = 0;
24108 break;
24109 }
24110 }
24111
24112 if (WORLD_SAVE_P (info))
24113 {
24114 /* Even if we're not touching VRsave, make sure there's room on the
24115 stack for it, if it looks like we're calling SAVE_WORLD, which
24116 will attempt to save it. */
24117 info->vrsave_size = 4;
24118
24119 /* If we are going to save the world, we need to save the link register too. */
24120 info->lr_save_p = 1;
24121
24122 /* "Save" the VRsave register too if we're saving the world. */
24123 if (info->vrsave_mask == 0)
24124 info->vrsave_mask = compute_vrsave_mask ();
24125
24126 /* Because the Darwin register save/restore routines only handle
24127 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24128 check. */
24129 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24130 && (info->first_altivec_reg_save
24131 >= FIRST_SAVED_ALTIVEC_REGNO));
24132 }
24133
24134 return;
24135 }
24136
24137
24138 static void
24139 is_altivec_return_reg (rtx reg, void *xyes)
24140 {
24141 bool *yes = (bool *) xyes;
24142 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24143 *yes = true;
24144 }
24145
24146 \f
24147 /* Return whether REG is a global user reg or has been specified by
24148 -ffixed-REG. We should not restore these, and so cannot use
24149 lmw or out-of-line restore functions if there are any. We also
24150 can't save them (well, emit frame notes for them), because frame
24151 unwinding during exception handling will restore saved registers. */
24152
24153 static bool
24154 fixed_reg_p (int reg)
24155 {
24156 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24157 backend sets it, overriding anything the user might have given. */
24158 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24159 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24160 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24161 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24162 return false;
24163
24164 return fixed_regs[reg];
24165 }
24166
24167 /* Determine the strategy for saving/restoring registers.  */
24168
24169 enum {
24170 SAVE_MULTIPLE = 0x1,
24171 SAVE_INLINE_GPRS = 0x2,
24172 SAVE_INLINE_FPRS = 0x4,
24173 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24174 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24175 SAVE_INLINE_VRS = 0x20,
24176 REST_MULTIPLE = 0x100,
24177 REST_INLINE_GPRS = 0x200,
24178 REST_INLINE_FPRS = 0x400,
24179 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24180 REST_INLINE_VRS = 0x1000
24181 };
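
/* For illustration only (one plausible outcome, not an exhaustive rule):
   a V.4 function compiled with -Os whose lowest saved GPR is below r30,
   with no FPR or AltiVec registers to save, could end up with

     strategy == (SAVE_INLINE_FPRS | REST_INLINE_FPRS
		  | SAVE_INLINE_VRS | REST_INLINE_VRS)

   i.e. the (empty) FPR and VR save areas handled inline while GPR saves
   and restores remain eligible for the out-of-line routines.  */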
24182
24183 static int
24184 rs6000_savres_strategy (rs6000_stack_t *info,
24185 bool using_static_chain_p)
24186 {
24187 int strategy = 0;
24188
24189 /* Select between in-line and out-of-line save and restore of regs.
24190 First, all the obvious cases where we don't use out-of-line. */
24191 if (crtl->calls_eh_return
24192 || cfun->machine->ra_need_lr)
24193 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24194 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24195 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24196
24197 if (info->first_gp_reg_save == 32)
24198 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24199
24200 if (info->first_fp_reg_save == 64)
24201 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24202
24203 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24204 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24205
24206 /* Define cutoff for using out-of-line functions to save registers. */
24207 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24208 {
24209 if (!optimize_size)
24210 {
24211 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24212 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24213 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24214 }
24215 else
24216 {
24217 /* Prefer out-of-line restore if it will exit. */
24218 if (info->first_fp_reg_save > 61)
24219 strategy |= SAVE_INLINE_FPRS;
24220 if (info->first_gp_reg_save > 29)
24221 {
24222 if (info->first_fp_reg_save == 64)
24223 strategy |= SAVE_INLINE_GPRS;
24224 else
24225 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24226 }
24227 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24228 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24229 }
24230 }
24231 else if (DEFAULT_ABI == ABI_DARWIN)
24232 {
24233 if (info->first_fp_reg_save > 60)
24234 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24235 if (info->first_gp_reg_save > 29)
24236 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24237 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24238 }
24239 else
24240 {
24241 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24242 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24243 || info->first_fp_reg_save > 61)
24244 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24245 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24246 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24247 }
24248
24249 /* Don't bother to try to save things out-of-line if r11 is occupied
24250 by the static chain. It would require too much fiddling and the
24251    static chain is rarely used anyway.  FPRs are saved w.r.t. the stack
24252 pointer on Darwin, and AIX uses r1 or r12. */
24253 if (using_static_chain_p
24254 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24255 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24256 | SAVE_INLINE_GPRS
24257 | SAVE_INLINE_VRS);
24258
24259 /* Don't ever restore fixed regs. That means we can't use the
24260 out-of-line register restore functions if a fixed reg is in the
24261 range of regs restored. */
24262 if (!(strategy & REST_INLINE_FPRS))
24263 for (int i = info->first_fp_reg_save; i < 64; i++)
24264 if (fixed_regs[i])
24265 {
24266 strategy |= REST_INLINE_FPRS;
24267 break;
24268 }
24269
24270 /* We can only use the out-of-line routines to restore fprs if we've
24271 saved all the registers from first_fp_reg_save in the prologue.
24272 Otherwise, we risk loading garbage. Of course, if we have saved
24273 out-of-line then we know we haven't skipped any fprs. */
24274 if ((strategy & SAVE_INLINE_FPRS)
24275 && !(strategy & REST_INLINE_FPRS))
24276 for (int i = info->first_fp_reg_save; i < 64; i++)
24277 if (!save_reg_p (i))
24278 {
24279 strategy |= REST_INLINE_FPRS;
24280 break;
24281 }
24282
24283 /* Similarly, for altivec regs. */
24284 if (!(strategy & REST_INLINE_VRS))
24285 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24286 if (fixed_regs[i])
24287 {
24288 strategy |= REST_INLINE_VRS;
24289 break;
24290 }
24291
24292 if ((strategy & SAVE_INLINE_VRS)
24293 && !(strategy & REST_INLINE_VRS))
24294 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24295 if (!save_reg_p (i))
24296 {
24297 strategy |= REST_INLINE_VRS;
24298 break;
24299 }
24300
24301 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24302 saved is an out-of-line save or restore. Set up the value for
24303 the next test (excluding out-of-line gprs). */
24304 bool lr_save_p = (info->lr_save_p
24305 || !(strategy & SAVE_INLINE_FPRS)
24306 || !(strategy & SAVE_INLINE_VRS)
24307 || !(strategy & REST_INLINE_FPRS)
24308 || !(strategy & REST_INLINE_VRS));
24309
24310 if (TARGET_MULTIPLE
24311 && !TARGET_POWERPC64
24312 && info->first_gp_reg_save < 31
24313 && !(flag_shrink_wrap
24314 && flag_shrink_wrap_separate
24315 && optimize_function_for_speed_p (cfun)))
24316 {
24317 int count = 0;
24318 for (int i = info->first_gp_reg_save; i < 32; i++)
24319 if (save_reg_p (i))
24320 count++;
24321
24322 if (count <= 1)
24323 /* Don't use store multiple if only one reg needs to be
24324 saved. This can occur for example when the ABI_V4 pic reg
24325 (r30) needs to be saved to make calls, but r31 is not
24326 used. */
24327 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24328 else
24329 {
24330 /* Prefer store multiple for saves over out-of-line
24331 routines, since the store-multiple instruction will
24332 always be smaller. */
24333 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24334
24335 /* The situation is more complicated with load multiple.
24336 We'd prefer to use the out-of-line routines for restores,
24337 since the "exit" out-of-line routines can handle the
24338 	     restore of LR and the frame teardown.  However, it doesn't
24339 	     make sense to use the out-of-line routine if that is the
24340 	     only reason we'd need to save LR, and we can't use the
24341 	     "exit" out-of-line gpr restore if we have saved some
24342 	     fprs.  In those cases it is advantageous to use load
24343 multiple when available. */
24344 if (info->first_fp_reg_save != 64 || !lr_save_p)
24345 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24346 }
24347 }
24348
24349   /* Using the "exit" out-of-line routine does not improve code size
24350      if using it would require LR to be saved and we are only saving one
24351      or two gprs.  */
24352 else if (!lr_save_p && info->first_gp_reg_save > 29)
24353 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24354
24355 /* Don't ever restore fixed regs. */
24356 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24357 for (int i = info->first_gp_reg_save; i < 32; i++)
24358 if (fixed_reg_p (i))
24359 {
24360 strategy |= REST_INLINE_GPRS;
24361 strategy &= ~REST_MULTIPLE;
24362 break;
24363 }
24364
24365 /* We can only use load multiple or the out-of-line routines to
24366 restore gprs if we've saved all the registers from
24367 first_gp_reg_save. Otherwise, we risk loading garbage.
24368 Of course, if we have saved out-of-line or used stmw then we know
24369 we haven't skipped any gprs. */
24370 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24371 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24372 for (int i = info->first_gp_reg_save; i < 32; i++)
24373 if (!save_reg_p (i))
24374 {
24375 strategy |= REST_INLINE_GPRS;
24376 strategy &= ~REST_MULTIPLE;
24377 break;
24378 }
24379
24380 if (TARGET_ELF && TARGET_64BIT)
24381 {
24382 if (!(strategy & SAVE_INLINE_FPRS))
24383 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24384 else if (!(strategy & SAVE_INLINE_GPRS)
24385 && info->first_fp_reg_save == 64)
24386 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24387 }
24388 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24389 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24390
24391 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24392 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24393
24394 return strategy;
24395 }
24396
24397 /* Calculate the stack information for the current function. This is
24398 complicated by having two separate calling sequences, the AIX calling
24399 sequence and the V.4 calling sequence.
24400
24401 AIX (and Darwin/Mac OS X) stack frames look like:
24402 32-bit 64-bit
24403 SP----> +---------------------------------------+
24404 | back chain to caller | 0 0
24405 +---------------------------------------+
24406 | saved CR | 4 8 (8-11)
24407 +---------------------------------------+
24408 | saved LR | 8 16
24409 +---------------------------------------+
24410 | reserved for compilers | 12 24
24411 +---------------------------------------+
24412 | reserved for binders | 16 32
24413 +---------------------------------------+
24414 | saved TOC pointer | 20 40
24415 +---------------------------------------+
24416 | Parameter save area (+padding*) (P) | 24 48
24417 +---------------------------------------+
24418 | Alloca space (A) | 24+P etc.
24419 +---------------------------------------+
24420 | Local variable space (L) | 24+P+A
24421 +---------------------------------------+
24422 | Float/int conversion temporary (X) | 24+P+A+L
24423 +---------------------------------------+
24424 | Save area for AltiVec registers (W) | 24+P+A+L+X
24425 +---------------------------------------+
24426 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24427 +---------------------------------------+
24428 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24429 +---------------------------------------+
24430 	| Save area for GP registers (G)	| 24+P+A+L+X+W+Y+Z
24431 +---------------------------------------+
24432 	| Save area for FP registers (F)	| 24+P+A+L+X+W+Y+Z+G
24433 +---------------------------------------+
24434 old SP->| back chain to caller's caller |
24435 +---------------------------------------+
24436
24437 * If the alloca area is present, the parameter save area is
24438 padded so that the former starts 16-byte aligned.
24439
24440 The required alignment for AIX configurations is two words (i.e., 8
24441 or 16 bytes).
24442
24443 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24444
24445 SP----> +---------------------------------------+
24446 | Back chain to caller | 0
24447 +---------------------------------------+
24448 | Save area for CR | 8
24449 +---------------------------------------+
24450 | Saved LR | 16
24451 +---------------------------------------+
24452 | Saved TOC pointer | 24
24453 +---------------------------------------+
24454 | Parameter save area (+padding*) (P) | 32
24455 +---------------------------------------+
24456 | Alloca space (A) | 32+P
24457 +---------------------------------------+
24458 | Local variable space (L) | 32+P+A
24459 +---------------------------------------+
24460 | Save area for AltiVec registers (W) | 32+P+A+L
24461 +---------------------------------------+
24462 | AltiVec alignment padding (Y) | 32+P+A+L+W
24463 +---------------------------------------+
24464 | Save area for GP registers (G) | 32+P+A+L+W+Y
24465 +---------------------------------------+
24466 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24467 +---------------------------------------+
24468 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24469 +---------------------------------------+
24470
24471 * If the alloca area is present, the parameter save area is
24472 padded so that the former starts 16-byte aligned.
24473
24474 V.4 stack frames look like:
24475
24476 SP----> +---------------------------------------+
24477 | back chain to caller | 0
24478 +---------------------------------------+
24479 | caller's saved LR | 4
24480 +---------------------------------------+
24481 | Parameter save area (+padding*) (P) | 8
24482 +---------------------------------------+
24483 | Alloca space (A) | 8+P
24484 +---------------------------------------+
24485 | Varargs save area (V) | 8+P+A
24486 +---------------------------------------+
24487 | Local variable space (L) | 8+P+A+V
24488 +---------------------------------------+
24489 | Float/int conversion temporary (X) | 8+P+A+V+L
24490 +---------------------------------------+
24491 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24492 +---------------------------------------+
24493 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24494 +---------------------------------------+
24495 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24496 +---------------------------------------+
24497 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24498 +---------------------------------------+
24499 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24500 +---------------------------------------+
24501 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24502 +---------------------------------------+
24503 old SP->| back chain to caller's caller |
24504 +---------------------------------------+
24505
24506 * If the alloca area is present and the required alignment is
24507 16 bytes, the parameter save area is padded so that the
24508 alloca area starts 16-byte aligned.
24509
24510 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24511 given. (But note below and in sysv4.h that we require only 8 and
24512    may round up the size of our stack frame anyway.  The historical
24513 reason is early versions of powerpc-linux which didn't properly
24514 align the stack at program startup. A happy side-effect is that
24515 -mno-eabi libraries can be used with -meabi programs.)
24516
24517 The EABI configuration defaults to the V.4 layout. However,
24518 the stack alignment requirements may differ. If -mno-eabi is not
24519 given, the required stack alignment is 8 bytes; if -mno-eabi is
24520 given, the required alignment is 16 bytes. (But see V.4 comment
24521 above.) */
24522
24523 #ifndef ABI_STACK_BOUNDARY
24524 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24525 #endif
24526
24527 static rs6000_stack_t *
24528 rs6000_stack_info (void)
24529 {
24530 /* We should never be called for thunks, we are not set up for that. */
24531 gcc_assert (!cfun->is_thunk);
24532
24533 rs6000_stack_t *info = &stack_info;
24534 int reg_size = TARGET_32BIT ? 4 : 8;
24535 int ehrd_size;
24536 int ehcr_size;
24537 int save_align;
24538 int first_gp;
24539 HOST_WIDE_INT non_fixed_size;
24540 bool using_static_chain_p;
24541
24542 if (reload_completed && info->reload_completed)
24543 return info;
24544
24545 memset (info, 0, sizeof (*info));
24546 info->reload_completed = reload_completed;
24547
24548 /* Select which calling sequence. */
24549 info->abi = DEFAULT_ABI;
24550
24551 /* Calculate which registers need to be saved & save area size. */
24552 info->first_gp_reg_save = first_reg_to_save ();
24553 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24554 even if it currently looks like we won't. Reload may need it to
24555 get at a constant; if so, it will have already created a constant
24556 pool entry for it. */
24557 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24558 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24559 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24560 && crtl->uses_const_pool
24561 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24562 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24563 else
24564 first_gp = info->first_gp_reg_save;
24565
24566 info->gp_size = reg_size * (32 - first_gp);
24567
24568 info->first_fp_reg_save = first_fp_reg_to_save ();
24569 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24570
24571 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24572 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24573 - info->first_altivec_reg_save);
24574
24575 /* Does this function call anything? */
24576 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24577
24578 /* Determine if we need to save the condition code registers. */
24579 if (save_reg_p (CR2_REGNO)
24580 || save_reg_p (CR3_REGNO)
24581 || save_reg_p (CR4_REGNO))
24582 {
24583 info->cr_save_p = 1;
24584 if (DEFAULT_ABI == ABI_V4)
24585 info->cr_size = reg_size;
24586 }
24587
24588 /* If the current function calls __builtin_eh_return, then we need
24589 to allocate stack space for registers that will hold data for
24590 the exception handler. */
24591 if (crtl->calls_eh_return)
24592 {
24593 unsigned int i;
24594 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24595 continue;
24596
24597 ehrd_size = i * UNITS_PER_WORD;
24598 }
24599 else
24600 ehrd_size = 0;
24601
24602 /* In the ELFv2 ABI, we also need to allocate space for separate
24603 CR field save areas if the function calls __builtin_eh_return. */
24604 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24605 {
24606 /* This hard-codes that we have three call-saved CR fields. */
24607 ehcr_size = 3 * reg_size;
24608 /* We do *not* use the regular CR save mechanism. */
24609 info->cr_save_p = 0;
24610 }
24611 else
24612 ehcr_size = 0;
24613
24614 /* Determine various sizes. */
24615 info->reg_size = reg_size;
24616 info->fixed_size = RS6000_SAVE_AREA;
24617 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24618 if (cfun->calls_alloca)
24619 info->parm_size =
24620 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24621 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24622 else
24623 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24624 TARGET_ALTIVEC ? 16 : 8);
24625 if (FRAME_GROWS_DOWNWARD)
24626 info->vars_size
24627 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24628 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24629 - (info->fixed_size + info->vars_size + info->parm_size);
24630
24631 if (TARGET_ALTIVEC_ABI)
24632 info->vrsave_mask = compute_vrsave_mask ();
24633
24634 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24635 info->vrsave_size = 4;
24636
24637 compute_save_world_info (info);
24638
24639 /* Calculate the offsets. */
24640 switch (DEFAULT_ABI)
24641 {
24642 case ABI_NONE:
24643 default:
24644 gcc_unreachable ();
24645
24646 case ABI_AIX:
24647 case ABI_ELFv2:
24648 case ABI_DARWIN:
24649 info->fp_save_offset = -info->fp_size;
24650 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24651
24652 if (TARGET_ALTIVEC_ABI)
24653 {
24654 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24655
24656 /* Align stack so vector save area is on a quadword boundary.
24657 The padding goes above the vectors. */
24658 if (info->altivec_size != 0)
24659 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24660
24661 info->altivec_save_offset = info->vrsave_save_offset
24662 - info->altivec_padding_size
24663 - info->altivec_size;
24664 gcc_assert (info->altivec_size == 0
24665 || info->altivec_save_offset % 16 == 0);
24666
24667 /* Adjust for AltiVec case. */
24668 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24669 }
24670 else
24671 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24672
24673 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24674 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24675       info->lr_save_offset = 2 * reg_size;
24676 break;
24677
24678 case ABI_V4:
24679 info->fp_save_offset = -info->fp_size;
24680 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24681 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24682
24683 if (TARGET_ALTIVEC_ABI)
24684 {
24685 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24686
24687 /* Align stack so vector save area is on a quadword boundary. */
24688 if (info->altivec_size != 0)
24689 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24690
24691 info->altivec_save_offset = info->vrsave_save_offset
24692 - info->altivec_padding_size
24693 - info->altivec_size;
24694
24695 /* Adjust for AltiVec case. */
24696 info->ehrd_offset = info->altivec_save_offset;
24697 }
24698 else
24699 info->ehrd_offset = info->cr_save_offset;
24700
24701 info->ehrd_offset -= ehrd_size;
24702 info->lr_save_offset = reg_size;
24703 }
24704
24705 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24706 info->save_size = RS6000_ALIGN (info->fp_size
24707 + info->gp_size
24708 + info->altivec_size
24709 + info->altivec_padding_size
24710 + ehrd_size
24711 + ehcr_size
24712 + info->cr_size
24713 + info->vrsave_size,
24714 save_align);
24715
24716 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24717
24718 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24719 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24720
24721 /* Determine if we need to save the link register. */
24722 if (info->calls_p
24723 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24724 && crtl->profile
24725 && !TARGET_PROFILE_KERNEL)
24726 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24727 #ifdef TARGET_RELOCATABLE
24728 || (DEFAULT_ABI == ABI_V4
24729 && (TARGET_RELOCATABLE || flag_pic > 1)
24730 && !constant_pool_empty_p ())
24731 #endif
24732 || rs6000_ra_ever_killed ())
24733 info->lr_save_p = 1;
24734
24735 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24736 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24737 && call_used_regs[STATIC_CHAIN_REGNUM]);
24738 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24739
24740 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24741 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24742 || !(info->savres_strategy & SAVE_INLINE_VRS)
24743 || !(info->savres_strategy & REST_INLINE_GPRS)
24744 || !(info->savres_strategy & REST_INLINE_FPRS)
24745 || !(info->savres_strategy & REST_INLINE_VRS))
24746 info->lr_save_p = 1;
24747
24748 if (info->lr_save_p)
24749 df_set_regs_ever_live (LR_REGNO, true);
24750
24751 /* Determine if we need to allocate any stack frame:
24752
24753 For AIX we need to push the stack if a frame pointer is needed
24754 (because the stack might be dynamically adjusted), if we are
24755 debugging, if we make calls, or if the sum of fp_save, gp_save,
24756      and local variables is more than the space needed to save all
24757      non-volatile registers: 32-bit, 18*8 + 19*4 = 220 bytes; 64-bit,
24758      18*8 + 18*8 = 288 bytes (GPR13 reserved).
24759
24760 For V.4 we don't have the stack cushion that AIX uses, but assume
24761 that the debugger can handle stackless frames. */
24762
24763 if (info->calls_p)
24764 info->push_p = 1;
24765
24766 else if (DEFAULT_ABI == ABI_V4)
24767 info->push_p = non_fixed_size != 0;
24768
24769 else if (frame_pointer_needed)
24770 info->push_p = 1;
24771
24772 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24773 info->push_p = 1;
24774
24775 else
24776 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24777
24778 return info;
24779 }
24780
24781 static void
24782 debug_stack_info (rs6000_stack_t *info)
24783 {
24784 const char *abi_string;
24785
24786 if (! info)
24787 info = rs6000_stack_info ();
24788
24789 fprintf (stderr, "\nStack information for function %s:\n",
24790 ((current_function_decl && DECL_NAME (current_function_decl))
24791 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24792 : "<unknown>"));
24793
24794 switch (info->abi)
24795 {
24796 default: abi_string = "Unknown"; break;
24797 case ABI_NONE: abi_string = "NONE"; break;
24798 case ABI_AIX: abi_string = "AIX"; break;
24799 case ABI_ELFv2: abi_string = "ELFv2"; break;
24800 case ABI_DARWIN: abi_string = "Darwin"; break;
24801 case ABI_V4: abi_string = "V.4"; break;
24802 }
24803
24804 fprintf (stderr, "\tABI = %5s\n", abi_string);
24805
24806 if (TARGET_ALTIVEC_ABI)
24807 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24808
24809 if (info->first_gp_reg_save != 32)
24810 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24811
24812 if (info->first_fp_reg_save != 64)
24813 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24814
24815 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24816 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24817 info->first_altivec_reg_save);
24818
24819 if (info->lr_save_p)
24820 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24821
24822 if (info->cr_save_p)
24823 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24824
24825 if (info->vrsave_mask)
24826 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24827
24828 if (info->push_p)
24829 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24830
24831 if (info->calls_p)
24832 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24833
24834 if (info->gp_size)
24835 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24836
24837 if (info->fp_size)
24838 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24839
24840 if (info->altivec_size)
24841 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24842 info->altivec_save_offset);
24843
24844 if (info->vrsave_size)
24845 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24846 info->vrsave_save_offset);
24847
24848 if (info->lr_save_p)
24849 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24850
24851 if (info->cr_save_p)
24852 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24853
24854 if (info->varargs_save_offset)
24855 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24856
24857 if (info->total_size)
24858 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24859 info->total_size);
24860
24861 if (info->vars_size)
24862 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24863 info->vars_size);
24864
24865 if (info->parm_size)
24866 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24867
24868 if (info->fixed_size)
24869 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24870
24871 if (info->gp_size)
24872 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24873
24874 if (info->fp_size)
24875 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24876
24877 if (info->altivec_size)
24878 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24879
24880 if (info->vrsave_size)
24881 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24882
24883 if (info->altivec_padding_size)
24884 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24885 info->altivec_padding_size);
24886
24887 if (info->cr_size)
24888 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24889
24890 if (info->save_size)
24891 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24892
24893 if (info->reg_size != 4)
24894 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24895
24896 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24897
24898 fprintf (stderr, "\n");
24899 }
24900
24901 rtx
24902 rs6000_return_addr (int count, rtx frame)
24903 {
24904 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24905 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24906 if (count != 0
24907 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24908 {
24909 cfun->machine->ra_needs_full_frame = 1;
24910
24911 if (count == 0)
24912 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24913 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24914 frame = stack_pointer_rtx;
24915 rtx prev_frame_addr = memory_address (Pmode, frame);
24916 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24917 rtx lr_save_off = plus_constant (Pmode,
24918 prev_frame, RETURN_ADDRESS_OFFSET);
24919 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24920 return gen_rtx_MEM (Pmode, lr_save_addr);
24921 }
24922
24923 cfun->machine->ra_need_lr = 1;
24924 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24925 }
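
/* Illustrative sketch (not authoritative): for
   __builtin_return_address (1) the code above loads the caller's back
   chain from 0(r1), then loads the saved LR word at
   RETURN_ADDRESS_OFFSET from that frame; count == 0 without PIC instead
   recovers LR directly via get_hard_reg_initial_val.  */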
24926
24927 /* Say whether a function is a candidate for sibcall handling or not. */
24928
24929 static bool
24930 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24931 {
24932 tree fntype;
24933
24934 /* The sibcall epilogue may clobber the static chain register.
24935 ??? We could work harder and avoid that, but it's probably
24936 not worth the hassle in practice. */
24937 if (CALL_EXPR_STATIC_CHAIN (exp))
24938 return false;
24939
24940 if (decl)
24941 fntype = TREE_TYPE (decl);
24942 else
24943 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24944
24945 /* We can't do it if the called function has more vector parameters
24946 than the current function; there's nowhere to put the VRsave code. */
24947 if (TARGET_ALTIVEC_ABI
24948 && TARGET_ALTIVEC_VRSAVE
24949 && !(decl && decl == current_function_decl))
24950 {
24951 function_args_iterator args_iter;
24952 tree type;
24953 int nvreg = 0;
24954
24955 /* Functions with vector parameters are required to have a
24956 prototype, so the argument type info must be available
24957 here. */
24958       FOREACH_FUNCTION_ARGS (fntype, type, args_iter)
24959 if (TREE_CODE (type) == VECTOR_TYPE
24960 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24961 nvreg++;
24962
24963       FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
24964 if (TREE_CODE (type) == VECTOR_TYPE
24965 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24966 nvreg--;
24967
24968 if (nvreg > 0)
24969 return false;
24970 }
24971
24972 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24973      functions, because the callee may have a different TOC pointer from
24974 the caller and there's no way to ensure we restore the TOC when
24975 we return. With the secure-plt SYSV ABI we can't make non-local
24976 calls when -fpic/PIC because the plt call stubs use r30. */
24977 if (DEFAULT_ABI == ABI_DARWIN
24978 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24979 && decl
24980 && !DECL_EXTERNAL (decl)
24981 && !DECL_WEAK (decl)
24982 && (*targetm.binds_local_p) (decl))
24983 || (DEFAULT_ABI == ABI_V4
24984 && (!TARGET_SECURE_PLT
24985 || !flag_pic
24986 || (decl
24987 && (*targetm.binds_local_p) (decl)))))
24988 {
24989 tree attr_list = TYPE_ATTRIBUTES (fntype);
24990
24991 if (!lookup_attribute ("longcall", attr_list)
24992 || lookup_attribute ("shortcall", attr_list))
24993 return true;
24994 }
24995
24996 return false;
24997 }
24998
24999 static int
25000 rs6000_ra_ever_killed (void)
25001 {
25002 rtx_insn *top;
25003 rtx reg;
25004 rtx_insn *insn;
25005
25006 if (cfun->is_thunk)
25007 return 0;
25008
25009 if (cfun->machine->lr_save_state)
25010 return cfun->machine->lr_save_state - 1;
25011
25012 /* regs_ever_live has LR marked as used if any sibcalls are present,
25013 but this should not force saving and restoring in the
25014 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25015 clobbers LR, so that is inappropriate. */
25016
25017 /* Also, the prologue can generate a store into LR that
25018 doesn't really count, like this:
25019
25020 move LR->R0
25021 bcl to set PIC register
25022 move LR->R31
25023 move R0->LR
25024
25025 When we're called from the epilogue, we need to avoid counting
25026 this as a store. */
25027
25028 push_topmost_sequence ();
25029 top = get_insns ();
25030 pop_topmost_sequence ();
25031 reg = gen_rtx_REG (Pmode, LR_REGNO);
25032
25033 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25034 {
25035 if (INSN_P (insn))
25036 {
25037 if (CALL_P (insn))
25038 {
25039 if (!SIBLING_CALL_P (insn))
25040 return 1;
25041 }
25042 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25043 return 1;
25044 else if (set_of (reg, insn) != NULL_RTX
25045 && !prologue_epilogue_contains (insn))
25046 return 1;
25047 }
25048 }
25049 return 0;
25050 }
25051 \f
25052 /* Emit instructions needed to load the TOC register.
25053    This is needed only when TARGET_TOC and TARGET_MINIMAL_TOC are set and
25054    there is a constant pool, or for SVR4 -fpic.  */
25055
25056 void
25057 rs6000_emit_load_toc_table (int fromprolog)
25058 {
25059 rtx dest;
25060 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25061
25062 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25063 {
25064 char buf[30];
25065 rtx lab, tmp1, tmp2, got;
25066
25067 lab = gen_label_rtx ();
25068 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25069 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25070 if (flag_pic == 2)
25071 {
25072 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25073 need_toc_init = 1;
25074 }
25075 else
25076 got = rs6000_got_sym ();
25077 tmp1 = tmp2 = dest;
25078 if (!fromprolog)
25079 {
25080 tmp1 = gen_reg_rtx (Pmode);
25081 tmp2 = gen_reg_rtx (Pmode);
25082 }
25083 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25084 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25085 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25086 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25087 }
25088 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25089 {
25090 emit_insn (gen_load_toc_v4_pic_si ());
25091 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25092 }
25093 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25094 {
25095 char buf[30];
25096 rtx temp0 = (fromprolog
25097 ? gen_rtx_REG (Pmode, 0)
25098 : gen_reg_rtx (Pmode));
25099
25100 if (fromprolog)
25101 {
25102 rtx symF, symL;
25103
25104 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25105 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25106
25107 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25108 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25109
25110 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25111 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25112 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25113 }
25114 else
25115 {
25116 rtx tocsym, lab;
25117
25118 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25119 need_toc_init = 1;
25120 lab = gen_label_rtx ();
25121 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25122 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25123 if (TARGET_LINK_STACK)
25124 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25125 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25126 }
25127 emit_insn (gen_addsi3 (dest, temp0, dest));
25128 }
25129 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25130 {
25131 /* This is for AIX code running in non-PIC ELF32. */
25132 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25133
25134 need_toc_init = 1;
25135 emit_insn (gen_elf_high (dest, realsym));
25136 emit_insn (gen_elf_low (dest, dest, realsym));
25137 }
25138 else
25139 {
25140 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25141
25142 if (TARGET_32BIT)
25143 emit_insn (gen_load_toc_aix_si (dest));
25144 else
25145 emit_insn (gen_load_toc_aix_di (dest));
25146 }
25147 }
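
/* Rough sketch of the secure-plt -fPIC V.4 sequence emitted above (an
   approximation of the expected assembly, not a guarantee), with r30 as
   the PIC offset register:

     bcl 20,31,.L1
 .L1: mflr 30
     addis 30,30,(.LCTOC1-.L1)@ha
     addi  30,30,(.LCTOC1-.L1)@l  */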
25148
25149 /* Emit instructions to restore the link register after determining where
25150 its value has been stored. */
25151
25152 void
25153 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25154 {
25155 rs6000_stack_t *info = rs6000_stack_info ();
25156 rtx operands[2];
25157
25158 operands[0] = source;
25159 operands[1] = scratch;
25160
25161 if (info->lr_save_p)
25162 {
25163 rtx frame_rtx = stack_pointer_rtx;
25164 HOST_WIDE_INT sp_offset = 0;
25165 rtx tmp;
25166
25167 if (frame_pointer_needed
25168 || cfun->calls_alloca
25169 || info->total_size > 32767)
25170 {
25171 tmp = gen_frame_mem (Pmode, frame_rtx);
25172 emit_move_insn (operands[1], tmp);
25173 frame_rtx = operands[1];
25174 }
25175 else if (info->push_p)
25176 sp_offset = info->total_size;
25177
25178 tmp = plus_constant (Pmode, frame_rtx,
25179 info->lr_save_offset + sp_offset);
25180 tmp = gen_frame_mem (Pmode, tmp);
25181 emit_move_insn (tmp, operands[0]);
25182 }
25183 else
25184 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25185
25186 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25187 state of lr_save_p so any change from here on would be a bug. In
25188 particular, stop rs6000_ra_ever_killed from considering the SET
25189 of lr we may have added just above. */
25190 cfun->machine->lr_save_state = info->lr_save_p + 1;
25191 }
25192
25193 static GTY(()) alias_set_type set = -1;
25194
25195 alias_set_type
25196 get_TOC_alias_set (void)
25197 {
25198 if (set == -1)
25199 set = new_alias_set ();
25200 return set;
25201 }
25202
25203 /* This returns nonzero if the current function uses the TOC. This is
25204 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25205 is generated by the ABI_V4 load_toc_* patterns.
25206 Return 2 instead of 1 if the load_toc_* pattern is in the function
25207 partition that doesn't start the function. */
25208 #if TARGET_ELF
25209 static int
25210 uses_TOC (void)
25211 {
25212 rtx_insn *insn;
25213 int ret = 1;
25214
25215 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25216 {
25217 if (INSN_P (insn))
25218 {
25219 rtx pat = PATTERN (insn);
25220 int i;
25221
25222 if (GET_CODE (pat) == PARALLEL)
25223 for (i = 0; i < XVECLEN (pat, 0); i++)
25224 {
25225 rtx sub = XVECEXP (pat, 0, i);
25226 if (GET_CODE (sub) == USE)
25227 {
25228 sub = XEXP (sub, 0);
25229 if (GET_CODE (sub) == UNSPEC
25230 && XINT (sub, 1) == UNSPEC_TOC)
25231 return ret;
25232 }
25233 }
25234 }
25235 else if (crtl->has_bb_partition
25236 && NOTE_P (insn)
25237 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25238 ret = 2;
25239 }
25240 return 0;
25241 }
25242 #endif
25243
25244 rtx
25245 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25246 {
25247 rtx tocrel, tocreg, hi;
25248
25249 if (TARGET_DEBUG_ADDR)
25250 {
25251 if (GET_CODE (symbol) == SYMBOL_REF)
25252 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25253 XSTR (symbol, 0));
25254 else
25255 {
25256 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25257 GET_RTX_NAME (GET_CODE (symbol)));
25258 debug_rtx (symbol);
25259 }
25260 }
25261
25262 if (!can_create_pseudo_p ())
25263 df_set_regs_ever_live (TOC_REGISTER, true);
25264
25265 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25266 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25267 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25268 return tocrel;
25269
25270 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25271 if (largetoc_reg != NULL)
25272 {
25273 emit_move_insn (largetoc_reg, hi);
25274 hi = largetoc_reg;
25275 }
25276 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25277 }
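
/* A sketch of the expected assembly (an assumption about how these RTL
   forms usually print, not a guarantee): with -mcmodel=small the
   UNSPEC_TOCREL becomes a single "sym@toc(2)" style reference, while the
   HIGH/LO_SUM pair used for the larger code models typically prints as

     addis rX,2,sym@toc@ha
     addi  rY,rX,sym@toc@l  */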
25278
25279 /* Issue assembly directives that create a reference to the given DWARF
25280 FRAME_TABLE_LABEL from the current function section. */
25281 void
25282 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25283 {
25284 fprintf (asm_out_file, "\t.ref %s\n",
25285 (* targetm.strip_name_encoding) (frame_table_label));
25286 }
25287 \f
25288 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25289 and the change to the stack pointer. */
25290
25291 static void
25292 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25293 {
25294 rtvec p;
25295 int i;
25296 rtx regs[3];
25297
25298 i = 0;
25299 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25300 if (hard_frame_needed)
25301 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25302 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25303 || (hard_frame_needed
25304 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25305 regs[i++] = fp;
25306
25307 p = rtvec_alloc (i);
25308 while (--i >= 0)
25309 {
25310 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25311 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25312 }
25313
25314 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25315 }
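
/* A note on intent, not additional semantics: the BLKmode frame MEMs
   above act as artificial stores covering the whole frame alias set, so
   the scheduler and alias analysis cannot move frame loads or stores
   across the stack pointer adjustment.  */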
25316
25317 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25318 and set the appropriate attributes for the generated insn. Return the
25319 first insn which adjusts the stack pointer or the last insn before
25320 the stack adjustment loop.
25321
25322 SIZE_INT is used to create the CFI note for the allocation.
25323
25324 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25325 since stacks grow to lower addresses its runtime value is -SIZE_INT.
25326
25327 ORIG_SP contains the backchain value that must be stored at *sp. */
25328
25329 static rtx_insn *
25330 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25331 {
25332 rtx_insn *insn;
25333
25334 rtx size_rtx = GEN_INT (-size_int);
25335 if (size_int > 32767)
25336 {
25337 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25338 /* Need a note here so that try_split doesn't get confused. */
25339 if (get_last_insn () == NULL_RTX)
25340 emit_note (NOTE_INSN_DELETED);
25341 insn = emit_move_insn (tmp_reg, size_rtx);
25342 try_split (PATTERN (insn), insn, 0);
25343 size_rtx = tmp_reg;
25344 }
25345
25346 if (Pmode == SImode)
25347 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25348 stack_pointer_rtx,
25349 size_rtx,
25350 orig_sp));
25351 else
25352 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25353 stack_pointer_rtx,
25354 size_rtx,
25355 orig_sp));
25356 rtx par = PATTERN (insn);
25357 gcc_assert (GET_CODE (par) == PARALLEL);
25358 rtx set = XVECEXP (par, 0, 0);
25359 gcc_assert (GET_CODE (set) == SET);
25360 rtx mem = SET_DEST (set);
25361 gcc_assert (MEM_P (mem));
25362 MEM_NOTRAP_P (mem) = 1;
25363 set_mem_alias_set (mem, get_frame_alias_set ());
25364
25365 RTX_FRAME_RELATED_P (insn) = 1;
25366 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25367 gen_rtx_SET (stack_pointer_rtx,
25368 gen_rtx_PLUS (Pmode,
25369 stack_pointer_rtx,
25370 GEN_INT (-size_int))));
25371
25372 /* Emit a blockage to ensure the allocation/probing insns are
25373 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25374 note for similar reasons. */
25375 if (flag_stack_clash_protection)
25376 {
25377 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25378 emit_insn (gen_blockage ());
25379 }
25380
25381 return insn;
25382 }
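
/* For a 64-bit target the store-with-update above typically assembles to
   "stdu r1,-SIZE(r1)" when SIZE fits in a signed 16-bit immediate, or to
   a load of -SIZE into r0 followed by "stdux r1,r1,r0" otherwise (an
   illustrative sketch; the exact output depends on the insn patterns).  */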
25383
25384 static HOST_WIDE_INT
25385 get_stack_clash_protection_probe_interval (void)
25386 {
25387 return (HOST_WIDE_INT_1U
25388 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25389 }
25390
25391 static HOST_WIDE_INT
25392 get_stack_clash_protection_guard_size (void)
25393 {
25394 return (HOST_WIDE_INT_1U
25395 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25396 }
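
/* Both parameters default to 12 (an assumption; see params.def), giving
   a 4 KiB probe interval and guard size unless overridden with
   --param.  */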
25397
25398 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25399 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25400
25401 COPY_REG, if non-null, should contain a copy of the original
25402 stack pointer at exit from this function.
25403
25404    This is subtly different from the Ada probing in that it tries hard to
25405 prevent attacks that jump the stack guard. Thus it is never allowed to
25406 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25407 space without a suitable probe. */
25408 static rtx_insn *
25409 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25410 rtx copy_reg)
25411 {
25412 rtx orig_sp = copy_reg;
25413
25414 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25415
25416 /* Round the size down to a multiple of PROBE_INTERVAL. */
25417 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25418
25419   /* If explicitly requested,
25420        or the rounded size is not the same as the original size,
25421        or the rounded size is greater than a page,
25422      then we will need a copy of the original stack pointer.  */
25423 if (rounded_size != orig_size
25424 || rounded_size > probe_interval
25425 || copy_reg)
25426 {
25427 /* If the caller did not request a copy of the incoming stack
25428 pointer, then we use r0 to hold the copy. */
25429 if (!copy_reg)
25430 orig_sp = gen_rtx_REG (Pmode, 0);
25431 emit_move_insn (orig_sp, stack_pointer_rtx);
25432 }
25433
25434   /* There are three cases here.
25435
25436 One is a single probe which is the most common and most efficiently
25437 implemented as it does not have to have a copy of the original
25438 stack pointer if there are no residuals.
25439
25440 Second is unrolled allocation/probes which we use if there's just
25441 a few of them. It needs to save the original stack pointer into a
25442 temporary for use as a source register in the allocation/probe.
25443
25444 Last is a loop. This is the most uncommon case and least efficient. */
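  /* Worked example (assuming the default 4 KiB probe interval): a
     4096-byte allocation takes the single-probe path; a 20000-byte
     request rounds down to 16384 and emits four unrolled
     store-with-update probes plus a 3616-byte residual allocation; a
     65536-byte request falls through to the loop.  */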
25445 rtx_insn *retval = NULL;
25446 if (rounded_size == probe_interval)
25447 {
25448 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25449
25450 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25451 }
25452 else if (rounded_size <= 8 * probe_interval)
25453 {
25454 /* The ABI requires using the store with update insns to allocate
25455 	 space and store the backchain into the stack.
25456
25457 So we save the current stack pointer into a temporary, then
25458 emit the store-with-update insns to store the saved stack pointer
25459 into the right location in each new page. */
25460 for (int i = 0; i < rounded_size; i += probe_interval)
25461 {
25462 rtx_insn *insn
25463 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25464
25465 /* Save the first stack adjustment in RETVAL. */
25466 if (i == 0)
25467 retval = insn;
25468 }
25469
25470 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25471 }
25472 else
25473 {
25474 /* Compute the ending address. */
25475 rtx end_addr
25476 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25477 rtx rs = GEN_INT (-rounded_size);
25478 rtx_insn *insn;
25479 if (add_operand (rs, Pmode))
25480 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25481 else
25482 {
25483 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25484 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25485 stack_pointer_rtx));
25486 /* Describe the effect of INSN to the CFI engine. */
25487 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25488 gen_rtx_SET (end_addr,
25489 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25490 rs)));
25491 }
25492 RTX_FRAME_RELATED_P (insn) = 1;
25493
25494 /* Emit the loop. */
25495 if (TARGET_64BIT)
25496 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25497 stack_pointer_rtx, orig_sp,
25498 end_addr));
25499 else
25500 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25501 stack_pointer_rtx, orig_sp,
25502 end_addr));
25503 RTX_FRAME_RELATED_P (retval) = 1;
25504 /* Describe the effect of INSN to the CFI engine. */
25505 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25506 gen_rtx_SET (stack_pointer_rtx, end_addr));
25507
25508 /* Emit a blockage to ensure the allocation/probing insns are
25509 not optimized, combined, removed, etc. Other cases handle this
25510 within their call to rs6000_emit_allocate_stack_1. */
25511 emit_insn (gen_blockage ());
25512
25513 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25514 }
25515
25516 if (orig_size != rounded_size)
25517 {
25518 /* Allocate (and implicitly probe) any residual space. */
25519 HOST_WIDE_INT residual = orig_size - rounded_size;
25520
25521 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25522
25523 /* If the residual was the only allocation, then we can return the
25524 allocating insn. */
25525 if (!retval)
25526 retval = insn;
25527 }
25528
25529 return retval;
25530 }
25531
25532 /* Emit the correct code for allocating stack space, as insns.
25533 If COPY_REG, make sure a copy of the old frame is left there.
25534 The generated code may use hard register 0 as a temporary. */
25535
25536 static rtx_insn *
25537 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25538 {
25539 rtx_insn *insn;
25540 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25541 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25542 rtx todec = gen_int_mode (-size, Pmode);
25543
25544 if (INTVAL (todec) != -size)
25545 {
25546 warning (0, "stack frame too large");
25547 emit_insn (gen_trap ());
25548 return 0;
25549 }
25550
25551 if (crtl->limit_stack)
25552 {
25553 if (REG_P (stack_limit_rtx)
25554 && REGNO (stack_limit_rtx) > 1
25555 && REGNO (stack_limit_rtx) <= 31)
25556 {
25557 rtx_insn *insn
25558 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25559 gcc_assert (insn);
25560 emit_insn (insn);
25561 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25562 }
25563 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25564 && TARGET_32BIT
25565 && DEFAULT_ABI == ABI_V4
25566 && !flag_pic)
25567 {
25568 rtx toload = gen_rtx_CONST (VOIDmode,
25569 gen_rtx_PLUS (Pmode,
25570 stack_limit_rtx,
25571 GEN_INT (size)));
25572
25573 emit_insn (gen_elf_high (tmp_reg, toload));
25574 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25575 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25576 const0_rtx));
25577 }
25578 else
25579 warning (0, "stack limit expression is not supported");
25580 }
25581
25582 if (flag_stack_clash_protection)
25583 {
25584 if (size < get_stack_clash_protection_guard_size ())
25585 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25586 else
25587 {
25588 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25589 copy_reg);
25590
25591 /* If we asked for a copy with an offset, then we still need to add
25592 in the offset. */
25593 if (copy_reg && copy_off)
25594 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25595 return insn;
25596 }
25597 }
25598
25599 if (copy_reg)
25600 {
25601 if (copy_off != 0)
25602 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25603 else
25604 emit_move_insn (copy_reg, stack_reg);
25605 }
25606
25607 /* Allocate the stack space. The store-with-update insn that performs
25608 the allocation and stores the backchain, and the alias set and
25609 attributes on its MEM, are now handled inside
25610 rs6000_emit_allocate_stack_1. */
25611 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25612 return insn;
25613 }
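
/* A minimal sketch (illustrative C, not compiler code) of the "stack
   frame too large" test above: gen_int_mode truncates -SIZE to Pmode,
   so on a 32-bit target

     long long size = 1LL << 33;
     int32_t todec = (int32_t) -size;   // truncated as gen_int_mode does
     if ((long long) todec != -size)    // mismatch: frame too large
       warn_and_trap ();                // hypothetical stand-in for the
                                        // warning plus gen_trap above

   rejects any frame whose negated size does not fit the pointer mode.  */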
25614
25615 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25616
25617 #if PROBE_INTERVAL > 32768
25618 #error Cannot use indexed addressing mode for stack probing
25619 #endif
25620
25621 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25622 inclusive. These are offsets from the current stack pointer. */
25623
25624 static void
25625 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25626 {
25627 /* See if we have a constant small number of probes to generate. If so,
25628 that's the easy case. */
25629 if (first + size <= 32768)
25630 {
25631 HOST_WIDE_INT i;
25632
25633 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25634 it exceeds SIZE. If only one probe is needed, this will not
25635 generate any code. Then probe at FIRST + SIZE. */
25636 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25637 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25638 -(first + i)));
25639
25640 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25641 -(first + size)));
25642 }
25643
25644 /* Otherwise, do the same as above, but in a loop. Note that we must be
25645 extra careful with variables wrapping around because we might be at
25646 the very top (or the very bottom) of the address space and we have
25647 to be able to handle this case properly; in particular, we use an
25648 equality test for the loop condition. */
25649 else
25650 {
25651 HOST_WIDE_INT rounded_size;
25652 rtx r12 = gen_rtx_REG (Pmode, 12);
25653 rtx r0 = gen_rtx_REG (Pmode, 0);
25654
25655 /* Sanity check for the addressing mode we're going to use. */
25656 gcc_assert (first <= 32768);
25657
25658 /* Step 1: round SIZE to the previous multiple of the interval. */
25659
25660 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25661
25662
25663 /* Step 2: compute initial and final value of the loop counter. */
25664
25665 /* TEST_ADDR = SP + FIRST. */
25666 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25667 -first)));
25668
25669 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25670 if (rounded_size > 32768)
25671 {
25672 emit_move_insn (r0, GEN_INT (-rounded_size));
25673 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25674 }
25675 else
25676 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25677 -rounded_size)));
25678
25679
25680 /* Step 3: the loop
25681
25682 do
25683 {
25684 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25685 probe at TEST_ADDR
25686 }
25687 while (TEST_ADDR != LAST_ADDR)
25688
25689 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25690 until it is equal to ROUNDED_SIZE. */
25691
25692 if (TARGET_64BIT)
25693 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25694 else
25695 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25696
25697
25698 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25699 that SIZE is equal to ROUNDED_SIZE. */
25700
25701 if (size != rounded_size)
25702 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25703 }
25704 }
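
/* Worked example (assumed numbers, for exposition): with a 4096-byte
   PROBE_INTERVAL, FIRST = 16 and SIZE = 40000, rounded_size is 36864.
   The loop probes at sp-4112, sp-8208, ..., sp-36880 (= r0), and since
   SIZE != rounded_size the final probe lands at
   r12 + (36864 - 40000) = sp - 40016 = sp - (FIRST + SIZE).  */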
25705
25706 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25707 addresses, not offsets. */
25708
25709 static const char *
25710 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25711 {
25712 static int labelno = 0;
25713 char loop_lab[32];
25714 rtx xops[2];
25715
25716 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25717
25718 /* Loop. */
25719 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25720
25721 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25722 xops[0] = reg1;
25723 xops[1] = GEN_INT (-PROBE_INTERVAL);
25724 output_asm_insn ("addi %0,%0,%1", xops);
25725
25726 /* Probe at TEST_ADDR. */
25727 xops[1] = gen_rtx_REG (Pmode, 0);
25728 output_asm_insn ("stw %1,0(%0)", xops);
25729
25730 /* Test if TEST_ADDR == LAST_ADDR. */
25731 xops[1] = reg2;
25732 if (TARGET_64BIT)
25733 output_asm_insn ("cmpd 0,%0,%1", xops);
25734 else
25735 output_asm_insn ("cmpw 0,%0,%1", xops);
25736
25737 /* Branch. */
25738 fputs ("\tbne 0,", asm_out_file);
25739 assemble_name_raw (asm_out_file, loop_lab);
25740 fputc ('\n', asm_out_file);
25741
25742 return "";
25743 }
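
/* The sequence printed above comes out roughly as (64-bit sketch,
   assuming the registers chosen by rs6000_emit_probe_stack_range and
   the default 4096-byte PROBE_INTERVAL):

   .LPSRL0:
	addi 12,12,-4096
	stw 0,0(12)
	cmpd 0,12,0
	bne 0,.LPSRL0
 */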
25744
25745 /* This function is called when rs6000_frame_related is processing
25746 SETs within a PARALLEL, and returns whether the REGNO save ought to
25747 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25748 for out-of-line register save functions, store multiple, and the
25749 Darwin world_save. They may contain registers that don't really
25750 need saving. */
25751
25752 static bool
25753 interesting_frame_related_regno (unsigned int regno)
25754 {
25755 /* Saves apparently of r0 are actually saving LR. It doesn't make
25756 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25757 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25758 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25759 as frame related. */
25760 if (regno == 0)
25761 return true;
25762 /* If we see CR2 then we are here on a Darwin world save. Saves of
25763 CR2 signify the whole CR is being saved. This is a long-standing
25764 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25765 that CR needs to be saved. */
25766 if (regno == CR2_REGNO)
25767 return true;
25768 /* Omit frame info for any user-defined global regs. If frame info
25769 is supplied for them, frame unwinding will restore a user reg.
25770 Also omit frame info for any reg we don't need to save, as that
25771 bloats frame info and can cause problems with shrink wrapping.
25772 Since global regs won't be seen as needing to be saved, both of
25773 these conditions are covered by save_reg_p. */
25774 return save_reg_p (regno);
25775 }
25776
25777 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25778 addresses, not offsets.
25779
25780 REG2 contains the backchain that must be stored into *sp at each allocation.
25781
25782 This is subtly different from the Ada probing above in that it tries hard
25783 to prevent attacks that jump the stack guard. Thus, it is never allowed
25784 to allocate more than PROBE_INTERVAL bytes of stack space without a
25785 suitable probe. */
25786
25787 static const char *
25788 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25789 {
25790 static int labelno = 0;
25791 char loop_lab[32];
25792 rtx xops[3];
25793
25794 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25795
25796 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25797
25798 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25799
25800 /* This allocates and probes. */
25801 xops[0] = reg1;
25802 xops[1] = reg2;
25803 xops[2] = GEN_INT (-probe_interval);
25804 if (TARGET_64BIT)
25805 output_asm_insn ("stdu %1,%2(%0)", xops);
25806 else
25807 output_asm_insn ("stwu %1,%2(%0)", xops);
25808
25809 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25810 xops[0] = reg1;
25811 xops[1] = reg3;
25812 if (TARGET_64BIT)
25813 output_asm_insn ("cmpd 0,%0,%1", xops);
25814 else
25815 output_asm_insn ("cmpw 0,%0,%1", xops);
25816
25817 fputs ("\tbne 0,", asm_out_file);
25818 assemble_name_raw (asm_out_file, loop_lab);
25819 fputc ('\n', asm_out_file);
25820
25821 return "";
25822 }
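
/* A sketch of the 64-bit loop printed above (register assignment
   assumed: r1 = stack pointer, r0 = backchain value, r12 = final
   stack address; 4096-byte probe interval):

   .LPSRL1:
	stdu 0,-4096(1)
	cmpd 0,1,12
	bne 0,.LPSRL1

   Each stdu both allocates a page and stores the backchain into it,
   which is what prevents the loop from skipping a guard page.  */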
25823
25824 /* Wrapper around the output_probe_stack_range routines. */
25825 const char *
25826 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25827 {
25828 if (flag_stack_clash_protection)
25829 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25830 else
25831 return output_probe_stack_range_1 (reg1, reg3);
25832 }
25833
25834 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25835 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25836 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25837 deduce these equivalences by itself so it wasn't necessary to hold
25838 its hand so much. Don't be tempted to always supply d2_f_d_e with
25839 the actual cfa register, i.e. r31 when we are using a hard frame
25840 pointer. That fails when saving regs off r1, and sched moves the
25841 r31 setup past the reg saves. */
25842
25843 static rtx_insn *
25844 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25845 rtx reg2, rtx repl2)
25846 {
25847 rtx repl;
25848
25849 if (REGNO (reg) == STACK_POINTER_REGNUM)
25850 {
25851 gcc_checking_assert (val == 0);
25852 repl = NULL_RTX;
25853 }
25854 else
25855 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25856 GEN_INT (val));
25857
25858 rtx pat = PATTERN (insn);
25859 if (!repl && !reg2)
25860 {
25861 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25862 if (GET_CODE (pat) == PARALLEL)
25863 for (int i = 0; i < XVECLEN (pat, 0); i++)
25864 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25865 {
25866 rtx set = XVECEXP (pat, 0, i);
25867
25868 if (!REG_P (SET_SRC (set))
25869 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25870 RTX_FRAME_RELATED_P (set) = 1;
25871 }
25872 RTX_FRAME_RELATED_P (insn) = 1;
25873 return insn;
25874 }
25875
25876 /* We expect that 'pat' is either a SET or a PARALLEL containing
25877 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25878 are important so they all have to be marked RTX_FRAME_RELATED_P.
25879 Call simplify_replace_rtx on the SETs rather than the whole insn
25880 so as to leave the other stuff alone (for example USE of r12). */
25881
25882 set_used_flags (pat);
25883 if (GET_CODE (pat) == SET)
25884 {
25885 if (repl)
25886 pat = simplify_replace_rtx (pat, reg, repl);
25887 if (reg2)
25888 pat = simplify_replace_rtx (pat, reg2, repl2);
25889 }
25890 else if (GET_CODE (pat) == PARALLEL)
25891 {
25892 pat = shallow_copy_rtx (pat);
25893 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25894
25895 for (int i = 0; i < XVECLEN (pat, 0); i++)
25896 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25897 {
25898 rtx set = XVECEXP (pat, 0, i);
25899
25900 if (repl)
25901 set = simplify_replace_rtx (set, reg, repl);
25902 if (reg2)
25903 set = simplify_replace_rtx (set, reg2, repl2);
25904 XVECEXP (pat, 0, i) = set;
25905
25906 if (!REG_P (SET_SRC (set))
25907 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25908 RTX_FRAME_RELATED_P (set) = 1;
25909 }
25910 }
25911 else
25912 gcc_unreachable ();
25913
25914 RTX_FRAME_RELATED_P (insn) = 1;
25915 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25916
25917 return insn;
25918 }
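
/* Example (illustrative only): if INSN stores r30 at offset 8 from
   r11, and r11 was set up as sp + 32, then

     rs6000_frame_related (insn, r11, 32, NULL_RTX, NULL_RTX);

   attaches a REG_FRAME_RELATED_EXPR note of the form
   (set (mem (plus (reg 1) (const_int 40))) (reg 30)), describing the
   save to the CFI engine in terms of the stack pointer.  */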
25919
25920 /* Returns an insn that has a vrsave set operation with the
25921 appropriate CLOBBERs. */
25922
25923 static rtx
25924 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25925 {
25926 int nclobs, i;
25927 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25928 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25929
25930 clobs[0]
25931 = gen_rtx_SET (vrsave,
25932 gen_rtx_UNSPEC_VOLATILE (SImode,
25933 gen_rtvec (2, reg, vrsave),
25934 UNSPECV_SET_VRSAVE));
25935
25936 nclobs = 1;
25937
25938 /* We need to clobber the registers in the mask so the scheduler
25939 does not move sets to VRSAVE before sets of AltiVec registers.
25940
25941 However, if the function receives nonlocal gotos, reload will set
25942 all call saved registers live. We will end up with:
25943
25944 (set (reg 999) (mem))
25945 (parallel [ (set (reg vrsave) (unspec blah))
25946 (clobber (reg 999))])
25947
25948 The clobber will cause the store into reg 999 to be dead, and
25949 flow will attempt to delete an epilogue insn. In this case, we
25950 need an unspec use/set of the register. */
25951
25952 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25953 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25954 {
25955 if (!epiloguep || call_used_regs [i])
25956 clobs[nclobs++] = gen_hard_reg_clobber (V4SImode, i);
25957 else
25958 {
25959 rtx reg = gen_rtx_REG (V4SImode, i);
25960
25961 clobs[nclobs++]
25962 = gen_rtx_SET (reg,
25963 gen_rtx_UNSPEC (V4SImode,
25964 gen_rtvec (1, reg), 27));
25965 }
25966 }
25967
25968 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25969
25970 for (i = 0; i < nclobs; ++i)
25971 XVECEXP (insn, 0, i) = clobs[i];
25972
25973 return insn;
25974 }
25975
25976 static rtx
25977 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25978 {
25979 rtx addr, mem;
25980
25981 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25982 mem = gen_frame_mem (GET_MODE (reg), addr);
25983 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25984 }
25985
25986 static rtx
25987 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25988 {
25989 return gen_frame_set (reg, frame_reg, offset, false);
25990 }
25991
25992 static rtx
25993 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25994 {
25995 return gen_frame_set (reg, frame_reg, offset, true);
25996 }
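
/* For instance (illustration): gen_frame_store (r0, sp, 16) on a
   64-bit target builds roughly
   (set (mem:DI (plus:DI (reg 1) (const_int 16))) (reg 0)),
   and gen_frame_load is the same SET with source and destination
   swapped.  */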
25997
25998 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25999 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
26000
26001 static rtx_insn *
26002 emit_frame_save (rtx frame_reg, machine_mode mode,
26003 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26004 {
26005 rtx reg;
26006
26007 /* AltiVec modes would need register indexed addressing; allow them here only when VSX offset addressing can be used instead. */
26008 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26009 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26010
26011 reg = gen_rtx_REG (mode, regno);
26012 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26013 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26014 NULL_RTX, NULL_RTX);
26015 }
26016
26017 /* Return an offset memory reference suitable for a frame store or
26018 load. */
26019
26020 static rtx
26021 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26022 {
26023 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26024 }
26025
26026 #ifndef TARGET_FIX_AND_CONTINUE
26027 #define TARGET_FIX_AND_CONTINUE 0
26028 #endif
26029
26030 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
26031 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26032 #define LAST_SAVRES_REGISTER 31
26033 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26034
26035 enum {
26036 SAVRES_LR = 0x1,
26037 SAVRES_SAVE = 0x2,
26038 SAVRES_REG = 0x0c,
26039 SAVRES_GPR = 0,
26040 SAVRES_FPR = 4,
26041 SAVRES_VR = 8
26042 };
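
/* For example (illustration): SAVRES_SAVE | SAVRES_FPR | SAVRES_LR
   (0x2 | 0x4 | 0x1 = 0x7) selects the FPR save routine that also
   saves LR, and (sel & SAVRES_REG) == SAVRES_GPR tests whether the
   GPR class is selected.  */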
26043
26044 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26045
26046 /* Temporary holding space for an out-of-line register save/restore
26047 routine name. */
26048 static char savres_routine_name[30];
26049
26050 /* Return the name for an out-of-line register save/restore routine.
26051 SEL encodes save vs. restore, the register class, and the LR variant. */
26052
26053 static char *
26054 rs6000_savres_routine_name (int regno, int sel)
26055 {
26056 const char *prefix = "";
26057 const char *suffix = "";
26058
26059 /* Different targets are supposed to define
26060 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26061 routine name could be defined with:
26062
26063 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26064
26065 This is a nice idea in theory, but in reality, things are
26066 complicated in several ways:
26067
26068 - ELF targets have save/restore routines for GPRs.
26069
26070 - PPC64 ELF targets have routines for save/restore of GPRs that
26071 differ in what they do with the link register, so having a set
26072 prefix doesn't work. (We only use one of the save routines at
26073 the moment, though.)
26074
26075 - PPC32 ELF targets have "exit" versions of the restore routines
26076 that restore the link register and can save some extra space.
26077 These require an extra suffix. (There are also "tail" versions
26078 of the restore routines and "GOT" versions of the save routines,
26079 but we don't generate those at present. Same problems apply,
26080 though.)
26081
26082 We deal with all this by synthesizing our own prefix/suffix and
26083 using that for the simple sprintf call shown above. */
26084 if (DEFAULT_ABI == ABI_V4)
26085 {
26086 if (TARGET_64BIT)
26087 goto aix_names;
26088
26089 if ((sel & SAVRES_REG) == SAVRES_GPR)
26090 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26091 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26092 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26093 else if ((sel & SAVRES_REG) == SAVRES_VR)
26094 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26095 else
26096 abort ();
26097
26098 if ((sel & SAVRES_LR))
26099 suffix = "_x";
26100 }
26101 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26102 {
26103 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26104 /* No out-of-line save/restore routines for GPRs on AIX. */
26105 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26106 #endif
26107
26108 aix_names:
26109 if ((sel & SAVRES_REG) == SAVRES_GPR)
26110 prefix = ((sel & SAVRES_SAVE)
26111 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26112 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26113 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26114 {
26115 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26116 if ((sel & SAVRES_LR))
26117 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26118 else
26119 #endif
26120 {
26121 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26122 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26123 }
26124 }
26125 else if ((sel & SAVRES_REG) == SAVRES_VR)
26126 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26127 else
26128 abort ();
26129 }
26130
26131 if (DEFAULT_ABI == ABI_DARWIN)
26132 {
26133 /* The Darwin approach is (slightly) different, in order to be
26134 compatible with code generated by the system toolchain. There is a
26135 single symbol for the start of the save sequence, and the code here
26136 embeds an offset into that code on the basis of the first register
26137 to be saved. */
26138 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26139 if ((sel & SAVRES_REG) == SAVRES_GPR)
26140 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26141 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26142 (regno - 13) * 4, prefix, regno);
26143 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26144 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26145 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26146 else if ((sel & SAVRES_REG) == SAVRES_VR)
26147 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26148 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26149 else
26150 abort ();
26151 }
26152 else
26153 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26154
26155 return savres_routine_name;
26156 }
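
/* Examples of the synthesized names (derived from the code above): on
   32-bit ELF (ABI_V4), regno 29 with SAVRES_SAVE | SAVRES_GPR gives
   "_savegpr_29", and a restore with SAVRES_LR gives "_restgpr_29_x";
   the same save on ELFv2 with SAVRES_LR gives "_savegpr0_29".  */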
26157
26158 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26159 SEL selects the routine as in rs6000_savres_routine_name. */
26160
26161 static rtx
26162 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26163 {
26164 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26165 ? info->first_gp_reg_save
26166 : (sel & SAVRES_REG) == SAVRES_FPR
26167 ? info->first_fp_reg_save - 32
26168 : (sel & SAVRES_REG) == SAVRES_VR
26169 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26170 : -1);
26171 rtx sym;
26172 int select = sel;
26173
26174 /* Don't generate bogus routine names. */
26175 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26176 && regno <= LAST_SAVRES_REGISTER
26177 && select >= 0 && select <= 12);
26178
26179 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26180
26181 if (sym == NULL)
26182 {
26183 char *name;
26184
26185 name = rs6000_savres_routine_name (regno, sel);
26186
26187 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26188 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26189 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26190 }
26191
26192 return sym;
26193 }
26194
26195 /* Emit a sequence of insns, including a stack tie if needed, for
26196 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26197 reset the stack pointer, but move the base of the frame into
26198 reg UPDT_REGNO for use by out-of-line register restore routines. */
26199
26200 static rtx
26201 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26202 unsigned updt_regno)
26203 {
26204 /* If there is nothing to do, don't do anything. */
26205 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26206 return NULL_RTX;
26207
26208 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26209
26210 /* This blockage is needed so that sched doesn't decide to move
26211 the sp change before the register restores. */
26212 if (DEFAULT_ABI == ABI_V4)
26213 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26214 GEN_INT (frame_off)));
26215
26216 /* If we are restoring registers out-of-line, we will be using the
26217 "exit" variants of the restore routines, which will reset the
26218 stack for us. But we do need to point updt_reg into the
26219 right place for those routines. */
26220 if (frame_off != 0)
26221 return emit_insn (gen_add3_insn (updt_reg_rtx,
26222 frame_reg_rtx, GEN_INT (frame_off)));
26223 else
26224 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26225
26226 return NULL_RTX;
26227 }
26228
26229 /* Return the register number used as a pointer by out-of-line
26230 save/restore functions. */
26231
26232 static inline unsigned
26233 ptr_regno_for_savres (int sel)
26234 {
26235 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26236 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26237 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26238 }
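
/* Concretely (restating the code above): on AIX and ELFv2 the FPR
   routines and any routine that touches LR expect their pointer in
   r1, the rest in r12; on V.4 the pointer is r11, and on Darwin it is
   r11 except for the FPR routines, which use r1.  */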
26239
26240 /* Construct a parallel rtx describing the effect of a call to an
26241 out-of-line register save/restore routine, and emit the insn
26242 or jump_insn as appropriate. */
26243
26244 static rtx_insn *
26245 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26246 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26247 machine_mode reg_mode, int sel)
26248 {
26249 int i;
26250 int offset, start_reg, end_reg, n_regs, use_reg;
26251 int reg_size = GET_MODE_SIZE (reg_mode);
26252 rtx sym;
26253 rtvec p;
26254 rtx par;
26255 rtx_insn *insn;
26256
26257 offset = 0;
26258 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26259 ? info->first_gp_reg_save
26260 : (sel & SAVRES_REG) == SAVRES_FPR
26261 ? info->first_fp_reg_save
26262 : (sel & SAVRES_REG) == SAVRES_VR
26263 ? info->first_altivec_reg_save
26264 : -1);
26265 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26266 ? 32
26267 : (sel & SAVRES_REG) == SAVRES_FPR
26268 ? 64
26269 : (sel & SAVRES_REG) == SAVRES_VR
26270 ? LAST_ALTIVEC_REGNO + 1
26271 : -1);
26272 n_regs = end_reg - start_reg;
26273 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26274 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26275 + n_regs);
26276
26277 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26278 RTVEC_ELT (p, offset++) = ret_rtx;
26279
26280 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
26281
26282 sym = rs6000_savres_routine_sym (info, sel);
26283 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26284
26285 use_reg = ptr_regno_for_savres (sel);
26286 if ((sel & SAVRES_REG) == SAVRES_VR)
26287 {
26288 /* Vector regs are saved/restored using [reg+reg] addressing. */
26289 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, use_reg);
26290 RTVEC_ELT (p, offset++)
26291 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26292 }
26293 else
26294 RTVEC_ELT (p, offset++)
26295 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26296
26297 for (i = 0; i < end_reg - start_reg; i++)
26298 RTVEC_ELT (p, i + offset)
26299 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26300 frame_reg_rtx, save_area_offset + reg_size * i,
26301 (sel & SAVRES_SAVE) != 0);
26302
26303 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26304 RTVEC_ELT (p, i + offset)
26305 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26306
26307 par = gen_rtx_PARALLEL (VOIDmode, p);
26308
26309 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26310 {
26311 insn = emit_jump_insn (par);
26312 JUMP_LABEL (insn) = ret_rtx;
26313 }
26314 else
26315 insn = emit_insn (par);
26316 return insn;
26317 }
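
/* Shape of the PARALLEL built above for, e.g., a V.4 GPR "exit"
   restore starting at r29 (sketch; LR is register 65):

     (parallel [(return)
		(clobber (reg:P 65))
		(use (symbol_ref "_restgpr_29_x"))
		(use (reg:P 11))
		(set (reg 29) (mem ...))
		(set (reg 30) (mem ...))
		(set (reg 31) (mem ...))])
 */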
26318
26319 /* Emit prologue code to store CR fields that need to be saved into REG. This
26320 function should only be called when moving the non-volatile CRs to REG, it
26321 is not a general purpose routine to move the entire set of CRs to REG.
26322 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26323 volatile CRs. */
26324
26325 static void
26326 rs6000_emit_prologue_move_from_cr (rtx reg)
26327 {
26328 /* Only the ELFv2 ABI allows storing only selected fields. */
26329 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26330 {
26331 int i, cr_reg[8], count = 0;
26332
26333 /* Collect CR fields that must be saved. */
26334 for (i = 0; i < 8; i++)
26335 if (save_reg_p (CR0_REGNO + i))
26336 cr_reg[count++] = i;
26337
26338 /* If it's just a single one, use mfcrf. */
26339 if (count == 1)
26340 {
26341 rtvec p = rtvec_alloc (1);
26342 rtvec r = rtvec_alloc (2);
26343 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26344 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26345 RTVEC_ELT (p, 0)
26346 = gen_rtx_SET (reg,
26347 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26348
26349 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26350 return;
26351 }
26352
26353 /* ??? It might be better to handle count == 2 / 3 cases here
26354 as well, using logical operations to combine the values. */
26355 }
26356
26357 emit_insn (gen_prologue_movesi_from_cr (reg));
26358 }
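
/* E.g. (illustration): if only CR2 needs saving on ELFv2, the single
   rtx built above corresponds to an mfcrf with field mask
   1 << (7 - 2) = 0x20, reading just CR field 2 into REG.  */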
26359
26360 /* Return whether the split-stack arg pointer (r12) is used. */
26361
26362 static bool
26363 split_stack_arg_pointer_used_p (void)
26364 {
26365 /* If the pseudo holding the arg pointer is no longer a pseudo,
26366 then the arg pointer is used. */
26367 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26368 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26369 || (REGNO (cfun->machine->split_stack_arg_pointer)
26370 < FIRST_PSEUDO_REGISTER)))
26371 return true;
26372
26373 /* Unfortunately we also need to do some code scanning, since
26374 r12 may have been substituted for the pseudo. */
26375 rtx_insn *insn;
26376 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26377 FOR_BB_INSNS (bb, insn)
26378 if (NONDEBUG_INSN_P (insn))
26379 {
26380 /* A call destroys r12. */
26381 if (CALL_P (insn))
26382 return false;
26383
26384 df_ref use;
26385 FOR_EACH_INSN_USE (use, insn)
26386 {
26387 rtx x = DF_REF_REG (use);
26388 if (REG_P (x) && REGNO (x) == 12)
26389 return true;
26390 }
26391 df_ref def;
26392 FOR_EACH_INSN_DEF (def, insn)
26393 {
26394 rtx x = DF_REF_REG (def);
26395 if (REG_P (x) && REGNO (x) == 12)
26396 return false;
26397 }
26398 }
26399 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26400 }
26401
26402 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26403
26404 static bool
26405 rs6000_global_entry_point_needed_p (void)
26406 {
26407 /* Only needed for the ELFv2 ABI. */
26408 if (DEFAULT_ABI != ABI_ELFv2)
26409 return false;
26410
26411 /* With -msingle-pic-base, we assume the whole program shares the same
26412 TOC, so no global entry point prologues are needed anywhere. */
26413 if (TARGET_SINGLE_PIC_BASE)
26414 return false;
26415
26416 /* Ensure we have a global entry point for thunks. ??? We could
26417 avoid that if the target routine doesn't need a global entry point,
26418 but we do not know whether this is the case at this point. */
26419 if (cfun->is_thunk)
26420 return true;
26421
26422 /* For regular functions, rs6000_emit_prologue sets this flag if the
26423 routine ever uses the TOC pointer. */
26424 return cfun->machine->r2_setup_needed;
26425 }
26426
26427 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26428 static sbitmap
26429 rs6000_get_separate_components (void)
26430 {
26431 rs6000_stack_t *info = rs6000_stack_info ();
26432
26433 if (WORLD_SAVE_P (info))
26434 return NULL;
26435
26436 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26437 && !(info->savres_strategy & REST_MULTIPLE));
26438
26439 /* Component 0 is the save/restore of LR (done via GPR0).
26440 Component 2 is the save of the TOC (GPR2).
26441 Components 13..31 are the save/restore of GPR13..GPR31.
26442 Components 46..63 are the save/restore of FPR14..FPR31. */
26443
26444 cfun->machine->n_components = 64;
26445
26446 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26447 bitmap_clear (components);
26448
26449 int reg_size = TARGET_32BIT ? 4 : 8;
26450 int fp_reg_size = 8;
26451
26452 /* The GPRs we need to save to the frame. */
26453 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26454 && (info->savres_strategy & REST_INLINE_GPRS))
26455 {
26456 int offset = info->gp_save_offset;
26457 if (info->push_p)
26458 offset += info->total_size;
26459
26460 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26461 {
26462 if (IN_RANGE (offset, -0x8000, 0x7fff)
26463 && save_reg_p (regno))
26464 bitmap_set_bit (components, regno);
26465
26466 offset += reg_size;
26467 }
26468 }
26469
26470 /* Don't mess with the hard frame pointer. */
26471 if (frame_pointer_needed)
26472 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26473
26474 /* Don't mess with the fixed TOC register. */
26475 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26476 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26477 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26478 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26479
26480 /* The FPRs we need to save to the frame. */
26481 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26482 && (info->savres_strategy & REST_INLINE_FPRS))
26483 {
26484 int offset = info->fp_save_offset;
26485 if (info->push_p)
26486 offset += info->total_size;
26487
26488 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26489 {
26490 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26491 bitmap_set_bit (components, regno);
26492
26493 offset += fp_reg_size;
26494 }
26495 }
26496
26497 /* Optimize LR save and restore if we can. This is component 0. Any
26498 out-of-line register save/restore routines need LR. */
26499 if (info->lr_save_p
26500 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26501 && (info->savres_strategy & SAVE_INLINE_GPRS)
26502 && (info->savres_strategy & REST_INLINE_GPRS)
26503 && (info->savres_strategy & SAVE_INLINE_FPRS)
26504 && (info->savres_strategy & REST_INLINE_FPRS)
26505 && (info->savres_strategy & SAVE_INLINE_VRS)
26506 && (info->savres_strategy & REST_INLINE_VRS))
26507 {
26508 int offset = info->lr_save_offset;
26509 if (info->push_p)
26510 offset += info->total_size;
26511 if (IN_RANGE (offset, -0x8000, 0x7fff))
26512 bitmap_set_bit (components, 0);
26513 }
26514
26515 /* Optimize saving the TOC. This is component 2. */
26516 if (cfun->machine->save_toc_in_prologue)
26517 bitmap_set_bit (components, 2);
26518
26519 return components;
26520 }
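
/* Example (exposition): in a 64-bit function whose first saved GPR is
   r14 and first saved FPR is f14, components 14..31 cover the GPR
   saves, components 46..63 (regno 46 being f14) cover the FPR saves,
   component 0 is the LR save and component 2 the TOC save, each
   subject to its save slot offset fitting a signed 16-bit
   displacement.  */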
26521
26522 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26523 static sbitmap
26524 rs6000_components_for_bb (basic_block bb)
26525 {
26526 rs6000_stack_t *info = rs6000_stack_info ();
26527
26528 bitmap in = DF_LIVE_IN (bb);
26529 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26530 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26531
26532 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26533 bitmap_clear (components);
26534
26535 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26536
26537 /* GPRs. */
26538 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26539 if (bitmap_bit_p (in, regno)
26540 || bitmap_bit_p (gen, regno)
26541 || bitmap_bit_p (kill, regno))
26542 bitmap_set_bit (components, regno);
26543
26544 /* FPRs. */
26545 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26546 if (bitmap_bit_p (in, regno)
26547 || bitmap_bit_p (gen, regno)
26548 || bitmap_bit_p (kill, regno))
26549 bitmap_set_bit (components, regno);
26550
26551 /* The link register. */
26552 if (bitmap_bit_p (in, LR_REGNO)
26553 || bitmap_bit_p (gen, LR_REGNO)
26554 || bitmap_bit_p (kill, LR_REGNO))
26555 bitmap_set_bit (components, 0);
26556
26557 /* The TOC save. */
26558 if (bitmap_bit_p (in, TOC_REGNUM)
26559 || bitmap_bit_p (gen, TOC_REGNUM)
26560 || bitmap_bit_p (kill, TOC_REGNUM))
26561 bitmap_set_bit (components, 2);
26562
26563 return components;
26564 }
26565
26566 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26567 static void
26568 rs6000_disqualify_components (sbitmap components, edge e,
26569 sbitmap edge_components, bool /*is_prologue*/)
26570 {
26571 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26572 live where we want to place that code. */
26573 if (bitmap_bit_p (edge_components, 0)
26574 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26575 {
26576 if (dump_file)
26577 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26578 "on entry to bb %d\n", e->dest->index);
26579 bitmap_clear_bit (components, 0);
26580 }
26581 }
26582
26583 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26584 static void
26585 rs6000_emit_prologue_components (sbitmap components)
26586 {
26587 rs6000_stack_t *info = rs6000_stack_info ();
26588 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26589 ? HARD_FRAME_POINTER_REGNUM
26590 : STACK_POINTER_REGNUM);
26591
26592 machine_mode reg_mode = Pmode;
26593 int reg_size = TARGET_32BIT ? 4 : 8;
26594 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26595 int fp_reg_size = 8;
26596
26597 /* Prologue for LR. */
26598 if (bitmap_bit_p (components, 0))
26599 {
26600 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26601 rtx reg = gen_rtx_REG (reg_mode, 0);
26602 rtx_insn *insn = emit_move_insn (reg, lr);
26603 RTX_FRAME_RELATED_P (insn) = 1;
26604 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26605
26606 int offset = info->lr_save_offset;
26607 if (info->push_p)
26608 offset += info->total_size;
26609
26610 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26611 RTX_FRAME_RELATED_P (insn) = 1;
26612 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26613 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26614 }
26615
26616 /* Prologue for TOC. */
26617 if (bitmap_bit_p (components, 2))
26618 {
26619 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26620 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26621 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26622 }
26623
26624 /* Prologue for the GPRs. */
26625 int offset = info->gp_save_offset;
26626 if (info->push_p)
26627 offset += info->total_size;
26628
26629 for (int i = info->first_gp_reg_save; i < 32; i++)
26630 {
26631 if (bitmap_bit_p (components, i))
26632 {
26633 rtx reg = gen_rtx_REG (reg_mode, i);
26634 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26635 RTX_FRAME_RELATED_P (insn) = 1;
26636 rtx set = copy_rtx (single_set (insn));
26637 add_reg_note (insn, REG_CFA_OFFSET, set);
26638 }
26639
26640 offset += reg_size;
26641 }
26642
26643 /* Prologue for the FPRs. */
26644 offset = info->fp_save_offset;
26645 if (info->push_p)
26646 offset += info->total_size;
26647
26648 for (int i = info->first_fp_reg_save; i < 64; i++)
26649 {
26650 if (bitmap_bit_p (components, i))
26651 {
26652 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26653 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26654 RTX_FRAME_RELATED_P (insn) = 1;
26655 rtx set = copy_rtx (single_set (insn));
26656 add_reg_note (insn, REG_CFA_OFFSET, set);
26657 }
26658
26659 offset += fp_reg_size;
26660 }
26661 }
26662
26663 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26664 static void
26665 rs6000_emit_epilogue_components (sbitmap components)
26666 {
26667 rs6000_stack_t *info = rs6000_stack_info ();
26668 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26669 ? HARD_FRAME_POINTER_REGNUM
26670 : STACK_POINTER_REGNUM);
26671
26672 machine_mode reg_mode = Pmode;
26673 int reg_size = TARGET_32BIT ? 4 : 8;
26674
26675 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26676 int fp_reg_size = 8;
26677
26678 /* Epilogue for the FPRs. */
26679 int offset = info->fp_save_offset;
26680 if (info->push_p)
26681 offset += info->total_size;
26682
26683 for (int i = info->first_fp_reg_save; i < 64; i++)
26684 {
26685 if (bitmap_bit_p (components, i))
26686 {
26687 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26688 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26689 RTX_FRAME_RELATED_P (insn) = 1;
26690 add_reg_note (insn, REG_CFA_RESTORE, reg);
26691 }
26692
26693 offset += fp_reg_size;
26694 }
26695
26696 /* Epilogue for the GPRs. */
26697 offset = info->gp_save_offset;
26698 if (info->push_p)
26699 offset += info->total_size;
26700
26701 for (int i = info->first_gp_reg_save; i < 32; i++)
26702 {
26703 if (bitmap_bit_p (components, i))
26704 {
26705 rtx reg = gen_rtx_REG (reg_mode, i);
26706 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26707 RTX_FRAME_RELATED_P (insn) = 1;
26708 add_reg_note (insn, REG_CFA_RESTORE, reg);
26709 }
26710
26711 offset += reg_size;
26712 }
26713
26714 /* Epilogue for LR. */
26715 if (bitmap_bit_p (components, 0))
26716 {
26717 int offset = info->lr_save_offset;
26718 if (info->push_p)
26719 offset += info->total_size;
26720
26721 rtx reg = gen_rtx_REG (reg_mode, 0);
26722 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26723
26724 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26725 insn = emit_move_insn (lr, reg);
26726 RTX_FRAME_RELATED_P (insn) = 1;
26727 add_reg_note (insn, REG_CFA_RESTORE, lr);
26728 }
26729 }
26730
26731 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26732 static void
26733 rs6000_set_handled_components (sbitmap components)
26734 {
26735 rs6000_stack_t *info = rs6000_stack_info ();
26736
26737 for (int i = info->first_gp_reg_save; i < 32; i++)
26738 if (bitmap_bit_p (components, i))
26739 cfun->machine->gpr_is_wrapped_separately[i] = true;
26740
26741 for (int i = info->first_fp_reg_save; i < 64; i++)
26742 if (bitmap_bit_p (components, i))
26743 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26744
26745 if (bitmap_bit_p (components, 0))
26746 cfun->machine->lr_is_wrapped_separately = true;
26747
26748 if (bitmap_bit_p (components, 2))
26749 cfun->machine->toc_is_wrapped_separately = true;
26750 }
26751
26752 /* VRSAVE is a bit vector representing which AltiVec registers
26753 are used. The OS uses this to determine which vector
26754 registers to save on a context switch. We need to save
26755 VRSAVE on the stack frame, add whatever AltiVec registers we
26756 used in this function, and do the corresponding magic in the
26757 epilogue. */
26758 static void
26759 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26760 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26761 {
26762 /* Get VRSAVE into a GPR. */
26763 rtx reg = gen_rtx_REG (SImode, save_regno);
26764 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26765 if (TARGET_MACHO)
26766 emit_insn (gen_get_vrsave_internal (reg));
26767 else
26768 emit_insn (gen_rtx_SET (reg, vrsave));
26769
26770 /* Save VRSAVE. */
26771 int offset = info->vrsave_save_offset + frame_off;
26772 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26773
26774 /* Include the registers in the mask. */
26775 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26776
26777 emit_insn (generate_set_vrsave (reg, info, 0));
26778 }
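
/* For instance (illustration): if the function uses v20 and v21, the
   bits 1 << (31 - 20) and 1 << (31 - 21) are set in
   info->vrsave_mask, so the gen_iorsi3 above ORs them into the saved
   VRSAVE value before generate_set_vrsave moves it back.  */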
26779
26780 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26781 called, it left the arg pointer to the old stack in r29. Otherwise, the
26782 arg pointer is the top of the current frame. */
26783 static void
26784 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26785 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26786 {
26787 cfun->machine->split_stack_argp_used = true;
26788
26789 if (sp_adjust)
26790 {
26791 rtx r12 = gen_rtx_REG (Pmode, 12);
26792 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26793 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26794 emit_insn_before (set_r12, sp_adjust);
26795 }
26796 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26797 {
26798 rtx r12 = gen_rtx_REG (Pmode, 12);
26799 if (frame_off == 0)
26800 emit_move_insn (r12, frame_reg_rtx);
26801 else
26802 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26803 }
26804
26805 if (info->push_p)
26806 {
26807 rtx r12 = gen_rtx_REG (Pmode, 12);
26808 rtx r29 = gen_rtx_REG (Pmode, 29);
26809 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26810 rtx not_more = gen_label_rtx ();
26811 rtx jump;
26812
26813 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26814 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26815 gen_rtx_LABEL_REF (VOIDmode, not_more),
26816 pc_rtx);
26817 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26818 JUMP_LABEL (jump) = not_more;
26819 LABEL_NUSES (not_more) += 1;
26820 emit_move_insn (r12, r29);
26821 emit_label (not_more);
26822 }
26823 }
26824
26825 /* Emit function prologue as insns. */
26826
26827 void
26828 rs6000_emit_prologue (void)
26829 {
26830 rs6000_stack_t *info = rs6000_stack_info ();
26831 machine_mode reg_mode = Pmode;
26832 int reg_size = TARGET_32BIT ? 4 : 8;
26833 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26834 int fp_reg_size = 8;
26835 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26836 rtx frame_reg_rtx = sp_reg_rtx;
26837 unsigned int cr_save_regno;
26838 rtx cr_save_rtx = NULL_RTX;
26839 rtx_insn *insn;
26840 int strategy;
26841 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26842 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26843 && call_used_regs[STATIC_CHAIN_REGNUM]);
26844 int using_split_stack = (flag_split_stack
26845 && (lookup_attribute ("no_split_stack",
26846 DECL_ATTRIBUTES (cfun->decl))
26847 == NULL));
26848
26849 /* Offset to top of frame for frame_reg and sp respectively. */
26850 HOST_WIDE_INT frame_off = 0;
26851 HOST_WIDE_INT sp_off = 0;
26852 /* sp_adjust is the stack adjusting instruction, tracked so that the
26853 insn setting up the split-stack arg pointer can be emitted just
26854 prior to it, when r12 is not used here for other purposes. */
26855 rtx_insn *sp_adjust = 0;
26856
26857 #if CHECKING_P
26858 /* Track and check usage of r0, r11, r12. */
26859 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26860 #define START_USE(R) do \
26861 { \
26862 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26863 reg_inuse |= 1 << (R); \
26864 } while (0)
26865 #define END_USE(R) do \
26866 { \
26867 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26868 reg_inuse &= ~(1 << (R)); \
26869 } while (0)
26870 #define NOT_INUSE(R) do \
26871 { \
26872 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26873 } while (0)
26874 #else
26875 #define START_USE(R) do {} while (0)
26876 #define END_USE(R) do {} while (0)
26877 #define NOT_INUSE(R) do {} while (0)
26878 #endif
26879
26880 if (DEFAULT_ABI == ABI_ELFv2
26881 && !TARGET_SINGLE_PIC_BASE)
26882 {
26883 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26884
26885 /* With -mminimal-toc we may generate an extra use of r2 below. */
26886 if (TARGET_TOC && TARGET_MINIMAL_TOC
26887 && !constant_pool_empty_p ())
26888 cfun->machine->r2_setup_needed = true;
26889 }
26890
26891
26892 if (flag_stack_usage_info)
26893 current_function_static_stack_size = info->total_size;
26894
26895 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26896 {
26897 HOST_WIDE_INT size = info->total_size;
26898
26899 if (crtl->is_leaf && !cfun->calls_alloca)
26900 {
26901 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26902 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26903 size - get_stack_check_protect ());
26904 }
26905 else if (size > 0)
26906 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26907 }
26908
26909 if (TARGET_FIX_AND_CONTINUE)
26910 {
26911 /* gdb on darwin arranges to forward a function from the old
26912 address by modifying the first 5 instructions of the function
26913 to branch to the overriding function. This is necessary to
26914 permit function pointers that point to the old function to
26915 actually forward to the new function. */
26916 emit_insn (gen_nop ());
26917 emit_insn (gen_nop ());
26918 emit_insn (gen_nop ());
26919 emit_insn (gen_nop ());
26920 emit_insn (gen_nop ());
26921 }
26922
26923 /* Handle world saves specially here. */
26924 if (WORLD_SAVE_P (info))
26925 {
26926 int i, j, sz;
26927 rtx treg;
26928 rtvec p;
26929 rtx reg0;
26930
26931 /* save_world expects lr in r0. */
26932 reg0 = gen_rtx_REG (Pmode, 0);
26933 if (info->lr_save_p)
26934 {
26935 insn = emit_move_insn (reg0,
26936 gen_rtx_REG (Pmode, LR_REGNO));
26937 RTX_FRAME_RELATED_P (insn) = 1;
26938 }
26939
26940 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26941 assumptions about the offsets of various bits of the stack
26942 frame. */
26943 gcc_assert (info->gp_save_offset == -220
26944 && info->fp_save_offset == -144
26945 && info->lr_save_offset == 8
26946 && info->cr_save_offset == 4
26947 && info->push_p
26948 && info->lr_save_p
26949 && (!crtl->calls_eh_return
26950 || info->ehrd_offset == -432)
26951 && info->vrsave_save_offset == -224
26952 && info->altivec_save_offset == -416);
26953
26954 treg = gen_rtx_REG (SImode, 11);
26955 emit_move_insn (treg, GEN_INT (-info->total_size));
26956
26957 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26958 in R11. It also clobbers R12, so beware! */
26959
26960 /* Preserve CR2 for save_world prologues. */
26961 sz = 5;
26962 sz += 32 - info->first_gp_reg_save;
26963 sz += 64 - info->first_fp_reg_save;
26964 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26965 p = rtvec_alloc (sz);
26966 j = 0;
26967 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, LR_REGNO);
26968 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26969 gen_rtx_SYMBOL_REF (Pmode,
26970 "*save_world"));
26971 /* We do floats first so that the instruction pattern matches
26972 properly. */
26973 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26974 RTVEC_ELT (p, j++)
26975 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26976 info->first_fp_reg_save + i),
26977 frame_reg_rtx,
26978 info->fp_save_offset + frame_off + 8 * i);
26979 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26980 RTVEC_ELT (p, j++)
26981 = gen_frame_store (gen_rtx_REG (V4SImode,
26982 info->first_altivec_reg_save + i),
26983 frame_reg_rtx,
26984 info->altivec_save_offset + frame_off + 16 * i);
26985 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26986 RTVEC_ELT (p, j++)
26987 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26988 frame_reg_rtx,
26989 info->gp_save_offset + frame_off + reg_size * i);
26990
26991 /* CR register traditionally saved as CR2. */
26992 RTVEC_ELT (p, j++)
26993 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26994 frame_reg_rtx, info->cr_save_offset + frame_off);
26995 /* Explain the use of R0. */
26996 if (info->lr_save_p)
26997 RTVEC_ELT (p, j++)
26998 = gen_frame_store (reg0,
26999 frame_reg_rtx, info->lr_save_offset + frame_off);
27000 /* Explain what happens to the stack pointer. */
27001 {
27002 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27003 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27004 }
27005
27006 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27007 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27008 treg, GEN_INT (-info->total_size));
27009 sp_off = frame_off = info->total_size;
27010 }
27011
27012 strategy = info->savres_strategy;
27013
27014 /* For V.4, update stack before we do any saving and set back pointer. */
27015 if (! WORLD_SAVE_P (info)
27016 && info->push_p
27017 && (DEFAULT_ABI == ABI_V4
27018 || crtl->calls_eh_return))
27019 {
27020 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27021 || !(strategy & SAVE_INLINE_GPRS)
27022 || !(strategy & SAVE_INLINE_VRS));
27023 int ptr_regno = -1;
27024 rtx ptr_reg = NULL_RTX;
27025 int ptr_off = 0;
27026
27027 if (info->total_size < 32767)
27028 frame_off = info->total_size;
27029 else if (need_r11)
27030 ptr_regno = 11;
27031 else if (info->cr_save_p
27032 || info->lr_save_p
27033 || info->first_fp_reg_save < 64
27034 || info->first_gp_reg_save < 32
27035 || info->altivec_size != 0
27036 || info->vrsave_size != 0
27037 || crtl->calls_eh_return)
27038 ptr_regno = 12;
27039 else
27040 {
27041 /* The prologue won't be saving any regs so there is no need
27042 to set up a frame register to access any frame save area.
27043 We also won't be using frame_off anywhere below, but set
27044 the correct value anyway to protect against future
27045 changes to this function. */
27046 frame_off = info->total_size;
27047 }
27048 if (ptr_regno != -1)
27049 {
27050 /* Set up the frame offset to that needed by the first
27051 out-of-line save function. */
27052 START_USE (ptr_regno);
27053 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27054 frame_reg_rtx = ptr_reg;
27055 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27056 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27057 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27058 ptr_off = info->gp_save_offset + info->gp_size;
27059 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27060 ptr_off = info->altivec_save_offset + info->altivec_size;
27061 frame_off = -ptr_off;
27062 }
27063 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27064 ptr_reg, ptr_off);
27065 if (REGNO (frame_reg_rtx) == 12)
27066 sp_adjust = 0;
27067 sp_off = info->total_size;
27068 if (frame_reg_rtx != sp_reg_rtx)
27069 rs6000_emit_stack_tie (frame_reg_rtx, false);
27070 }
27071
27072 /* If we use the link register, get it into r0. */
27073 if (!WORLD_SAVE_P (info) && info->lr_save_p
27074 && !cfun->machine->lr_is_wrapped_separately)
27075 {
27076 rtx addr, reg, mem;
27077
27078 reg = gen_rtx_REG (Pmode, 0);
27079 START_USE (0);
27080 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27081 RTX_FRAME_RELATED_P (insn) = 1;
27082
27083 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27084 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27085 {
27086 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27087 GEN_INT (info->lr_save_offset + frame_off));
27088 mem = gen_rtx_MEM (Pmode, addr);
27089 /* This should not be of rs6000_sr_alias_set, because of
27090 __builtin_return_address. */
27091
27092 insn = emit_move_insn (mem, reg);
27093 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27094 NULL_RTX, NULL_RTX);
27095 END_USE (0);
27096 }
27097 }
27098
27099 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27100 r12 will be needed by out-of-line gpr save. */
27101 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27102 && !(strategy & (SAVE_INLINE_GPRS
27103 | SAVE_NOINLINE_GPRS_SAVES_LR))
27104 ? 11 : 12);
27105 if (!WORLD_SAVE_P (info)
27106 && info->cr_save_p
27107 && REGNO (frame_reg_rtx) != cr_save_regno
27108 && !(using_static_chain_p && cr_save_regno == 11)
27109 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27110 {
27111 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27112 START_USE (cr_save_regno);
27113 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27114 }
27115
27116 /* Do any required saving of FPRs. If only one or two to save, do
27117 it ourselves. Otherwise, call an out-of-line save function. */
27118 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27119 {
27120 int offset = info->fp_save_offset + frame_off;
27121 for (int i = info->first_fp_reg_save; i < 64; i++)
27122 {
27123 if (save_reg_p (i)
27124 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27125 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27126 sp_off - frame_off);
27127
27128 offset += fp_reg_size;
27129 }
27130 }
27131 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27132 {
27133 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27134 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27135 unsigned ptr_regno = ptr_regno_for_savres (sel);
27136 rtx ptr_reg = frame_reg_rtx;
27137
27138 if (REGNO (frame_reg_rtx) == ptr_regno)
27139 gcc_checking_assert (frame_off == 0);
27140 else
27141 {
27142 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27143 NOT_INUSE (ptr_regno);
27144 emit_insn (gen_add3_insn (ptr_reg,
27145 frame_reg_rtx, GEN_INT (frame_off)));
27146 }
27147 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27148 info->fp_save_offset,
27149 info->lr_save_offset,
27150 DFmode, sel);
27151 rs6000_frame_related (insn, ptr_reg, sp_off,
27152 NULL_RTX, NULL_RTX);
27153 if (lr)
27154 END_USE (0);
27155 }
27156
27157 /* Save GPRs. This is done as a PARALLEL if we are using
27158 the store-multiple instructions. */
27159 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27160 {
27161 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27162 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27163 unsigned ptr_regno = ptr_regno_for_savres (sel);
27164 rtx ptr_reg = frame_reg_rtx;
27165 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27166 int end_save = info->gp_save_offset + info->gp_size;
27167 int ptr_off;
27168
27169 if (ptr_regno == 12)
27170 sp_adjust = 0;
27171 if (!ptr_set_up)
27172 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27173
27174 /* Need to adjust r11 (r12) if we saved any FPRs. */
27175 if (end_save + frame_off != 0)
27176 {
27177 rtx offset = GEN_INT (end_save + frame_off);
27178
27179 if (ptr_set_up)
27180 frame_off = -end_save;
27181 else
27182 NOT_INUSE (ptr_regno);
27183 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27184 }
27185 else if (!ptr_set_up)
27186 {
27187 NOT_INUSE (ptr_regno);
27188 emit_move_insn (ptr_reg, frame_reg_rtx);
27189 }
27190 ptr_off = -end_save;
27191 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27192 info->gp_save_offset + ptr_off,
27193 info->lr_save_offset + ptr_off,
27194 reg_mode, sel);
27195 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27196 NULL_RTX, NULL_RTX);
27197 if (lr)
27198 END_USE (0);
27199 }
27200 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27201 {
27202 rtvec p;
27203 int i;
27204 p = rtvec_alloc (32 - info->first_gp_reg_save);
27205 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27206 RTVEC_ELT (p, i)
27207 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27208 frame_reg_rtx,
27209 info->gp_save_offset + frame_off + reg_size * i);
27210 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27211 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27212 NULL_RTX, NULL_RTX);
27213 }
27214 else if (!WORLD_SAVE_P (info))
27215 {
27216 int offset = info->gp_save_offset + frame_off;
27217 for (int i = info->first_gp_reg_save; i < 32; i++)
27218 {
27219 if (save_reg_p (i)
27220 && !cfun->machine->gpr_is_wrapped_separately[i])
27221 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27222 sp_off - frame_off);
27223
27224 offset += reg_size;
27225 }
27226 }
27227
27228 if (crtl->calls_eh_return)
27229 {
27230 unsigned int i;
27231 rtvec p;
27232
27233 for (i = 0; ; ++i)
27234 {
27235 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27236 if (regno == INVALID_REGNUM)
27237 break;
27238 }
27239
27240 p = rtvec_alloc (i);
27241
27242 for (i = 0; ; ++i)
27243 {
27244 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27245 if (regno == INVALID_REGNUM)
27246 break;
27247
27248 rtx set
27249 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27250 sp_reg_rtx,
27251 info->ehrd_offset + sp_off + reg_size * (int) i);
27252 RTVEC_ELT (p, i) = set;
27253 RTX_FRAME_RELATED_P (set) = 1;
27254 }
27255
27256 insn = emit_insn (gen_blockage ());
27257 RTX_FRAME_RELATED_P (insn) = 1;
27258 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27259 }
27260
27261 /* In the AIX ABI we need to make sure r2 is really saved. */
27262 if (TARGET_AIX && crtl->calls_eh_return)
27263 {
27264 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27265 rtx join_insn, note;
27266 rtx_insn *save_insn;
27267 long toc_restore_insn;
27268
27269 tmp_reg = gen_rtx_REG (Pmode, 11);
27270 tmp_reg_si = gen_rtx_REG (SImode, 11);
27271 if (using_static_chain_p)
27272 {
27273 START_USE (0);
27274 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27275 }
27276 else
27277 START_USE (11);
27278 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27279 /* Peek at instruction to which this function returns. If it's
27280 restoring r2, then we know we've already saved r2. We can't
27281 unconditionally save r2 because the value we have will already
27282 be updated if we arrived at this function via a plt call or
27283 toc-adjusting stub. */
27284 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27285 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27286 + RS6000_TOC_SAVE_SLOT);
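/* For example, on 64-bit RS6000_TOC_SAVE_SLOT is 24, so the value
   computed above is 0xE8410018, the encoding of "ld 2,24(1)"; on
   32-bit the slot is 20 and the value is 0x80410014, "lwz 2,20(1)".
   The xor/compare pair below then tests the loaded word against this
   constant using only 16-bit immediates.  */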
27287 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27288 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27289 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27290 validate_condition_mode (EQ, CCUNSmode);
27291 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27292 emit_insn (gen_rtx_SET (compare_result,
27293 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27294 toc_save_done = gen_label_rtx ();
27295 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27296 gen_rtx_EQ (VOIDmode, compare_result,
27297 const0_rtx),
27298 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27299 pc_rtx);
27300 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27301 JUMP_LABEL (jump) = toc_save_done;
27302 LABEL_NUSES (toc_save_done) += 1;
27303
27304 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27305 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27306 sp_off - frame_off);
27307
27308 emit_label (toc_save_done);
27309
27310 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
27311 have a CFG that has different saves along different paths.
27312 Move the note to a dummy blockage insn, which describes that
27313 R2 is unconditionally saved after the label. */
27314 /* ??? An alternate representation might be a special insn pattern
27315 containing both the branch and the store. That might give the
27316 code that minimizes the number of DW_CFA_advance opcodes more
27317 freedom in placing the annotations. */
27318 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27319 if (note)
27320 remove_note (save_insn, note);
27321 else
27322 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27323 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27324 RTX_FRAME_RELATED_P (save_insn) = 0;
27325
27326 join_insn = emit_insn (gen_blockage ());
27327 REG_NOTES (join_insn) = note;
27328 RTX_FRAME_RELATED_P (join_insn) = 1;
27329
27330 if (using_static_chain_p)
27331 {
27332 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27333 END_USE (0);
27334 }
27335 else
27336 END_USE (11);
27337 }
27338
27339 /* Save CR if we use any that must be preserved. */
27340 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27341 {
27342 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27343 GEN_INT (info->cr_save_offset + frame_off));
27344 rtx mem = gen_frame_mem (SImode, addr);
27345
27346 /* If we didn't copy cr before, do so now using r0. */
27347 if (cr_save_rtx == NULL_RTX)
27348 {
27349 START_USE (0);
27350 cr_save_rtx = gen_rtx_REG (SImode, 0);
27351 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27352 }
27353
27354 /* Saving CR requires a two-instruction sequence: one instruction
27355 to move the CR to a general-purpose register, and a second
27356 instruction that stores the GPR to memory.
27357
27358 We do not emit any DWARF CFI records for the first of these,
27359 because we cannot properly represent the fact that CR is saved in
27360 a register. One reason is that we cannot express that multiple
27361 CR fields are saved; another reason is that on 64-bit, the size
27362 of the CR register in DWARF (4 bytes) differs from the size of
27363 a general-purpose register.
27364
27365 This means if any intervening instruction were to clobber one of
27366 the call-saved CR fields, we'd have incorrect CFI. To prevent
27367 this from happening, we mark the store to memory as a use of
27368 those CR fields, which prevents any such instruction from being
27369 scheduled in between the two instructions. */
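/* Schematically, with r0 as the scratch register (the offset and the
   exact move -- mfcr or an mfocrf form -- vary):

       mfcr 0             <- rs6000_emit_prologue_move_from_cr
       stw  0,8(1)        <- this store, carrying USEs of the
                             call-saved CR fields

   so nothing clobbering those fields can be scheduled in between.  */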
27370 rtx crsave_v[9];
27371 int n_crsave = 0;
27372 int i;
27373
27374 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27375 for (i = 0; i < 8; i++)
27376 if (save_reg_p (CR0_REGNO + i))
27377 crsave_v[n_crsave++]
27378 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27379
27380 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27381 gen_rtvec_v (n_crsave, crsave_v)));
27382 END_USE (REGNO (cr_save_rtx));
27383
27384 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27385 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27386 so we need to construct a frame expression manually. */
27387 RTX_FRAME_RELATED_P (insn) = 1;
27388
27389 /* Update address to be stack-pointer relative, like
27390 rs6000_frame_related would do. */
27391 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27392 GEN_INT (info->cr_save_offset + sp_off));
27393 mem = gen_frame_mem (SImode, addr);
27394
27395 if (DEFAULT_ABI == ABI_ELFv2)
27396 {
27397 /* In the ELFv2 ABI we generate separate CFI records for each
27398 CR field that was actually saved. They all point to the
27399 same 32-bit stack slot. */
27400 rtx crframe[8];
27401 int n_crframe = 0;
27402
27403 for (i = 0; i < 8; i++)
27404 if (save_reg_p (CR0_REGNO + i))
27405 {
27406 crframe[n_crframe]
27407 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27408
27409 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27410 n_crframe++;
27411 }
27412
27413 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27414 gen_rtx_PARALLEL (VOIDmode,
27415 gen_rtvec_v (n_crframe, crframe)));
27416 }
27417 else
27418 {
27419 /* In other ABIs, by convention, we use a single CR regnum to
27420 represent the fact that all call-saved CR fields are saved.
27421 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27422 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27423 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27424 }
27425 }
27426
27427 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27428 *separate* slots if the routine calls __builtin_eh_return, so
27429 that they can be independently restored by the unwinder. */
27430 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27431 {
27432 int i, cr_off = info->ehcr_offset;
27433 rtx crsave;
27434
27435 /* ??? We might get better performance by using multiple mfocrf
27436 instructions. */
27437 crsave = gen_rtx_REG (SImode, 0);
27438 emit_insn (gen_prologue_movesi_from_cr (crsave));
27439
27440 for (i = 0; i < 8; i++)
27441 if (!call_used_regs[CR0_REGNO + i])
27442 {
27443 rtvec p = rtvec_alloc (2);
27444 RTVEC_ELT (p, 0)
27445 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27446 RTVEC_ELT (p, 1)
27447 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27448
27449 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27450
27451 RTX_FRAME_RELATED_P (insn) = 1;
27452 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27453 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27454 sp_reg_rtx, cr_off + sp_off));
27455
27456 cr_off += reg_size;
27457 }
27458 }
27459
27460 /* If we are emitting stack probes but allocating no stack, just
27461 note that in the dump file. */
27462 if (flag_stack_clash_protection
27463 && dump_file
27464 && !info->push_p)
27465 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27466
27467 /* Update stack and set back pointer unless this is V.4,
27468 for which it was done previously. */
27469 if (!WORLD_SAVE_P (info) && info->push_p
27470 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27471 {
27472 rtx ptr_reg = NULL;
27473 int ptr_off = 0;
27474
27475 /* If saving altivec regs we need to be able to address all save
27476 locations using a 16-bit offset. */
27477 if ((strategy & SAVE_INLINE_VRS) == 0
27478 || (info->altivec_size != 0
27479 && (info->altivec_save_offset + info->altivec_size - 16
27480 + info->total_size - frame_off) > 32767)
27481 || (info->vrsave_size != 0
27482 && (info->vrsave_save_offset
27483 + info->total_size - frame_off) > 32767))
27484 {
27485 int sel = SAVRES_SAVE | SAVRES_VR;
27486 unsigned ptr_regno = ptr_regno_for_savres (sel);
27487
27488 if (using_static_chain_p
27489 && ptr_regno == STATIC_CHAIN_REGNUM)
27490 ptr_regno = 12;
27491 if (REGNO (frame_reg_rtx) != ptr_regno)
27492 START_USE (ptr_regno);
27493 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27494 frame_reg_rtx = ptr_reg;
27495 ptr_off = info->altivec_save_offset + info->altivec_size;
27496 frame_off = -ptr_off;
27497 }
27498 else if (REGNO (frame_reg_rtx) == 1)
27499 frame_off = info->total_size;
27500 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27501 ptr_reg, ptr_off);
27502 if (REGNO (frame_reg_rtx) == 12)
27503 sp_adjust = 0;
27504 sp_off = info->total_size;
27505 if (frame_reg_rtx != sp_reg_rtx)
27506 rs6000_emit_stack_tie (frame_reg_rtx, false);
27507 }
27508
27509 /* Set frame pointer, if needed. */
27510 if (frame_pointer_needed)
27511 {
27512 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27513 sp_reg_rtx);
27514 RTX_FRAME_RELATED_P (insn) = 1;
27515 }
27516
27517 /* Save AltiVec registers if needed. Save here because the red zone does
27518 not always include AltiVec registers. */
27519 if (!WORLD_SAVE_P (info)
27520 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27521 {
27522 int end_save = info->altivec_save_offset + info->altivec_size;
27523 int ptr_off;
27524 /* Oddly, the vector save/restore functions point r0 at the end
27525 of the save area, then use r11 or r12 to load offsets for
27526 [reg+reg] addressing. */
27527 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27528 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27529 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27530
27531 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27532 NOT_INUSE (0);
27533 if (scratch_regno == 12)
27534 sp_adjust = 0;
27535 if (end_save + frame_off != 0)
27536 {
27537 rtx offset = GEN_INT (end_save + frame_off);
27538
27539 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27540 }
27541 else
27542 emit_move_insn (ptr_reg, frame_reg_rtx);
27543
27544 ptr_off = -end_save;
27545 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27546 info->altivec_save_offset + ptr_off,
27547 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27548 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27549 NULL_RTX, NULL_RTX);
27550 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27551 {
27552 /* The oddity mentioned above clobbered our frame reg. */
27553 emit_move_insn (frame_reg_rtx, ptr_reg);
27554 frame_off = ptr_off;
27555 }
27556 }
27557 else if (!WORLD_SAVE_P (info)
27558 && info->altivec_size != 0)
27559 {
27560 int i;
27561
27562 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27563 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27564 {
27565 rtx areg, savereg, mem;
27566 HOST_WIDE_INT offset;
27567
27568 offset = (info->altivec_save_offset + frame_off
27569 + 16 * (i - info->first_altivec_reg_save));
27570
27571 savereg = gen_rtx_REG (V4SImode, i);
27572
27573 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27574 {
27575 mem = gen_frame_mem (V4SImode,
27576 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27577 GEN_INT (offset)));
27578 insn = emit_insn (gen_rtx_SET (mem, savereg));
27579 areg = NULL_RTX;
27580 }
27581 else
27582 {
27583 NOT_INUSE (0);
27584 areg = gen_rtx_REG (Pmode, 0);
27585 emit_move_insn (areg, GEN_INT (offset));
27586
27587 /* AltiVec addressing mode is [reg+reg]. */
27588 mem = gen_frame_mem (V4SImode,
27589 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27590
27591 /* Rather than emitting a generic move, force use of the stvx
27592 instruction, which we always want on ISA 2.07 (power8) systems.
27593 In particular we don't want xxpermdi/stxvd2x for little
27594 endian. */
27595 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27596 }
27597
27598 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27599 areg, GEN_INT (offset));
27600 }
27601 }
27602
27603 /* VRSAVE is a bit vector representing which AltiVec registers
27604 are used. The OS uses this to determine which vector
27605 registers to save on a context switch. We need to save
27606 VRSAVE on the stack frame, add whatever AltiVec registers we
27607 used in this function, and do the corresponding magic in the
27608 epilogue. */
27609
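/* A sketch of what emit_vrsave_prologue produces (the exact registers,
   and whether the frame store is needed, depend on the function):

       mfvrsave rS            <- read the incoming VRSAVE mask
       stw      rS,off(1)     <- save it in the frame
       oris/ori rS,rS,mask    <- add the AltiVec regs used here
       mtvrsave rS            <- activate the new mask  */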
27610 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27611 {
27612 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27613 be using r12 as frame_reg_rtx and r11 as the static chain
27614 pointer for nested functions. */
27615 int save_regno = 12;
27616 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27617 && !using_static_chain_p)
27618 save_regno = 11;
27619 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27620 {
27621 save_regno = 11;
27622 if (using_static_chain_p)
27623 save_regno = 0;
27624 }
27625 NOT_INUSE (save_regno);
27626
27627 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27628 }
27629
27630 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27631 if (!TARGET_SINGLE_PIC_BASE
27632 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27633 && !constant_pool_empty_p ())
27634 || (DEFAULT_ABI == ABI_V4
27635 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27636 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27637 {
27638 /* If emit_load_toc_table will use the link register, we need to save
27639 it. We use R12 for this purpose because emit_load_toc_table
27640 can use register 0. This allows us to use a plain 'blr' to return
27641 from the procedure more often. */
27642 int save_LR_around_toc_setup = (TARGET_ELF
27643 && DEFAULT_ABI == ABI_V4
27644 && flag_pic
27645 && ! info->lr_save_p
27646 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27647 if (save_LR_around_toc_setup)
27648 {
27649 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27650 rtx tmp = gen_rtx_REG (Pmode, 12);
27651
27652 sp_adjust = 0;
27653 insn = emit_move_insn (tmp, lr);
27654 RTX_FRAME_RELATED_P (insn) = 1;
27655
27656 rs6000_emit_load_toc_table (TRUE);
27657
27658 insn = emit_move_insn (lr, tmp);
27659 add_reg_note (insn, REG_CFA_RESTORE, lr);
27660 RTX_FRAME_RELATED_P (insn) = 1;
27661 }
27662 else
27663 rs6000_emit_load_toc_table (TRUE);
27664 }
27665
27666 #if TARGET_MACHO
27667 if (!TARGET_SINGLE_PIC_BASE
27668 && DEFAULT_ABI == ABI_DARWIN
27669 && flag_pic && crtl->uses_pic_offset_table)
27670 {
27671 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27672 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27673
27674 /* Save and restore LR locally around this call (in R0). */
27675 if (!info->lr_save_p)
27676 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27677
27678 emit_insn (gen_load_macho_picbase (src));
27679
27680 emit_move_insn (gen_rtx_REG (Pmode,
27681 RS6000_PIC_OFFSET_TABLE_REGNUM),
27682 lr);
27683
27684 if (!info->lr_save_p)
27685 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27686 }
27687 #endif
27688
27689 /* If we need to, save the TOC register after doing the stack setup.
27690 Do not emit eh frame info for this save. The unwinder wants info,
27691 conceptually attached to instructions in this function, about
27692 register values in the caller of this function. This R2 may have
27693 already been changed from the value in the caller.
27694 We don't attempt to write accurate DWARF EH frame info for R2
27695 because code emitted by gcc for a (non-pointer) function call
27696 doesn't save and restore R2. Instead, R2 is managed out-of-line
27697 by a linker generated plt call stub when the function resides in
27698 a shared library. This behavior is costly to describe in DWARF,
27699 both in terms of the size of DWARF info and the time taken in the
27700 unwinder to interpret it. R2 changes, apart from the
27701 calls_eh_return case earlier in this function, are handled by
27702 linux-unwind.h frob_update_context. */
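/* Concretely, this emits a single "std 2,24(1)" on 64-bit (or
   "stw 2,20(1)" on 32-bit) with no CFI attached, for the reasons
   given above.  */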
27703 if (rs6000_save_toc_in_prologue_p ()
27704 && !cfun->machine->toc_is_wrapped_separately)
27705 {
27706 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27707 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27708 }
27709
27710 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27711 if (using_split_stack && split_stack_arg_pointer_used_p ())
27712 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27713 }
27714
27715 /* Output .extern statements for the save/restore routines we use. */
27716
27717 static void
27718 rs6000_output_savres_externs (FILE *file)
27719 {
27720 rs6000_stack_t *info = rs6000_stack_info ();
27721
27722 if (TARGET_DEBUG_STACK)
27723 debug_stack_info (info);
27724
27725 /* Write .extern for any function we will call to save and restore
27726 fp values. */
27727 if (info->first_fp_reg_save < 64
27728 && !TARGET_MACHO
27729 && !TARGET_ELF)
27730 {
27731 char *name;
27732 int regno = info->first_fp_reg_save - 32;
27733
27734 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27735 {
27736 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27737 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27738 name = rs6000_savres_routine_name (regno, sel);
27739 fprintf (file, "\t.extern %s\n", name);
27740 }
27741 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27742 {
27743 bool lr = (info->savres_strategy
27744 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27745 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27746 name = rs6000_savres_routine_name (regno, sel);
27747 fprintf (file, "\t.extern %s\n", name);
27748 }
27749 }
27750 }
27751
27752 /* Write function prologue. */
27753
27754 static void
27755 rs6000_output_function_prologue (FILE *file)
27756 {
27757 if (!cfun->is_thunk)
27758 rs6000_output_savres_externs (file);
27759
27760 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27761 immediately after the global entry point label. */
27762 if (rs6000_global_entry_point_needed_p ())
27763 {
27764 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27765
27766 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27767
27768 if (TARGET_CMODEL != CMODEL_LARGE)
27769 {
27770 /* In the small and medium code models, we assume the TOC is less
27771 than 2 GB away from the text section, so it can be computed via
27772 the following two-instruction sequence. */
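/* For internal label number 0, the code printed below is:

       0:      addis 2,12,.TOC.-.LCF0@ha
               addi 2,2,.TOC.-.LCF0@l

   computing the TOC pointer from the function's global entry
   address in r12.  */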
27773 char buf[256];
27774
27775 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27776 fprintf (file, "0:\taddis 2,12,.TOC.-");
27777 assemble_name (file, buf);
27778 fprintf (file, "@ha\n");
27779 fprintf (file, "\taddi 2,2,.TOC.-");
27780 assemble_name (file, buf);
27781 fprintf (file, "@l\n");
27782 }
27783 else
27784 {
27785 /* In the large code model, we allow arbitrary offsets between the
27786 TOC and the text section, so we have to load the offset from
27787 memory. The data field is emitted directly before the global
27788 entry point in rs6000_elf_declare_function_name. */
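/* For internal label number 0, the printed sequence is:

       ld  2,.LCL0-.LCF0(12)
       add 2,2,12

   where .LCL0 labels a data word (emitted before the global entry
   point) holding the offset to the TOC base.  */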
27789 char buf[256];
27790
27791 #ifdef HAVE_AS_ENTRY_MARKERS
27792 /* If supported by the linker, emit a marker relocation. If the
27793 total code size of the final executable or shared library
27794 happens to fit into 2 GB after all, the linker will replace
27795 this code sequence with the sequence for the small or medium
27796 code model. */
27797 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27798 #endif
27799 fprintf (file, "\tld 2,");
27800 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27801 assemble_name (file, buf);
27802 fprintf (file, "-");
27803 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27804 assemble_name (file, buf);
27805 fprintf (file, "(12)\n");
27806 fprintf (file, "\tadd 2,2,12\n");
27807 }
27808
27809 fputs ("\t.localentry\t", file);
27810 assemble_name (file, name);
27811 fputs (",.-", file);
27812 assemble_name (file, name);
27813 fputs ("\n", file);
27814 }
27815
27816 /* Output -mprofile-kernel code. This needs to be done here instead of
27817 in output_function_profile since it must go after the ELFv2 ABI
27818 local entry point. */
27819 if (TARGET_PROFILE_KERNEL && crtl->profile)
27820 {
27821 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27822 gcc_assert (!TARGET_32BIT);
27823
27824 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27825
27826 /* In the ELFv2 ABI we have no compiler stack word. It must be
27827 the responsibility of _mcount to preserve the static chain
27828 register if required. */
27829 if (DEFAULT_ABI != ABI_ELFv2
27830 && cfun->static_chain_decl != NULL)
27831 {
27832 asm_fprintf (file, "\tstd %s,24(%s)\n",
27833 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27834 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27835 asm_fprintf (file, "\tld %s,24(%s)\n",
27836 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27837 }
27838 else
27839 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27840 }
27841
27842 rs6000_pic_labelno++;
27843 }
27844
27845 /* -mprofile-kernel code calls mcount before the function prologue,
27846 so a profiled leaf function should stay a leaf function. */
27847 static bool
27848 rs6000_keep_leaf_when_profiled ()
27849 {
27850 return TARGET_PROFILE_KERNEL;
27851 }
27852
27853 /* Non-zero if vmx regs are restored before the frame pop, zero if
27854 we restore after the pop when possible. */
27855 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27856
27857 /* Restoring cr is a two-step process: loading a reg from the frame
27858 save, then moving the reg to cr. For ABI_V4 we must let the
27859 unwinder know that the stack location is no longer valid at or
27860 before the stack deallocation, but we can't emit a cfa_restore for
27861 cr at the stack deallocation like we do for other registers.
27862 The trouble is that it is possible for the move to cr to be
27863 scheduled after the stack deallocation. So say exactly where cr
27864 is located on each of the two insns. */
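/* A sketch of the two insns annotated here for ABI_V4 (register
   number, offset, and mtcrf mask vary):

       lwz   rT,cr_off(1)     <- REG_CFA_REGISTER: cr now lives in rT
       mtcrf mask,rT          <- REG_CFA_RESTORE: cr is back in cr  */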
27865
27866 static rtx
27867 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27868 {
27869 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27870 rtx reg = gen_rtx_REG (SImode, regno);
27871 rtx_insn *insn = emit_move_insn (reg, mem);
27872
27873 if (!exit_func && DEFAULT_ABI == ABI_V4)
27874 {
27875 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27876 rtx set = gen_rtx_SET (reg, cr);
27877
27878 add_reg_note (insn, REG_CFA_REGISTER, set);
27879 RTX_FRAME_RELATED_P (insn) = 1;
27880 }
27881 return reg;
27882 }
27883
27884 /* Reload CR from REG. */
27885
27886 static void
27887 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27888 {
27889 int count = 0;
27890 int i;
27891
27892 if (using_mfcr_multiple)
27893 {
27894 for (i = 0; i < 8; i++)
27895 if (save_reg_p (CR0_REGNO + i))
27896 count++;
27897 gcc_assert (count);
27898 }
27899
27900 if (using_mfcr_multiple && count > 1)
27901 {
27902 rtx_insn *insn;
27903 rtvec p;
27904 int ndx;
27905
27906 p = rtvec_alloc (count);
27907
27908 ndx = 0;
27909 for (i = 0; i < 8; i++)
27910 if (save_reg_p (CR0_REGNO + i))
27911 {
27912 rtvec r = rtvec_alloc (2);
27913 RTVEC_ELT (r, 0) = reg;
27914 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
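/* In the FXM mask just built, CR0 is the most significant of the
   eight bits, so field i is selected by 1 << (7-i), e.g. 0x20
   selects CR2.  */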
27915 RTVEC_ELT (p, ndx) =
27916 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27917 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27918 ndx++;
27919 }
27920 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27921 gcc_assert (ndx == count);
27922
27923 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27924 CR field separately. */
27925 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27926 {
27927 for (i = 0; i < 8; i++)
27928 if (save_reg_p (CR0_REGNO + i))
27929 add_reg_note (insn, REG_CFA_RESTORE,
27930 gen_rtx_REG (SImode, CR0_REGNO + i));
27931
27932 RTX_FRAME_RELATED_P (insn) = 1;
27933 }
27934 }
27935 else
27936 for (i = 0; i < 8; i++)
27937 if (save_reg_p (CR0_REGNO + i))
27938 {
27939 rtx insn = emit_insn (gen_movsi_to_cr_one
27940 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27941
27942 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27943 CR field separately, attached to the insn that in fact
27944 restores this particular CR field. */
27945 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27946 {
27947 add_reg_note (insn, REG_CFA_RESTORE,
27948 gen_rtx_REG (SImode, CR0_REGNO + i));
27949
27950 RTX_FRAME_RELATED_P (insn) = 1;
27951 }
27952 }
27953
27954 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27955 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27956 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27957 {
27958 rtx_insn *insn = get_last_insn ();
27959 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27960
27961 add_reg_note (insn, REG_CFA_RESTORE, cr);
27962 RTX_FRAME_RELATED_P (insn) = 1;
27963 }
27964 }
27965
27966 /* Like cr, the move to lr instruction can be scheduled after the
27967 stack deallocation, but unlike cr, its stack frame save is still
27968 valid. So we only need to emit the cfa_restore on the correct
27969 instruction. */
27970
27971 static void
27972 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27973 {
27974 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27975 rtx reg = gen_rtx_REG (Pmode, regno);
27976
27977 emit_move_insn (reg, mem);
27978 }
27979
27980 static void
27981 restore_saved_lr (int regno, bool exit_func)
27982 {
27983 rtx reg = gen_rtx_REG (Pmode, regno);
27984 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27985 rtx_insn *insn = emit_move_insn (lr, reg);
27986
27987 if (!exit_func && flag_shrink_wrap)
27988 {
27989 add_reg_note (insn, REG_CFA_RESTORE, lr);
27990 RTX_FRAME_RELATED_P (insn) = 1;
27991 }
27992 }
27993
27994 static rtx
27995 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27996 {
27997 if (DEFAULT_ABI == ABI_ELFv2)
27998 {
27999 int i;
28000 for (i = 0; i < 8; i++)
28001 if (save_reg_p (CR0_REGNO + i))
28002 {
28003 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28004 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28005 cfa_restores);
28006 }
28007 }
28008 else if (info->cr_save_p)
28009 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28010 gen_rtx_REG (SImode, CR2_REGNO),
28011 cfa_restores);
28012
28013 if (info->lr_save_p)
28014 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28015 gen_rtx_REG (Pmode, LR_REGNO),
28016 cfa_restores);
28017 return cfa_restores;
28018 }
28019
28020 /* Return true if OFFSET from stack pointer can be clobbered by signals.
28021 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288
28022 bytes below the stack pointer not clobbered by signals. */
28023
28024 static inline bool
28025 offset_below_red_zone_p (HOST_WIDE_INT offset)
28026 {
28027 return offset < (DEFAULT_ABI == ABI_V4
28028 ? 0
28029 : TARGET_32BIT ? -220 : -288);
28030 }
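/* For example, on the 64-bit AIX-style ABIs offset_below_red_zone_p (-288)
   is false (that slot is inside the 288-byte cushion), while any offset
   below -288 is true; for ABI_V4 every negative offset is considered
   clobberable.  */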
28031
28032 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28033
28034 static void
28035 emit_cfa_restores (rtx cfa_restores)
28036 {
28037 rtx_insn *insn = get_last_insn ();
28038 rtx *loc = &REG_NOTES (insn);
28039
28040 while (*loc)
28041 loc = &XEXP (*loc, 1);
28042 *loc = cfa_restores;
28043 RTX_FRAME_RELATED_P (insn) = 1;
28044 }
28045
28046 /* Emit function epilogue as insns. */
28047
28048 void
28049 rs6000_emit_epilogue (int sibcall)
28050 {
28051 rs6000_stack_t *info;
28052 int restoring_GPRs_inline;
28053 int restoring_FPRs_inline;
28054 int using_load_multiple;
28055 int using_mtcr_multiple;
28056 int use_backchain_to_restore_sp;
28057 int restore_lr;
28058 int strategy;
28059 HOST_WIDE_INT frame_off = 0;
28060 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28061 rtx frame_reg_rtx = sp_reg_rtx;
28062 rtx cfa_restores = NULL_RTX;
28063 rtx insn;
28064 rtx cr_save_reg = NULL_RTX;
28065 machine_mode reg_mode = Pmode;
28066 int reg_size = TARGET_32BIT ? 4 : 8;
28067 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
28068 int fp_reg_size = 8;
28069 int i;
28070 bool exit_func;
28071 unsigned ptr_regno;
28072
28073 info = rs6000_stack_info ();
28074
28075 strategy = info->savres_strategy;
28076 using_load_multiple = strategy & REST_MULTIPLE;
28077 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28078 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28079 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
28080 || rs6000_tune == PROCESSOR_PPC603
28081 || rs6000_tune == PROCESSOR_PPC750
28082 || optimize_size);
28083 /* Restore via the backchain when we have a large frame, since this
28084 is more efficient than an addis, addi pair. The second condition
28085 here will not trigger at the moment; we don't actually need a
28086 frame pointer for alloca, but the generic parts of the compiler
28087 give us one anyway. */
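/* That is, prefer a single load through the backchain word at 0(r1),
   e.g. "ld 1,0(1)", over rebuilding the old stack pointer with an
   addis/addi pair once the frame size no longer fits a 16-bit signed
   immediate.  */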
28088 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28089 ? info->lr_save_offset
28090 : 0) > 32767
28091 || (cfun->calls_alloca
28092 && !frame_pointer_needed));
28093 restore_lr = (info->lr_save_p
28094 && (restoring_FPRs_inline
28095 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28096 && (restoring_GPRs_inline
28097 || info->first_fp_reg_save < 64)
28098 && !cfun->machine->lr_is_wrapped_separately);
28099
28100
28101 if (WORLD_SAVE_P (info))
28102 {
28103 int i, j;
28104 char rname[30];
28105 const char *alloc_rname;
28106 rtvec p;
28107
28108 /* eh_rest_world_r10 will return to the location saved in the LR
28109 stack slot (which is not likely to be our caller).
28110 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28111 rest_world is similar, except any R10 parameter is ignored.
28112 The exception-handling stuff that was here in 2.95 is no
28113 longer necessary. */
28114
28115 p = rtvec_alloc (9
28116 + 32 - info->first_gp_reg_save
28117 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28118 + 63 + 1 - info->first_fp_reg_save);
28119
28120 strcpy (rname, ((crtl->calls_eh_return) ?
28121 "*eh_rest_world_r10" : "*rest_world"));
28122 alloc_rname = ggc_strdup (rname);
28123
28124 j = 0;
28125 RTVEC_ELT (p, j++) = ret_rtx;
28126 RTVEC_ELT (p, j++)
28127 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28128 /* The instruction pattern requires a clobber here;
28129 it is shared with the restVEC helper. */
28130 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 11);
28131
28132 {
28133 /* CR register traditionally saved as CR2. */
28134 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28135 RTVEC_ELT (p, j++)
28136 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28137 if (flag_shrink_wrap)
28138 {
28139 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28140 gen_rtx_REG (Pmode, LR_REGNO),
28141 cfa_restores);
28142 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28143 }
28144 }
28145
28146 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28147 {
28148 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28149 RTVEC_ELT (p, j++)
28150 = gen_frame_load (reg,
28151 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28152 if (flag_shrink_wrap
28153 && save_reg_p (info->first_gp_reg_save + i))
28154 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28155 }
28156 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28157 {
28158 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28159 RTVEC_ELT (p, j++)
28160 = gen_frame_load (reg,
28161 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28162 if (flag_shrink_wrap
28163 && save_reg_p (info->first_altivec_reg_save + i))
28164 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28165 }
28166 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28167 {
28168 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
28169 info->first_fp_reg_save + i);
28170 RTVEC_ELT (p, j++)
28171 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28172 if (flag_shrink_wrap
28173 && save_reg_p (info->first_fp_reg_save + i))
28174 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28175 }
28176 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 0);
28177 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 12);
28178 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 7);
28179 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 8);
28180 RTVEC_ELT (p, j++)
28181 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28182 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28183
28184 if (flag_shrink_wrap)
28185 {
28186 REG_NOTES (insn) = cfa_restores;
28187 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28188 RTX_FRAME_RELATED_P (insn) = 1;
28189 }
28190 return;
28191 }
28192
28193 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28194 if (info->push_p)
28195 frame_off = info->total_size;
28196
28197 /* Restore AltiVec registers if we must do so before adjusting the
28198 stack. */
28199 if (info->altivec_size != 0
28200 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28201 || (DEFAULT_ABI != ABI_V4
28202 && offset_below_red_zone_p (info->altivec_save_offset))))
28203 {
28204 int i;
28205 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28206
28207 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28208 if (use_backchain_to_restore_sp)
28209 {
28210 int frame_regno = 11;
28211
28212 if ((strategy & REST_INLINE_VRS) == 0)
28213 {
28214 /* Of r11 and r12, select the one not clobbered by an
28215 out-of-line restore function for the frame register. */
28216 frame_regno = 11 + 12 - scratch_regno;
28217 }
28218 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28219 emit_move_insn (frame_reg_rtx,
28220 gen_rtx_MEM (Pmode, sp_reg_rtx));
28221 frame_off = 0;
28222 }
28223 else if (frame_pointer_needed)
28224 frame_reg_rtx = hard_frame_pointer_rtx;
28225
28226 if ((strategy & REST_INLINE_VRS) == 0)
28227 {
28228 int end_save = info->altivec_save_offset + info->altivec_size;
28229 int ptr_off;
28230 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28231 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28232
28233 if (end_save + frame_off != 0)
28234 {
28235 rtx offset = GEN_INT (end_save + frame_off);
28236
28237 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28238 }
28239 else
28240 emit_move_insn (ptr_reg, frame_reg_rtx);
28241
28242 ptr_off = -end_save;
28243 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28244 info->altivec_save_offset + ptr_off,
28245 0, V4SImode, SAVRES_VR);
28246 }
28247 else
28248 {
28249 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28250 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28251 {
28252 rtx addr, areg, mem, insn;
28253 rtx reg = gen_rtx_REG (V4SImode, i);
28254 HOST_WIDE_INT offset
28255 = (info->altivec_save_offset + frame_off
28256 + 16 * (i - info->first_altivec_reg_save));
28257
28258 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28259 {
28260 mem = gen_frame_mem (V4SImode,
28261 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28262 GEN_INT (offset)));
28263 insn = gen_rtx_SET (reg, mem);
28264 }
28265 else
28266 {
28267 areg = gen_rtx_REG (Pmode, 0);
28268 emit_move_insn (areg, GEN_INT (offset));
28269
28270 /* AltiVec addressing mode is [reg+reg]. */
28271 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28272 mem = gen_frame_mem (V4SImode, addr);
28273
28274 /* Rather than emitting a generic move, force use of the
28275 lvx instruction, which we always want. In particular we
28276 don't want lxvd2x/xxpermdi for little endian. */
28277 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28278 }
28279
28280 (void) emit_insn (insn);
28281 }
28282 }
28283
28284 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28285 if (((strategy & REST_INLINE_VRS) == 0
28286 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28287 && (flag_shrink_wrap
28288 || (offset_below_red_zone_p
28289 (info->altivec_save_offset
28290 + 16 * (i - info->first_altivec_reg_save))))
28291 && save_reg_p (i))
28292 {
28293 rtx reg = gen_rtx_REG (V4SImode, i);
28294 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28295 }
28296 }
28297
28298 /* Restore VRSAVE if we must do so before adjusting the stack. */
28299 if (info->vrsave_size != 0
28300 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28301 || (DEFAULT_ABI != ABI_V4
28302 && offset_below_red_zone_p (info->vrsave_save_offset))))
28303 {
28304 rtx reg;
28305
28306 if (frame_reg_rtx == sp_reg_rtx)
28307 {
28308 if (use_backchain_to_restore_sp)
28309 {
28310 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28311 emit_move_insn (frame_reg_rtx,
28312 gen_rtx_MEM (Pmode, sp_reg_rtx));
28313 frame_off = 0;
28314 }
28315 else if (frame_pointer_needed)
28316 frame_reg_rtx = hard_frame_pointer_rtx;
28317 }
28318
28319 reg = gen_rtx_REG (SImode, 12);
28320 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28321 info->vrsave_save_offset + frame_off));
28322
28323 emit_insn (generate_set_vrsave (reg, info, 1));
28324 }
28325
28326 insn = NULL_RTX;
28327 /* If we have a large stack frame, restore the old stack pointer
28328 using the backchain. */
28329 if (use_backchain_to_restore_sp)
28330 {
28331 if (frame_reg_rtx == sp_reg_rtx)
28332 {
28333 /* Under V.4, don't reset the stack pointer until after we're done
28334 loading the saved registers. */
28335 if (DEFAULT_ABI == ABI_V4)
28336 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28337
28338 insn = emit_move_insn (frame_reg_rtx,
28339 gen_rtx_MEM (Pmode, sp_reg_rtx));
28340 frame_off = 0;
28341 }
28342 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28343 && DEFAULT_ABI == ABI_V4)
28344 /* frame_reg_rtx has been set up by the altivec restore. */
28345 ;
28346 else
28347 {
28348 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28349 frame_reg_rtx = sp_reg_rtx;
28350 }
28351 }
28352 /* If we have a frame pointer, we can restore the old stack pointer
28353 from it. */
28354 else if (frame_pointer_needed)
28355 {
28356 frame_reg_rtx = sp_reg_rtx;
28357 if (DEFAULT_ABI == ABI_V4)
28358 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28359 /* Prevent reordering memory accesses against stack pointer restore. */
28360 else if (cfun->calls_alloca
28361 || offset_below_red_zone_p (-info->total_size))
28362 rs6000_emit_stack_tie (frame_reg_rtx, true);
28363
28364 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28365 GEN_INT (info->total_size)));
28366 frame_off = 0;
28367 }
28368 else if (info->push_p
28369 && DEFAULT_ABI != ABI_V4
28370 && !crtl->calls_eh_return)
28371 {
28372 /* Prevent reordering memory accesses against stack pointer restore. */
28373 if (cfun->calls_alloca
28374 || offset_below_red_zone_p (-info->total_size))
28375 rs6000_emit_stack_tie (frame_reg_rtx, false);
28376 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28377 GEN_INT (info->total_size)));
28378 frame_off = 0;
28379 }
28380 if (insn && frame_reg_rtx == sp_reg_rtx)
28381 {
28382 if (cfa_restores)
28383 {
28384 REG_NOTES (insn) = cfa_restores;
28385 cfa_restores = NULL_RTX;
28386 }
28387 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28388 RTX_FRAME_RELATED_P (insn) = 1;
28389 }
28390
28391 /* Restore AltiVec registers if we have not done so already. */
28392 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28393 && info->altivec_size != 0
28394 && (DEFAULT_ABI == ABI_V4
28395 || !offset_below_red_zone_p (info->altivec_save_offset)))
28396 {
28397 int i;
28398
28399 if ((strategy & REST_INLINE_VRS) == 0)
28400 {
28401 int end_save = info->altivec_save_offset + info->altivec_size;
28402 int ptr_off;
28403 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28404 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28405 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28406
28407 if (end_save + frame_off != 0)
28408 {
28409 rtx offset = GEN_INT (end_save + frame_off);
28410
28411 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28412 }
28413 else
28414 emit_move_insn (ptr_reg, frame_reg_rtx);
28415
28416 ptr_off = -end_save;
28417 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28418 info->altivec_save_offset + ptr_off,
28419 0, V4SImode, SAVRES_VR);
28420 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28421 {
28422 /* Frame reg was clobbered by out-of-line save. Restore it
28423 from ptr_reg, and if we are calling out-of-line gpr or
28424 fpr restore, set up the correct pointer and offset. */
28425 unsigned newptr_regno = 1;
28426 if (!restoring_GPRs_inline)
28427 {
28428 bool lr = info->gp_save_offset + info->gp_size == 0;
28429 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28430 newptr_regno = ptr_regno_for_savres (sel);
28431 end_save = info->gp_save_offset + info->gp_size;
28432 }
28433 else if (!restoring_FPRs_inline)
28434 {
28435 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28436 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28437 newptr_regno = ptr_regno_for_savres (sel);
28438 end_save = info->fp_save_offset + info->fp_size;
28439 }
28440
28441 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28442 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28443
28444 if (end_save + ptr_off != 0)
28445 {
28446 rtx offset = GEN_INT (end_save + ptr_off);
28447
28448 frame_off = -end_save;
28449 if (TARGET_32BIT)
28450 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28451 ptr_reg, offset));
28452 else
28453 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28454 ptr_reg, offset));
28455 }
28456 else
28457 {
28458 frame_off = ptr_off;
28459 emit_move_insn (frame_reg_rtx, ptr_reg);
28460 }
28461 }
28462 }
28463 else
28464 {
28465 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28466 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28467 {
28468 rtx addr, areg, mem, insn;
28469 rtx reg = gen_rtx_REG (V4SImode, i);
28470 HOST_WIDE_INT offset
28471 = (info->altivec_save_offset + frame_off
28472 + 16 * (i - info->first_altivec_reg_save));
28473
28474 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28475 {
28476 mem = gen_frame_mem (V4SImode,
28477 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28478 GEN_INT (offset)));
28479 insn = gen_rtx_SET (reg, mem);
28480 }
28481 else
28482 {
28483 areg = gen_rtx_REG (Pmode, 0);
28484 emit_move_insn (areg, GEN_INT (offset));
28485
28486 /* AltiVec addressing mode is [reg+reg]. */
28487 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28488 mem = gen_frame_mem (V4SImode, addr);
28489
28490 /* Rather than emitting a generic move, force use of the
28491 lvx instruction, which we always want. In particular we
28492 don't want lxvd2x/xxpermdi for little endian. */
28493 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28494 }
28495
28496 (void) emit_insn (insn);
28497 }
28498 }
28499
28500 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28501 if (((strategy & REST_INLINE_VRS) == 0
28502 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28503 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28504 && save_reg_p (i))
28505 {
28506 rtx reg = gen_rtx_REG (V4SImode, i);
28507 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28508 }
28509 }
28510
28511 /* Restore VRSAVE if we have not done so already. */
28512 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28513 && info->vrsave_size != 0
28514 && (DEFAULT_ABI == ABI_V4
28515 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28516 {
28517 rtx reg;
28518
28519 reg = gen_rtx_REG (SImode, 12);
28520 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28521 info->vrsave_save_offset + frame_off));
28522
28523 emit_insn (generate_set_vrsave (reg, info, 1));
28524 }
28525
28526 /* If we exit by an out-of-line restore function on ABI_V4 then that
28527 function will deallocate the stack, so we don't need to worry
28528 about the unwinder restoring cr from an invalid stack frame
28529 location. */
28530 exit_func = (!restoring_FPRs_inline
28531 || (!restoring_GPRs_inline
28532 && info->first_fp_reg_save == 64));
28533
28534 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28535 *separate* slots if the routine calls __builtin_eh_return, so
28536 that they can be independently restored by the unwinder. */
28537 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28538 {
28539 int i, cr_off = info->ehcr_offset;
28540
28541 for (i = 0; i < 8; i++)
28542 if (!call_used_regs[CR0_REGNO + i])
28543 {
28544 rtx reg = gen_rtx_REG (SImode, 0);
28545 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28546 cr_off + frame_off));
28547
28548 insn = emit_insn (gen_movsi_to_cr_one
28549 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28550
28551 if (!exit_func && flag_shrink_wrap)
28552 {
28553 add_reg_note (insn, REG_CFA_RESTORE,
28554 gen_rtx_REG (SImode, CR0_REGNO + i));
28555
28556 RTX_FRAME_RELATED_P (insn) = 1;
28557 }
28558
28559 cr_off += reg_size;
28560 }
28561 }
28562
28563 /* Get the old lr if we saved it. If we are restoring registers
28564 out-of-line, then the out-of-line routines can do this for us. */
28565 if (restore_lr && restoring_GPRs_inline)
28566 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28567
28568 /* Get the old cr if we saved it. */
28569 if (info->cr_save_p)
28570 {
28571 unsigned cr_save_regno = 12;
28572
28573 if (!restoring_GPRs_inline)
28574 {
28575 /* Ensure we don't use the register used by the out-of-line
28576 gpr register restore below. */
28577 bool lr = info->gp_save_offset + info->gp_size == 0;
28578 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28579 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28580
28581 if (gpr_ptr_regno == 12)
28582 cr_save_regno = 11;
28583 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28584 }
28585 else if (REGNO (frame_reg_rtx) == 12)
28586 cr_save_regno = 11;
28587
28588 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28589 info->cr_save_offset + frame_off,
28590 exit_func);
28591 }
28592
28593 /* Set LR here to try to overlap restores below. */
28594 if (restore_lr && restoring_GPRs_inline)
28595 restore_saved_lr (0, exit_func);
28596
28597 /* Load exception handler data registers, if needed. */
28598 if (crtl->calls_eh_return)
28599 {
28600 unsigned int i, regno;
28601
28602 if (TARGET_AIX)
28603 {
28604 rtx reg = gen_rtx_REG (reg_mode, 2);
28605 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28606 frame_off + RS6000_TOC_SAVE_SLOT));
28607 }
28608
28609 for (i = 0; ; ++i)
28610 {
28611 rtx mem;
28612
28613 regno = EH_RETURN_DATA_REGNO (i);
28614 if (regno == INVALID_REGNUM)
28615 break;
28616
28617 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28618 info->ehrd_offset + frame_off
28619 + reg_size * (int) i);
28620
28621 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28622 }
28623 }
28624
28625 /* Restore GPRs. This is done as a PARALLEL if we are using
28626 the load-multiple instructions. */
28627 if (!restoring_GPRs_inline)
28628 {
28629 /* We are jumping to an out-of-line function. */
28630 rtx ptr_reg;
28631 int end_save = info->gp_save_offset + info->gp_size;
28632 bool can_use_exit = end_save == 0;
28633 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28634 int ptr_off;
28635
28636 /* Emit stack reset code if we need it. */
28637 ptr_regno = ptr_regno_for_savres (sel);
28638 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28639 if (can_use_exit)
28640 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28641 else if (end_save + frame_off != 0)
28642 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28643 GEN_INT (end_save + frame_off)));
28644 else if (REGNO (frame_reg_rtx) != ptr_regno)
28645 emit_move_insn (ptr_reg, frame_reg_rtx);
28646 if (REGNO (frame_reg_rtx) == ptr_regno)
28647 frame_off = -end_save;
28648
28649 if (can_use_exit && info->cr_save_p)
28650 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28651
28652 ptr_off = -end_save;
28653 rs6000_emit_savres_rtx (info, ptr_reg,
28654 info->gp_save_offset + ptr_off,
28655 info->lr_save_offset + ptr_off,
28656 reg_mode, sel);
28657 }
28658 else if (using_load_multiple)
28659 {
28660 rtvec p;
28661 p = rtvec_alloc (32 - info->first_gp_reg_save);
28662 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28663 RTVEC_ELT (p, i)
28664 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28665 frame_reg_rtx,
28666 info->gp_save_offset + frame_off + reg_size * i);
28667 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28668 }
28669 else
28670 {
28671 int offset = info->gp_save_offset + frame_off;
28672 for (i = info->first_gp_reg_save; i < 32; i++)
28673 {
28674 if (save_reg_p (i)
28675 && !cfun->machine->gpr_is_wrapped_separately[i])
28676 {
28677 rtx reg = gen_rtx_REG (reg_mode, i);
28678 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28679 }
28680
28681 offset += reg_size;
28682 }
28683 }
28684
28685 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28686 {
28687 /* If the frame pointer was used then we can't delay emitting
28688 a REG_CFA_DEF_CFA note. This must happen on the insn that
28689 restores the frame pointer, r31. We may have already emitted
28690 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28691 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28692 be harmless if emitted. */
28693 if (frame_pointer_needed)
28694 {
28695 insn = get_last_insn ();
28696 add_reg_note (insn, REG_CFA_DEF_CFA,
28697 plus_constant (Pmode, frame_reg_rtx, frame_off));
28698 RTX_FRAME_RELATED_P (insn) = 1;
28699 }
28700
28701 /* Set up cfa_restores. We always need these when
28702 shrink-wrapping. If not shrink-wrapping then we only need
28703 the cfa_restore when the stack location is no longer valid.
28704 The cfa_restores must be emitted on or before the insn that
28705 invalidates the stack, and of course must not be emitted
28706 before the insn that actually does the restore. The latter
28707 is why it is a bad idea to emit the cfa_restores as a group
28708 on the last instruction here that actually does a restore:
28709 that insn may be reordered with respect to others doing
28710 restores. */
28711 if (flag_shrink_wrap
28712 && !restoring_GPRs_inline
28713 && info->first_fp_reg_save == 64)
28714 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28715
28716 for (i = info->first_gp_reg_save; i < 32; i++)
28717 if (save_reg_p (i)
28718 && !cfun->machine->gpr_is_wrapped_separately[i])
28719 {
28720 rtx reg = gen_rtx_REG (reg_mode, i);
28721 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28722 }
28723 }
28724
28725 if (!restoring_GPRs_inline
28726 && info->first_fp_reg_save == 64)
28727 {
28728 /* We are jumping to an out-of-line function. */
28729 if (cfa_restores)
28730 emit_cfa_restores (cfa_restores);
28731 return;
28732 }
28733
28734 if (restore_lr && !restoring_GPRs_inline)
28735 {
28736 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28737 restore_saved_lr (0, exit_func);
28738 }
28739
28740 /* Restore fpr's if we need to do it without calling a function. */
28741 if (restoring_FPRs_inline)
28742 {
28743 int offset = info->fp_save_offset + frame_off;
28744 for (i = info->first_fp_reg_save; i < 64; i++)
28745 {
28746 if (save_reg_p (i)
28747 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28748 {
28749 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28750 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28751 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28752 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28753 cfa_restores);
28754 }
28755
28756 offset += fp_reg_size;
28757 }
28758 }
28759
28760 /* If we saved cr, restore it here. Just those that were used. */
28761 if (info->cr_save_p)
28762 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28763
28764 /* If this is V.4, unwind the stack pointer after all of the loads
28765 have been done, or set up r11 if we are restoring fp out of line. */
28766 ptr_regno = 1;
28767 if (!restoring_FPRs_inline)
28768 {
28769 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28770 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28771 ptr_regno = ptr_regno_for_savres (sel);
28772 }
28773
28774 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28775 if (REGNO (frame_reg_rtx) == ptr_regno)
28776 frame_off = 0;
28777
28778 if (insn && restoring_FPRs_inline)
28779 {
28780 if (cfa_restores)
28781 {
28782 REG_NOTES (insn) = cfa_restores;
28783 cfa_restores = NULL_RTX;
28784 }
28785 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28786 RTX_FRAME_RELATED_P (insn) = 1;
28787 }
28788
28789 if (crtl->calls_eh_return)
28790 {
28791 rtx sa = EH_RETURN_STACKADJ_RTX;
28792 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28793 }
28794
28795 if (!sibcall && restoring_FPRs_inline)
28796 {
28797 if (cfa_restores)
28798 {
28799 /* We can't hang the cfa_restores off a simple return,
28800 since the shrink-wrap code sometimes uses an existing
28801 return. This means there might be a path from
28802 pre-prologue code to this return, and dwarf2cfi code
28803 wants the eh_frame unwinder state to be the same on
28804 all paths to any point. So we need to emit the
28805 cfa_restores before the return. For -m64 we really
28806 don't need epilogue cfa_restores at all, except for
28807 this irritating dwarf2cfi requirement when
28808 shrink-wrapping; the stack red zone means eh_frame info
28809 from the prologue telling the unwinder to restore
28810 from the stack is perfectly good right to the end of
28811 the function. */
28812 emit_insn (gen_blockage ());
28813 emit_cfa_restores (cfa_restores);
28814 cfa_restores = NULL_RTX;
28815 }
28816
28817 emit_jump_insn (targetm.gen_simple_return ());
28818 }
28819
28820 if (!sibcall && !restoring_FPRs_inline)
28821 {
28822 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28823 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28824 int elt = 0;
28825 RTVEC_ELT (p, elt++) = ret_rtx;
28826 if (lr)
28827 RTVEC_ELT (p, elt++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
28828
28829 /* We have to restore more than two FP registers, so branch to the
28830 restore function. It will return to our caller. */
28831 int i;
28832 int reg;
28833 rtx sym;
28834
28835 if (flag_shrink_wrap)
28836 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28837
28838 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28839 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28840 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28841 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28842
28843 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28844 {
28845 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28846
28847 RTVEC_ELT (p, elt++)
28848 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28849 if (flag_shrink_wrap
28850 && save_reg_p (info->first_fp_reg_save + i))
28851 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28852 }
28853
28854 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28855 }
28856
28857 if (cfa_restores)
28858 {
28859 if (sibcall)
28860 /* Ensure the cfa_restores are hung off an insn that won't
28861 be reordered above other restores. */
28862 emit_insn (gen_blockage ());
28863
28864 emit_cfa_restores (cfa_restores);
28865 }
28866 }
28867
28868 /* Write function epilogue. */
28869
28870 static void
28871 rs6000_output_function_epilogue (FILE *file)
28872 {
28873 #if TARGET_MACHO
28874 macho_branch_islands ();
28875
28876 {
28877 rtx_insn *insn = get_last_insn ();
28878 rtx_insn *deleted_debug_label = NULL;
28879
28880 /* Mach-O doesn't support labels at the end of objects, so if
28881 it looks like we might want one, take special action.
28882
28883 First, collect any sequence of deleted debug labels. */
28884 while (insn
28885 && NOTE_P (insn)
28886 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28887 {
28888 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28889 notes; instead set their CODE_LABEL_NUMBER to -1;
28890 otherwise there would be code generation differences
28891 between -g and -g0. */
28892 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28893 deleted_debug_label = insn;
28894 insn = PREV_INSN (insn);
28895 }
28896
28897 /* Second, if we have:
28898 label:
28899 barrier
28900 then this needs to be detected, so skip past the barrier. */
28901
28902 if (insn && BARRIER_P (insn))
28903 insn = PREV_INSN (insn);
28904
28905 /* Up to now we've only seen notes or barriers. */
28906 if (insn)
28907 {
28908 if (LABEL_P (insn)
28909 || (NOTE_P (insn)
28910 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28911 /* Trailing label: <barrier>. */
28912 fputs ("\tnop\n", file);
28913 else
28914 {
28915 /* Lastly, see if we have a completely empty function body. */
28916 while (insn && ! INSN_P (insn))
28917 insn = PREV_INSN (insn);
28918 /* If we don't find any insns, we've got an empty function body;
28919 i.e. completely empty, without a return or branch. This is
28920 taken as the case where a function body has been removed
28921 because it contains an inline __builtin_unreachable(). GCC
28922 states that reaching __builtin_unreachable() means UB so we're
28923 not obliged to do anything special; however, we want
28924 non-zero-sized function bodies. To meet this, and help the
28925 user out, let's trap the case. */
28926 if (insn == NULL)
28927 fputs ("\ttrap\n", file);
28928 }
28929 }
28930 else if (deleted_debug_label)
28931 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28932 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28933 CODE_LABEL_NUMBER (insn) = -1;
28934 }
28935 #endif
28936
28937 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28938 on its format.
28939
28940 We don't output a traceback table if -finhibit-size-directive was
28941 used. The documentation for -finhibit-size-directive reads
28942 ``don't output a @code{.size} assembler directive, or anything
28943 else that would cause trouble if the function is split in the
28944 middle, and the two halves are placed at locations far apart in
28945 memory.'' The traceback table has this property, since it
28946 includes the offset from the start of the function to the
28947 traceback table itself.
28948
28949 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28950 different traceback table. */
28951 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28952 && ! flag_inhibit_size_directive
28953 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28954 {
28955 const char *fname = NULL;
28956 const char *language_string = lang_hooks.name;
28957 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28958 int i;
28959 int optional_tbtab;
28960 rs6000_stack_t *info = rs6000_stack_info ();
28961
28962 if (rs6000_traceback == traceback_full)
28963 optional_tbtab = 1;
28964 else if (rs6000_traceback == traceback_part)
28965 optional_tbtab = 0;
28966 else
28967 optional_tbtab = !optimize_size && !TARGET_ELF;
28968
28969 if (optional_tbtab)
28970 {
28971 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28972 while (*fname == '.') /* V.4 encodes . in the name */
28973 fname++;
28974
28975 /* Need label immediately before tbtab, so we can compute
28976 its offset from the function start. */
28977 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28978 ASM_OUTPUT_LABEL (file, fname);
28979 }
28980
28981 /* The .tbtab pseudo-op can only be used for the first eight
28982 expressions, since it can't handle the possibly variable
28983 length fields that follow. However, if you omit the optional
28984 fields, the assembler outputs zeros for all optional fields
28985 anyway, giving each variable-length field its minimum length
28986 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28987 pseudo-op at all. */
28988
28989 /* An all-zero word flags the start of the tbtab, for debuggers
28990 that have to find it by searching forward from the entry
28991 point or from the current pc. */
28992 fputs ("\t.long 0\n", file);
28993
28994 /* Tbtab format type. Use format type 0. */
28995 fputs ("\t.byte 0,", file);
28996
28997 /* Language type. Unfortunately, there does not seem to be any
28998 official way to discover the language being compiled, so we
28999 use language_string.
29000 C is 0. Fortran is 1. Ada is 3. C++ is 9.
29001 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29002 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
29003 numbers either, so for now use 0. */
29004 if (lang_GNU_C ()
29005 || ! strcmp (language_string, "GNU GIMPLE")
29006 || ! strcmp (language_string, "GNU Go")
29007 || ! strcmp (language_string, "GNU D")
29008 || ! strcmp (language_string, "libgccjit"))
29009 i = 0;
29010 else if (! strcmp (language_string, "GNU F77")
29011 || lang_GNU_Fortran ())
29012 i = 1;
29013 else if (! strcmp (language_string, "GNU Ada"))
29014 i = 3;
29015 else if (lang_GNU_CXX ()
29016 || ! strcmp (language_string, "GNU Objective-C++"))
29017 i = 9;
29018 else if (! strcmp (language_string, "GNU Java"))
29019 i = 13;
29020 else if (! strcmp (language_string, "GNU Objective-C"))
29021 i = 14;
29022 else
29023 gcc_unreachable ();
29024 fprintf (file, "%d,", i);
29025
29026 /* 8 single bit fields: global linkage (not set for C extern linkage,
29027 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29028 from start of procedure stored in tbtab, internal function, function
29029 has controlled storage, function has no toc, function uses fp,
29030 function logs/aborts fp operations. */
29031 /* Assume that fp operations are used if any fp reg must be saved. */
29032 fprintf (file, "%d,",
29033 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
29034
29035 /* 6 bitfields: function is interrupt handler, name present in
29036 proc table, function calls alloca, on condition directives
29037 (controls stack walks, 3 bits), saves condition reg, saves
29038 link reg. */
29039 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29040 set up as a frame pointer, even when there is no alloca call. */
29041 fprintf (file, "%d,",
29042 ((optional_tbtab << 6)
29043 | ((optional_tbtab & frame_pointer_needed) << 5)
29044 | (info->cr_save_p << 1)
29045 | (info->lr_save_p)));
29046
29047 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29048 (6 bits). */
29049 fprintf (file, "%d,",
29050 (info->push_p << 7) | (64 - info->first_fp_reg_save));
29051
29052 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29053 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29054
29055 if (optional_tbtab)
29056 {
29057 /* Compute the parameter info from the function decl argument
29058 list. */
29059 tree decl;
29060 int next_parm_info_bit = 31;
29061
29062 for (decl = DECL_ARGUMENTS (current_function_decl);
29063 decl; decl = DECL_CHAIN (decl))
29064 {
29065 rtx parameter = DECL_INCOMING_RTL (decl);
29066 machine_mode mode = GET_MODE (parameter);
29067
29068 if (GET_CODE (parameter) == REG)
29069 {
29070 if (SCALAR_FLOAT_MODE_P (mode))
29071 {
29072 int bits;
29073
29074 float_parms++;
29075
29076 switch (mode)
29077 {
29078 case E_SFmode:
29079 case E_SDmode:
29080 bits = 0x2;
29081 break;
29082
29083 case E_DFmode:
29084 case E_DDmode:
29085 case E_TFmode:
29086 case E_TDmode:
29087 case E_IFmode:
29088 case E_KFmode:
29089 bits = 0x3;
29090 break;
29091
29092 default:
29093 gcc_unreachable ();
29094 }
29095
29096 /* If only one bit will fit, don't or in this entry. */
29097 if (next_parm_info_bit > 0)
29098 parm_info |= (bits << (next_parm_info_bit - 1));
29099 next_parm_info_bit -= 2;
29100 }
29101 else
29102 {
29103 fixed_parms += ((GET_MODE_SIZE (mode)
29104 + (UNITS_PER_WORD - 1))
29105 / UNITS_PER_WORD);
29106 next_parm_info_bit -= 1;
29107 }
29108 }
29109 }
29110 }
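/* Worked example (illustrative assumption): for a function taking
   (int, double, float) entirely in registers, the loop above produces
   fixed_parms == 1 and float_parms == 2, and packs parm_info from bit
   31 downward as 0 (fixed word), 11 (double), 10 (single), giving
   parm_info == 0x70000000.  */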
29111
29112 /* Number of fixed point parameters. */
29113 /* This is actually the number of words of fixed point parameters; thus
29114 an 8-byte struct counts as 2, and thus the maximum value is 8. */
29115 fprintf (file, "%d,", fixed_parms);
29116
29117 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29118 all on stack. */
29119 /* This is actually the number of fp registers that hold parameters;
29120 and thus the maximum value is 13. */
29121 /* Set parameters on stack bit if parameters are not in their original
29122 registers, regardless of whether they are on the stack? Xlc
29123 seems to set the bit when not optimizing. */
29124 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29125
29126 if (optional_tbtab)
29127 {
29128 /* Optional fields follow. Some are variable length. */
29129
29130 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29131 float, 11 double float. */
29132 /* There is an entry for each parameter in a register, in the order
29133 that they occur in the parameter list. Any intervening arguments
29134 on the stack are ignored. If the list overflows a long (max
29135 possible length 34 bits) then completely leave off all elements
29136 that don't fit. */
29137 /* Only emit this long if there was at least one parameter. */
29138 if (fixed_parms || float_parms)
29139 fprintf (file, "\t.long %d\n", parm_info);
29140
29141 /* Offset from start of code to tb table. */
29142 fputs ("\t.long ", file);
29143 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29144 RS6000_OUTPUT_BASENAME (file, fname);
29145 putc ('-', file);
29146 rs6000_output_function_entry (file, fname);
29147 putc ('\n', file);
29148
29149 /* Interrupt handler mask. */
29150 /* Omit this long, since we never set the interrupt handler bit
29151 above. */
29152
29153 /* Number of CTL (controlled storage) anchors. */
29154 /* Omit this long, since the has_ctl bit is never set above. */
29155
29156 /* Displacement into stack of each CTL anchor. */
29157 /* Omit this list of longs, because there are no CTL anchors. */
29158
29159 /* Length of function name. */
29160 if (*fname == '*')
29161 ++fname;
29162 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29163
29164 /* Function name. */
29165 assemble_string (fname, strlen (fname));
29166
29167 /* Register for alloca automatic storage; this is always reg 31.
29168 Only emit this if the alloca bit was set above. */
29169 if (frame_pointer_needed)
29170 fputs ("\t.byte 31\n", file);
29171
29172 fputs ("\t.align 2\n", file);
29173 }
29174 }
29175
29176 /* Arrange to define .LCTOC1 label, if not already done. */
29177 if (need_toc_init)
29178 {
29179 need_toc_init = 0;
29180 if (!toc_initialized)
29181 {
29182 switch_to_section (toc_section);
29183 switch_to_section (current_function_section ());
29184 }
29185 }
29186 }
29187
29188 /* -fsplit-stack support. */
29189
29190 /* A SYMBOL_REF for __morestack. */
29191 static GTY(()) rtx morestack_ref;
29192
29193 static rtx
29194 gen_add3_const (rtx rt, rtx ra, long c)
29195 {
29196 if (TARGET_64BIT)
29197 return gen_adddi3 (rt, ra, GEN_INT (c));
29198 else
29199 return gen_addsi3 (rt, ra, GEN_INT (c));
29200 }
29201
29202 /* Emit -fsplit-stack prologue, which goes before the regular function
29203 prologue (at local entry point in the case of ELFv2). */
29204
29205 void
29206 rs6000_expand_split_stack_prologue (void)
29207 {
29208 rs6000_stack_t *info = rs6000_stack_info ();
29209 unsigned HOST_WIDE_INT allocate;
29210 long alloc_hi, alloc_lo;
29211 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29212 rtx_insn *insn;
29213
29214 gcc_assert (flag_split_stack && reload_completed);
29215
29216 if (!info->push_p)
29217 return;
29218
29219 if (global_regs[29])
29220 {
29221 error ("%qs uses register r29", "-fsplit-stack");
29222 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29223 "conflicts with %qD", global_regs_decl[29]);
29224 }
29225
29226 allocate = info->total_size;
29227 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29228 {
29229 sorry ("stack frame larger than 2G is not supported for -fsplit-stack");
29230 return;
29231 }
29232 if (morestack_ref == NULL_RTX)
29233 {
29234 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29235 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29236 | SYMBOL_FLAG_FUNCTION);
29237 }
29238
29239 r0 = gen_rtx_REG (Pmode, 0);
29240 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29241 r12 = gen_rtx_REG (Pmode, 12);
29242 emit_insn (gen_load_split_stack_limit (r0));
29243 /* Always emit two insns here to calculate the requested stack,
29244 so that the linker can edit them when adjusting size for calling
29245 non-split-stack code. */
29246 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29247 alloc_lo = -allocate - alloc_hi;
29248 if (alloc_hi != 0)
29249 {
29250 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29251 if (alloc_lo != 0)
29252 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29253 else
29254 emit_insn (gen_nop ());
29255 }
29256 else
29257 {
29258 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29259 emit_insn (gen_nop ());
29260 }
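/* The hi/lo split above is the usual addis/addi decomposition: for
   allocate == 0x12350, -allocate is -74576, alloc_hi becomes -65536 and
   alloc_lo becomes -9040, so alloc_lo always fits a signed 16-bit
   immediate.  A standalone sketch of the invariant (illustrative only,
   never compiled here):  */
#if 0
#include <assert.h>

int
main (void)
{
  long allocate = 0x12350;
  /* Same arithmetic as rs6000_expand_split_stack_prologue.  */
  long alloc_hi = (-allocate + 0x8000) & ~0xffffL;
  long alloc_lo = -allocate - alloc_hi;
  /* The low part must fit an addi immediate, and the two parts must
     recombine exactly.  */
  assert (alloc_lo >= -0x8000 && alloc_lo <= 0x7fff);
  assert (alloc_hi + alloc_lo == -allocate);
  return 0;
}
#endif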
29261
29262 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29263 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29264 ok_label = gen_label_rtx ();
29265 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29266 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29267 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29268 pc_rtx);
29269 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29270 JUMP_LABEL (insn) = ok_label;
29271 /* Mark the jump as very likely to be taken. */
29272 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29273
29274 lr = gen_rtx_REG (Pmode, LR_REGNO);
29275 insn = emit_move_insn (r0, lr);
29276 RTX_FRAME_RELATED_P (insn) = 1;
29277 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29278 RTX_FRAME_RELATED_P (insn) = 1;
29279
29280 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29281 const0_rtx, const0_rtx));
29282 call_fusage = NULL_RTX;
29283 use_reg (&call_fusage, r12);
29284 /* Say the call uses r0, even though it doesn't, to stop regrename
29285 from twiddling with the insns saving lr, trashing args for cfun.
29286 The insns restoring lr are similarly protected by making
29287 split_stack_return use r0. */
29288 use_reg (&call_fusage, r0);
29289 add_function_usage_to (insn, call_fusage);
29290 /* Indicate that this function can't jump to non-local gotos. */
29291 make_reg_eh_region_note_nothrow_nononlocal (insn);
29292 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29293 insn = emit_move_insn (lr, r0);
29294 add_reg_note (insn, REG_CFA_RESTORE, lr);
29295 RTX_FRAME_RELATED_P (insn) = 1;
29296 emit_insn (gen_split_stack_return ());
29297
29298 emit_label (ok_label);
29299 LABEL_NUSES (ok_label) = 1;
29300 }
29301
29302 /* Return the internal arg pointer used for function incoming
29303 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29304 to copy it to a pseudo in order for it to be preserved over calls
29305 and suchlike. We'd really like to use a pseudo here for the
29306 internal arg pointer but data-flow analysis is not prepared to
29307 accept pseudos as live at the beginning of a function. */
29308
29309 static rtx
29310 rs6000_internal_arg_pointer (void)
29311 {
29312 if (flag_split_stack
29313 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29314 == NULL))
29315 {
29317 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29318 {
29319 rtx pat;
29320
29321 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29322 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29323
29324 /* Put the pseudo initialization right after the note at the
29325 beginning of the function. */
29326 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29327 gen_rtx_REG (Pmode, 12));
29328 push_topmost_sequence ();
29329 emit_insn_after (pat, get_insns ());
29330 pop_topmost_sequence ();
29331 }
29332 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29333 FIRST_PARM_OFFSET (current_function_decl));
29334 return copy_to_reg (ret);
29335 }
29336 return virtual_incoming_args_rtx;
29337 }
29338
29339 /* We may have to tell the dataflow pass that the split stack prologue
29340 is initializing a register. */
29341
29342 static void
29343 rs6000_live_on_entry (bitmap regs)
29344 {
29345 if (flag_split_stack)
29346 bitmap_set_bit (regs, 12);
29347 }
29348
29349 /* Emit -fsplit-stack dynamic stack allocation space check. */
29350
29351 void
29352 rs6000_split_stack_space_check (rtx size, rtx label)
29353 {
29354 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29355 rtx limit = gen_reg_rtx (Pmode);
29356 rtx requested = gen_reg_rtx (Pmode);
29357 rtx cmp = gen_reg_rtx (CCUNSmode);
29358 rtx jump;
29359
29360 emit_insn (gen_load_split_stack_limit (limit));
29361 if (CONST_INT_P (size))
29362 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29363 else
29364 {
29365 size = force_reg (Pmode, size);
29366 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29367 }
29368 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29369 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29370 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29371 gen_rtx_LABEL_REF (VOIDmode, label),
29372 pc_rtx);
29373 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29374 JUMP_LABEL (jump) = label;
29375 }
29376 \f
29377 /* A C compound statement that outputs the assembler code for a thunk
29378 function, used to implement C++ virtual function calls with
29379 multiple inheritance. The thunk acts as a wrapper around a virtual
29380 function, adjusting the implicit object parameter before handing
29381 control off to the real function.
29382
29383 First, emit code to add the integer DELTA to the location that
29384 contains the incoming first argument. Assume that this argument
29385 contains a pointer, and is the one used to pass the `this' pointer
29386 in C++. This is the incoming argument *before* the function
29387 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29388 values of all other incoming arguments.
29389
29390 After the addition, emit code to jump to FUNCTION, which is a
29391 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29392 not touch the return address. Hence returning from FUNCTION will
29393 return to whoever called the current `thunk'.
29394
29395 The effect must be as if FUNCTION had been called directly with the
29396 adjusted first argument. This macro is responsible for emitting
29397 all of the code for a thunk function; output_function_prologue()
29398 and output_function_epilogue() are not invoked.
29399
29400 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29401 been extracted from it.) It might possibly be useful on some
29402 targets, but probably not.
29403
29404 If you do not define this macro, the target-independent code in the
29405 C++ frontend will generate a less efficient heavyweight thunk that
29406 calls FUNCTION instead of jumping to it. The generic approach does
29407 not support varargs. */
29408
29409 static void
29410 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29411 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29412 tree function)
29413 {
29414 rtx this_rtx, funexp;
29415 rtx_insn *insn;
29416
29417 reload_completed = 1;
29418 epilogue_completed = 1;
29419
29420 /* Mark the end of the (empty) prologue. */
29421 emit_note (NOTE_INSN_PROLOGUE_END);
29422
29423 /* Find the "this" pointer. If the function returns a structure,
29424 the structure return pointer is in r3. */
29425 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29426 this_rtx = gen_rtx_REG (Pmode, 4);
29427 else
29428 this_rtx = gen_rtx_REG (Pmode, 3);
29429
29430 /* Apply the constant offset, if required. */
29431 if (delta)
29432 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29433
29434 /* Apply the offset from the vtable, if required. */
29435 if (vcall_offset)
29436 {
29437 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29438 rtx tmp = gen_rtx_REG (Pmode, 12);
29439
29440 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29441 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29442 {
29443 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29444 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29445 }
29446 else
29447 {
29448 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29449
29450 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29451 }
29452 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29453 }
29454
29455 /* Generate a tail call to the target function. */
29456 if (!TREE_USED (function))
29457 {
29458 assemble_external (function);
29459 TREE_USED (function) = 1;
29460 }
29461 funexp = XEXP (DECL_RTL (function), 0);
29462 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29463
29464 #if TARGET_MACHO
29465 if (MACHOPIC_INDIRECT)
29466 funexp = machopic_indirect_call_target (funexp);
29467 #endif
29468
29469 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29470 generate sibcall RTL explicitly. */
29471 insn = emit_call_insn (
29472 gen_rtx_PARALLEL (VOIDmode,
29473 gen_rtvec (3,
29474 gen_rtx_CALL (VOIDmode,
29475 funexp, const0_rtx),
29476 gen_rtx_USE (VOIDmode, const0_rtx),
29477 simple_return_rtx)));
29478 SIBLING_CALL_P (insn) = 1;
29479 emit_barrier ();
29480
29481 /* Run just enough of rest_of_compilation to get the insns emitted.
29482 There's not really enough bulk here to make other passes such as
29483 instruction scheduling worth while. Note that use_thunk calls
29484 assemble_start_function and assemble_end_function. */
29485 insn = get_insns ();
29486 shorten_branches (insn);
29487 final_start_function (insn, file, 1);
29488 final (insn, file, 1);
29489 final_end_function ();
29490
29491 reload_completed = 0;
29492 epilogue_completed = 0;
29493 }
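/* Illustrative example (assumption): for a thunk with delta == -16 and
   no vcall offset on a 64-bit ELF target, the RTL emitted above boils
   down to

	addi 3,3,-16
	b <function>

   i.e. adjust the incoming this pointer in r3 and tail-call the real
   method, leaving LR untouched so the callee returns to our caller.  */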
29494 \f
29495 /* A quick summary of the various types of 'constant-pool tables'
29496 under PowerPC:
29497
29498 Target Flags Name One table per
29499 AIX (none) AIX TOC object file
29500 AIX -mfull-toc AIX TOC object file
29501 AIX -mminimal-toc AIX minimal TOC translation unit
29502 SVR4/EABI (none) SVR4 SDATA object file
29503 SVR4/EABI -fpic SVR4 pic object file
29504 SVR4/EABI -fPIC SVR4 PIC translation unit
29505 SVR4/EABI -mrelocatable EABI TOC function
29506 SVR4/EABI -maix AIX TOC object file
29507 SVR4/EABI -maix -mminimal-toc
29508 AIX minimal TOC translation unit
29509
29510 Name Reg. Set by entries contains:
29511 made by addrs? fp? sum?
29512
29513 AIX TOC 2 crt0 as Y option option
29514 AIX minimal TOC 30 prolog gcc Y Y option
29515 SVR4 SDATA 13 crt0 gcc N Y N
29516 SVR4 pic 30 prolog ld Y not yet N
29517 SVR4 PIC 30 prolog gcc Y option option
29518 EABI TOC 30 prolog gcc Y option option
29519
29520 */
29521
29522 /* Hash functions for the hash table. */
29523
29524 static unsigned
29525 rs6000_hash_constant (rtx k)
29526 {
29527 enum rtx_code code = GET_CODE (k);
29528 machine_mode mode = GET_MODE (k);
29529 unsigned result = (code << 3) ^ mode;
29530 const char *format;
29531 int flen, fidx;
29532
29533 format = GET_RTX_FORMAT (code);
29534 flen = strlen (format);
29535 fidx = 0;
29536
29537 switch (code)
29538 {
29539 case LABEL_REF:
29540 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29541
29542 case CONST_WIDE_INT:
29543 {
29544 int i;
29545 flen = CONST_WIDE_INT_NUNITS (k);
29546 for (i = 0; i < flen; i++)
29547 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29548 return result;
29549 }
29550
29551 case CONST_DOUBLE:
29552 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29553
29554 case CODE_LABEL:
29555 fidx = 3;
29556 break;
29557
29558 default:
29559 break;
29560 }
29561
29562 for (; fidx < flen; fidx++)
29563 switch (format[fidx])
29564 {
29565 case 's':
29566 {
29567 unsigned i, len;
29568 const char *str = XSTR (k, fidx);
29569 len = strlen (str);
29570 result = result * 613 + len;
29571 for (i = 0; i < len; i++)
29572 result = result * 613 + (unsigned) str[i];
29573 break;
29574 }
29575 case 'u':
29576 case 'e':
29577 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29578 break;
29579 case 'i':
29580 case 'n':
29581 result = result * 613 + (unsigned) XINT (k, fidx);
29582 break;
29583 case 'w':
29584 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29585 result = result * 613 + (unsigned) XWINT (k, fidx);
29586 else
29587 {
29588 size_t i;
29589 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29590 result = result * 613 + (unsigned) (XWINT (k, fidx)
29591 >> CHAR_BIT * i);
29592 }
29593 break;
29594 case '0':
29595 break;
29596 default:
29597 gcc_unreachable ();
29598 }
29599
29600 return result;
29601 }
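/* Minimal standalone sketch (illustrative only) of the polynomial
   string hashing used for the 's' case above, with the same 613
   multiplier:  */
#if 0
#include <string.h>

static unsigned
hash_string (unsigned result, const char *str)
{
  size_t len = strlen (str);
  /* Mix in the length first, then each character, exactly as the
     's' format case does.  */
  result = result * 613 + (unsigned) len;
  for (size_t i = 0; i < len; i++)
    result = result * 613 + (unsigned) str[i];
  return result;
}
#endif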
29602
29603 hashval_t
29604 toc_hasher::hash (toc_hash_struct *thc)
29605 {
29606 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29607 }
29608
29609 /* Compare H1 and H2 for equivalence. */
29610
29611 bool
29612 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29613 {
29614 rtx r1 = h1->key;
29615 rtx r2 = h2->key;
29616
29617 if (h1->key_mode != h2->key_mode)
29618 return 0;
29619
29620 return rtx_equal_p (r1, r2);
29621 }
29622
29623 /* These are the names given by the C++ front-end to vtables, and
29624 vtable-like objects. Ideally, this logic should not be here;
29625 instead, there should be some programmatic way of inquiring as
29626 to whether or not an object is a vtable. */
29627
29628 #define VTABLE_NAME_P(NAME) \
29629 (strncmp ("_vt.", (NAME), strlen ("_vt.")) == 0 \
29630 || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0 \
29631 || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0 \
29632 || strncmp ("_ZTI", (NAME), strlen ("_ZTI")) == 0 \
29633 || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
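/* Example (Itanium C++ ABI mangling, for illustration): "_ZTV4Base" is
   the vtable for class Base, "_ZTI4Base" its typeinfo and "_ZTT4Base"
   its VTT, so all of them satisfy VTABLE_NAME_P; "_vt." covers the old
   GNU v2 mangling.  */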
29634
29635 #ifdef NO_DOLLAR_IN_LABEL
29636 /* Return a GGC-allocated character string translating dollar signs in
29637 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29638
29639 const char *
29640 rs6000_xcoff_strip_dollar (const char *name)
29641 {
29642 char *strip, *p;
29643 const char *q;
29644 size_t len;
29645
29646 q = (const char *) strchr (name, '$');
29647
29648 if (q == 0 || q == name)
29649 return name;
29650
29651 len = strlen (name);
29652 strip = XALLOCAVEC (char, len + 1);
29653 strcpy (strip, name);
29654 p = strip + (q - name);
29655 while (p)
29656 {
29657 *p = '_';
29658 p = strchr (p + 1, '$');
29659 }
29660
29661 return ggc_alloc_string (strip, len);
29662 }
29663 #endif
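/* Usage example (illustrative): rs6000_xcoff_strip_dollar ("foo$bar$1")
   returns "foo_bar_1", while a name containing no '$', or starting with
   one, is returned unchanged.  */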
29664
29665 void
29666 rs6000_output_symbol_ref (FILE *file, rtx x)
29667 {
29668 const char *name = XSTR (x, 0);
29669
29670 /* Currently C++ toc references to vtables can be emitted before it
29671 is decided whether the vtable is public or private. If this is
29672 the case, then the linker will eventually complain that there is
29673 a reference to an unknown section. Thus, for vtables only,
29674 we emit the TOC reference to reference the identifier and not the
29675 symbol. */
29676 if (VTABLE_NAME_P (name))
29677 {
29678 RS6000_OUTPUT_BASENAME (file, name);
29679 }
29680 else
29681 assemble_name (file, name);
29682 }
29683
29684 /* Output a TOC entry. We derive the entry name from what is being
29685 written. */
29686
29687 void
29688 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29689 {
29690 char buf[256];
29691 const char *name = buf;
29692 rtx base = x;
29693 HOST_WIDE_INT offset = 0;
29694
29695 gcc_assert (!TARGET_NO_TOC);
29696
29697 /* When the linker won't eliminate them, don't output duplicate
29698 TOC entries (this happens on AIX if there is any kind of TOC,
29699 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29700 CODE_LABELs. */
29701 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29702 {
29703 struct toc_hash_struct *h;
29704
29705 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29706 time because GGC is not initialized at that point. */
29707 if (toc_hash_table == NULL)
29708 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29709
29710 h = ggc_alloc<toc_hash_struct> ();
29711 h->key = x;
29712 h->key_mode = mode;
29713 h->labelno = labelno;
29714
29715 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29716 if (*found == NULL)
29717 *found = h;
29718 else /* This is indeed a duplicate.
29719 Set this label equal to that label. */
29720 {
29721 fputs ("\t.set ", file);
29722 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29723 fprintf (file, "%d,", labelno);
29724 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29725 fprintf (file, "%d\n", ((*found)->labelno));
29726
29727 #ifdef HAVE_AS_TLS
29728 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29729 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29730 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29731 {
29732 fputs ("\t.set ", file);
29733 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29734 fprintf (file, "%d,", labelno);
29735 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29736 fprintf (file, "%d\n", ((*found)->labelno));
29737 }
29738 #endif
29739 return;
29740 }
29741 }
29742
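/* Illustrative output (the "LC.." spelling is an assumption about the
   XCOFF internal label prefix): if the same constant was first emitted
   under TOC label 5 and is requested again as label 12, the duplicate
   branch above prints

	.set LC..12,LC..5

   instead of emitting a second TOC entry.  */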
29743 /* If we're going to put a double constant in the TOC, make sure it's
29744 aligned properly when strict alignment is on. */
29745 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29746 && STRICT_ALIGNMENT
29747 && GET_MODE_BITSIZE (mode) >= 64
29748 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29749 ASM_OUTPUT_ALIGN (file, 3);
29751
29752 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29753
29754 /* Handle FP constants specially. Note that if we have a minimal
29755 TOC, things we put here aren't actually in the TOC, so we can allow
29756 FP constants. */
29757 if (CONST_DOUBLE_P (x)
29758 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29759 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29760 {
29761 long k[4];
29762
29763 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29764 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29765 else
29766 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29767
29768 if (TARGET_64BIT)
29769 {
29770 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29771 fputs (DOUBLE_INT_ASM_OP, file);
29772 else
29773 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29774 k[0] & 0xffffffff, k[1] & 0xffffffff,
29775 k[2] & 0xffffffff, k[3] & 0xffffffff);
29776 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29777 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29778 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29779 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29780 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29781 return;
29782 }
29783 else
29784 {
29785 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29786 fputs ("\t.long ", file);
29787 else
29788 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29789 k[0] & 0xffffffff, k[1] & 0xffffffff,
29790 k[2] & 0xffffffff, k[3] & 0xffffffff);
29791 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29792 k[0] & 0xffffffff, k[1] & 0xffffffff,
29793 k[2] & 0xffffffff, k[3] & 0xffffffff);
29794 return;
29795 }
29796 }
29797 else if (CONST_DOUBLE_P (x)
29798 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29799 {
29800 long k[2];
29801
29802 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29803 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29804 else
29805 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29806
29807 if (TARGET_64BIT)
29808 {
29809 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29810 fputs (DOUBLE_INT_ASM_OP, file);
29811 else
29812 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29813 k[0] & 0xffffffff, k[1] & 0xffffffff);
29814 fprintf (file, "0x%lx%08lx\n",
29815 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29816 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29817 return;
29818 }
29819 else
29820 {
29821 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29822 fputs ("\t.long ", file);
29823 else
29824 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29825 k[0] & 0xffffffff, k[1] & 0xffffffff);
29826 fprintf (file, "0x%lx,0x%lx\n",
29827 k[0] & 0xffffffff, k[1] & 0xffffffff);
29828 return;
29829 }
29830 }
29831 else if (CONST_DOUBLE_P (x)
29832 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29833 {
29834 long l;
29835
29836 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29837 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29838 else
29839 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29840
29841 if (TARGET_64BIT)
29842 {
29843 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29844 fputs (DOUBLE_INT_ASM_OP, file);
29845 else
29846 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29847 if (WORDS_BIG_ENDIAN)
29848 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29849 else
29850 fprintf (file, "0x%lx\n", l & 0xffffffff);
29851 return;
29852 }
29853 else
29854 {
29855 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29856 fputs ("\t.long ", file);
29857 else
29858 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29859 fprintf (file, "0x%lx\n", l & 0xffffffff);
29860 return;
29861 }
29862 }
29863 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29864 {
29865 unsigned HOST_WIDE_INT low;
29866 HOST_WIDE_INT high;
29867
29868 low = INTVAL (x) & 0xffffffff;
29869 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29870
29871 /* TOC entries are always Pmode-sized, so when big-endian
29872 smaller integer constants in the TOC need to be padded.
29873 (This is still a win over putting the constants in
29874 a separate constant pool, because then we'd have
29875 to have both a TOC entry _and_ the actual constant.)
29876
29877 For a 32-bit target, CONST_INT values are loaded and shifted
29878 entirely within `low' and can be stored in one TOC entry. */
29879
29880 /* It would be easy to make this work, but it doesn't now. */
29881 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29882
29883 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29884 {
29885 low |= high << 32;
29886 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29887 high = (HOST_WIDE_INT) low >> 32;
29888 low &= 0xffffffff;
29889 }
29890
29891 if (TARGET_64BIT)
29892 {
29893 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29894 fputs (DOUBLE_INT_ASM_OP, file);
29895 else
29896 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29897 (long) high & 0xffffffff, (long) low & 0xffffffff);
29898 fprintf (file, "0x%lx%08lx\n",
29899 (long) high & 0xffffffff, (long) low & 0xffffffff);
29900 return;
29901 }
29902 else
29903 {
29904 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29905 {
29906 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29907 fputs ("\t.long ", file);
29908 else
29909 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29910 (long) high & 0xffffffff, (long) low & 0xffffffff);
29911 fprintf (file, "0x%lx,0x%lx\n",
29912 (long) high & 0xffffffff, (long) low & 0xffffffff);
29913 }
29914 else
29915 {
29916 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29917 fputs ("\t.long ", file);
29918 else
29919 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29920 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29921 }
29922 return;
29923 }
29924 }
29925
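/* Worked example (illustrative): for the DFmode constant 1.0 on a
   64-bit big-endian AIX target, REAL_VALUE_TO_TARGET_DOUBLE gives
   k[0] == 0x3ff00000 and k[1] == 0, so the DFmode branch above emits

	.tc FD_3ff00000_0[TC],0x3ff0000000000000  */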
29926 if (GET_CODE (x) == CONST)
29927 {
29928 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29929 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29930
29931 base = XEXP (XEXP (x, 0), 0);
29932 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29933 }
29934
29935 switch (GET_CODE (base))
29936 {
29937 case SYMBOL_REF:
29938 name = XSTR (base, 0);
29939 break;
29940
29941 case LABEL_REF:
29942 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29943 CODE_LABEL_NUMBER (XEXP (base, 0)));
29944 break;
29945
29946 case CODE_LABEL:
29947 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29948 break;
29949
29950 default:
29951 gcc_unreachable ();
29952 }
29953
29954 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29955 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29956 else
29957 {
29958 fputs ("\t.tc ", file);
29959 RS6000_OUTPUT_BASENAME (file, name);
29960
29961 if (offset < 0)
29962 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29963 else if (offset)
29964 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29965
29966 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29967 after other TOC symbols, reducing overflow of small TOC access
29968 to [TC] symbols. */
29969 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29970 ? "[TE]," : "[TC],", file);
29971 }
29972
29973 /* Currently C++ toc references to vtables can be emitted before it
29974 is decided whether the vtable is public or private. If this is
29975 the case, then the linker will eventually complain that there is
29976 a TOC reference to an unknown section. Thus, for vtables only,
29977 we emit the TOC reference to reference the symbol and not the
29978 section. */
29979 if (VTABLE_NAME_P (name))
29980 {
29981 RS6000_OUTPUT_BASENAME (file, name);
29982 if (offset < 0)
29983 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29984 else if (offset > 0)
29985 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29986 }
29987 else
29988 output_addr_const (file, x);
29989
29990 #if HAVE_AS_TLS
29991 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29992 {
29993 switch (SYMBOL_REF_TLS_MODEL (base))
29994 {
29995 case 0:
29996 break;
29997 case TLS_MODEL_LOCAL_EXEC:
29998 fputs ("@le", file);
29999 break;
30000 case TLS_MODEL_INITIAL_EXEC:
30001 fputs ("@ie", file);
30002 break;
30003 /* Use global-dynamic for local-dynamic. */
30004 case TLS_MODEL_GLOBAL_DYNAMIC:
30005 case TLS_MODEL_LOCAL_DYNAMIC:
30006 putc ('\n', file);
30007 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30008 fputs ("\t.tc .", file);
30009 RS6000_OUTPUT_BASENAME (file, name);
30010 fputs ("[TC],", file);
30011 output_addr_const (file, x);
30012 fputs ("@m", file);
30013 break;
30014 default:
30015 gcc_unreachable ();
30016 }
30017 }
30018 #endif
30019
30020 putc ('\n', file);
30021 }
30022 \f
30023 /* Output an assembler pseudo-op to write an ASCII string of N characters
30024 starting at P to FILE.
30025
30026 On the RS/6000, we have to do this using the .byte operation and
30027 write out special characters outside the quoted string.
30028 Also, the assembler is broken; very long strings are truncated,
30029 so we must artificially break them up early. */
30030
30031 void
30032 output_ascii (FILE *file, const char *p, int n)
30033 {
30034 char c;
30035 int i, count_string;
30036 const char *for_string = "\t.byte \"";
30037 const char *for_decimal = "\t.byte ";
30038 const char *to_close = NULL;
30039
30040 count_string = 0;
30041 for (i = 0; i < n; i++)
30042 {
30043 c = *p++;
30044 if (c >= ' ' && c < 0177)
30045 {
30046 if (for_string)
30047 fputs (for_string, file);
30048 putc (c, file);
30049
30050 /* Write two quotes to get one. */
30051 if (c == '"')
30052 {
30053 putc (c, file);
30054 ++count_string;
30055 }
30056
30057 for_string = NULL;
30058 for_decimal = "\"\n\t.byte ";
30059 to_close = "\"\n";
30060 ++count_string;
30061
30062 if (count_string >= 512)
30063 {
30064 fputs (to_close, file);
30065
30066 for_string = "\t.byte \"";
30067 for_decimal = "\t.byte ";
30068 to_close = NULL;
30069 count_string = 0;
30070 }
30071 }
30072 else
30073 {
30074 if (for_decimal)
30075 fputs (for_decimal, file);
30076 fprintf (file, "%d", c);
30077
30078 for_string = "\n\t.byte \"";
30079 for_decimal = ", ";
30080 to_close = "\n";
30081 count_string = 0;
30082 }
30083 }
30084
30085 /* Now close the string if we have written one. Then end the line. */
30086 if (to_close)
30087 fputs (to_close, file);
30088 }
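/* Worked example (illustrative): output_ascii (file, "Hi\n", 3) writes

	.byte "Hi"
	.byte 10

   printable characters accumulate inside a quoted string (closed and
   reopened every 512 characters to stay within assembler limits), while
   everything else is emitted as a decimal .byte value.  */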
30089 \f
30090 /* Generate a unique section name for FILENAME for a section type
30091 represented by SECTION_DESC. Output goes into BUF.
30092
30093 SECTION_DESC can be any string, as long as it is different for each
30094 possible section type.
30095
30096 We name the section in the same manner as xlc. The name begins with an
30097 underscore followed by the filename (after stripping any leading directory
30098 names) with the last period replaced by the string SECTION_DESC. If
30099 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30100 the name. */
30101
30102 void
30103 rs6000_gen_section_name (char **buf, const char *filename,
30104 const char *section_desc)
30105 {
30106 const char *q, *after_last_slash, *last_period = 0;
30107 char *p;
30108 int len;
30109
30110 after_last_slash = filename;
30111 for (q = filename; *q; q++)
30112 {
30113 if (*q == '/')
30114 after_last_slash = q + 1;
30115 else if (*q == '.')
30116 last_period = q;
30117 }
30118
30119 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30120 *buf = (char *) xmalloc (len);
30121
30122 p = *buf;
30123 *p++ = '_';
30124
30125 for (q = after_last_slash; *q; q++)
30126 {
30127 if (q == last_period)
30128 {
30129 strcpy (p, section_desc);
30130 p += strlen (section_desc);
30131 break;
30132 }
30133
30134 else if (ISALNUM (*q))
30135 *p++ = *q;
30136 }
30137
30138 if (last_period == 0)
30139 strcpy (p, section_desc);
30140 else
30141 *p = '\0';
30142 }
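/* Usage example (with a hypothetical descriptor string): calling
   rs6000_gen_section_name (&buf, "src/foo.c", "_bss_") stores
   "_foo_bss_" in BUF: leading directories are stripped, non-alphanumeric
   characters are dropped, and the last period is replaced by
   SECTION_DESC.  */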
30143 \f
30144 /* Emit profile function. */
30145
30146 void
30147 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30148 {
30149 /* Non-standard profiling for kernels, which just saves LR then calls
30150 _mcount without worrying about arg saves. The idea is to change
30151 the function prologue as little as possible as it isn't easy to
30152 account for arg save/restore code added just for _mcount. */
30153 if (TARGET_PROFILE_KERNEL)
30154 return;
30155
30156 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30157 {
30158 #ifndef NO_PROFILE_COUNTERS
30159 # define NO_PROFILE_COUNTERS 0
30160 #endif
30161 if (NO_PROFILE_COUNTERS)
30162 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30163 LCT_NORMAL, VOIDmode);
30164 else
30165 {
30166 char buf[30];
30167 const char *label_name;
30168 rtx fun;
30169
30170 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30171 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30172 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30173
30174 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30175 LCT_NORMAL, VOIDmode, fun, Pmode);
30176 }
30177 }
30178 else if (DEFAULT_ABI == ABI_DARWIN)
30179 {
30180 const char *mcount_name = RS6000_MCOUNT;
30181 int caller_addr_regno = LR_REGNO;
30182
30183 /* Be conservative and always set this, at least for now. */
30184 crtl->uses_pic_offset_table = 1;
30185
30186 #if TARGET_MACHO
30187 /* For PIC code, set up a stub and collect the caller's address
30188 from r0, which is where the prologue puts it. */
30189 if (MACHOPIC_INDIRECT
30190 && crtl->uses_pic_offset_table)
30191 caller_addr_regno = 0;
30192 #endif
30193 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30194 LCT_NORMAL, VOIDmode,
30195 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30196 }
30197 }
30198
30199 /* Write function profiler code. */
30200
30201 void
30202 output_function_profiler (FILE *file, int labelno)
30203 {
30204 char buf[100];
30205
30206 switch (DEFAULT_ABI)
30207 {
30208 default:
30209 gcc_unreachable ();
30210
30211 case ABI_V4:
30212 if (!TARGET_32BIT)
30213 {
30214 warning (0, "no profiling of 64-bit code for this ABI");
30215 return;
30216 }
30217 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30218 fprintf (file, "\tmflr %s\n", reg_names[0]);
30219 if (NO_PROFILE_COUNTERS)
30220 {
30221 asm_fprintf (file, "\tstw %s,4(%s)\n",
30222 reg_names[0], reg_names[1]);
30223 }
30224 else if (TARGET_SECURE_PLT && flag_pic)
30225 {
30226 if (TARGET_LINK_STACK)
30227 {
30228 char name[32];
30229 get_ppc476_thunk_name (name);
30230 asm_fprintf (file, "\tbl %s\n", name);
30231 }
30232 else
30233 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30234 asm_fprintf (file, "\tstw %s,4(%s)\n",
30235 reg_names[0], reg_names[1]);
30236 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30237 asm_fprintf (file, "\taddis %s,%s,",
30238 reg_names[12], reg_names[12]);
30239 assemble_name (file, buf);
30240 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30241 assemble_name (file, buf);
30242 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30243 }
30244 else if (flag_pic == 1)
30245 {
30246 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30247 asm_fprintf (file, "\tstw %s,4(%s)\n",
30248 reg_names[0], reg_names[1]);
30249 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30250 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30251 assemble_name (file, buf);
30252 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30253 }
30254 else if (flag_pic > 1)
30255 {
30256 asm_fprintf (file, "\tstw %s,4(%s)\n",
30257 reg_names[0], reg_names[1]);
30258 /* Now, we need to get the address of the label. */
30259 if (TARGET_LINK_STACK)
30260 {
30261 char name[32];
30262 get_ppc476_thunk_name (name);
30263 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30264 assemble_name (file, buf);
30265 fputs ("-.\n1:", file);
30266 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30267 asm_fprintf (file, "\taddi %s,%s,4\n",
30268 reg_names[11], reg_names[11]);
30269 }
30270 else
30271 {
30272 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30273 assemble_name (file, buf);
30274 fputs ("-.\n1:", file);
30275 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30276 }
30277 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30278 reg_names[0], reg_names[11]);
30279 asm_fprintf (file, "\tadd %s,%s,%s\n",
30280 reg_names[0], reg_names[0], reg_names[11]);
30281 }
30282 else
30283 {
30284 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30285 assemble_name (file, buf);
30286 fputs ("@ha\n", file);
30287 asm_fprintf (file, "\tstw %s,4(%s)\n",
30288 reg_names[0], reg_names[1]);
30289 asm_fprintf (file, "\tla %s,", reg_names[0]);
30290 assemble_name (file, buf);
30291 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30292 }
30293
30294 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30295 fprintf (file, "\tbl %s%s\n",
30296 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30297 break;
30298
30299 case ABI_AIX:
30300 case ABI_ELFv2:
30301 case ABI_DARWIN:
30302 /* Don't do anything, done in output_profile_hook (). */
30303 break;
30304 }
30305 }
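/* Illustrative -p output (the exact label spelling is an assumption)
   for 32-bit SVR4, non-PIC, with profile counters enabled: for
   labelno == 0 the V.4 case above prints roughly

	mflr 0
	lis 12,.LP0@ha
	stw 0,4(1)
	la 0,.LP0@l(12)
	bl _mcount

   saving LR in the caller's frame and passing the address of the
   per-call-site counter word to _mcount in r0.  */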
30306
30307 \f
30308
30309 /* The following variable value is the last issued insn. */
30310
30311 static rtx_insn *last_scheduled_insn;
30312
30313 /* The following variable helps to balance issuing of load and
30314 store instructions. */
30315
30316 static int load_store_pendulum;
30317
30318 /* The following variable helps pair divide insns during scheduling. */
30319 static int divide_cnt;
30320 /* The following variable helps pair and alternate vector and vector load
30321 insns during scheduling. */
30322 static int vec_pairing;
30323
30324
30325 /* Power4 load update and store update instructions are cracked into a
30326 load or store and an integer insn which are executed in the same cycle.
30327 Branches have their own dispatch slot which does not count against the
30328 GCC issue rate, but it changes the program flow so there are no other
30329 instructions to issue in this cycle. */
30330
30331 static int
30332 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30333 {
30334 last_scheduled_insn = insn;
30335 if (GET_CODE (PATTERN (insn)) == USE
30336 || GET_CODE (PATTERN (insn)) == CLOBBER)
30337 {
30338 cached_can_issue_more = more;
30339 return cached_can_issue_more;
30340 }
30341
30342 if (insn_terminates_group_p (insn, current_group))
30343 {
30344 cached_can_issue_more = 0;
30345 return cached_can_issue_more;
30346 }
30347
30348 /* If the insn has no reservation but we reach here anyway, leave the issue count unchanged. */
30349 if (recog_memoized (insn) < 0)
30350 return more;
30351
30352 if (rs6000_sched_groups)
30353 {
30354 if (is_microcoded_insn (insn))
30355 cached_can_issue_more = 0;
30356 else if (is_cracked_insn (insn))
30357 cached_can_issue_more = more > 2 ? more - 2 : 0;
30358 else
30359 cached_can_issue_more = more - 1;
30360
30361 return cached_can_issue_more;
30362 }
30363
30364 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30365 return 0;
30366
30367 cached_can_issue_more = more - 1;
30368 return cached_can_issue_more;
30369 }
30370
30371 static int
30372 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30373 {
30374 int r = rs6000_variable_issue_1 (insn, more);
30375 if (verbose)
30376 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30377 return r;
30378 }
30379
30380 /* Adjust the cost of a scheduling dependency. Return the new cost of
30381 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30382
30383 static int
30384 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30385 unsigned int)
30386 {
30387 enum attr_type attr_type;
30388
30389 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30390 return cost;
30391
30392 switch (dep_type)
30393 {
30394 case REG_DEP_TRUE:
30395 {
30396 /* Data dependency; DEP_INSN writes a register that INSN reads
30397 some cycles later. */
30398
30399 /* Separate a load from a narrower, dependent store. */
30400 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30401 && GET_CODE (PATTERN (insn)) == SET
30402 && GET_CODE (PATTERN (dep_insn)) == SET
30403 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30404 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30405 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30406 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30407 return cost + 14;
30408
30409 attr_type = get_attr_type (insn);
30410
30411 switch (attr_type)
30412 {
30413 case TYPE_JMPREG:
30414 /* Tell the first scheduling pass about the latency between
30415 a mtctr and bctr (and mtlr and br/blr). The first
30416 scheduling pass will not know about this latency since
30417 the mtctr instruction, which has the latency associated
30418 to it, will be generated by reload. */
30419 return 4;
30420 case TYPE_BRANCH:
30421 /* Leave some extra cycles between a compare and its
30422 dependent branch, to inhibit expensive mispredicts. */
30423 if ((rs6000_tune == PROCESSOR_PPC603
30424 || rs6000_tune == PROCESSOR_PPC604
30425 || rs6000_tune == PROCESSOR_PPC604e
30426 || rs6000_tune == PROCESSOR_PPC620
30427 || rs6000_tune == PROCESSOR_PPC630
30428 || rs6000_tune == PROCESSOR_PPC750
30429 || rs6000_tune == PROCESSOR_PPC7400
30430 || rs6000_tune == PROCESSOR_PPC7450
30431 || rs6000_tune == PROCESSOR_PPCE5500
30432 || rs6000_tune == PROCESSOR_PPCE6500
30433 || rs6000_tune == PROCESSOR_POWER4
30434 || rs6000_tune == PROCESSOR_POWER5
30435 || rs6000_tune == PROCESSOR_POWER7
30436 || rs6000_tune == PROCESSOR_POWER8
30437 || rs6000_tune == PROCESSOR_POWER9
30438 || rs6000_tune == PROCESSOR_CELL)
30439 && recog_memoized (dep_insn)
30440 && (INSN_CODE (dep_insn) >= 0))
30442 switch (get_attr_type (dep_insn))
30443 {
30444 case TYPE_CMP:
30445 case TYPE_FPCOMPARE:
30446 case TYPE_CR_LOGICAL:
30447 return cost + 2;
30448 case TYPE_EXTS:
30449 case TYPE_MUL:
30450 if (get_attr_dot (dep_insn) == DOT_YES)
30451 return cost + 2;
30452 else
30453 break;
30454 case TYPE_SHIFT:
30455 if (get_attr_dot (dep_insn) == DOT_YES
30456 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30457 return cost + 2;
30458 else
30459 break;
30460 default:
30461 break;
30462 }
30463 break;
30464
30465 case TYPE_STORE:
30466 case TYPE_FPSTORE:
30467 if ((rs6000_tune == PROCESSOR_POWER6)
30468 && recog_memoized (dep_insn)
30469 && (INSN_CODE (dep_insn) >= 0))
30470 {
30472 if (GET_CODE (PATTERN (insn)) != SET)
30473 /* If this happens, we have to extend this to schedule
30474 optimally. Return default for now. */
30475 return cost;
30476
30477 /* Adjust the cost for the case where the value written
30478 by a fixed point operation is used as the address
30479 gen value on a store. */
30480 switch (get_attr_type (dep_insn))
30481 {
30482 case TYPE_LOAD:
30483 case TYPE_CNTLZ:
30484 {
30485 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30486 return get_attr_sign_extend (dep_insn)
30487 == SIGN_EXTEND_YES ? 6 : 4;
30488 break;
30489 }
30490 case TYPE_SHIFT:
30491 {
30492 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30493 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30494 6 : 3;
30495 break;
30496 }
30497 case TYPE_INTEGER:
30498 case TYPE_ADD:
30499 case TYPE_LOGICAL:
30500 case TYPE_EXTS:
30501 case TYPE_INSERT:
30502 {
30503 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30504 return 3;
30505 break;
30506 }
30507 case TYPE_STORE:
30508 case TYPE_FPLOAD:
30509 case TYPE_FPSTORE:
30510 {
30511 if (get_attr_update (dep_insn) == UPDATE_YES
30512 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30513 return 3;
30514 break;
30515 }
30516 case TYPE_MUL:
30517 {
30518 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30519 return 17;
30520 break;
30521 }
30522 case TYPE_DIV:
30523 {
30524 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30525 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30526 break;
30527 }
30528 default:
30529 break;
30530 }
30531 }
30532 break;
30533
30534 case TYPE_LOAD:
30535 if ((rs6000_tune == PROCESSOR_POWER6)
30536 && recog_memoized (dep_insn)
30537 && (INSN_CODE (dep_insn) >= 0))
30538 {
30540 /* Adjust the cost for the case where the value written
30541 by a fixed point instruction is used within the address
30542 gen portion of a subsequent load(u)(x) */
30543 switch (get_attr_type (dep_insn))
30544 {
30545 case TYPE_LOAD:
30546 case TYPE_CNTLZ:
30547 {
30548 if (set_to_load_agen (dep_insn, insn))
30549 return get_attr_sign_extend (dep_insn)
30550 == SIGN_EXTEND_YES ? 6 : 4;
30551 break;
30552 }
30553 case TYPE_SHIFT:
30554 {
30555 if (set_to_load_agen (dep_insn, insn))
30556 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30557 6 : 3;
30558 break;
30559 }
30560 case TYPE_INTEGER:
30561 case TYPE_ADD:
30562 case TYPE_LOGICAL:
30563 case TYPE_EXTS:
30564 case TYPE_INSERT:
30565 {
30566 if (set_to_load_agen (dep_insn, insn))
30567 return 3;
30568 break;
30569 }
30570 case TYPE_STORE:
30571 case TYPE_FPLOAD:
30572 case TYPE_FPSTORE:
30573 {
30574 if (get_attr_update (dep_insn) == UPDATE_YES
30575 && set_to_load_agen (dep_insn, insn))
30576 return 3;
30577 break;
30578 }
30579 case TYPE_MUL:
30580 {
30581 if (set_to_load_agen (dep_insn, insn))
30582 return 17;
30583 break;
30584 }
30585 case TYPE_DIV:
30586 {
30587 if (set_to_load_agen (dep_insn, insn))
30588 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30589 break;
30590 }
30591 default:
30592 break;
30593 }
30594 }
30595 break;
30596
30597 case TYPE_FPLOAD:
30598 if ((rs6000_tune == PROCESSOR_POWER6)
30599 && get_attr_update (insn) == UPDATE_NO
30600 && recog_memoized (dep_insn)
30601 && (INSN_CODE (dep_insn) >= 0)
30602 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30603 return 2;
30604
30605 default:
30606 break;
30607 }
30608
30609 /* Fall out to return default cost. */
30610 }
30611 break;
30612
30613 case REG_DEP_OUTPUT:
30614 /* Output dependency; DEP_INSN writes a register that INSN writes some
30615 cycles later. */
30616 if ((rs6000_tune == PROCESSOR_POWER6)
30617 && recog_memoized (dep_insn)
30618 && (INSN_CODE (dep_insn) >= 0))
30619 {
30620 attr_type = get_attr_type (insn);
30621
30622 switch (attr_type)
30623 {
30624 case TYPE_FP:
30625 case TYPE_FPSIMPLE:
30626 if (get_attr_type (dep_insn) == TYPE_FP
30627 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30628 return 1;
30629 break;
30630 case TYPE_FPLOAD:
30631 if (get_attr_update (insn) == UPDATE_NO
30632 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30633 return 2;
30634 break;
30635 default:
30636 break;
30637 }
30638 }
30639 /* Fall through, no cost for output dependency. */
30640 /* FALLTHRU */
30641
30642 case REG_DEP_ANTI:
30643 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30644 cycles later. */
30645 return 0;
30646
30647 default:
30648 gcc_unreachable ();
30649 }
30650
30651 return cost;
30652 }
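/* Illustrative reading of the POWER6 tables above (no new policy, just
   an example): a shift whose result feeds the address generation of a
   following load costs 3 (6 for a variable shift), and a multiply
   feeding a load address costs 17, matching the bypass penalties the
   pipeline description cannot express directly.  */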
30653
30654 /* Debug version of rs6000_adjust_cost. */
30655
30656 static int
30657 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30658 int cost, unsigned int dw)
30659 {
30660 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30661
30662 if (ret != cost)
30663 {
30664 const char *dep;
30665
30666 switch (dep_type)
30667 {
30668 default: dep = "unknown dependency"; break;
30669 case REG_DEP_TRUE: dep = "data dependency"; break;
30670 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30671 case REG_DEP_ANTI: dep = "anti dependency"; break;
30672 }
30673
30674 fprintf (stderr,
30675 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30676 "%s, insn:\n", ret, cost, dep);
30677
30678 debug_rtx (insn);
30679 }
30680
30681 return ret;
30682 }
30683
30684 /* Return true if INSN is microcoded by the processor;
30685 return false otherwise. */
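/* Illustrative examples (assumption, not exhaustive): on POWER4/5 an
   update-form indexed load or store such as lwzux or stwux, or an
   mfcr, matches the conditions checked below.  */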
30686
30687 static bool
30688 is_microcoded_insn (rtx_insn *insn)
30689 {
30690 if (!insn || !NONDEBUG_INSN_P (insn)
30691 || GET_CODE (PATTERN (insn)) == USE
30692 || GET_CODE (PATTERN (insn)) == CLOBBER)
30693 return false;
30694
30695 if (rs6000_tune == PROCESSOR_CELL)
30696 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30697
30698 if (rs6000_sched_groups
30699 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30700 {
30701 enum attr_type type = get_attr_type (insn);
30702 if ((type == TYPE_LOAD
30703 && get_attr_update (insn) == UPDATE_YES
30704 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30705 || ((type == TYPE_LOAD || type == TYPE_STORE)
30706 && get_attr_update (insn) == UPDATE_YES
30707 && get_attr_indexed (insn) == INDEXED_YES)
30708 || type == TYPE_MFCR)
30709 return true;
30710 }
30711
30712 return false;
30713 }
30714
30715 /* The function returns true if INSN is cracked into 2 instructions
30716 by the processor (and therefore occupies 2 issue slots). */
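/* Illustrative examples (assumption, not exhaustive): an update-form
   store such as stwu, a record-form sign extension such as extsw.,
   or any divide matches the conditions checked below.  */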
30717
30718 static bool
30719 is_cracked_insn (rtx_insn *insn)
30720 {
30721 if (!insn || !NONDEBUG_INSN_P (insn)
30722 || GET_CODE (PATTERN (insn)) == USE
30723 || GET_CODE (PATTERN (insn)) == CLOBBER)
30724 return false;
30725
30726 if (rs6000_sched_groups
30727 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30728 {
30729 enum attr_type type = get_attr_type (insn);
30730 if ((type == TYPE_LOAD
30731 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30732 && get_attr_update (insn) == UPDATE_NO)
30733 || (type == TYPE_LOAD
30734 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30735 && get_attr_update (insn) == UPDATE_YES
30736 && get_attr_indexed (insn) == INDEXED_NO)
30737 || (type == TYPE_STORE
30738 && get_attr_update (insn) == UPDATE_YES
30739 && get_attr_indexed (insn) == INDEXED_NO)
30740 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30741 && get_attr_update (insn) == UPDATE_YES)
30742 || (type == TYPE_CR_LOGICAL
30743 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30744 || (type == TYPE_EXTS
30745 && get_attr_dot (insn) == DOT_YES)
30746 || (type == TYPE_SHIFT
30747 && get_attr_dot (insn) == DOT_YES
30748 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30749 || (type == TYPE_MUL
30750 && get_attr_dot (insn) == DOT_YES)
30751 || type == TYPE_DIV
30752 || (type == TYPE_INSERT
30753 && get_attr_size (insn) == SIZE_32))
30754 return true;
30755 }
30756
30757 return false;
30758 }
30759
30760 /* The function returns true if INSN can be issued only from
30761 the branch slot. */
30762
30763 static bool
30764 is_branch_slot_insn (rtx_insn *insn)
30765 {
30766 if (!insn || !NONDEBUG_INSN_P (insn)
30767 || GET_CODE (PATTERN (insn)) == USE
30768 || GET_CODE (PATTERN (insn)) == CLOBBER)
30769 return false;
30770
30771 if (rs6000_sched_groups)
30772 {
30773 enum attr_type type = get_attr_type (insn);
30774 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30775 return true;
30776 return false;
30777 }
30778
30779 return false;
30780 }
30781
30782 /* Return true if OUT_INSN sets a value that is used in the
30783 address generation computation of IN_INSN. */
30784 static bool
30785 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30786 {
30787 rtx out_set, in_set;
30788
30789 /* For performance reasons, only handle the simple case where
30790 both loads are a single_set. */
30791 out_set = single_set (out_insn);
30792 if (out_set)
30793 {
30794 in_set = single_set (in_insn);
30795 if (in_set)
30796 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30797 }
30798
30799 return false;
30800 }
30801
30802 /* Try to determine base/offset/size parts of the given MEM.
30803 Return true if successful, false if any of the values couldn't
30804 be determined.
30805
30806 This function only looks for REG or REG+CONST address forms.
30807 REG+REG address form will return false. */
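/* Illustrative example (assumption): a reference such as
   (mem:DI (plus:DI (reg:DI 3) (const_int 8)))
   with a known size yields *base = r3, *offset = 8, *size = 8.  */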
30808
30809 static bool
30810 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30811 HOST_WIDE_INT *size)
30812 {
30813 rtx addr_rtx;
30814 if (MEM_SIZE_KNOWN_P (mem))
30815 *size = MEM_SIZE (mem);
30816 else
30817 return false;
30818
30819 addr_rtx = (XEXP (mem, 0));
30820 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30821 addr_rtx = XEXP (addr_rtx, 1);
30822
30823 *offset = 0;
30824 while (GET_CODE (addr_rtx) == PLUS
30825 && CONST_INT_P (XEXP (addr_rtx, 1)))
30826 {
30827 *offset += INTVAL (XEXP (addr_rtx, 1));
30828 addr_rtx = XEXP (addr_rtx, 0);
30829 }
30830 if (!REG_P (addr_rtx))
30831 return false;
30832
30833 *base = addr_rtx;
30834 return true;
30835 }
30836
30837 /* Return true if the target storage location of MEM1 is adjacent
30838 to the target storage location of MEM2. */
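/* Illustrative example (assumption): accesses at offsets 0..3 and 4..7
   from the same base register satisfy off1 + size1 == off2 and are
   therefore adjacent.  */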
30840
30841 static bool
30842 adjacent_mem_locations (rtx mem1, rtx mem2)
30843 {
30844 rtx reg1, reg2;
30845 HOST_WIDE_INT off1, size1, off2, size2;
30846
30847 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30848 && get_memref_parts (mem2, &reg2, &off2, &size2))
30849 return ((REGNO (reg1) == REGNO (reg2))
30850 && ((off1 + size1 == off2)
30851 || (off2 + size2 == off1)));
30852
30853 return false;
30854 }
30855
30856 /* This function returns true if it can be determined that the two MEM
30857 locations overlap by at least 1 byte based on base reg/offset/size. */
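/* Illustrative example (assumption): two size-8 accesses at offsets 0
   and 4 from the same base register overlap, since off1 <= off2 and
   off1 + size1 > off2.  */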
30858
30859 static bool
30860 mem_locations_overlap (rtx mem1, rtx mem2)
30861 {
30862 rtx reg1, reg2;
30863 HOST_WIDE_INT off1, size1, off2, size2;
30864
30865 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30866 && get_memref_parts (mem2, &reg2, &off2, &size2))
30867 return ((REGNO (reg1) == REGNO (reg2))
30868 && (((off1 <= off2) && (off1 + size1 > off2))
30869 || ((off2 <= off1) && (off2 + size2 > off1))));
30870
30871 return false;
30872 }
30873
30874 /* A C statement (sans semicolon) to update the integer scheduling
30875 priority INSN_PRIORITY (INSN). Increase the priority to execute
30876 INSN earlier, reduce the priority to execute INSN later. Do not
30877 define this macro if you do not need to adjust the scheduling
30878 priorities of insns. */
30879
30880 static int
30881 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30882 {
30883 rtx load_mem, str_mem;
30884 /* On machines (like the 750) which have asymmetric integer units,
30885 where one integer unit can do multiplies and divides and the other
30886 can't, reduce the priority of multiply/divide insns so that other
30887 integer operations are scheduled ahead of them. */
30888
30889 #if 0
30890 if (! INSN_P (insn))
30891 return priority;
30892
30893 if (GET_CODE (PATTERN (insn)) == USE)
30894 return priority;
30895
30896 switch (rs6000_tune) {
30897 case PROCESSOR_PPC750:
30898 switch (get_attr_type (insn))
30899 {
30900 default:
30901 break;
30902
30903 case TYPE_MUL:
30904 case TYPE_DIV:
30905 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30906 priority, priority);
30907 if (priority >= 0 && priority < 0x01000000)
30908 priority >>= 3;
30909 break;
30910 }
30911 }
30912 #endif
30913
30914 if (insn_must_be_first_in_group (insn)
30915 && reload_completed
30916 && current_sched_info->sched_max_insns_priority
30917 && rs6000_sched_restricted_insns_priority)
30918 {
30919
30920 /* Prioritize insns that can be dispatched only in the first
30921 dispatch slot. */
30922 if (rs6000_sched_restricted_insns_priority == 1)
30923 /* Attach highest priority to insn. This means that in
30924 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30925 precede 'priority' (critical path) considerations. */
30926 return current_sched_info->sched_max_insns_priority;
30927 else if (rs6000_sched_restricted_insns_priority == 2)
30928 /* Increase priority of insn by a minimal amount. This means that in
30929 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30930 considerations precede dispatch-slot restriction considerations. */
30931 return (priority + 1);
30932 }
30933
30934 if (rs6000_tune == PROCESSOR_POWER6
30935 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30936 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30937 /* Attach highest priority to insn if the scheduler has just issued two
30938 stores and this instruction is a load, or two loads and this instruction
30939 is a store. Power6 wants loads and stores scheduled alternately
30940 when possible. */
30941 return current_sched_info->sched_max_insns_priority;
30942
30943 return priority;
30944 }
30945
30946 /* Return true if the instruction is nonpipelined on the Cell. */
30947 static bool
30948 is_nonpipeline_insn (rtx_insn *insn)
30949 {
30950 enum attr_type type;
30951 if (!insn || !NONDEBUG_INSN_P (insn)
30952 || GET_CODE (PATTERN (insn)) == USE
30953 || GET_CODE (PATTERN (insn)) == CLOBBER)
30954 return false;
30955
30956 type = get_attr_type (insn);
30957 if (type == TYPE_MUL
30958 || type == TYPE_DIV
30959 || type == TYPE_SDIV
30960 || type == TYPE_DDIV
30961 || type == TYPE_SSQRT
30962 || type == TYPE_DSQRT
30963 || type == TYPE_MFCR
30964 || type == TYPE_MFCRF
30965 || type == TYPE_MFJMPR)
30966 {
30967 return true;
30968 }
30969 return false;
30970 }
30971
30972
30973 /* Return how many instructions the machine can issue per cycle. */
30974
30975 static int
30976 rs6000_issue_rate (void)
30977 {
30978 /* Unless scheduling for register pressure, use issue rate of 1 for
30979 first scheduling pass to decrease degradation. */
30980 if (!reload_completed && !flag_sched_pressure)
30981 return 1;
30982
30983 switch (rs6000_tune) {
30984 case PROCESSOR_RS64A:
30985 case PROCESSOR_PPC601: /* ? */
30986 case PROCESSOR_PPC7450:
30987 return 3;
30988 case PROCESSOR_PPC440:
30989 case PROCESSOR_PPC603:
30990 case PROCESSOR_PPC750:
30991 case PROCESSOR_PPC7400:
30992 case PROCESSOR_PPC8540:
30993 case PROCESSOR_PPC8548:
30994 case PROCESSOR_CELL:
30995 case PROCESSOR_PPCE300C2:
30996 case PROCESSOR_PPCE300C3:
30997 case PROCESSOR_PPCE500MC:
30998 case PROCESSOR_PPCE500MC64:
30999 case PROCESSOR_PPCE5500:
31000 case PROCESSOR_PPCE6500:
31001 case PROCESSOR_TITAN:
31002 return 2;
31003 case PROCESSOR_PPC476:
31004 case PROCESSOR_PPC604:
31005 case PROCESSOR_PPC604e:
31006 case PROCESSOR_PPC620:
31007 case PROCESSOR_PPC630:
31008 return 4;
31009 case PROCESSOR_POWER4:
31010 case PROCESSOR_POWER5:
31011 case PROCESSOR_POWER6:
31012 case PROCESSOR_POWER7:
31013 return 5;
31014 case PROCESSOR_POWER8:
31015 return 7;
31016 case PROCESSOR_POWER9:
31017 return 6;
31018 default:
31019 return 1;
31020 }
31021 }
31022
31023 /* Return how many instructions to look ahead for better insn
31024 scheduling. */
31025
31026 static int
31027 rs6000_use_sched_lookahead (void)
31028 {
31029 switch (rs6000_tune)
31030 {
31031 case PROCESSOR_PPC8540:
31032 case PROCESSOR_PPC8548:
31033 return 4;
31034
31035 case PROCESSOR_CELL:
31036 return (reload_completed ? 8 : 0);
31037
31038 default:
31039 return 0;
31040 }
31041 }
31042
31043 /* We are choosing an insn from the ready queue. Return zero if INSN
31044 can be chosen. */
31045 static int
31046 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31047 {
31048 if (ready_index == 0)
31049 return 0;
31050
31051 if (rs6000_tune != PROCESSOR_CELL)
31052 return 0;
31053
31054 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31055
31056 if (!reload_completed
31057 || is_nonpipeline_insn (insn)
31058 || is_microcoded_insn (insn))
31059 return 1;
31060
31061 return 0;
31062 }
31063
31064 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31065 and return true. */
31066
31067 static bool
31068 find_mem_ref (rtx pat, rtx *mem_ref)
31069 {
31070 const char *fmt;
31071 int i, j;
31072
31073 /* stack_tie does not produce any real memory traffic. */
31074 if (tie_operand (pat, VOIDmode))
31075 return false;
31076
31077 if (GET_CODE (pat) == MEM)
31078 {
31079 *mem_ref = pat;
31080 return true;
31081 }
31082
31083 /* Recursively process the pattern. */
31084 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31085
31086 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31087 {
31088 if (fmt[i] == 'e')
31089 {
31090 if (find_mem_ref (XEXP (pat, i), mem_ref))
31091 return true;
31092 }
31093 else if (fmt[i] == 'E')
31094 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31095 {
31096 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31097 return true;
31098 }
31099 }
31100
31101 return false;
31102 }
31103
31104 /* Determine if PAT is a PATTERN of a load insn. */
31105
31106 static bool
31107 is_load_insn1 (rtx pat, rtx *load_mem)
31108 {
31109 if (!pat)
31110 return false;
31111
31112 if (GET_CODE (pat) == SET)
31113 return find_mem_ref (SET_SRC (pat), load_mem);
31114
31115 if (GET_CODE (pat) == PARALLEL)
31116 {
31117 int i;
31118
31119 for (i = 0; i < XVECLEN (pat, 0); i++)
31120 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31121 return true;
31122 }
31123
31124 return false;
31125 }
31126
31127 /* Determine if INSN loads from memory. */
31128
31129 static bool
31130 is_load_insn (rtx insn, rtx *load_mem)
31131 {
31132 if (!insn || !INSN_P (insn))
31133 return false;
31134
31135 if (CALL_P (insn))
31136 return false;
31137
31138 return is_load_insn1 (PATTERN (insn), load_mem);
31139 }
31140
31141 /* Determine if PAT is a PATTERN of a store insn. */
31142
31143 static bool
31144 is_store_insn1 (rtx pat, rtx *str_mem)
31145 {
31146 if (!pat)
31147 return false;
31148
31149 if (GET_CODE (pat) == SET)
31150 return find_mem_ref (SET_DEST (pat), str_mem);
31151
31152 if (GET_CODE (pat) == PARALLEL)
31153 {
31154 int i;
31155
31156 for (i = 0; i < XVECLEN (pat, 0); i++)
31157 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31158 return true;
31159 }
31160
31161 return false;
31162 }
31163
31164 /* Determine if INSN stores to memory. */
31165
31166 static bool
31167 is_store_insn (rtx insn, rtx *str_mem)
31168 {
31169 if (!insn || !INSN_P (insn))
31170 return false;
31171
31172 return is_store_insn1 (PATTERN (insn), str_mem);
31173 }
31174
31175 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31176
31177 static bool
31178 is_power9_pairable_vec_type (enum attr_type type)
31179 {
31180 switch (type)
31181 {
31182 case TYPE_VECSIMPLE:
31183 case TYPE_VECCOMPLEX:
31184 case TYPE_VECDIV:
31185 case TYPE_VECCMP:
31186 case TYPE_VECPERM:
31187 case TYPE_VECFLOAT:
31188 case TYPE_VECFDIV:
31189 case TYPE_VECDOUBLE:
31190 return true;
31191 default:
31192 break;
31193 }
31194 return false;
31195 }
31196
31197 /* Returns whether the dependence between INSN and NEXT is considered
31198 costly by the given target. */
31199
31200 static bool
31201 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31202 {
31203 rtx insn;
31204 rtx next;
31205 rtx load_mem, str_mem;
31206
31207 /* If the flag is not enabled - no dependence is considered costly;
31208 allow all dependent insns in the same group.
31209 This is the most aggressive option. */
31210 if (rs6000_sched_costly_dep == no_dep_costly)
31211 return false;
31212
31213 /* If the flag is set to 1 - a dependence is always considered costly;
31214 do not allow dependent instructions in the same group.
31215 This is the most conservative option. */
31216 if (rs6000_sched_costly_dep == all_deps_costly)
31217 return true;
31218
31219 insn = DEP_PRO (dep);
31220 next = DEP_CON (dep);
31221
31222 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31223 && is_load_insn (next, &load_mem)
31224 && is_store_insn (insn, &str_mem))
31225 /* Prevent load after store in the same group. */
31226 return true;
31227
31228 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31229 && is_load_insn (next, &load_mem)
31230 && is_store_insn (insn, &str_mem)
31231 && DEP_TYPE (dep) == REG_DEP_TRUE
31232 && mem_locations_overlap (str_mem, load_mem))
31233 /* Prevent load after store in the same group if it is a true
31234 dependence. */
31235 return true;
31236
31237 /* The flag is set to X; dependences with latency >= X are considered costly,
31238 and will not be scheduled in the same group. */
31239 if (rs6000_sched_costly_dep <= max_dep_latency
31240 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31241 return true;
31242
31243 return false;
31244 }
31245
31246 /* Return the next insn after INSN that is found before TAIL is reached,
31247 skipping any "non-active" insns - insns that will not actually occupy
31248 an issue slot. Return NULL_RTX if such an insn is not found. */
31249
31250 static rtx_insn *
31251 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31252 {
31253 if (insn == NULL_RTX || insn == tail)
31254 return NULL;
31255
31256 while (1)
31257 {
31258 insn = NEXT_INSN (insn);
31259 if (insn == NULL_RTX || insn == tail)
31260 return NULL;
31261
31262 if (CALL_P (insn)
31263 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31264 || (NONJUMP_INSN_P (insn)
31265 && GET_CODE (PATTERN (insn)) != USE
31266 && GET_CODE (PATTERN (insn)) != CLOBBER
31267 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31268 break;
31269 }
31270 return insn;
31271 }
31272
31273 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31274
31275 static int
31276 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31277 {
31278 int pos;
31279 int i;
31280 rtx_insn *tmp;
31281 enum attr_type type, type2;
31282
31283 type = get_attr_type (last_scheduled_insn);
31284
31285 /* Try to issue fixed point divides back-to-back in pairs so they will be
31286 routed to separate execution units and execute in parallel. */
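/* Illustrative effect (assumption): once the first divide issues, a
   second divide found anywhere on the ready list is moved to the tail
   so it is chosen next; otherwise the counter simply resets below.  */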
31287 if (type == TYPE_DIV && divide_cnt == 0)
31288 {
31289 /* First divide has been scheduled. */
31290 divide_cnt = 1;
31291
31292 /* Scan the ready list looking for another divide, if found move it
31293 to the end of the list so it is chosen next. */
31294 pos = lastpos;
31295 while (pos >= 0)
31296 {
31297 if (recog_memoized (ready[pos]) >= 0
31298 && get_attr_type (ready[pos]) == TYPE_DIV)
31299 {
31300 tmp = ready[pos];
31301 for (i = pos; i < lastpos; i++)
31302 ready[i] = ready[i + 1];
31303 ready[lastpos] = tmp;
31304 break;
31305 }
31306 pos--;
31307 }
31308 }
31309 else
31310 {
31311 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31312 divide_cnt = 0;
31313
31314 /* The best dispatch throughput for vector and vector load insns can be
31315 achieved by interleaving a vector and vector load such that they'll
31316 dispatch to the same superslice. If this pairing cannot be achieved
31317 then it is best to pair vector insns together and vector load insns
31318 together.
31319
31320 To aid in this pairing, vec_pairing maintains the current state with
31321 the following values:
31322
31323 0 : Initial state, no vecload/vector pairing has been started.
31324
31325 1 : A vecload or vector insn has been issued and a candidate for
31326 pairing has been found and moved to the end of the ready
31327 list. */
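/* Illustrative sequence (assumption): a vecload issues while
   vec_pairing == 0; a pairable vector insn found on the ready list is
   moved to the tail so it dispatches next, and vec_pairing becomes 1
   until the pair completes.  */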
31328 if (type == TYPE_VECLOAD)
31329 {
31330 /* Issued a vecload. */
31331 if (vec_pairing == 0)
31332 {
31333 int vecload_pos = -1;
31334 /* We issued a single vecload, look for a vector insn to pair it
31335 with. If one isn't found, try to pair another vecload. */
31336 pos = lastpos;
31337 while (pos >= 0)
31338 {
31339 if (recog_memoized (ready[pos]) >= 0)
31340 {
31341 type2 = get_attr_type (ready[pos]);
31342 if (is_power9_pairable_vec_type (type2))
31343 {
31344 /* Found a vector insn to pair with, move it to the
31345 end of the ready list so it is scheduled next. */
31346 tmp = ready[pos];
31347 for (i = pos; i < lastpos; i++)
31348 ready[i] = ready[i + 1];
31349 ready[lastpos] = tmp;
31350 vec_pairing = 1;
31351 return cached_can_issue_more;
31352 }
31353 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31354 /* Remember position of first vecload seen. */
31355 vecload_pos = pos;
31356 }
31357 pos--;
31358 }
31359 if (vecload_pos >= 0)
31360 {
31361 /* Didn't find a vector to pair with but did find a vecload,
31362 move it to the end of the ready list. */
31363 tmp = ready[vecload_pos];
31364 for (i = vecload_pos; i < lastpos; i++)
31365 ready[i] = ready[i + 1];
31366 ready[lastpos] = tmp;
31367 vec_pairing = 1;
31368 return cached_can_issue_more;
31369 }
31370 }
31371 }
31372 else if (is_power9_pairable_vec_type (type))
31373 {
31374 /* Issued a vector operation. */
31375 if (vec_pairing == 0)
31376 {
31377 int vec_pos = -1;
31378 /* We issued a single vector insn, look for a vecload to pair it
31379 with. If one isn't found, try to pair another vector. */
31380 pos = lastpos;
31381 while (pos >= 0)
31382 {
31383 if (recog_memoized (ready[pos]) >= 0)
31384 {
31385 type2 = get_attr_type (ready[pos]);
31386 if (type2 == TYPE_VECLOAD)
31387 {
31388 /* Found a vecload insn to pair with, move it to the
31389 end of the ready list so it is scheduled next. */
31390 tmp = ready[pos];
31391 for (i = pos; i < lastpos; i++)
31392 ready[i] = ready[i + 1];
31393 ready[lastpos] = tmp;
31394 vec_pairing = 1;
31395 return cached_can_issue_more;
31396 }
31397 else if (is_power9_pairable_vec_type (type2)
31398 && vec_pos == -1)
31399 /* Remember position of first vector insn seen. */
31400 vec_pos = pos;
31401 }
31402 pos--;
31403 }
31404 if (vec_pos >= 0)
31405 {
31406 /* Didn't find a vecload to pair with but did find a vector
31407 insn, move it to the end of the ready list. */
31408 tmp = ready[vec_pos];
31409 for (i = vec_pos; i < lastpos; i++)
31410 ready[i] = ready[i + 1];
31411 ready[lastpos] = tmp;
31412 vec_pairing = 1;
31413 return cached_can_issue_more;
31414 }
31415 }
31416 }
31417
31418 /* We've either finished a vec/vecload pair, couldn't find an insn to
31419 continue the current pair, or the last insn had nothing to do
31420 with pairing. In any case, reset the state. */
31421 vec_pairing = 0;
31422 }
31423
31424 return cached_can_issue_more;
31425 }
31426
31427 /* We are about to begin issuing insns for this clock cycle. */
31428
31429 static int
31430 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31431 rtx_insn **ready ATTRIBUTE_UNUSED,
31432 int *pn_ready ATTRIBUTE_UNUSED,
31433 int clock_var ATTRIBUTE_UNUSED)
31434 {
31435 int n_ready = *pn_ready;
31436
31437 if (sched_verbose)
31438 fprintf (dump, "// rs6000_sched_reorder :\n");
31439
31440 /* Reorder the ready list if the next insn to be issued (the last
31441 ready insn) is a nonpipelined insn. */
31442 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31443 {
31444 if (is_nonpipeline_insn (ready[n_ready - 1])
31445 && (recog_memoized (ready[n_ready - 2]) > 0))
31446 /* Simply swap the two insns at the top of the ready list. */
31447 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31448 }
31449
31450 if (rs6000_tune == PROCESSOR_POWER6)
31451 load_store_pendulum = 0;
31452
31453 return rs6000_issue_rate ();
31454 }
31455
31456 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31457
31458 static int
31459 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31460 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31461 {
31462 if (sched_verbose)
31463 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31464
31465 /* For Power6, we need to handle some special cases to try and keep the
31466 store queue from overflowing and triggering expensive flushes.
31467
31468 This code monitors how load and store instructions are being issued
31469 and skews the ready list one way or the other to increase the likelihood
31470 that a desired instruction is issued at the proper time.
31471
31472 To do this, we maintain a "load_store_pendulum" to track the
31473 current state of load/store issue.
31474
31475 - If the pendulum is at zero, then no loads or stores have been
31476 issued in the current cycle so we do nothing.
31477
31478 - If the pendulum is 1, then a single load has been issued in this
31479 cycle and we attempt to locate another load in the ready list to
31480 issue with it.
31481
31482 - If the pendulum is -2, then two stores have already been
31483 issued in this cycle, so we increase the priority of the first load
31484 in the ready list to increase its likelihood of being chosen first
31485 in the next cycle.
31486
31487 - If the pendulum is -1, then a single store has been issued in this
31488 cycle and we attempt to locate another store in the ready list to
31489 issue with it, preferring a store to an adjacent memory location to
31490 facilitate store pairing in the store queue.
31491
31492 - If the pendulum is 2, then two loads have already been
31493 issued in this cycle, so we increase the priority of the first store
31494 in the ready list to increase its likelihood of being chosen first
31495 in the next cycle.
31496
31497 - If the pendulum < -2 or > 2, then do nothing.
31498
31499 Note: This code covers the most common scenarios. There exist
31500 non-load/store instructions which make use of the LSU and which
31501 would need to be accounted for to strictly model the behavior
31502 of the machine. Those instructions are currently unaccounted
31503 for to help minimize compile time overhead of this code.
31504 */
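/* Illustrative walk-through (assumption, not from the original source):
   two stores issued in one cycle drive the pendulum to -2; the first
   load on the ready list then gets a priority bump, and the pendulum is
   stepped past -2 so that only a single load is favored.  */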
31505 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31506 {
31507 int pos;
31508 int i;
31509 rtx_insn *tmp;
31510 rtx load_mem, str_mem;
31511
31512 if (is_store_insn (last_scheduled_insn, &str_mem))
31513 /* Issuing a store, swing the load_store_pendulum to the left. */
31514 load_store_pendulum--;
31515 else if (is_load_insn (last_scheduled_insn, &load_mem))
31516 /* Issuing a load, swing the load_store_pendulum to the right. */
31517 load_store_pendulum++;
31518 else
31519 return cached_can_issue_more;
31520
31521 /* If the pendulum is balanced, or there is only one instruction on
31522 the ready list, then all is well, so return. */
31523 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31524 return cached_can_issue_more;
31525
31526 if (load_store_pendulum == 1)
31527 {
31528 /* A load has been issued in this cycle. Scan the ready list
31529 for another load to issue with it. */
31530 pos = *pn_ready - 1;
31531
31532 while (pos >= 0)
31533 {
31534 if (is_load_insn (ready[pos], &load_mem))
31535 {
31536 /* Found a load. Move it to the head of the ready list,
31537 and adjust its priority so that it is more likely to
31538 stay there. */
31539 tmp = ready[pos];
31540 for (i = pos; i < *pn_ready - 1; i++)
31541 ready[i] = ready[i + 1];
31542 ready[*pn_ready - 1] = tmp;
31543
31544 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31545 INSN_PRIORITY (tmp)++;
31546 break;
31547 }
31548 pos--;
31549 }
31550 }
31551 else if (load_store_pendulum == -2)
31552 {
31553 /* Two stores have been issued in this cycle. Increase the
31554 priority of the first load in the ready list to favor it for
31555 issuing in the next cycle. */
31556 pos = *pn_ready - 1;
31557
31558 while (pos >= 0)
31559 {
31560 if (is_load_insn (ready[pos], &load_mem)
31561 && !sel_sched_p ()
31562 && INSN_PRIORITY_KNOWN (ready[pos]))
31563 {
31564 INSN_PRIORITY (ready[pos])++;
31565
31566 /* Adjust the pendulum to account for the fact that a load
31567 was found and increased in priority. This is to prevent
31568 increasing the priority of multiple loads. */
31569 load_store_pendulum--;
31570
31571 break;
31572 }
31573 pos--;
31574 }
31575 }
31576 else if (load_store_pendulum == -1)
31577 {
31578 /* A store has been issued in this cycle. Scan the ready list for
31579 another store to issue with it, preferring a store to an adjacent
31580 memory location. */
31581 int first_store_pos = -1;
31582
31583 pos = *pn_ready - 1;
31584
31585 while (pos >= 0)
31586 {
31587 if (is_store_insn (ready[pos], &str_mem))
31588 {
31589 rtx str_mem2;
31590 /* Maintain the index of the first store found on the
31591 list. */
31592 if (first_store_pos == -1)
31593 first_store_pos = pos;
31594
31595 if (is_store_insn (last_scheduled_insn, &str_mem2)
31596 && adjacent_mem_locations (str_mem, str_mem2))
31597 {
31598 /* Found an adjacent store. Move it to the head of the
31599 ready list, and adjust its priority so that it is
31600 more likely to stay there. */
31601 tmp = ready[pos];
31602 for (i = pos; i < *pn_ready - 1; i++)
31603 ready[i] = ready[i + 1];
31604 ready[*pn_ready - 1] = tmp;
31605
31606 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31607 INSN_PRIORITY (tmp)++;
31608
31609 first_store_pos = -1;
31610
31611 break;
31612 }
31613 }
31614 pos--;
31615 }
31616
31617 if (first_store_pos >= 0)
31618 {
31619 /* An adjacent store wasn't found, but a non-adjacent store was,
31620 so move the non-adjacent store to the front of the ready
31621 list, and adjust its priority so that it is more likely to
31622 stay there. */
31623 tmp = ready[first_store_pos];
31624 for (i = first_store_pos; i < *pn_ready - 1; i++)
31625 ready[i] = ready[i + 1];
31626 ready[*pn_ready - 1] = tmp;
31627 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31628 INSN_PRIORITY (tmp)++;
31629 }
31630 }
31631 else if (load_store_pendulum == 2)
31632 {
31633 /* Two loads have been issued in this cycle. Increase the priority
31634 of the first store in the ready list to favor it for issuing in
31635 the next cycle. */
31636 pos = *pn_ready - 1;
31637
31638 while (pos >= 0)
31639 {
31640 if (is_store_insn (ready[pos], &str_mem)
31641 && !sel_sched_p ()
31642 && INSN_PRIORITY_KNOWN (ready[pos]))
31643 {
31644 INSN_PRIORITY (ready[pos])++;
31645
31646 /* Adjust the pendulum to account for the fact that a store
31647 was found and increased in priority. This is to prevent
31648 increasing the priority of multiple stores. */
31649 load_store_pendulum++;
31650
31651 break;
31652 }
31653 pos--;
31654 }
31655 }
31656 }
31657
31658 /* Do Power9 dependent reordering if necessary. */
31659 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31660 && recog_memoized (last_scheduled_insn) >= 0)
31661 return power9_sched_reorder2 (ready, *pn_ready - 1);
31662
31663 return cached_can_issue_more;
31664 }
31665
31666 /* Return whether the presence of INSN causes a dispatch group termination
31667 of group WHICH_GROUP.
31668
31669 If WHICH_GROUP == current_group, this function will return true if INSN
31670 causes the termination of the current group (i.e., the dispatch group to
31671 which INSN belongs). This means that INSN will be the last insn in the
31672 group it belongs to.
31673
31674 If WHICH_GROUP == previous_group, this function will return true if INSN
31675 causes the termination of the previous group (i.e., the dispatch group that
31676 precedes the group to which INSN belongs). This means that INSN will be
31677 the first insn in the group it belongs to. */
31678
31679 static bool
31680 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31681 {
31682 bool first, last;
31683
31684 if (! insn)
31685 return false;
31686
31687 first = insn_must_be_first_in_group (insn);
31688 last = insn_must_be_last_in_group (insn);
31689
31690 if (first && last)
31691 return true;
31692
31693 if (which_group == current_group)
31694 return last;
31695 else if (which_group == previous_group)
31696 return first;
31697
31698 return false;
31699 }
31700
31701
31702 static bool
31703 insn_must_be_first_in_group (rtx_insn *insn)
31704 {
31705 enum attr_type type;
31706
31707 if (!insn
31708 || NOTE_P (insn)
31709 || DEBUG_INSN_P (insn)
31710 || GET_CODE (PATTERN (insn)) == USE
31711 || GET_CODE (PATTERN (insn)) == CLOBBER)
31712 return false;
31713
31714 switch (rs6000_tune)
31715 {
31716 case PROCESSOR_POWER5:
31717 if (is_cracked_insn (insn))
31718 return true;
31719 /* FALLTHRU */
31720 case PROCESSOR_POWER4:
31721 if (is_microcoded_insn (insn))
31722 return true;
31723
31724 if (!rs6000_sched_groups)
31725 return false;
31726
31727 type = get_attr_type (insn);
31728
31729 switch (type)
31730 {
31731 case TYPE_MFCR:
31732 case TYPE_MFCRF:
31733 case TYPE_MTCR:
31734 case TYPE_CR_LOGICAL:
31735 case TYPE_MTJMPR:
31736 case TYPE_MFJMPR:
31737 case TYPE_DIV:
31738 case TYPE_LOAD_L:
31739 case TYPE_STORE_C:
31740 case TYPE_ISYNC:
31741 case TYPE_SYNC:
31742 return true;
31743 default:
31744 break;
31745 }
31746 break;
31747 case PROCESSOR_POWER6:
31748 type = get_attr_type (insn);
31749
31750 switch (type)
31751 {
31752 case TYPE_EXTS:
31753 case TYPE_CNTLZ:
31754 case TYPE_TRAP:
31755 case TYPE_MUL:
31756 case TYPE_INSERT:
31757 case TYPE_FPCOMPARE:
31758 case TYPE_MFCR:
31759 case TYPE_MTCR:
31760 case TYPE_MFJMPR:
31761 case TYPE_MTJMPR:
31762 case TYPE_ISYNC:
31763 case TYPE_SYNC:
31764 case TYPE_LOAD_L:
31765 case TYPE_STORE_C:
31766 return true;
31767 case TYPE_SHIFT:
31768 if (get_attr_dot (insn) == DOT_NO
31769 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31770 return true;
31771 else
31772 break;
31773 case TYPE_DIV:
31774 if (get_attr_size (insn) == SIZE_32)
31775 return true;
31776 else
31777 break;
31778 case TYPE_LOAD:
31779 case TYPE_STORE:
31780 case TYPE_FPLOAD:
31781 case TYPE_FPSTORE:
31782 if (get_attr_update (insn) == UPDATE_YES)
31783 return true;
31784 else
31785 break;
31786 default:
31787 break;
31788 }
31789 break;
31790 case PROCESSOR_POWER7:
31791 type = get_attr_type (insn);
31792
31793 switch (type)
31794 {
31795 case TYPE_CR_LOGICAL:
31796 case TYPE_MFCR:
31797 case TYPE_MFCRF:
31798 case TYPE_MTCR:
31799 case TYPE_DIV:
31800 case TYPE_ISYNC:
31801 case TYPE_LOAD_L:
31802 case TYPE_STORE_C:
31803 case TYPE_MFJMPR:
31804 case TYPE_MTJMPR:
31805 return true;
31806 case TYPE_MUL:
31807 case TYPE_SHIFT:
31808 case TYPE_EXTS:
31809 if (get_attr_dot (insn) == DOT_YES)
31810 return true;
31811 else
31812 break;
31813 case TYPE_LOAD:
31814 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31815 || get_attr_update (insn) == UPDATE_YES)
31816 return true;
31817 else
31818 break;
31819 case TYPE_STORE:
31820 case TYPE_FPLOAD:
31821 case TYPE_FPSTORE:
31822 if (get_attr_update (insn) == UPDATE_YES)
31823 return true;
31824 else
31825 break;
31826 default:
31827 break;
31828 }
31829 break;
31830 case PROCESSOR_POWER8:
31831 type = get_attr_type (insn);
31832
31833 switch (type)
31834 {
31835 case TYPE_CR_LOGICAL:
31836 case TYPE_MFCR:
31837 case TYPE_MFCRF:
31838 case TYPE_MTCR:
31839 case TYPE_SYNC:
31840 case TYPE_ISYNC:
31841 case TYPE_LOAD_L:
31842 case TYPE_STORE_C:
31843 case TYPE_VECSTORE:
31844 case TYPE_MFJMPR:
31845 case TYPE_MTJMPR:
31846 return true;
31847 case TYPE_SHIFT:
31848 case TYPE_EXTS:
31849 case TYPE_MUL:
31850 if (get_attr_dot (insn) == DOT_YES)
31851 return true;
31852 else
31853 break;
31854 case TYPE_LOAD:
31855 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31856 || get_attr_update (insn) == UPDATE_YES)
31857 return true;
31858 else
31859 break;
31860 case TYPE_STORE:
31861 if (get_attr_update (insn) == UPDATE_YES
31862 && get_attr_indexed (insn) == INDEXED_YES)
31863 return true;
31864 else
31865 break;
31866 default:
31867 break;
31868 }
31869 break;
31870 default:
31871 break;
31872 }
31873
31874 return false;
31875 }
31876
31877 static bool
31878 insn_must_be_last_in_group (rtx_insn *insn)
31879 {
31880 enum attr_type type;
31881
31882 if (!insn
31883 || NOTE_P (insn)
31884 || DEBUG_INSN_P (insn)
31885 || GET_CODE (PATTERN (insn)) == USE
31886 || GET_CODE (PATTERN (insn)) == CLOBBER)
31887 return false;
31888
31889 switch (rs6000_tune) {
31890 case PROCESSOR_POWER4:
31891 case PROCESSOR_POWER5:
31892 if (is_microcoded_insn (insn))
31893 return true;
31894
31895 if (is_branch_slot_insn (insn))
31896 return true;
31897
31898 break;
31899 case PROCESSOR_POWER6:
31900 type = get_attr_type (insn);
31901
31902 switch (type)
31903 {
31904 case TYPE_EXTS:
31905 case TYPE_CNTLZ:
31906 case TYPE_TRAP:
31907 case TYPE_MUL:
31908 case TYPE_FPCOMPARE:
31909 case TYPE_MFCR:
31910 case TYPE_MTCR:
31911 case TYPE_MFJMPR:
31912 case TYPE_MTJMPR:
31913 case TYPE_ISYNC:
31914 case TYPE_SYNC:
31915 case TYPE_LOAD_L:
31916 case TYPE_STORE_C:
31917 return true;
31918 case TYPE_SHIFT:
31919 if (get_attr_dot (insn) == DOT_NO
31920 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31921 return true;
31922 else
31923 break;
31924 case TYPE_DIV:
31925 if (get_attr_size (insn) == SIZE_32)
31926 return true;
31927 else
31928 break;
31929 default:
31930 break;
31931 }
31932 break;
31933 case PROCESSOR_POWER7:
31934 type = get_attr_type (insn);
31935
31936 switch (type)
31937 {
31938 case TYPE_ISYNC:
31939 case TYPE_SYNC:
31940 case TYPE_LOAD_L:
31941 case TYPE_STORE_C:
31942 return true;
31943 case TYPE_LOAD:
31944 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31945 && get_attr_update (insn) == UPDATE_YES)
31946 return true;
31947 else
31948 break;
31949 case TYPE_STORE:
31950 if (get_attr_update (insn) == UPDATE_YES
31951 && get_attr_indexed (insn) == INDEXED_YES)
31952 return true;
31953 else
31954 break;
31955 default:
31956 break;
31957 }
31958 break;
31959 case PROCESSOR_POWER8:
31960 type = get_attr_type (insn);
31961
31962 switch (type)
31963 {
31964 case TYPE_MFCR:
31965 case TYPE_MTCR:
31966 case TYPE_ISYNC:
31967 case TYPE_SYNC:
31968 case TYPE_LOAD_L:
31969 case TYPE_STORE_C:
31970 return true;
31971 case TYPE_LOAD:
31972 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31973 && get_attr_update (insn) == UPDATE_YES)
31974 return true;
31975 else
31976 break;
31977 case TYPE_STORE:
31978 if (get_attr_update (insn) == UPDATE_YES
31979 && get_attr_indexed (insn) == INDEXED_YES)
31980 return true;
31981 else
31982 break;
31983 default:
31984 break;
31985 }
31986 break;
31987 default:
31988 break;
31989 }
31990
31991 return false;
31992 }
31993
31994 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31995 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31996
31997 static bool
31998 is_costly_group (rtx *group_insns, rtx next_insn)
31999 {
32000 int i;
32001 int issue_rate = rs6000_issue_rate ();
32002
32003 for (i = 0; i < issue_rate; i++)
32004 {
32005 sd_iterator_def sd_it;
32006 dep_t dep;
32007 rtx insn = group_insns[i];
32008
32009 if (!insn)
32010 continue;
32011
32012 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32013 {
32014 rtx next = DEP_CON (dep);
32015
32016 if (next == next_insn
32017 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32018 return true;
32019 }
32020 }
32021
32022 return false;
32023 }
32024
32025 /* Helper for the function redefine_groups.
32026 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32027 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32028 to keep it "far" (in a separate group) from GROUP_INSNS, following
32029 one of the following schemes, depending on the value of the flag
32030 -minsert-sched-nops = X:
32031 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32032 in order to force NEXT_INSN into a separate group.
32033 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32034 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32035 insertion (has a group just ended, how many vacant issue slots remain in the
32036 last group, and how many dispatch groups were encountered so far). */
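/* Illustrative example (assuming the numeric form of the flag): with
   -minsert-sched-nops=2, scheme (2) applies and exactly two nops are
   emitted before NEXT_INSN whenever its dependence on GROUP_INSNS is
   costly.  */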
32037
32038 static int
32039 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32040 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32041 int *group_count)
32042 {
32043 rtx nop;
32044 bool force;
32045 int issue_rate = rs6000_issue_rate ();
32046 bool end = *group_end;
32047 int i;
32048
32049 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32050 return can_issue_more;
32051
32052 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32053 return can_issue_more;
32054
32055 force = is_costly_group (group_insns, next_insn);
32056 if (!force)
32057 return can_issue_more;
32058
32059 if (sched_verbose > 6)
32060 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
32061 *group_count ,can_issue_more);
32062
32063 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32064 {
32065 if (*group_end)
32066 can_issue_more = 0;
32067
32068 /* Since only a branch can be issued in the last issue_slot, it is
32069 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32070 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32071 in this case the last nop will start a new group and the branch
32072 will be forced to the new group. */
32073 if (can_issue_more && !is_branch_slot_insn (next_insn))
32074 can_issue_more--;
32075
32076 /* Do we have a special group ending nop? */
32077 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
32078 || rs6000_tune == PROCESSOR_POWER8)
32079 {
32080 nop = gen_group_ending_nop ();
32081 emit_insn_before (nop, next_insn);
32082 can_issue_more = 0;
32083 }
32084 else
32085 while (can_issue_more > 0)
32086 {
32087 nop = gen_nop ();
32088 emit_insn_before (nop, next_insn);
32089 can_issue_more--;
32090 }
32091
32092 *group_end = true;
32093 return 0;
32094 }
32095
32096 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32097 {
32098 int n_nops = rs6000_sched_insert_nops;
32099
32100 /* Nops can't be issued from the branch slot, so the effective
32101 issue_rate for nops is 'issue_rate - 1'. */
32102 if (can_issue_more == 0)
32103 can_issue_more = issue_rate;
32104 can_issue_more--;
32105 if (can_issue_more == 0)
32106 {
32107 can_issue_more = issue_rate - 1;
32108 (*group_count)++;
32109 end = true;
32110 for (i = 0; i < issue_rate; i++)
32111 {
32112 group_insns[i] = 0;
32113 }
32114 }
32115
32116 while (n_nops > 0)
32117 {
32118 nop = gen_nop ();
32119 emit_insn_before (nop, next_insn);
32120 if (can_issue_more == issue_rate - 1) /* new group begins */
32121 end = false;
32122 can_issue_more--;
32123 if (can_issue_more == 0)
32124 {
32125 can_issue_more = issue_rate - 1;
32126 (*group_count)++;
32127 end = true;
32128 for (i = 0; i < issue_rate; i++)
32129 {
32130 group_insns[i] = 0;
32131 }
32132 }
32133 n_nops--;
32134 }
32135
32136 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32137 can_issue_more++;
32138
32139 /* Is next_insn going to start a new group? */
32140 *group_end
32141 = (end
32142 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32143 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32144 || (can_issue_more < issue_rate &&
32145 insn_terminates_group_p (next_insn, previous_group)));
32146 if (*group_end && end)
32147 (*group_count)--;
32148
32149 if (sched_verbose > 6)
32150 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32151 *group_count, can_issue_more);
32152 return can_issue_more;
32153 }
32154
32155 return can_issue_more;
32156 }
32157
32158 /* This function tries to synch the dispatch groups that the compiler "sees"
32159 with the dispatch groups that the processor dispatcher is expected to
32160 form in practice. It tries to achieve this synchronization by forcing the
32161 estimated processor grouping on the compiler (as opposed to the function
32162 'pad_groups' which tries to force the scheduler's grouping on the processor).
32163
32164 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32165 examines the (estimated) dispatch groups that will be formed by the processor
32166 dispatcher. It marks these group boundaries to reflect the estimated
32167 processor grouping, overriding the grouping that the scheduler had marked.
32168 Depending on the value of the flag '-minsert-sched-nops' this function can
32169 force certain insns into separate groups or force a certain distance between
32170 them by inserting nops, for example, if there exists a "costly dependence"
32171 between the insns.
32172
32173 The function estimates the group boundaries that the processor will form as
32174 follows: It keeps track of how many vacant issue slots are available after
32175 each insn. A subsequent insn will start a new group if one of the following
32176 4 cases applies:
32177 - no more vacant issue slots remain in the current dispatch group.
32178 - only the last issue slot, which is the branch slot, is vacant, but the next
32179 insn is not a branch.
32180 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32181 which means that a cracked insn (which occupies two issue slots) can't be
32182 issued in this group.
32183 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32184 start a new group. */
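/* Illustrative example (assuming issue_rate == 4): if three insns have
   filled the first three slots and the next insn is not a branch, only
   the branch slot remains vacant, so the next insn is estimated to
   start a new dispatch group.  */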
32185
32186 static int
32187 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32188 rtx_insn *tail)
32189 {
32190 rtx_insn *insn, *next_insn;
32191 int issue_rate;
32192 int can_issue_more;
32193 int slot, i;
32194 bool group_end;
32195 int group_count = 0;
32196 rtx *group_insns;
32197
32198 /* Initialize. */
32199 issue_rate = rs6000_issue_rate ();
32200 group_insns = XALLOCAVEC (rtx, issue_rate);
32201 for (i = 0; i < issue_rate; i++)
32202 {
32203 group_insns[i] = 0;
32204 }
32205 can_issue_more = issue_rate;
32206 slot = 0;
32207 insn = get_next_active_insn (prev_head_insn, tail);
32208 group_end = false;
32209
32210 while (insn != NULL_RTX)
32211 {
32212 slot = (issue_rate - can_issue_more);
32213 group_insns[slot] = insn;
32214 can_issue_more =
32215 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32216 if (insn_terminates_group_p (insn, current_group))
32217 can_issue_more = 0;
32218
32219 next_insn = get_next_active_insn (insn, tail);
32220 if (next_insn == NULL_RTX)
32221 return group_count + 1;
32222
32223 /* Is next_insn going to start a new group? */
32224 group_end
32225 = (can_issue_more == 0
32226 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32227 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32228 || (can_issue_more < issue_rate &&
32229 insn_terminates_group_p (next_insn, previous_group)));
32230
32231 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32232 next_insn, &group_end, can_issue_more,
32233 &group_count);
32234
32235 if (group_end)
32236 {
32237 group_count++;
32238 can_issue_more = 0;
32239 for (i = 0; i < issue_rate; i++)
32240 {
32241 group_insns[i] = 0;
32242 }
32243 }
32244
32245 if (GET_MODE (next_insn) == TImode && can_issue_more)
32246 PUT_MODE (next_insn, VOIDmode);
32247 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32248 PUT_MODE (next_insn, TImode);
32249
32250 insn = next_insn;
32251 if (can_issue_more == 0)
32252 can_issue_more = issue_rate;
32253 } /* while */
32254
32255 return group_count;
32256 }
32257
32258 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32259 dispatch group boundaries that the scheduler had marked. Pad with nops
32260 any dispatch groups which have vacant issue slots, in order to force the
32261 scheduler's grouping on the processor dispatcher. The function
32262 returns the number of dispatch groups found. */
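/* Illustrative example (assumption, not from the original source): if a
   scheduler-marked group boundary leaves two vacant slots and the next
   insn is not a branch, one slot is reserved for the branch slot and a
   single nop is emitted to pad the group.  */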
32263
32264 static int
32265 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32266 rtx_insn *tail)
32267 {
32268 rtx_insn *insn, *next_insn;
32269 rtx nop;
32270 int issue_rate;
32271 int can_issue_more;
32272 int group_end;
32273 int group_count = 0;
32274
32275 /* Initialize issue_rate. */
32276 issue_rate = rs6000_issue_rate ();
32277 can_issue_more = issue_rate;
32278
32279 insn = get_next_active_insn (prev_head_insn, tail);
32280 next_insn = get_next_active_insn (insn, tail);
32281
32282 while (insn != NULL_RTX)
32283 {
32284 can_issue_more =
32285 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32286
32287 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32288
32289 if (next_insn == NULL_RTX)
32290 break;
32291
32292 if (group_end)
32293 {
32294 /* If the scheduler had marked group termination at this location
32295 (between insn and next_insn), and neither insn nor next_insn will
32296 force group termination, pad the group with nops to force group
32297 termination. */
32298 if (can_issue_more
32299 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32300 && !insn_terminates_group_p (insn, current_group)
32301 && !insn_terminates_group_p (next_insn, previous_group))
32302 {
32303 if (!is_branch_slot_insn (next_insn))
32304 can_issue_more--;
32305
32306 while (can_issue_more)
32307 {
32308 nop = gen_nop ();
32309 emit_insn_before (nop, next_insn);
32310 can_issue_more--;
32311 }
32312 }
32313
32314 can_issue_more = issue_rate;
32315 group_count++;
32316 }
32317
32318 insn = next_insn;
32319 next_insn = get_next_active_insn (insn, tail);
32320 }
32321
32322 return group_count;
32323 }
32324
32325 /* We're beginning a new block. Initialize data structures as necessary. */
32326
32327 static void
32328 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32329 int sched_verbose ATTRIBUTE_UNUSED,
32330 int max_ready ATTRIBUTE_UNUSED)
32331 {
32332 last_scheduled_insn = NULL;
32333 load_store_pendulum = 0;
32334 divide_cnt = 0;
32335 vec_pairing = 0;
32336 }
32337
32338 /* The following function is called at the end of scheduling BB.
32339 After reload, it inserts nops to enforce insn group bundling. */
32340
32341 static void
32342 rs6000_sched_finish (FILE *dump, int sched_verbose)
32343 {
32344 int n_groups;
32345
32346 if (sched_verbose)
32347 fprintf (dump, "=== Finishing schedule.\n");
32348
32349 if (reload_completed && rs6000_sched_groups)
32350 {
32351 /* Do not run sched_finish hook when selective scheduling enabled. */
32352 if (sel_sched_p ())
32353 return;
32354
32355 if (rs6000_sched_insert_nops == sched_finish_none)
32356 return;
32357
32358 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32359 n_groups = pad_groups (dump, sched_verbose,
32360 current_sched_info->prev_head,
32361 current_sched_info->next_tail);
32362 else
32363 n_groups = redefine_groups (dump, sched_verbose,
32364 current_sched_info->prev_head,
32365 current_sched_info->next_tail);
32366
32367 if (sched_verbose >= 6)
32368 {
32369 fprintf (dump, "ngroups = %d\n", n_groups);
32370 print_rtl (dump, current_sched_info->prev_head);
32371 fprintf (dump, "Done finish_sched\n");
32372 }
32373 }
32374 }
32375
32376 struct rs6000_sched_context
32377 {
32378 short cached_can_issue_more;
32379 rtx_insn *last_scheduled_insn;
32380 int load_store_pendulum;
32381 int divide_cnt;
32382 int vec_pairing;
32383 };
32384
32385 typedef struct rs6000_sched_context rs6000_sched_context_def;
32386 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32387
32388 /* Allocate store for new scheduling context. */
32389 static void *
32390 rs6000_alloc_sched_context (void)
32391 {
32392 return xmalloc (sizeof (rs6000_sched_context_def));
32393 }
32394
32395 /* If CLEAN_P is true, initialize _SC with clean data;
32396 otherwise initialize it from the global context. */
32397 static void
32398 rs6000_init_sched_context (void *_sc, bool clean_p)
32399 {
32400 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32401
32402 if (clean_p)
32403 {
32404 sc->cached_can_issue_more = 0;
32405 sc->last_scheduled_insn = NULL;
32406 sc->load_store_pendulum = 0;
32407 sc->divide_cnt = 0;
32408 sc->vec_pairing = 0;
32409 }
32410 else
32411 {
32412 sc->cached_can_issue_more = cached_can_issue_more;
32413 sc->last_scheduled_insn = last_scheduled_insn;
32414 sc->load_store_pendulum = load_store_pendulum;
32415 sc->divide_cnt = divide_cnt;
32416 sc->vec_pairing = vec_pairing;
32417 }
32418 }
32419
32420 /* Sets the global scheduling context to the one pointed to by _SC. */
32421 static void
32422 rs6000_set_sched_context (void *_sc)
32423 {
32424 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32425
32426 gcc_assert (sc != NULL);
32427
32428 cached_can_issue_more = sc->cached_can_issue_more;
32429 last_scheduled_insn = sc->last_scheduled_insn;
32430 load_store_pendulum = sc->load_store_pendulum;
32431 divide_cnt = sc->divide_cnt;
32432 vec_pairing = sc->vec_pairing;
32433 }
32434
32435 /* Free _SC. */
32436 static void
32437 rs6000_free_sched_context (void *_sc)
32438 {
32439 gcc_assert (_sc != NULL);
32440
32441 free (_sc);
32442 }
32443
32444 static bool
32445 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32446 {
32447 switch (get_attr_type (insn))
32448 {
32449 case TYPE_DIV:
32450 case TYPE_SDIV:
32451 case TYPE_DDIV:
32452 case TYPE_VECDIV:
32453 case TYPE_SSQRT:
32454 case TYPE_DSQRT:
32455 return false;
32456
32457 default:
32458 return true;
32459 }
32460 }
32461 \f
32462 /* Length in units of the trampoline for entering a nested function. */
32463
32464 int
32465 rs6000_trampoline_size (void)
32466 {
32467 int ret = 0;
32468
32469 switch (DEFAULT_ABI)
32470 {
32471 default:
32472 gcc_unreachable ();
32473
32474 case ABI_AIX:
32475 ret = (TARGET_32BIT) ? 12 : 24;
32476 break;
32477
32478 case ABI_ELFv2:
32479 gcc_assert (!TARGET_32BIT);
32480 ret = 32;
32481 break;
32482
32483 case ABI_DARWIN:
32484 case ABI_V4:
32485 ret = (TARGET_32BIT) ? 40 : 48;
32486 break;
32487 }
32488
32489 return ret;
32490 }
32491
32492 /* Emit RTL insns to initialize the variable parts of a trampoline.
32493 FNADDR is an RTX for the address of the function's pure code.
32494 CXT is an RTX for the static chain value for the function. */
32495
32496 static void
32497 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32498 {
32499 int regsize = (TARGET_32BIT) ? 4 : 8;
32500 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32501 rtx ctx_reg = force_reg (Pmode, cxt);
32502 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32503
32504 switch (DEFAULT_ABI)
32505 {
32506 default:
32507 gcc_unreachable ();
32508
32509 /* Under AIX, just build the 3-word function descriptor. */
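/* Descriptor layout sketch (assuming the standard AIX convention):
   word 0 holds the code address, word 1 the TOC pointer, and word 2
   the static chain; the moves below copy words 0 and 1 from FNADDR's
   descriptor and store CXT into word 2.  */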
32510 case ABI_AIX:
32511 {
32512 rtx fnmem, fn_reg, toc_reg;
32513
32514 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32515 error ("you cannot take the address of a nested function if you use "
32516 "the %qs option", "-mno-pointers-to-nested-functions");
32517
32518 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32519 fn_reg = gen_reg_rtx (Pmode);
32520 toc_reg = gen_reg_rtx (Pmode);
32521
32522 /* Macro to shorten the code expansions below. */
32523 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32524
32525 m_tramp = replace_equiv_address (m_tramp, addr);
32526
32527 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32528 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32529 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32530 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32531 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32532
32533 # undef MEM_PLUS
32534 }
32535 break;
32536
32537 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32538 case ABI_ELFv2:
32539 case ABI_DARWIN:
32540 case ABI_V4:
32541 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32542 LCT_NORMAL, VOIDmode,
32543 addr, Pmode,
32544 GEN_INT (rs6000_trampoline_size ()), SImode,
32545 fnaddr, Pmode,
32546 ctx_reg, Pmode);
32547 break;
32548 }
32549 }
32550
32551 \f
32552 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32553 identifier as an argument, so the front end shouldn't look it up. */
32554
32555 static bool
32556 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32557 {
32558 return is_attribute_p ("altivec", attr_id);
32559 }
32560
32561 /* Handle the "altivec" attribute. The attribute may have
32562 arguments as follows:
32563
32564 __attribute__((altivec(vector__)))
32565 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32566 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32567
32568 and may appear more than once (e.g., 'vector bool char') in a
32569 given declaration. */
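/* Illustrative mapping (assumption): a source-level
   "vector unsigned int" reaches this handler as an unsigned int
   carrying altivec(vector__) and is rewritten to
   unsigned_V4SI_type_node in the 'v' case below.  */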
32570
32571 static tree
32572 rs6000_handle_altivec_attribute (tree *node,
32573 tree name ATTRIBUTE_UNUSED,
32574 tree args,
32575 int flags ATTRIBUTE_UNUSED,
32576 bool *no_add_attrs)
32577 {
32578 tree type = *node, result = NULL_TREE;
32579 machine_mode mode;
32580 int unsigned_p;
32581 char altivec_type
32582 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32583 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32584 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32585 : '?');
32586
32587 while (POINTER_TYPE_P (type)
32588 || TREE_CODE (type) == FUNCTION_TYPE
32589 || TREE_CODE (type) == METHOD_TYPE
32590 || TREE_CODE (type) == ARRAY_TYPE)
32591 type = TREE_TYPE (type);
32592
32593 mode = TYPE_MODE (type);
32594
32595 /* Check for invalid AltiVec type qualifiers. */
32596 if (type == long_double_type_node)
32597 error ("use of %<long double%> in AltiVec types is invalid");
32598 else if (type == boolean_type_node)
32599 error ("use of boolean types in AltiVec types is invalid");
32600 else if (TREE_CODE (type) == COMPLEX_TYPE)
32601 error ("use of %<complex%> in AltiVec types is invalid");
32602 else if (DECIMAL_FLOAT_MODE_P (mode))
32603 error ("use of decimal floating point types in AltiVec types is invalid");
32604 else if (!TARGET_VSX)
32605 {
32606 if (type == long_unsigned_type_node || type == long_integer_type_node)
32607 {
32608 if (TARGET_64BIT)
32609 error ("use of %<long%> in AltiVec types is invalid for "
32610 "64-bit code without %qs", "-mvsx");
32611 else if (rs6000_warn_altivec_long)
32612 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32613 "use %<int%>");
32614 }
32615 else if (type == long_long_unsigned_type_node
32616 || type == long_long_integer_type_node)
32617 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32618 "-mvsx");
32619 else if (type == double_type_node)
32620 error ("use of %<double%> in AltiVec types is invalid without %qs",
32621 "-mvsx");
32622 }
32623
32624 switch (altivec_type)
32625 {
32626 case 'v':
32627 unsigned_p = TYPE_UNSIGNED (type);
32628 switch (mode)
32629 {
32630 case E_TImode:
32631 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32632 break;
32633 case E_DImode:
32634 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32635 break;
32636 case E_SImode:
32637 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32638 break;
32639 case E_HImode:
32640 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32641 break;
32642 case E_QImode:
32643 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32644 break;
32645 case E_SFmode: result = V4SF_type_node; break;
32646 case E_DFmode: result = V2DF_type_node; break;
32647 /* If the user says 'vector int bool', we may be handed the 'bool'
32648 attribute _before_ the 'vector' attribute, and so select the
32649 proper type in the 'b' case below. */
32650 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32651 case E_V2DImode: case E_V2DFmode:
32652 result = type;
32653 default: break;
32654 }
32655 break;
32656 case 'b':
32657 switch (mode)
32658 {
32659 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32660 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32661 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32662 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32663 default: break;
32664 }
32665 break;
32666 case 'p':
32667 switch (mode)
32668 {
32669 case E_V8HImode: result = pixel_V8HI_type_node;
32670 default: break;
32671 }
32672 default: break;
32673 }
32674
32675 /* Propagate qualifiers attached to the element type
32676 onto the vector type. */
32677 if (result && result != type && TYPE_QUALS (type))
32678 result = build_qualified_type (result, TYPE_QUALS (type));
32679
32680 *no_add_attrs = true; /* No need to hang on to the attribute. */
32681
32682 if (result)
32683 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32684
32685 return NULL_TREE;
32686 }
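/* A worked example of the handler above (a sketch, not front-end code):
   the context-sensitive keywords in "vector unsigned short" expand to

	__attribute__((altivec(vector__))) unsigned short

   so ALTIVEC_TYPE is 'v', MODE is E_HImode, TYPE_UNSIGNED is set, and
   the declaration's type is rewritten to unsigned_V8HI_type_node.  For
   "vector bool short" the 'b' attribute may be processed first, in
   which case the later 'v' pass sees an already-vector mode and simply
   keeps the chosen bool type via the E_V8HImode case.  */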
32687
32688 /* AltiVec defines five built-in scalar types that serve as vector
32689 elements; we must teach the compiler how to mangle them. The 128-bit
32690 floating point mangling is target-specific as well. */
32691
32692 static const char *
32693 rs6000_mangle_type (const_tree type)
32694 {
32695 type = TYPE_MAIN_VARIANT (type);
32696
32697 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32698 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32699 return NULL;
32700
32701 if (type == bool_char_type_node) return "U6__boolc";
32702 if (type == bool_short_type_node) return "U6__bools";
32703 if (type == pixel_type_node) return "u7__pixel";
32704 if (type == bool_int_type_node) return "U6__booli";
32705 if (type == bool_long_long_type_node) return "U6__boolx";
32706
32707 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32708 return "g";
32709 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32710 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32711
32712 /* For all other types, use the default mangling. */
32713 return NULL;
32714 }
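/* Some concrete instances of the mappings above, as they appear inside
   a full Itanium-ABI mangled name: the element type of "vector pixel"
   mangles via "u7__pixel", "vector bool int" via "U6__booli", an
   __ibm128 argument as "g", and an __ieee128 one as "u9__ieee128" (or
   "U10__float128" when GCC 8.1 compatibility is requested).  */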
32715
32716 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32717 struct attribute_spec.handler. */
32718
32719 static tree
32720 rs6000_handle_longcall_attribute (tree *node, tree name,
32721 tree args ATTRIBUTE_UNUSED,
32722 int flags ATTRIBUTE_UNUSED,
32723 bool *no_add_attrs)
32724 {
32725 if (TREE_CODE (*node) != FUNCTION_TYPE
32726 && TREE_CODE (*node) != FIELD_DECL
32727 && TREE_CODE (*node) != TYPE_DECL)
32728 {
32729 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32730 name);
32731 *no_add_attrs = true;
32732 }
32733
32734 return NULL_TREE;
32735 }
32736
32737 /* Set longcall attributes on all functions declared when
32738 rs6000_default_long_calls is true. */
32739 static void
32740 rs6000_set_default_type_attributes (tree type)
32741 {
32742 if (rs6000_default_long_calls
32743 && (TREE_CODE (type) == FUNCTION_TYPE
32744 || TREE_CODE (type) == METHOD_TYPE))
32745 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32746 NULL_TREE,
32747 TYPE_ATTRIBUTES (type));
32748
32749 #if TARGET_MACHO
32750 darwin_set_default_type_attributes (type);
32751 #endif
32752 }
32753
32754 /* Return a reference suitable for calling a function with the
32755 longcall attribute. */
32756
32757 static rtx
32758 rs6000_longcall_ref (rtx call_ref, rtx arg)
32759 {
32760 /* System V adds '.' to the internal name, so skip any leading dots.  */
32761 const char *call_name = XSTR (call_ref, 0);
32762 if (*call_name == '.')
32763 {
32764 while (*call_name == '.')
32765 call_name++;
32766
32767 tree node = get_identifier (call_name);
32768 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32769 }
32770
32771 if (HAVE_AS_PLTSEQ
32772 && TARGET_TLS_MARKERS
32773 && (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4))
32774 {
32775 rtx base = const0_rtx;
32776 int regno;
32777 if (DEFAULT_ABI == ABI_ELFv2)
32778 {
32779 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32780 regno = 12;
32781 }
32782 else
32783 {
32784 if (flag_pic)
32785 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32786 regno = 11;
32787 }
32788 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32789 may be used by a function global entry point. For SysV4, r11
32790 is used by __glink_PLTresolve lazy resolver entry. */
32791 rtx reg = gen_rtx_REG (Pmode, regno);
32792 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32793 UNSPEC_PLT16_HA);
32794 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32795 UNSPEC_PLT16_LO);
32796 emit_insn (gen_rtx_SET (reg, hi));
32797 emit_insn (gen_rtx_SET (reg, lo));
32798 return reg;
32799 }
32800
32801 return force_reg (Pmode, call_ref);
32802 }
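/* A sketch of the HAVE_AS_PLTSEQ sequence emitted above: the two SETs
   become an addis/load pair carrying PLT16_HA and PLT16_LO relocations
   against the called symbol, roughly (ELFv2, 64-bit; the exact assembly
   comes from the pltseq patterns in rs6000.md):

	addis 12,2,func@plt@ha
	ld 12,func@plt@l(12)

   which leaves the target address in r12, the register the linker's
   PLT call stubs expect.  */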
32803 \f
32804 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32805 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32806 #endif
32807
32808 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32809 struct attribute_spec.handler. */
32810 static tree
32811 rs6000_handle_struct_attribute (tree *node, tree name,
32812 tree args ATTRIBUTE_UNUSED,
32813 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32814 {
32815 tree *type = NULL;
32816 if (DECL_P (*node))
32817 {
32818 if (TREE_CODE (*node) == TYPE_DECL)
32819 type = &TREE_TYPE (*node);
32820 }
32821 else
32822 type = node;
32823
32824 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32825 || TREE_CODE (*type) == UNION_TYPE)))
32826 {
32827 warning (OPT_Wattributes, "%qE attribute ignored", name);
32828 *no_add_attrs = true;
32829 }
32830
32831 else if ((is_attribute_p ("ms_struct", name)
32832 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32833 || ((is_attribute_p ("gcc_struct", name)
32834 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32835 {
32836 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32837 name);
32838 *no_add_attrs = true;
32839 }
32840
32841 return NULL_TREE;
32842 }
32843
32844 static bool
32845 rs6000_ms_bitfield_layout_p (const_tree record_type)
32846 {
32847 return ((TARGET_USE_MS_BITFIELD_LAYOUT
32848 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32849 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
32850 }
32851 \f
32852 #ifdef USING_ELFOS_H
32853
32854 /* A get_unnamed_section callback, used for switching to toc_section. */
32855
32856 static void
32857 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32858 {
32859 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32860 && TARGET_MINIMAL_TOC)
32861 {
32862 if (!toc_initialized)
32863 {
32864 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32865 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32866 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32867 fprintf (asm_out_file, "\t.tc ");
32868 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32869 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32870 fprintf (asm_out_file, "\n");
32871
32872 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32873 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32874 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32875 fprintf (asm_out_file, " = .+32768\n");
32876 toc_initialized = 1;
32877 }
32878 else
32879 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32880 }
32881 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32882 {
32883 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32884 if (!toc_initialized)
32885 {
32886 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32887 toc_initialized = 1;
32888 }
32889 }
32890 else
32891 {
32892 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32893 if (!toc_initialized)
32894 {
32895 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32896 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32897 fprintf (asm_out_file, " = .+32768\n");
32898 toc_initialized = 1;
32899 }
32900 }
32901 }
32902
32903 /* Implement TARGET_ASM_INIT_SECTIONS. */
32904
32905 static void
32906 rs6000_elf_asm_init_sections (void)
32907 {
32908 toc_section
32909 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32910
32911 sdata2_section
32912 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32913 SDATA2_SECTION_ASM_OP);
32914 }
32915
32916 /* Implement TARGET_SELECT_RTX_SECTION. */
32917
32918 static section *
32919 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32920 unsigned HOST_WIDE_INT align)
32921 {
32922 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32923 return toc_section;
32924 else
32925 return default_elf_select_rtx_section (mode, x, align);
32926 }
32927 \f
32928 /* For a SYMBOL_REF, set generic flags and then perform some
32929 target-specific processing.
32930
32931 When the AIX ABI is requested on a non-AIX system, replace the
32932 function name with the real name (with a leading .) rather than the
32933 function descriptor name. This saves a lot of overriding code to
32934 read the prefixes. */
32935
32936 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32937 static void
32938 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32939 {
32940 default_encode_section_info (decl, rtl, first);
32941
32942 if (first
32943 && TREE_CODE (decl) == FUNCTION_DECL
32944 && !TARGET_AIX
32945 && DEFAULT_ABI == ABI_AIX)
32946 {
32947 rtx sym_ref = XEXP (rtl, 0);
32948 size_t len = strlen (XSTR (sym_ref, 0));
32949 char *str = XALLOCAVEC (char, len + 2);
32950 str[0] = '.';
32951 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32952 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32953 }
32954 }
32955
32956 static inline bool
32957 compare_section_name (const char *section, const char *templ)
32958 {
32959 int len;
32960
32961 len = strlen (templ);
32962 return (strncmp (section, templ, len) == 0
32963 && (section[len] == 0 || section[len] == '.'));
32964 }
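/* For example, compare_section_name (".sdata", ".sdata") and
   compare_section_name (".sdata.foo", ".sdata") are both true, while
   compare_section_name (".sdata2", ".sdata") is false because the
   character after the matched prefix is neither NUL nor '.'.  */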
32965
32966 bool
32967 rs6000_elf_in_small_data_p (const_tree decl)
32968 {
32969 if (rs6000_sdata == SDATA_NONE)
32970 return false;
32971
32972 /* We want to merge strings, so we never consider them small data. */
32973 if (TREE_CODE (decl) == STRING_CST)
32974 return false;
32975
32976 /* Functions are never in the small data area. */
32977 if (TREE_CODE (decl) == FUNCTION_DECL)
32978 return false;
32979
32980 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32981 {
32982 const char *section = DECL_SECTION_NAME (decl);
32983 if (compare_section_name (section, ".sdata")
32984 || compare_section_name (section, ".sdata2")
32985 || compare_section_name (section, ".gnu.linkonce.s")
32986 || compare_section_name (section, ".sbss")
32987 || compare_section_name (section, ".sbss2")
32988 || compare_section_name (section, ".gnu.linkonce.sb")
32989 || strcmp (section, ".PPC.EMB.sdata0") == 0
32990 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32991 return true;
32992 }
32993 else
32994 {
32995 /* If we are told not to put readonly data in sdata, then don't. */
32996 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
32997 && !rs6000_readonly_in_sdata)
32998 return false;
32999
33000 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33001
33002 if (size > 0
33003 && size <= g_switch_value
33004 /* If it's not public, and we're not going to reference it there,
33005 there's no need to put it in the small data section. */
33006 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33007 return true;
33008 }
33009
33010 return false;
33011 }
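/* For example, with -msdata=data and -G 8, a public variable of 8 bytes
   or less carrying no explicit section attribute lands in small data,
   while string constants and functions never do.  */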
33012
33013 #endif /* USING_ELFOS_H */
33014 \f
33015 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33016
33017 static bool
33018 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33019 {
33020 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33021 }
33022
33023 /* Do not place thread-local symbols refs in the object blocks. */
33024
33025 static bool
33026 rs6000_use_blocks_for_decl_p (const_tree decl)
33027 {
33028 return !DECL_THREAD_LOCAL_P (decl);
33029 }
33030 \f
33031 /* Return a REG that occurs in ADDR with coefficient 1.
33032 ADDR can be effectively incremented by incrementing REG.
33033
33034 r0 is special and we must not select it as an address
33035 register by this routine since our caller will try to
33036 increment the returned register via an "la" instruction. */
33037
33038 rtx
33039 find_addr_reg (rtx addr)
33040 {
33041 while (GET_CODE (addr) == PLUS)
33042 {
33043 if (GET_CODE (XEXP (addr, 0)) == REG
33044 && REGNO (XEXP (addr, 0)) != 0)
33045 addr = XEXP (addr, 0);
33046 else if (GET_CODE (XEXP (addr, 1)) == REG
33047 && REGNO (XEXP (addr, 1)) != 0)
33048 addr = XEXP (addr, 1);
33049 else if (CONSTANT_P (XEXP (addr, 0)))
33050 addr = XEXP (addr, 1);
33051 else if (CONSTANT_P (XEXP (addr, 1)))
33052 addr = XEXP (addr, 0);
33053 else
33054 gcc_unreachable ();
33055 }
33056 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
33057 return addr;
33058 }
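/* For example, for ADDR = (plus (reg:SI 9) (const_int 16)) the loop
   above returns register 9; the caller can then advance the whole
   address with an "la" (addi) on that register.  */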
33059
33060 void
33061 rs6000_fatal_bad_address (rtx op)
33062 {
33063 fatal_insn ("bad address", op);
33064 }
33065
33066 #if TARGET_MACHO
33067
33068 typedef struct branch_island_d {
33069 tree function_name;
33070 tree label_name;
33071 int line_number;
33072 } branch_island;
33073
33074
33075 static vec<branch_island, va_gc> *branch_islands;
33076
33077 /* Remember to generate a branch island for far calls to the given
33078 function. */
33079
33080 static void
33081 add_compiler_branch_island (tree label_name, tree function_name,
33082 int line_number)
33083 {
33084 branch_island bi = {function_name, label_name, line_number};
33085 vec_safe_push (branch_islands, bi);
33086 }
33087
33088 /* Generate far-jump branch islands for everything recorded in
33089 branch_islands. Invoked immediately after the last instruction of
33090 the epilogue has been emitted; the branch islands must be appended
33091 to, and contiguous with, the function body. Mach-O stubs are
33092 generated in machopic_output_stub(). */
33093
33094 static void
33095 macho_branch_islands (void)
33096 {
33097 char tmp_buf[512];
33098
33099 while (!vec_safe_is_empty (branch_islands))
33100 {
33101 branch_island *bi = &branch_islands->last ();
33102 const char *label = IDENTIFIER_POINTER (bi->label_name);
33103 const char *name = IDENTIFIER_POINTER (bi->function_name);
33104 char name_buf[512];
33105 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33106 if (name[0] == '*' || name[0] == '&')
33107 strcpy (name_buf, name+1);
33108 else
33109 {
33110 name_buf[0] = '_';
33111 strcpy (name_buf+1, name);
33112 }
33113 strcpy (tmp_buf, "\n");
33114 strcat (tmp_buf, label);
33115 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33116 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33117 dbxout_stabd (N_SLINE, bi->line_number);
33118 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33119 if (flag_pic)
33120 {
33121 if (TARGET_LINK_STACK)
33122 {
33123 char name[32];
33124 get_ppc476_thunk_name (name);
33125 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33126 strcat (tmp_buf, name);
33127 strcat (tmp_buf, "\n");
33128 strcat (tmp_buf, label);
33129 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33130 }
33131 else
33132 {
33133 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33134 strcat (tmp_buf, label);
33135 strcat (tmp_buf, "_pic\n");
33136 strcat (tmp_buf, label);
33137 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33138 }
33139
33140 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33141 strcat (tmp_buf, name_buf);
33142 strcat (tmp_buf, " - ");
33143 strcat (tmp_buf, label);
33144 strcat (tmp_buf, "_pic)\n");
33145
33146 strcat (tmp_buf, "\tmtlr r0\n");
33147
33148 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33149 strcat (tmp_buf, name_buf);
33150 strcat (tmp_buf, " - ");
33151 strcat (tmp_buf, label);
33152 strcat (tmp_buf, "_pic)\n");
33153
33154 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33155 }
33156 else
33157 {
33158 strcat (tmp_buf, ":\nlis r12,hi16(");
33159 strcat (tmp_buf, name_buf);
33160 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33161 strcat (tmp_buf, name_buf);
33162 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33163 }
33164 output_asm_insn (tmp_buf, 0);
33165 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33166 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33167 dbxout_stabd (N_SLINE, bi->line_number);
33168 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33169 branch_islands->pop ();
33170 }
33171 }
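/* As a concrete sketch (symbol names hypothetical), the non-PIC island
   emitted above for a call to foo() looks like

   Lfoo$island:
	lis r12,hi16(_foo)
	ori r12,r12,lo16(_foo)
	mtctr r12
	bctr

   while the PIC variants first materialize a pc-relative base in r11
   via bcl/mflr (or the ppc476 link-stack thunk) and form r12 from
   ha16/lo16 differences against the island's own _pic label.  */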
33172
33173 /* NO_PREVIOUS_DEF checks whether the function name already appears
33174 in the branch_islands vector.  */
33175
33176 static int
33177 no_previous_def (tree function_name)
33178 {
33179 branch_island *bi;
33180 unsigned ix;
33181
33182 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33183 if (function_name == bi->function_name)
33184 return 0;
33185 return 1;
33186 }
33187
33188 /* GET_PREV_LABEL gets the label name from the previous definition of
33189 the function. */
33190
33191 static tree
33192 get_prev_label (tree function_name)
33193 {
33194 branch_island *bi;
33195 unsigned ix;
33196
33197 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33198 if (function_name == bi->function_name)
33199 return bi->label_name;
33200 return NULL_TREE;
33201 }
33202
33203 /* Generate PIC and indirect symbol stubs. */
33204
33205 void
33206 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33207 {
33208 unsigned int length;
33209 char *symbol_name, *lazy_ptr_name;
33210 char *local_label_0;
33211 static int label = 0;
33212
33213 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33214 symb = (*targetm.strip_name_encoding) (symb);
33215
33216
33217 length = strlen (symb);
33218 symbol_name = XALLOCAVEC (char, length + 32);
33219 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33220
33221 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33222 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33223
33224 if (flag_pic == 2)
33225 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33226 else
33227 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33228
33229 if (flag_pic == 2)
33230 {
33231 fprintf (file, "\t.align 5\n");
33232
33233 fprintf (file, "%s:\n", stub);
33234 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33235
33236 label++;
33237 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33238 sprintf (local_label_0, "\"L%011d$spb\"", label);
33239
33240 fprintf (file, "\tmflr r0\n");
33241 if (TARGET_LINK_STACK)
33242 {
33243 char name[32];
33244 get_ppc476_thunk_name (name);
33245 fprintf (file, "\tbl %s\n", name);
33246 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33247 }
33248 else
33249 {
33250 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33251 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33252 }
33253 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33254 lazy_ptr_name, local_label_0);
33255 fprintf (file, "\tmtlr r0\n");
33256 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33257 (TARGET_64BIT ? "ldu" : "lwzu"),
33258 lazy_ptr_name, local_label_0);
33259 fprintf (file, "\tmtctr r12\n");
33260 fprintf (file, "\tbctr\n");
33261 }
33262 else
33263 {
33264 fprintf (file, "\t.align 4\n");
33265
33266 fprintf (file, "%s:\n", stub);
33267 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33268
33269 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33270 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33271 (TARGET_64BIT ? "ldu" : "lwzu"),
33272 lazy_ptr_name);
33273 fprintf (file, "\tmtctr r12\n");
33274 fprintf (file, "\tbctr\n");
33275 }
33276
33277 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33278 fprintf (file, "%s:\n", lazy_ptr_name);
33279 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33280 fprintf (file, "%sdyld_stub_binding_helper\n",
33281 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33282 }
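/* A sketch of the non-PIC (flag_pic != 2) stub emitted above, for a
   hypothetical 32-bit symbol _foo with stub L_foo$stub and lazy pointer
   L_foo$lazy_ptr:

   L_foo$stub:
	.indirect_symbol _foo
	lis r11,ha16(L_foo$lazy_ptr)
	lwzu r12,lo16(L_foo$lazy_ptr)(r11)
	mtctr r12
	bctr
   L_foo$lazy_ptr:
	.indirect_symbol _foo
	.long dyld_stub_binding_helper

   The PIC (flag_pic == 2) variant instead computes the lazy pointer
   address relative to a bcl/mflr-derived local label.  */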
33283
33284 /* Legitimize PIC addresses. If the address is already
33285 position-independent, we return ORIG. Newly generated
33286 position-independent addresses go into a reg. This is REG if
33287 nonzero, otherwise we allocate register(s) as necessary. */
33288
33289 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
33290
33291 rtx
33292 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33293 rtx reg)
33294 {
33295 rtx base, offset;
33296
33297 if (reg == NULL && !reload_completed)
33298 reg = gen_reg_rtx (Pmode);
33299
33300 if (GET_CODE (orig) == CONST)
33301 {
33302 rtx reg_temp;
33303
33304 if (GET_CODE (XEXP (orig, 0)) == PLUS
33305 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33306 return orig;
33307
33308 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33309
33310 /* Use a different reg for the intermediate value, as
33311 it will be marked UNCHANGING. */
33312 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33313 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33314 Pmode, reg_temp);
33315 offset =
33316 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33317 Pmode, reg);
33318
33319 if (GET_CODE (offset) == CONST_INT)
33320 {
33321 if (SMALL_INT (offset))
33322 return plus_constant (Pmode, base, INTVAL (offset));
33323 else if (!reload_completed)
33324 offset = force_reg (Pmode, offset);
33325 else
33326 {
33327 rtx mem = force_const_mem (Pmode, orig);
33328 return machopic_legitimize_pic_address (mem, Pmode, reg);
33329 }
33330 }
33331 return gen_rtx_PLUS (Pmode, base, offset);
33332 }
33333
33334 /* Fall back on generic machopic code. */
33335 return machopic_legitimize_pic_address (orig, mode, reg);
33336 }
33337
33338 /* Output a .machine directive for the Darwin assembler, and call
33339 the generic start_file routine. */
33340
33341 static void
33342 rs6000_darwin_file_start (void)
33343 {
33344 static const struct
33345 {
33346 const char *arg;
33347 const char *name;
33348 HOST_WIDE_INT if_set;
33349 } mapping[] = {
33350 { "ppc64", "ppc64", MASK_64BIT },
33351 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33352 { "power4", "ppc970", 0 },
33353 { "G5", "ppc970", 0 },
33354 { "7450", "ppc7450", 0 },
33355 { "7400", "ppc7400", MASK_ALTIVEC },
33356 { "G4", "ppc7400", 0 },
33357 { "750", "ppc750", 0 },
33358 { "740", "ppc750", 0 },
33359 { "G3", "ppc750", 0 },
33360 { "604e", "ppc604e", 0 },
33361 { "604", "ppc604", 0 },
33362 { "603e", "ppc603", 0 },
33363 { "603", "ppc603", 0 },
33364 { "601", "ppc601", 0 },
33365 { NULL, "ppc", 0 } };
33366 const char *cpu_id = "";
33367 size_t i;
33368
33369 rs6000_file_start ();
33370 darwin_file_start ();
33371
33372 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33373
33374 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33375 cpu_id = rs6000_default_cpu;
33376
33377 if (global_options_set.x_rs6000_cpu_index)
33378 cpu_id = processor_target_table[rs6000_cpu_index].name;
33379
33380 /* Look through the mapping array. Pick the first name that either
33381 matches the argument, has a bit set in IF_SET that is also set
33382 in the target flags, or has a NULL name. */
33383
33384 i = 0;
33385 while (mapping[i].arg != NULL
33386 && strcmp (mapping[i].arg, cpu_id) != 0
33387 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33388 i++;
33389
33390 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33391 }
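/* For example, -mcpu=G4 matches the "G4" row above and emits
   "\t.machine ppc7400"; an unnamed CPU that nevertheless has
   MASK_ALTIVEC set also stops at the "7400" row via IF_SET, and if
   nothing matches, the NULL sentinel row yields plain "ppc".  */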
33392
33393 #endif /* TARGET_MACHO */
33394
33395 #if TARGET_ELF
33396 static int
33397 rs6000_elf_reloc_rw_mask (void)
33398 {
33399 if (flag_pic)
33400 return 3;
33401 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33402 return 2;
33403 else
33404 return 0;
33405 }
33406
33407 /* Record an element in the table of global constructors. SYMBOL is
33408 a SYMBOL_REF of the function to be called; PRIORITY is a number
33409 between 0 and MAX_INIT_PRIORITY.
33410
33411 This differs from default_named_section_asm_out_constructor in
33412 that we have special handling for -mrelocatable. */
33413
33414 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33415 static void
33416 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33417 {
33418 const char *section = ".ctors";
33419 char buf[18];
33420
33421 if (priority != DEFAULT_INIT_PRIORITY)
33422 {
33423 sprintf (buf, ".ctors.%.5u",
33424 /* Invert the numbering so the linker puts us in the proper
33425 order; constructors are run from right to left, and the
33426 linker sorts in increasing order. */
33427 MAX_INIT_PRIORITY - priority);
33428 section = buf;
33429 }
33430
33431 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33432 assemble_align (POINTER_SIZE);
33433
33434 if (DEFAULT_ABI == ABI_V4
33435 && (TARGET_RELOCATABLE || flag_pic > 1))
33436 {
33437 fputs ("\t.long (", asm_out_file);
33438 output_addr_const (asm_out_file, symbol);
33439 fputs (")@fixup\n", asm_out_file);
33440 }
33441 else
33442 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33443 }
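/* As a worked example, with the usual MAX_INIT_PRIORITY of 65535 a
   constructor declared with __attribute__((constructor (101))) is
   placed in section ".ctors.65434"; inverting the priority this way
   makes the linker's increasing sort produce the required execution
   order.  */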
33444
33445 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33446 static void
33447 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33448 {
33449 const char *section = ".dtors";
33450 char buf[18];
33451
33452 if (priority != DEFAULT_INIT_PRIORITY)
33453 {
33454 sprintf (buf, ".dtors.%.5u",
33455 /* Invert the numbering so the linker puts us in the proper
33456 order; constructors are run from right to left, and the
33457 linker sorts in increasing order. */
33458 MAX_INIT_PRIORITY - priority);
33459 section = buf;
33460 }
33461
33462 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33463 assemble_align (POINTER_SIZE);
33464
33465 if (DEFAULT_ABI == ABI_V4
33466 && (TARGET_RELOCATABLE || flag_pic > 1))
33467 {
33468 fputs ("\t.long (", asm_out_file);
33469 output_addr_const (asm_out_file, symbol);
33470 fputs (")@fixup\n", asm_out_file);
33471 }
33472 else
33473 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33474 }
33475
33476 void
33477 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33478 {
33479 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33480 {
33481 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33482 ASM_OUTPUT_LABEL (file, name);
33483 fputs (DOUBLE_INT_ASM_OP, file);
33484 rs6000_output_function_entry (file, name);
33485 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33486 if (DOT_SYMBOLS)
33487 {
33488 fputs ("\t.size\t", file);
33489 assemble_name (file, name);
33490 fputs (",24\n\t.type\t.", file);
33491 assemble_name (file, name);
33492 fputs (",@function\n", file);
33493 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33494 {
33495 fputs ("\t.globl\t.", file);
33496 assemble_name (file, name);
33497 putc ('\n', file);
33498 }
33499 }
33500 else
33501 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33502 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33503 rs6000_output_function_entry (file, name);
33504 fputs (":\n", file);
33505 return;
33506 }
33507
33508 int uses_toc;
33509 if (DEFAULT_ABI == ABI_V4
33510 && (TARGET_RELOCATABLE || flag_pic > 1)
33511 && !TARGET_SECURE_PLT
33512 && (!constant_pool_empty_p () || crtl->profile)
33513 && (uses_toc = uses_TOC ()))
33514 {
33515 char buf[256];
33516
33517 if (uses_toc == 2)
33518 switch_to_other_text_partition ();
33519 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33520
33521 fprintf (file, "\t.long ");
33522 assemble_name (file, toc_label_name);
33523 need_toc_init = 1;
33524 putc ('-', file);
33525 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33526 assemble_name (file, buf);
33527 putc ('\n', file);
33528 if (uses_toc == 2)
33529 switch_to_other_text_partition ();
33530 }
33531
33532 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33533 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33534
33535 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33536 {
33537 char buf[256];
33538
33539 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33540
33541 fprintf (file, "\t.quad .TOC.-");
33542 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33543 assemble_name (file, buf);
33544 putc ('\n', file);
33545 }
33546
33547 if (DEFAULT_ABI == ABI_AIX)
33548 {
33549 const char *desc_name, *orig_name;
33550
33551 orig_name = (*targetm.strip_name_encoding) (name);
33552 desc_name = orig_name;
33553 while (*desc_name == '.')
33554 desc_name++;
33555
33556 if (TREE_PUBLIC (decl))
33557 fprintf (file, "\t.globl %s\n", desc_name);
33558
33559 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33560 fprintf (file, "%s:\n", desc_name);
33561 fprintf (file, "\t.long %s\n", orig_name);
33562 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33563 fputs ("\t.long 0\n", file);
33564 fprintf (file, "\t.previous\n");
33565 }
33566 ASM_OUTPUT_LABEL (file, name);
33567 }
33568
33569 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33570 static void
33571 rs6000_elf_file_end (void)
33572 {
33573 #ifdef HAVE_AS_GNU_ATTRIBUTE
33574 /* ??? The value emitted depends on options active at file end.
33575 Assume anyone using #pragma or attributes that might change
33576 options knows what they are doing. */
33577 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33578 && rs6000_passes_float)
33579 {
33580 int fp;
33581
33582 if (TARGET_HARD_FLOAT)
33583 fp = 1;
33584 else
33585 fp = 2;
33586 if (rs6000_passes_long_double)
33587 {
33588 if (!TARGET_LONG_DOUBLE_128)
33589 fp |= 2 * 4;
33590 else if (TARGET_IEEEQUAD)
33591 fp |= 3 * 4;
33592 else
33593 fp |= 1 * 4;
33594 }
33595 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33596 }
33597 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33598 {
33599 if (rs6000_passes_vector)
33600 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33601 (TARGET_ALTIVEC_ABI ? 2 : 1));
33602 if (rs6000_returns_struct)
33603 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33604 aix_struct_return ? 2 : 1);
33605 }
33606 #endif
33607 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33608 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33609 file_end_indicate_exec_stack ();
33610 #endif
33611
33612 if (flag_split_stack)
33613 file_end_indicate_split_stack ();
33614
33615 if (cpu_builtin_p)
33616 {
33617 /* We have expanded a CPU builtin, so we need to emit a reference to
33618 the special symbol that LIBC uses to declare it supports the
33619 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33620 switch_to_section (data_section);
33621 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33622 fprintf (asm_out_file, "\t%s %s\n",
33623 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33624 }
33625 }
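/* A worked example of the FP attribute computed above: a hard-float
   compile whose 128-bit long double is the IBM format sets fp = 1 and
   then fp |= 1 * 4, so we emit "\t.gnu_attribute 4, 5"; soft float
   with IEEE 128-bit long double gives 2 | 3 * 4 = 14 instead.  */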
33626 #endif
33627
33628 #if TARGET_XCOFF
33629
33630 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33631 #define HAVE_XCOFF_DWARF_EXTRAS 0
33632 #endif
33633
33634 static enum unwind_info_type
33635 rs6000_xcoff_debug_unwind_info (void)
33636 {
33637 return UI_NONE;
33638 }
33639
33640 static void
33641 rs6000_xcoff_asm_output_anchor (rtx symbol)
33642 {
33643 char buffer[100];
33644
33645 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33646 SYMBOL_REF_BLOCK_OFFSET (symbol));
33647 fprintf (asm_out_file, "%s", SET_ASM_OP);
33648 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33649 fprintf (asm_out_file, ",");
33650 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33651 fprintf (asm_out_file, "\n");
33652 }
33653
33654 static void
33655 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33656 {
33657 fputs (GLOBAL_ASM_OP, stream);
33658 RS6000_OUTPUT_BASENAME (stream, name);
33659 putc ('\n', stream);
33660 }
33661
33662 /* A get_unnamed_section callback, used for read-only sections.
33663 DIRECTIVE points to the section string variable. */
33664
33665 static void
33666 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33667 {
33668 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33669 *(const char *const *) directive,
33670 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33671 }
33672
33673 /* Likewise for read-write sections. */
33674
33675 static void
33676 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33677 {
33678 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33679 *(const char *const *) directive,
33680 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33681 }
33682
33683 static void
33684 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33685 {
33686 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33687 *(const char *const *) directive,
33688 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33689 }
33690
33691 /* A get_unnamed_section callback, used for switching to toc_section. */
33692
33693 static void
33694 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33695 {
33696 if (TARGET_MINIMAL_TOC)
33697 {
33698 /* toc_section is always selected at least once from
33699 rs6000_xcoff_file_start, so this is guaranteed to
33700 always be defined once and only once in each file. */
33701 if (!toc_initialized)
33702 {
33703 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33704 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33705 toc_initialized = 1;
33706 }
33707 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33708 (TARGET_32BIT ? "" : ",3"));
33709 }
33710 else
33711 fputs ("\t.toc\n", asm_out_file);
33712 }
33713
33714 /* Implement TARGET_ASM_INIT_SECTIONS. */
33715
33716 static void
33717 rs6000_xcoff_asm_init_sections (void)
33718 {
33719 read_only_data_section
33720 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33721 &xcoff_read_only_section_name);
33722
33723 private_data_section
33724 = get_unnamed_section (SECTION_WRITE,
33725 rs6000_xcoff_output_readwrite_section_asm_op,
33726 &xcoff_private_data_section_name);
33727
33728 tls_data_section
33729 = get_unnamed_section (SECTION_TLS,
33730 rs6000_xcoff_output_tls_section_asm_op,
33731 &xcoff_tls_data_section_name);
33732
33733 tls_private_data_section
33734 = get_unnamed_section (SECTION_TLS,
33735 rs6000_xcoff_output_tls_section_asm_op,
33736 &xcoff_private_data_section_name);
33737
33738 read_only_private_data_section
33739 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33740 &xcoff_private_data_section_name);
33741
33742 toc_section
33743 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33744
33745 readonly_data_section = read_only_data_section;
33746 }
33747
33748 static int
33749 rs6000_xcoff_reloc_rw_mask (void)
33750 {
33751 return 3;
33752 }
33753
33754 static void
33755 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33756 tree decl ATTRIBUTE_UNUSED)
33757 {
33758 int smclass;
33759 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33760
33761 if (flags & SECTION_EXCLUDE)
33762 smclass = 4;
33763 else if (flags & SECTION_DEBUG)
33764 {
33765 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33766 return;
33767 }
33768 else if (flags & SECTION_CODE)
33769 smclass = 0;
33770 else if (flags & SECTION_TLS)
33771 smclass = 3;
33772 else if (flags & SECTION_WRITE)
33773 smclass = 2;
33774 else
33775 smclass = 1;
33776
33777 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33778 (flags & SECTION_CODE) ? "." : "",
33779 name, suffix[smclass], flags & SECTION_ENTSIZE);
33780 }
33781
33782 #define IN_NAMED_SECTION(DECL) \
33783 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33784 && DECL_SECTION_NAME (DECL) != NULL)
33785
33786 static section *
33787 rs6000_xcoff_select_section (tree decl, int reloc,
33788 unsigned HOST_WIDE_INT align)
33789 {
33790 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33791 a named section. */
33792 if (align > BIGGEST_ALIGNMENT)
33793 {
33794 resolve_unique_section (decl, reloc, true);
33795 if (IN_NAMED_SECTION (decl))
33796 return get_named_section (decl, NULL, reloc);
33797 }
33798
33799 if (decl_readonly_section (decl, reloc))
33800 {
33801 if (TREE_PUBLIC (decl))
33802 return read_only_data_section;
33803 else
33804 return read_only_private_data_section;
33805 }
33806 else
33807 {
33808 #if HAVE_AS_TLS
33809 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33810 {
33811 if (TREE_PUBLIC (decl))
33812 return tls_data_section;
33813 else if (bss_initializer_p (decl))
33814 {
33815 /* Convert to COMMON to emit in BSS. */
33816 DECL_COMMON (decl) = 1;
33817 return tls_comm_section;
33818 }
33819 else
33820 return tls_private_data_section;
33821 }
33822 else
33823 #endif
33824 if (TREE_PUBLIC (decl))
33825 return data_section;
33826 else
33827 return private_data_section;
33828 }
33829 }
33830
33831 static void
33832 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33833 {
33834 const char *name;
33835
33836 /* Use select_section for private data and uninitialized data with
33837 alignment <= BIGGEST_ALIGNMENT. */
33838 if (!TREE_PUBLIC (decl)
33839 || DECL_COMMON (decl)
33840 || (DECL_INITIAL (decl) == NULL_TREE
33841 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33842 || DECL_INITIAL (decl) == error_mark_node
33843 || (flag_zero_initialized_in_bss
33844 && initializer_zerop (DECL_INITIAL (decl))))
33845 return;
33846
33847 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33848 name = (*targetm.strip_name_encoding) (name);
33849 set_decl_section_name (decl, name);
33850 }
33851
33852 /* Select section for constant in constant pool.
33853
33854 On RS/6000, all constants are in the private read-only data area.
33855 However, if this is being placed in the TOC it must be output as a
33856 toc entry. */
33857
33858 static section *
33859 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33860 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33861 {
33862 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33863 return toc_section;
33864 else
33865 return read_only_private_data_section;
33866 }
33867
33868 /* Remove any trailing [DS] or the like from the symbol name. */
33869
33870 static const char *
33871 rs6000_xcoff_strip_name_encoding (const char *name)
33872 {
33873 size_t len;
33874 if (*name == '*')
33875 name++;
33876 len = strlen (name);
33877 if (name[len - 1] == ']')
33878 return ggc_alloc_string (name, len - 4);
33879 else
33880 return name;
33881 }
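/* For example, "foo[DS]" strips to "foo" and "*bar" to "bar".  Note the
   code assumes a trailing bracketed suffix is exactly four characters
   long, which holds for the two-letter XCOFF mapping classes ("[DS]",
   "[RW]", "[RO]", ...).  */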
33882
33883 /* Section attributes. AIX is always PIC. */
33884
33885 static unsigned int
33886 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33887 {
33888 unsigned int align;
33889 unsigned int flags = default_section_type_flags (decl, name, reloc);
33890
33891 /* Align to at least UNIT size. */
33892 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33893 align = MIN_UNITS_PER_WORD;
33894 else
33895 /* Increase alignment of large objects if not already stricter. */
33896 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33897 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33898 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33899
33900 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33901 }
33902
33903 /* Output at beginning of assembler file.
33904
33905 Initialize the section names for the RS/6000 at this point.
33906
33907 Specify filename, including full path, to assembler.
33908
33909 We want to go into the TOC section so at least one .toc will be emitted.
33910 Also, in order to output proper .bs/.es pairs, we need at least one static
33911 [RW] section emitted.
33912
33913 Finally, declare mcount when profiling to make the assembler happy. */
33914
33915 static void
33916 rs6000_xcoff_file_start (void)
33917 {
33918 rs6000_gen_section_name (&xcoff_bss_section_name,
33919 main_input_filename, ".bss_");
33920 rs6000_gen_section_name (&xcoff_private_data_section_name,
33921 main_input_filename, ".rw_");
33922 rs6000_gen_section_name (&xcoff_read_only_section_name,
33923 main_input_filename, ".ro_");
33924 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33925 main_input_filename, ".tls_");
33926 rs6000_gen_section_name (&xcoff_tbss_section_name,
33927 main_input_filename, ".tbss_[UL]");
33928
33929 fputs ("\t.file\t", asm_out_file);
33930 output_quoted_string (asm_out_file, main_input_filename);
33931 fputc ('\n', asm_out_file);
33932 if (write_symbols != NO_DEBUG)
33933 switch_to_section (private_data_section);
33934 switch_to_section (toc_section);
33935 switch_to_section (text_section);
33936 if (profile_flag)
33937 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33938 rs6000_file_start ();
33939 }
33940
33941 /* Output at end of assembler file.
33942 On the RS/6000, referencing data should automatically pull in text. */
33943
33944 static void
33945 rs6000_xcoff_file_end (void)
33946 {
33947 switch_to_section (text_section);
33948 fputs ("_section_.text:\n", asm_out_file);
33949 switch_to_section (data_section);
33950 fputs (TARGET_32BIT
33951 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33952 asm_out_file);
33953 }
33954
33955 struct declare_alias_data
33956 {
33957 FILE *file;
33958 bool function_descriptor;
33959 };
33960
33961 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33962
33963 static bool
33964 rs6000_declare_alias (struct symtab_node *n, void *d)
33965 {
33966 struct declare_alias_data *data = (struct declare_alias_data *)d;
33967 /* The main symbol is output specially, because the varasm machinery
33968 does part of the job for us; we do not need to emit .globl/.lglobl and such. */
33969 if (!n->alias || n->weakref)
33970 return false;
33971
33972 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33973 return false;
33974
33975 /* Prevent assemble_alias from trying to use .set pseudo operation
33976 that does not behave as expected by the middle-end. */
33977 TREE_ASM_WRITTEN (n->decl) = true;
33978
33979 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33980 char *buffer = (char *) alloca (strlen (name) + 2);
33981 char *p;
33982 int dollar_inside = 0;
33983
33984 strcpy (buffer, name);
33985 p = strchr (buffer, '$');
33986 while (p) {
33987 *p = '_';
33988 dollar_inside++;
33989 p = strchr (p + 1, '$');
33990 }
33991 if (TREE_PUBLIC (n->decl))
33992 {
33993 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33994 {
33995 if (dollar_inside) {
33996 if (data->function_descriptor)
33997 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33998 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33999 }
34000 if (data->function_descriptor)
34001 {
34002 fputs ("\t.globl .", data->file);
34003 RS6000_OUTPUT_BASENAME (data->file, buffer);
34004 putc ('\n', data->file);
34005 }
34006 fputs ("\t.globl ", data->file);
34007 RS6000_OUTPUT_BASENAME (data->file, buffer);
34008 putc ('\n', data->file);
34009 }
34010 #ifdef ASM_WEAKEN_DECL
34011 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34012 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34013 #endif
34014 }
34015 else
34016 {
34017 if (dollar_inside)
34018 {
34019 if (data->function_descriptor)
34020 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34021 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34022 }
34023 if (data->function_descriptor)
34024 {
34025 fputs ("\t.lglobl .", data->file);
34026 RS6000_OUTPUT_BASENAME (data->file, buffer);
34027 putc ('\n', data->file);
34028 }
34029 fputs ("\t.lglobl ", data->file);
34030 RS6000_OUTPUT_BASENAME (data->file, buffer);
34031 putc ('\n', data->file);
34032 }
34033 if (data->function_descriptor)
34034 fputs (".", data->file);
34035 RS6000_OUTPUT_BASENAME (data->file, buffer);
34036 fputs (":\n", data->file);
34037 return false;
34038 }
34039
34040
34041 #ifdef HAVE_GAS_HIDDEN
34042 /* Helper function to calculate visibility of a DECL
34043 and return the value as a const string. */
34044
34045 static const char *
34046 rs6000_xcoff_visibility (tree decl)
34047 {
34048 static const char * const visibility_types[] = {
34049 "", ",protected", ",hidden", ",internal"
34050 };
34051
34052 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34053 return visibility_types[vis];
34054 }
34055 #endif
34056
34057
34058 /* This macro produces the initial definition of a function name.
34059 On the RS/6000, we need to place an extra '.' in the function name and
34060 output the function descriptor.
34061 Dollar signs are converted to underscores.
34062
34063 The csect for the function will have already been created when
34064 text_section was selected. We do have to go back to that csect, however.
34065
34066 The third and fourth parameters to the .function pseudo-op (16 and 044)
34067 are placeholders which no longer have any use.
34068
34069 Because AIX assembler's .set command has unexpected semantics, we output
34070 all aliases as alternative labels in front of the definition. */
34071
34072 void
34073 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34074 {
34075 char *buffer = (char *) alloca (strlen (name) + 1);
34076 char *p;
34077 int dollar_inside = 0;
34078 struct declare_alias_data data = {file, false};
34079
34080 strcpy (buffer, name);
34081 p = strchr (buffer, '$');
34082 while (p) {
34083 *p = '_';
34084 dollar_inside++;
34085 p = strchr (p + 1, '$');
34086 }
34087 if (TREE_PUBLIC (decl))
34088 {
34089 if (!RS6000_WEAK || !DECL_WEAK (decl))
34090 {
34091 if (dollar_inside) {
34092 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34093 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34094 }
34095 fputs ("\t.globl .", file);
34096 RS6000_OUTPUT_BASENAME (file, buffer);
34097 #ifdef HAVE_GAS_HIDDEN
34098 fputs (rs6000_xcoff_visibility (decl), file);
34099 #endif
34100 putc ('\n', file);
34101 }
34102 }
34103 else
34104 {
34105 if (dollar_inside) {
34106 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34107 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34108 }
34109 fputs ("\t.lglobl .", file);
34110 RS6000_OUTPUT_BASENAME (file, buffer);
34111 putc ('\n', file);
34112 }
34113 fputs ("\t.csect ", file);
34114 RS6000_OUTPUT_BASENAME (file, buffer);
34115 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34116 RS6000_OUTPUT_BASENAME (file, buffer);
34117 fputs (":\n", file);
34118 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34119 &data, true);
34120 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34121 RS6000_OUTPUT_BASENAME (file, buffer);
34122 fputs (", TOC[tc0], 0\n", file);
34123 in_section = NULL;
34124 switch_to_section (function_section (decl));
34125 putc ('.', file);
34126 RS6000_OUTPUT_BASENAME (file, buffer);
34127 fputs (":\n", file);
34128 data.function_descriptor = true;
34129 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34130 &data, true);
34131 if (!DECL_IGNORED_P (decl))
34132 {
34133 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34134 xcoffout_declare_function (file, decl, buffer);
34135 else if (write_symbols == DWARF2_DEBUG)
34136 {
34137 name = (*targetm.strip_name_encoding) (name);
34138 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34139 }
34140 }
34141 return;
34142 }
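/* A sketch of what the function above prints for a public function
   "foo" on 64-bit AIX (the .text csect line is supplied by
   function_section () and is shown here only as an assumption):

	.globl .foo
	.csect foo[DS],3
   foo:
	.llong .foo, TOC[tc0], 0
	.csect .text[PR]
   .foo:

   i.e. a function descriptor csect "foo" whose first word points at the
   actual code entry ".foo".  */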
34143
34144
34145 /* Output assembly language to globalize a symbol from a DECL,
34146 possibly with visibility. */
34147
34148 void
34149 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34150 {
34151 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34152 fputs (GLOBAL_ASM_OP, stream);
34153 RS6000_OUTPUT_BASENAME (stream, name);
34154 #ifdef HAVE_GAS_HIDDEN
34155 fputs (rs6000_xcoff_visibility (decl), stream);
34156 #endif
34157 putc ('\n', stream);
34158 }
34159
34160 /* Output assembly language to define a symbol as COMMON from a DECL,
34161 possibly with visibility. */
34162
34163 void
34164 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34165 tree decl ATTRIBUTE_UNUSED,
34166 const char *name,
34167 unsigned HOST_WIDE_INT size,
34168 unsigned HOST_WIDE_INT align)
34169 {
34170 unsigned HOST_WIDE_INT align2 = 2;
34171
34172 if (align > 32)
34173 align2 = floor_log2 (align / BITS_PER_UNIT);
34174 else if (size > 4)
34175 align2 = 3;
34176
34177 fputs (COMMON_ASM_OP, stream);
34178 RS6000_OUTPUT_BASENAME (stream, name);
34179
34180 fprintf (stream,
34181 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34182 size, align2);
34183
34184 #ifdef HAVE_GAS_HIDDEN
34185 if (decl != NULL)
34186 fputs (rs6000_xcoff_visibility (decl), stream);
34187 #endif
34188 putc ('\n', stream);
34189 }
34190
34191 /* This macro produces the initial definition of an object (variable) name.
34192 Because AIX assembler's .set command has unexpected semantics, we output
34193 all aliases as alternative labels in front of the definition. */
34194
34195 void
34196 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34197 {
34198 struct declare_alias_data data = {file, false};
34199 RS6000_OUTPUT_BASENAME (file, name);
34200 fputs (":\n", file);
34201 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34202 &data, true);
34203 }
34204
34205 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34206
34207 void
34208 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34209 {
34210 fputs (integer_asm_op (size, FALSE), file);
34211 assemble_name (file, label);
34212 fputs ("-$", file);
34213 }
34214
34215 /* Output a symbol offset relative to the dbase for the current object.
34216 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34217 signed offsets.
34218
34219 __gcc_unwind_dbase is embedded in all executables/libraries through
34220 libgcc/config/rs6000/crtdbase.S. */
34221
34222 void
34223 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34224 {
34225 fputs (integer_asm_op (size, FALSE), file);
34226 assemble_name (file, label);
34227 fputs("-__gcc_unwind_dbase", file);
34228 }
34229
34230 #ifdef HAVE_AS_TLS
34231 static void
34232 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34233 {
34234 rtx symbol;
34235 int flags;
34236 const char *symname;
34237
34238 default_encode_section_info (decl, rtl, first);
34239
34240 /* Careful not to prod global register variables. */
34241 if (!MEM_P (rtl))
34242 return;
34243 symbol = XEXP (rtl, 0);
34244 if (GET_CODE (symbol) != SYMBOL_REF)
34245 return;
34246
34247 flags = SYMBOL_REF_FLAGS (symbol);
34248
34249 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34250 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34251
34252 SYMBOL_REF_FLAGS (symbol) = flags;
34253
34254 /* Append mapping class to extern decls. */
34255 symname = XSTR (symbol, 0);
34256 if (decl /* sync condition with assemble_external () */
34257 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34258 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34259 || TREE_CODE (decl) == FUNCTION_DECL)
34260 && symname[strlen (symname) - 1] != ']')
34261 {
34262 char *newname = (char *) alloca (strlen (symname) + 5);
34263 strcpy (newname, symname);
34264 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34265 ? "[DS]" : "[UA]"));
34266 XSTR (symbol, 0) = ggc_strdup (newname);
34267 }
34268 }
34269 #endif /* HAVE_AS_TLS */
34270 #endif /* TARGET_XCOFF */
34271
34272 void
34273 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34274 const char *name, const char *val)
34275 {
34276 fputs ("\t.weak\t", stream);
34277 RS6000_OUTPUT_BASENAME (stream, name);
34278 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34279 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34280 {
34281 if (TARGET_XCOFF)
34282 fputs ("[DS]", stream);
34283 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34284 if (TARGET_XCOFF)
34285 fputs (rs6000_xcoff_visibility (decl), stream);
34286 #endif
34287 fputs ("\n\t.weak\t.", stream);
34288 RS6000_OUTPUT_BASENAME (stream, name);
34289 }
34290 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34291 if (TARGET_XCOFF)
34292 fputs (rs6000_xcoff_visibility (decl), stream);
34293 #endif
34294 fputc ('\n', stream);
34295 if (val)
34296 {
34297 #ifdef ASM_OUTPUT_DEF
34298 ASM_OUTPUT_DEF (stream, name, val);
34299 #endif
34300 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34301 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34302 {
34303 fputs ("\t.set\t.", stream);
34304 RS6000_OUTPUT_BASENAME (stream, name);
34305 fputs (",.", stream);
34306 RS6000_OUTPUT_BASENAME (stream, val);
34307 fputc ('\n', stream);
34308 }
34309 }
34310 }
34311
34312
34313 /* Return true if INSN should not be copied. */
34314
34315 static bool
34316 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34317 {
34318 return recog_memoized (insn) >= 0
34319 && get_attr_cannot_copy (insn);
34320 }
34321
34322 /* Compute a (partial) cost for rtx X. Return true if the complete
34323 cost has been computed, and false if subexpressions should be
34324 scanned. In either case, *TOTAL contains the cost result. */
34325
34326 static bool
34327 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34328 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34329 {
34330 int code = GET_CODE (x);
34331
34332 switch (code)
34333 {
34334 /* On the RS/6000, if it is valid in the insn, it is free. */
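/* For example, (plus (reg) (const_int 12)) satisfies constraint I and
   folds into a single addi, so the constant costs nothing here; a
   constant that fits none of the outer insn's immediate forms falls
   through and is costed like any other constant load below.  */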
34335 case CONST_INT:
34336 if (((outer_code == SET
34337 || outer_code == PLUS
34338 || outer_code == MINUS)
34339 && (satisfies_constraint_I (x)
34340 || satisfies_constraint_L (x)))
34341 || (outer_code == AND
34342 && (satisfies_constraint_K (x)
34343 || (mode == SImode
34344 ? satisfies_constraint_L (x)
34345 : satisfies_constraint_J (x))))
34346 || ((outer_code == IOR || outer_code == XOR)
34347 && (satisfies_constraint_K (x)
34348 || (mode == SImode
34349 ? satisfies_constraint_L (x)
34350 : satisfies_constraint_J (x))))
34351 || outer_code == ASHIFT
34352 || outer_code == ASHIFTRT
34353 || outer_code == LSHIFTRT
34354 || outer_code == ROTATE
34355 || outer_code == ROTATERT
34356 || outer_code == ZERO_EXTRACT
34357 || (outer_code == MULT
34358 && satisfies_constraint_I (x))
34359 || ((outer_code == DIV || outer_code == UDIV
34360 || outer_code == MOD || outer_code == UMOD)
34361 && exact_log2 (INTVAL (x)) >= 0)
34362 || (outer_code == COMPARE
34363 && (satisfies_constraint_I (x)
34364 || satisfies_constraint_K (x)))
34365 || ((outer_code == EQ || outer_code == NE)
34366 && (satisfies_constraint_I (x)
34367 || satisfies_constraint_K (x)
34368 || (mode == SImode
34369 ? satisfies_constraint_L (x)
34370 : satisfies_constraint_J (x))))
34371 || (outer_code == GTU
34372 && satisfies_constraint_I (x))
34373 || (outer_code == LTU
34374 && satisfies_constraint_P (x)))
34375 {
34376 *total = 0;
34377 return true;
34378 }
34379 else if ((outer_code == PLUS
34380 && reg_or_add_cint_operand (x, VOIDmode))
34381 || (outer_code == MINUS
34382 && reg_or_sub_cint_operand (x, VOIDmode))
34383 || ((outer_code == SET
34384 || outer_code == IOR
34385 || outer_code == XOR)
34386 && (INTVAL (x)
34387 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34388 {
34389 *total = COSTS_N_INSNS (1);
34390 return true;
34391 }
34392 /* FALLTHRU */
34393
34394 case CONST_DOUBLE:
34395 case CONST_WIDE_INT:
34396 case CONST:
34397 case HIGH:
34398 case SYMBOL_REF:
34399 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34400 return true;
34401
34402 case MEM:
34403 /* When optimizing for size, MEM should be slightly more expensive
34404 than generating the address, e.g., (plus (reg) (const)).
34405 L1 cache latency is about two instructions. */
34406 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34407 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34408 *total += COSTS_N_INSNS (100);
34409 return true;
34410
34411 case LABEL_REF:
34412 *total = 0;
34413 return true;
34414
34415 case PLUS:
34416 case MINUS:
34417 if (FLOAT_MODE_P (mode))
34418 *total = rs6000_cost->fp;
34419 else
34420 *total = COSTS_N_INSNS (1);
34421 return false;
34422
34423 case MULT:
34424 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34425 && satisfies_constraint_I (XEXP (x, 1)))
34426 {
34427 if (INTVAL (XEXP (x, 1)) >= -256
34428 && INTVAL (XEXP (x, 1)) <= 255)
34429 *total = rs6000_cost->mulsi_const9;
34430 else
34431 *total = rs6000_cost->mulsi_const;
34432 }
34433 else if (mode == SFmode)
34434 *total = rs6000_cost->fp;
34435 else if (FLOAT_MODE_P (mode))
34436 *total = rs6000_cost->dmul;
34437 else if (mode == DImode)
34438 *total = rs6000_cost->muldi;
34439 else
34440 *total = rs6000_cost->mulsi;
34441 return false;
34442
34443 case FMA:
34444 if (mode == SFmode)
34445 *total = rs6000_cost->fp;
34446 else
34447 *total = rs6000_cost->dmul;
34448 break;
34449
34450 case DIV:
34451 case MOD:
34452 if (FLOAT_MODE_P (mode))
34453 {
34454 *total = mode == DFmode ? rs6000_cost->ddiv
34455 : rs6000_cost->sdiv;
34456 return false;
34457 }
34458 /* FALLTHRU */
34459
34460 case UDIV:
34461 case UMOD:
34462 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34463 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34464 {
34465 if (code == DIV || code == MOD)
34466 /* Shift, addze */
34467 *total = COSTS_N_INSNS (2);
34468 else
34469 /* Shift */
34470 *total = COSTS_N_INSNS (1);
34471 }
34472 else
34473 {
34474 if (GET_MODE (XEXP (x, 1)) == DImode)
34475 *total = rs6000_cost->divdi;
34476 else
34477 *total = rs6000_cost->divsi;
34478 }
34479 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34480 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34481 *total += COSTS_N_INSNS (2);
34482 return false;
34483
34484 case CTZ:
34485 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34486 return false;
34487
34488 case FFS:
34489 *total = COSTS_N_INSNS (4);
34490 return false;
34491
34492 case POPCOUNT:
34493 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34494 return false;
34495
34496 case PARITY:
34497 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34498 return false;
34499
34500 case NOT:
34501 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34502 *total = 0;
34503 else
34504 *total = COSTS_N_INSNS (1);
34505 return false;
34506
34507 case AND:
34508 if (CONST_INT_P (XEXP (x, 1)))
34509 {
34510 rtx left = XEXP (x, 0);
34511 rtx_code left_code = GET_CODE (left);
34512
34513 /* rotate-and-mask: 1 insn. */
34514 if ((left_code == ROTATE
34515 || left_code == ASHIFT
34516 || left_code == LSHIFTRT)
34517 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34518 {
34519 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34520 if (!CONST_INT_P (XEXP (left, 1)))
34521 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34522 *total += COSTS_N_INSNS (1);
34523 return true;
34524 }
34525
34526 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34527 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34528 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34529 || (val & 0xffff) == val
34530 || (val & 0xffff0000) == val
34531 || ((val & 0xffff) == 0 && mode == SImode))
34532 {
34533 *total = rtx_cost (left, mode, AND, 0, speed);
34534 *total += COSTS_N_INSNS (1);
34535 return true;
34536 }
34537
34538 /* 2 insns. */
34539 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34540 {
34541 *total = rtx_cost (left, mode, AND, 0, speed);
34542 *total += COSTS_N_INSNS (2);
34543 return true;
34544 }
34545 }
34546
34547 *total = COSTS_N_INSNS (1);
34548 return false;
34549
34550 case IOR:
34551 /* FIXME */
34552 *total = COSTS_N_INSNS (1);
34553 return true;
34554
34555 case CLZ:
34556 case XOR:
34557 case ZERO_EXTRACT:
34558 *total = COSTS_N_INSNS (1);
34559 return false;
34560
34561 case ASHIFT:
34562 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34563 the sign extend and shift separately within the insn. */
34564 if (TARGET_EXTSWSLI && mode == DImode
34565 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34566 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34567 {
34568 *total = 0;
34569 return false;
34570 }
34571 /* fall through */
34572
34573 case ASHIFTRT:
34574 case LSHIFTRT:
34575 case ROTATE:
34576 case ROTATERT:
34577 /* Handle mul_highpart. */
34578 if (outer_code == TRUNCATE
34579 && GET_CODE (XEXP (x, 0)) == MULT)
34580 {
34581 if (mode == DImode)
34582 *total = rs6000_cost->muldi;
34583 else
34584 *total = rs6000_cost->mulsi;
34585 return true;
34586 }
34587 else if (outer_code == AND)
34588 *total = 0;
34589 else
34590 *total = COSTS_N_INSNS (1);
34591 return false;
34592
34593 case SIGN_EXTEND:
34594 case ZERO_EXTEND:
34595 if (GET_CODE (XEXP (x, 0)) == MEM)
34596 *total = 0;
34597 else
34598 *total = COSTS_N_INSNS (1);
34599 return false;
34600
34601 case COMPARE:
34602 case NEG:
34603 case ABS:
34604 if (!FLOAT_MODE_P (mode))
34605 {
34606 *total = COSTS_N_INSNS (1);
34607 return false;
34608 }
34609 /* FALLTHRU */
34610
34611 case FLOAT:
34612 case UNSIGNED_FLOAT:
34613 case FIX:
34614 case UNSIGNED_FIX:
34615 case FLOAT_TRUNCATE:
34616 *total = rs6000_cost->fp;
34617 return false;
34618
34619 case FLOAT_EXTEND:
34620 if (mode == DFmode)
34621 *total = rs6000_cost->sfdf_convert;
34622 else
34623 *total = rs6000_cost->fp;
34624 return false;
34625
34626 case UNSPEC:
34627 switch (XINT (x, 1))
34628 {
34629 case UNSPEC_FRSP:
34630 *total = rs6000_cost->fp;
34631 return true;
34632
34633 default:
34634 break;
34635 }
34636 break;
34637
34638 case CALL:
34639 case IF_THEN_ELSE:
34640 if (!speed)
34641 {
34642 *total = COSTS_N_INSNS (1);
34643 return true;
34644 }
34645 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34646 {
34647 *total = rs6000_cost->fp;
34648 return false;
34649 }
34650 break;
34651
34652 case NE:
34653 case EQ:
34654 case GTU:
34655 case LTU:
34656 /* Carry bit requires mode == Pmode.
34657 NEG or PLUS already counted so only add one. */
34658 if (mode == Pmode
34659 && (outer_code == NEG || outer_code == PLUS))
34660 {
34661 *total = COSTS_N_INSNS (1);
34662 return true;
34663 }
34664 /* FALLTHRU */
34665
34666 case GT:
34667 case LT:
34668 case UNORDERED:
34669 if (outer_code == SET)
34670 {
34671 if (XEXP (x, 1) == const0_rtx)
34672 {
34673 *total = COSTS_N_INSNS (2);
34674 return true;
34675 }
34676 else
34677 {
34678 *total = COSTS_N_INSNS (3);
34679 return false;
34680 }
34681 }
34682 /* CC COMPARE. */
34683 if (outer_code == COMPARE)
34684 {
34685 *total = 0;
34686 return true;
34687 }
34688 break;
34689
34690 default:
34691 break;
34692 }
34693
34694 return false;
34695 }
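
/* For example, costing (mult:SI (reg:SI) (const_int 5)) above sets
   *TOTAL to rs6000_cost->mulsi_const9 (the constant fits in 9 bits)
   and returns false, so the generic code still scans and costs the
   register operand separately.  */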
34696
34697 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34698
34699 static bool
34700 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34701 int opno, int *total, bool speed)
34702 {
34703 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34704
34705 fprintf (stderr,
34706 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34707 "opno = %d, total = %d, speed = %s, x:\n",
34708 ret ? "complete" : "scan inner",
34709 GET_MODE_NAME (mode),
34710 GET_RTX_NAME (outer_code),
34711 opno,
34712 *total,
34713 speed ? "true" : "false");
34714
34715 debug_rtx (x);
34716
34717 return ret;
34718 }
34719
34720 static int
34721 rs6000_insn_cost (rtx_insn *insn, bool speed)
34722 {
34723 if (recog_memoized (insn) < 0)
34724 return 0;
34725
34726 if (!speed)
34727 return get_attr_length (insn);
34728
34729 int cost = get_attr_cost (insn);
34730 if (cost > 0)
34731 return cost;
34732
34733 int n = get_attr_length (insn) / 4;
34734 enum attr_type type = get_attr_type (insn);
34735
34736 switch (type)
34737 {
34738 case TYPE_LOAD:
34739 case TYPE_FPLOAD:
34740 case TYPE_VECLOAD:
34741 cost = COSTS_N_INSNS (n + 1);
34742 break;
34743
34744 case TYPE_MUL:
34745 switch (get_attr_size (insn))
34746 {
34747 case SIZE_8:
34748 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34749 break;
34750 case SIZE_16:
34751 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34752 break;
34753 case SIZE_32:
34754 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34755 break;
34756 case SIZE_64:
34757 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34758 break;
34759 default:
34760 gcc_unreachable ();
34761 }
34762 break;
34763 case TYPE_DIV:
34764 switch (get_attr_size (insn))
34765 {
34766 case SIZE_32:
34767 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34768 break;
34769 case SIZE_64:
34770 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34771 break;
34772 default:
34773 gcc_unreachable ();
34774 }
34775 break;
34776
34777 case TYPE_FP:
34778 cost = n * rs6000_cost->fp;
34779 break;
34780 case TYPE_DMUL:
34781 cost = n * rs6000_cost->dmul;
34782 break;
34783 case TYPE_SDIV:
34784 cost = n * rs6000_cost->sdiv;
34785 break;
34786 case TYPE_DDIV:
34787 cost = n * rs6000_cost->ddiv;
34788 break;
34789
34790 case TYPE_SYNC:
34791 case TYPE_LOAD_L:
34792 case TYPE_MFCR:
34793 case TYPE_MFCRF:
34794 cost = COSTS_N_INSNS (n + 2);
34795 break;
34796
34797 default:
34798 cost = COSTS_N_INSNS (n);
34799 }
34800
34801 return cost;
34802 }
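
/* For example, an 8-byte TYPE_FP insn (two machine instructions) with
   no explicit cost attribute is costed above as 2 * rs6000_cost->fp,
   while a plain 4-byte insn falls through to COSTS_N_INSNS (1).  */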
34803
34804 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34805
34806 static int
34807 rs6000_debug_address_cost (rtx x, machine_mode mode,
34808 addr_space_t as, bool speed)
34809 {
34810 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34811
34812 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34813 ret, speed ? "true" : "false");
34814 debug_rtx (x);
34815
34816 return ret;
34817 }
34818
34819
34820 /* A C expression returning the cost of moving data from a register of class
34821 CLASS1 to one of CLASS2. */
34822
34823 static int
34824 rs6000_register_move_cost (machine_mode mode,
34825 reg_class_t from, reg_class_t to)
34826 {
34827 int ret;
34828
34829 if (TARGET_DEBUG_COST)
34830 dbg_cost_ctrl++;
34831
34832 /* Moves from/to GENERAL_REGS. */
34833 if (reg_classes_intersect_p (to, GENERAL_REGS)
34834 || reg_classes_intersect_p (from, GENERAL_REGS))
34835 {
34836 reg_class_t rclass = from;
34837
34838 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34839 rclass = to;
34840
34841 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34842 ret = (rs6000_memory_move_cost (mode, rclass, false)
34843 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34844
34845 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34846 shift. */
34847 else if (rclass == CR_REGS)
34848 ret = 4;
34849
34850 /* For those processors that have slow LR/CTR moves, make them more
34851 expensive than memory in order to bias spills to memory. */
34852 else if ((rs6000_tune == PROCESSOR_POWER6
34853 || rs6000_tune == PROCESSOR_POWER7
34854 || rs6000_tune == PROCESSOR_POWER8
34855 || rs6000_tune == PROCESSOR_POWER9)
34856 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34857 ret = 6 * hard_regno_nregs (0, mode);
34858
34859 else
34860 /* A move will cost one instruction per GPR moved. */
34861 ret = 2 * hard_regno_nregs (0, mode);
34862 }
34863
34864 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34865 else if (VECTOR_MEM_VSX_P (mode)
34866 && reg_classes_intersect_p (to, VSX_REGS)
34867 && reg_classes_intersect_p (from, VSX_REGS))
34868 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34869
34870 /* Moving between two similar registers is just one instruction. */
34871 else if (reg_classes_intersect_p (to, from))
34872 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34873
34874 /* Everything else has to go through GENERAL_REGS. */
34875 else
34876 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34877 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34878
34879 if (TARGET_DEBUG_COST)
34880 {
34881 if (dbg_cost_ctrl == 1)
34882 fprintf (stderr,
34883 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34884 ret, GET_MODE_NAME (mode), reg_class_names[from],
34885 reg_class_names[to]);
34886 dbg_cost_ctrl--;
34887 }
34888
34889 return ret;
34890 }
34891
34892 /* A C expression returning the cost of moving data of MODE from a register to
34893 or from memory. */
34894
34895 static int
34896 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34897 bool in ATTRIBUTE_UNUSED)
34898 {
34899 int ret;
34900
34901 if (TARGET_DEBUG_COST)
34902 dbg_cost_ctrl++;
34903
34904 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34905 ret = 4 * hard_regno_nregs (0, mode);
34906 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34907 || reg_classes_intersect_p (rclass, VSX_REGS)))
34908 ret = 4 * hard_regno_nregs (32, mode);
34909 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34910 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34911 else
34912 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34913
34914 if (TARGET_DEBUG_COST)
34915 {
34916 if (dbg_cost_ctrl == 1)
34917 fprintf (stderr,
34918 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34919 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34920 dbg_cost_ctrl--;
34921 }
34922
34923 return ret;
34924 }
34925
34926 /* Return the decl of a target-specific builtin that implements the
34927 reciprocal of the function, or NULL_TREE if not available. */
34928
34929 static tree
34930 rs6000_builtin_reciprocal (tree fndecl)
34931 {
34932 switch (DECL_FUNCTION_CODE (fndecl))
34933 {
34934 case VSX_BUILTIN_XVSQRTDP:
34935 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34936 return NULL_TREE;
34937
34938 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34939
34940 case VSX_BUILTIN_XVSQRTSP:
34941 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34942 return NULL_TREE;
34943
34944 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34945
34946 default:
34947 return NULL_TREE;
34948 }
34949 }
34950
34951 /* Load up a constant. If the mode is a vector mode, splat the value across
34952 all of the vector elements. */
34953
34954 static rtx
34955 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34956 {
34957 rtx reg;
34958
34959 if (mode == SFmode || mode == DFmode)
34960 {
34961 rtx d = const_double_from_real_value (dconst, mode);
34962 reg = force_reg (mode, d);
34963 }
34964 else if (mode == V4SFmode)
34965 {
34966 rtx d = const_double_from_real_value (dconst, SFmode);
34967 rtvec v = gen_rtvec (4, d, d, d, d);
34968 reg = gen_reg_rtx (mode);
34969 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34970 }
34971 else if (mode == V2DFmode)
34972 {
34973 rtx d = const_double_from_real_value (dconst, DFmode);
34974 rtvec v = gen_rtvec (2, d, d);
34975 reg = gen_reg_rtx (mode);
34976 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34977 }
34978 else
34979 gcc_unreachable ();
34980
34981 return reg;
34982 }
34983
34984 /* Generate an FMA instruction. */
34985
34986 static void
34987 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34988 {
34989 machine_mode mode = GET_MODE (target);
34990 rtx dst;
34991
34992 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34993 gcc_assert (dst != NULL);
34994
34995 if (dst != target)
34996 emit_move_insn (target, dst);
34997 }
34998
34999 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35000
35001 static void
35002 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35003 {
35004 machine_mode mode = GET_MODE (dst);
35005 rtx r;
35006
35007 /* This is a tad more complicated, since the fnma_optab is for
35008 a different expression: fma(-m1, m2, a), which is the same
35009 thing except in the case of signed zeros.
35010
35011 Fortunately we know that if FMA is supported that FNMSUB is
35012 also supported in the ISA. Just expand it directly. */
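
  /* For example, with m1 = 1.0, m2 = 0.0 and a = 0.0, fma (-m1, m2, a)
     yields -0.0 + 0.0 = +0.0, whereas -fma (m1, m2, -a) yields
     -(+0.0 + -0.0) = -0.0; the two forms differ only in the sign of
     the zero they produce.  */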
35013
35014 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35015
35016 r = gen_rtx_NEG (mode, a);
35017 r = gen_rtx_FMA (mode, m1, m2, r);
35018 r = gen_rtx_NEG (mode, r);
35019 emit_insn (gen_rtx_SET (dst, r));
35020 }
35021
35022 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35023 add a reg_note saying that this was a division. Support both scalar and
35024 vector divide. Assumes no trapping math and finite arguments. */
35025
35026 void
35027 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35028 {
35029 machine_mode mode = GET_MODE (dst);
35030 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35031 int i;
35032
35033 /* Low precision estimates guarantee 5 bits of accuracy. High
35034 precision estimates guarantee 14 bits of accuracy. SFmode
35035 requires 23 bits of accuracy. DFmode requires 52 bits of
35036 accuracy. Each pass at least doubles the accuracy, leading
35037 to the following. */
35038 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35039 if (mode == DFmode || mode == V2DFmode)
35040 passes++;
35041
35042 enum insn_code code = optab_handler (smul_optab, mode);
35043 insn_gen_fn gen_mul = GEN_FCN (code);
35044
35045 gcc_assert (code != CODE_FOR_nothing);
35046
35047 one = rs6000_load_constant_and_splat (mode, dconst1);
35048
35049 /* x0 = 1./d estimate */
35050 x0 = gen_reg_rtx (mode);
35051 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35052 UNSPEC_FRES)));
35053
35054 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35055 if (passes > 1) {
35056
35057 /* e0 = 1. - d * x0 */
35058 e0 = gen_reg_rtx (mode);
35059 rs6000_emit_nmsub (e0, d, x0, one);
35060
35061 /* x1 = x0 + e0 * x0 */
35062 x1 = gen_reg_rtx (mode);
35063 rs6000_emit_madd (x1, e0, x0, x0);
35064
35065 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35066 ++i, xprev = xnext, eprev = enext) {
35067
35068 /* enext = eprev * eprev */
35069 enext = gen_reg_rtx (mode);
35070 emit_insn (gen_mul (enext, eprev, eprev));
35071
35072 /* xnext = xprev + enext * xprev */
35073 xnext = gen_reg_rtx (mode);
35074 rs6000_emit_madd (xnext, enext, xprev, xprev);
35075 }
35076
35077 } else
35078 xprev = x0;
35079
35080 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35081
35082 /* u = n * xprev */
35083 u = gen_reg_rtx (mode);
35084 emit_insn (gen_mul (u, n, xprev));
35085
35086 /* v = n - (d * u) */
35087 v = gen_reg_rtx (mode);
35088 rs6000_emit_nmsub (v, d, u, n);
35089
35090 /* dst = (v * xprev) + u */
35091 rs6000_emit_madd (dst, v, xprev, u);
35092
35093 if (note_p)
35094 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
35095 }
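
/* For example, for DFmode with TARGET_RECIP_PRECISION (passes == 2) the
   expansion above is roughly:

	x0  = fre (d)		14-bit estimate of 1/d
	e0  = 1 - d * x0	fnmsub
	x1  = x0 + e0 * x0	fmadd, ~28 bits of 1/d
	u   = n * x1
	v   = n - d * u		fnmsub
	dst = u + v * x1	fmadd, full DFmode accuracy  */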
35096
35097 /* Goldschmidt's Algorithm for single/double-precision floating point
35098 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35099
35100 void
35101 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35102 {
35103 machine_mode mode = GET_MODE (src);
35104 rtx e = gen_reg_rtx (mode);
35105 rtx g = gen_reg_rtx (mode);
35106 rtx h = gen_reg_rtx (mode);
35107
35108 /* Low precision estimates guarantee 5 bits of accuracy. High
35109 precision estimates guarantee 14 bits of accuracy. SFmode
35110 requires 23 bits of accuracy. DFmode requires 52 bits of
35111 accuracy. Each pass at least doubles the accuracy, leading
35112 to the following. */
35113 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35114 if (mode == DFmode || mode == V2DFmode)
35115 passes++;
35116
35117 int i;
35118 rtx mhalf;
35119 enum insn_code code = optab_handler (smul_optab, mode);
35120 insn_gen_fn gen_mul = GEN_FCN (code);
35121
35122 gcc_assert (code != CODE_FOR_nothing);
35123
35124 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35125
35126 /* e = rsqrt estimate */
35127 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35128 UNSPEC_RSQRT)));
35129
35130 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
35131 if (!recip)
35132 {
35133 rtx zero = force_reg (mode, CONST0_RTX (mode));
35134
35135 if (mode == SFmode)
35136 {
35137 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35138 e, zero, mode, 0);
35139 if (target != e)
35140 emit_move_insn (e, target);
35141 }
35142 else
35143 {
35144 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35145 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35146 }
35147 }
35148
35149 /* g = sqrt estimate. */
35150 emit_insn (gen_mul (g, e, src));
35151 /* h = 1/(2*sqrt) estimate. */
35152 emit_insn (gen_mul (h, e, mhalf));
35153
35154 if (recip)
35155 {
35156 if (passes == 1)
35157 {
35158 rtx t = gen_reg_rtx (mode);
35159 rs6000_emit_nmsub (t, g, h, mhalf);
35160 /* Apply correction directly to 1/rsqrt estimate. */
35161 rs6000_emit_madd (dst, e, t, e);
35162 }
35163 else
35164 {
35165 for (i = 0; i < passes; i++)
35166 {
35167 rtx t1 = gen_reg_rtx (mode);
35168 rtx g1 = gen_reg_rtx (mode);
35169 rtx h1 = gen_reg_rtx (mode);
35170
35171 rs6000_emit_nmsub (t1, g, h, mhalf);
35172 rs6000_emit_madd (g1, g, t1, g);
35173 rs6000_emit_madd (h1, h, t1, h);
35174
35175 g = g1;
35176 h = h1;
35177 }
35178 /* Multiply by 2 for 1/rsqrt. */
35179 emit_insn (gen_add3_insn (dst, h, h));
35180 }
35181 }
35182 else
35183 {
35184 rtx t = gen_reg_rtx (mode);
35185 rs6000_emit_nmsub (t, g, h, mhalf);
35186 rs6000_emit_madd (dst, g, t, g);
35187 }
35188
35189 return;
35190 }
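
/* In the iteration above, g approximates sqrt(src) and h approximates
   1/(2*sqrt(src)); when both are exact, g*h == 1/2, so t = 1/2 - g*h
   measures the current error.  Updating g to g*(1 + t) and h to
   h*(1 + t) roughly squares the relative error on each pass; at the
   end, g (after one last correction) is the sqrt and 2*h the rsqrt.  */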
35191
35192 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35193 (Power7) targets. DST is the target, and SRC is the argument operand. */
35194
35195 void
35196 rs6000_emit_popcount (rtx dst, rtx src)
35197 {
35198 machine_mode mode = GET_MODE (dst);
35199 rtx tmp1, tmp2;
35200
35201 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35202 if (TARGET_POPCNTD)
35203 {
35204 if (mode == SImode)
35205 emit_insn (gen_popcntdsi2 (dst, src));
35206 else
35207 emit_insn (gen_popcntddi2 (dst, src));
35208 return;
35209 }
35210
35211 tmp1 = gen_reg_rtx (mode);
35212
35213 if (mode == SImode)
35214 {
35215 emit_insn (gen_popcntbsi2 (tmp1, src));
35216 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35217 NULL_RTX, 0);
35218 tmp2 = force_reg (SImode, tmp2);
35219 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35220 }
35221 else
35222 {
35223 emit_insn (gen_popcntbdi2 (tmp1, src));
35224 tmp2 = expand_mult (DImode, tmp1,
35225 GEN_INT ((HOST_WIDE_INT)
35226 0x01010101 << 32 | 0x01010101),
35227 NULL_RTX, 0);
35228 tmp2 = force_reg (DImode, tmp2);
35229 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35230 }
35231 }
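
/* For example, for src = 0xff00ff00 in SImode, popcntb yields the
   per-byte counts 0x08000800; multiplying by 0x01010101 accumulates
   the sum of all byte counts into the most significant byte, and the
   shift right by 24 then leaves dst = 16.  */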
35232
35233
35234 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35235 target, and SRC is the argument operand. */
35236
35237 void
35238 rs6000_emit_parity (rtx dst, rtx src)
35239 {
35240 machine_mode mode = GET_MODE (dst);
35241 rtx tmp;
35242
35243 tmp = gen_reg_rtx (mode);
35244
35245 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35246 if (TARGET_CMPB)
35247 {
35248 if (mode == SImode)
35249 {
35250 emit_insn (gen_popcntbsi2 (tmp, src));
35251 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35252 }
35253 else
35254 {
35255 emit_insn (gen_popcntbdi2 (tmp, src));
35256 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35257 }
35258 return;
35259 }
35260
35261 if (mode == SImode)
35262 {
35263 /* Is mult+shift >= shift+xor+shift+xor? */
35264 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35265 {
35266 rtx tmp1, tmp2, tmp3, tmp4;
35267
35268 tmp1 = gen_reg_rtx (SImode);
35269 emit_insn (gen_popcntbsi2 (tmp1, src));
35270
35271 tmp2 = gen_reg_rtx (SImode);
35272 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35273 tmp3 = gen_reg_rtx (SImode);
35274 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35275
35276 tmp4 = gen_reg_rtx (SImode);
35277 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35278 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35279 }
35280 else
35281 rs6000_emit_popcount (tmp, src);
35282 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35283 }
35284 else
35285 {
35286 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35287 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35288 {
35289 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35290
35291 tmp1 = gen_reg_rtx (DImode);
35292 emit_insn (gen_popcntbdi2 (tmp1, src));
35293
35294 tmp2 = gen_reg_rtx (DImode);
35295 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35296 tmp3 = gen_reg_rtx (DImode);
35297 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35298
35299 tmp4 = gen_reg_rtx (DImode);
35300 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35301 tmp5 = gen_reg_rtx (DImode);
35302 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35303
35304 tmp6 = gen_reg_rtx (DImode);
35305 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35306 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35307 }
35308 else
35309 rs6000_emit_popcount (tmp, src);
35310 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35311 }
35312 }
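
/* In the shift+xor sequences above, each fold XORs the upper half of
   the remaining byte counts into the lower half; the low bit of the
   XOR of all byte counts equals their sum mod 2, which is the parity
   of SRC.  */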
35313
35314 /* Expand an Altivec constant permutation for little endian mode.
35315 OP0 and OP1 are the input vectors and TARGET is the output vector.
35316 SEL specifies the constant permutation vector.
35317
35318 There are two issues: First, the two input operands must be
35319 swapped so that together they form a double-wide array in LE
35320 order. Second, the vperm instruction has surprising behavior
35321 in LE mode: it interprets the elements of the source vectors
35322 in BE mode ("left to right") and interprets the elements of
35323 the destination vector in LE mode ("right to left"). To
35324 correct for this, we must subtract each element of the permute
35325 control vector from 31.
35326
35327 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35328 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35329 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35330 serve as the permute control vector. Then, in BE mode,
35331
35332 vperm 9,10,11,12
35333
35334 places the desired result in vr9. However, in LE mode the
35335 vector contents will be
35336
35337 vr10 = 00000003 00000002 00000001 00000000
35338 vr11 = 00000007 00000006 00000005 00000004
35339
35340 The result of the vperm using the same permute control vector is
35341
35342 vr9 = 05000000 07000000 01000000 03000000
35343
35344 That is, the leftmost 4 bytes of vr10 are interpreted as the
35345 source for the rightmost 4 bytes of vr9, and so on.
35346
35347 If we change the permute control vector to
35348
35349 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35350
35351 and issue
35352
35353 vperm 9,11,10,12
35354
35355 we get the desired
35356
35357 vr9 = 00000006 00000004 00000002 00000000. */
35358
35359 static void
35360 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35361 const vec_perm_indices &sel)
35362 {
35363 unsigned int i;
35364 rtx perm[16];
35365 rtx constv, unspec;
35366
35367 /* Unpack and adjust the constant selector. */
35368 for (i = 0; i < 16; ++i)
35369 {
35370 unsigned int elt = 31 - (sel[i] & 31);
35371 perm[i] = GEN_INT (elt);
35372 }
35373
35374 /* Expand to a permute, swapping the inputs and using the
35375 adjusted selector. */
35376 if (!REG_P (op0))
35377 op0 = force_reg (V16QImode, op0);
35378 if (!REG_P (op1))
35379 op1 = force_reg (V16QImode, op1);
35380
35381 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35382 constv = force_reg (V16QImode, constv);
35383 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35384 UNSPEC_VPERM);
35385 if (!REG_P (target))
35386 {
35387 rtx tmp = gen_reg_rtx (V16QImode);
35388 emit_move_insn (tmp, unspec);
35389 unspec = tmp;
35390 }
35391
35392 emit_move_insn (target, unspec);
35393 }
35394
35395 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35396 permute control vector. But here it's not a constant, so we must
35397 generate a vector NAND or NOR to do the adjustment. */
35398
35399 void
35400 altivec_expand_vec_perm_le (rtx operands[4])
35401 {
35402 rtx notx, iorx, unspec;
35403 rtx target = operands[0];
35404 rtx op0 = operands[1];
35405 rtx op1 = operands[2];
35406 rtx sel = operands[3];
35407 rtx tmp = target;
35408 rtx norreg = gen_reg_rtx (V16QImode);
35409 machine_mode mode = GET_MODE (target);
35410
35411 /* Get everything in regs so the pattern matches. */
35412 if (!REG_P (op0))
35413 op0 = force_reg (mode, op0);
35414 if (!REG_P (op1))
35415 op1 = force_reg (mode, op1);
35416 if (!REG_P (sel))
35417 sel = force_reg (V16QImode, sel);
35418 if (!REG_P (target))
35419 tmp = gen_reg_rtx (mode);
35420
35421 if (TARGET_P9_VECTOR)
35422 {
35423 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35424 UNSPEC_VPERMR);
35425 }
35426 else
35427 {
35428 /* Invert the selector with a VNAND if available, else a VNOR.
35429 The VNAND is preferred for future fusion opportunities. */
35430 notx = gen_rtx_NOT (V16QImode, sel);
35431 iorx = (TARGET_P8_VECTOR
35432 ? gen_rtx_IOR (V16QImode, notx, notx)
35433 : gen_rtx_AND (V16QImode, notx, notx));
35434 emit_insn (gen_rtx_SET (norreg, iorx));
35435
35436 /* Permute with operands reversed and adjusted selector. */
35437 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35438 UNSPEC_VPERM);
35439 }
35440
35441 /* Copy into target, possibly by way of a register. */
35442 if (!REG_P (target))
35443 {
35444 emit_move_insn (tmp, unspec);
35445 unspec = tmp;
35446 }
35447
35448 emit_move_insn (target, unspec);
35449 }
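
/* Note that vperm only uses the low 5 bits of each selector byte, and
   ~e == 31 - e in those bits, so when VPERMR is unavailable the single
   vnand/vnor above (either form reduces to NOT with tied operands)
   performs the same subtract-from-31 adjustment as the constant
   case.  */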
35450
35451 /* Expand an Altivec constant permutation. Return true if we match
35452 an efficient implementation; false to fall back to VPERM.
35453
35454 OP0 and OP1 are the input vectors and TARGET is the output vector.
35455 SEL specifies the constant permutation vector. */
35456
35457 static bool
35458 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35459 const vec_perm_indices &sel)
35460 {
35461 struct altivec_perm_insn {
35462 HOST_WIDE_INT mask;
35463 enum insn_code impl;
35464 unsigned char perm[16];
35465 };
35466 static const struct altivec_perm_insn patterns[] = {
35467 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35468 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35469 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35470 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35471 { OPTION_MASK_ALTIVEC,
35472 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35473 : CODE_FOR_altivec_vmrglb_direct),
35474 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35475 { OPTION_MASK_ALTIVEC,
35476 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35477 : CODE_FOR_altivec_vmrglh_direct),
35478 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35479 { OPTION_MASK_ALTIVEC,
35480 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35481 : CODE_FOR_altivec_vmrglw_direct),
35482 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35483 { OPTION_MASK_ALTIVEC,
35484 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35485 : CODE_FOR_altivec_vmrghb_direct),
35486 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35487 { OPTION_MASK_ALTIVEC,
35488 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35489 : CODE_FOR_altivec_vmrghh_direct),
35490 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35491 { OPTION_MASK_ALTIVEC,
35492 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35493 : CODE_FOR_altivec_vmrghw_direct),
35494 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35495 { OPTION_MASK_P8_VECTOR,
35496 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35497 : CODE_FOR_p8_vmrgow_v4sf_direct),
35498 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35499 { OPTION_MASK_P8_VECTOR,
35500 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35501 : CODE_FOR_p8_vmrgew_v4sf_direct),
35502 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35503 };
35504
35505 unsigned int i, j, elt, which;
35506 unsigned char perm[16];
35507 rtx x;
35508 bool one_vec;
35509
35510 /* Unpack the constant selector. */
35511 for (i = which = 0; i < 16; ++i)
35512 {
35513 elt = sel[i] & 31;
35514 which |= (elt < 16 ? 1 : 2);
35515 perm[i] = elt;
35516 }
35517
35518 /* Simplify the constant selector based on operands. */
35519 switch (which)
35520 {
35521 default:
35522 gcc_unreachable ();
35523
35524 case 3:
35525 one_vec = false;
35526 if (!rtx_equal_p (op0, op1))
35527 break;
35528 /* FALLTHRU */
35529
35530 case 2:
35531 for (i = 0; i < 16; ++i)
35532 perm[i] &= 15;
35533 op0 = op1;
35534 one_vec = true;
35535 break;
35536
35537 case 1:
35538 op1 = op0;
35539 one_vec = true;
35540 break;
35541 }
35542
35543 /* Look for splat patterns. */
35544 if (one_vec)
35545 {
35546 elt = perm[0];
35547
35548 for (i = 0; i < 16; ++i)
35549 if (perm[i] != elt)
35550 break;
35551 if (i == 16)
35552 {
35553 if (!BYTES_BIG_ENDIAN)
35554 elt = 15 - elt;
35555 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35556 return true;
35557 }
35558
35559 if (elt % 2 == 0)
35560 {
35561 for (i = 0; i < 16; i += 2)
35562 if (perm[i] != elt || perm[i + 1] != elt + 1)
35563 break;
35564 if (i == 16)
35565 {
35566 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35567 x = gen_reg_rtx (V8HImode);
35568 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35569 GEN_INT (field)));
35570 emit_move_insn (target, gen_lowpart (V16QImode, x));
35571 return true;
35572 }
35573 }
35574
35575 if (elt % 4 == 0)
35576 {
35577 for (i = 0; i < 16; i += 4)
35578 if (perm[i] != elt
35579 || perm[i + 1] != elt + 1
35580 || perm[i + 2] != elt + 2
35581 || perm[i + 3] != elt + 3)
35582 break;
35583 if (i == 16)
35584 {
35585 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35586 x = gen_reg_rtx (V4SImode);
35587 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35588 GEN_INT (field)));
35589 emit_move_insn (target, gen_lowpart (V16QImode, x));
35590 return true;
35591 }
35592 }
35593 }
35594
35595 /* Look for merge and pack patterns. */
35596 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35597 {
35598 bool swapped;
35599
35600 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35601 continue;
35602
35603 elt = patterns[j].perm[0];
35604 if (perm[0] == elt)
35605 swapped = false;
35606 else if (perm[0] == elt + 16)
35607 swapped = true;
35608 else
35609 continue;
35610 for (i = 1; i < 16; ++i)
35611 {
35612 elt = patterns[j].perm[i];
35613 if (swapped)
35614 elt = (elt >= 16 ? elt - 16 : elt + 16);
35615 else if (one_vec && elt >= 16)
35616 elt -= 16;
35617 if (perm[i] != elt)
35618 break;
35619 }
35620 if (i == 16)
35621 {
35622 enum insn_code icode = patterns[j].impl;
35623 machine_mode omode = insn_data[icode].operand[0].mode;
35624 machine_mode imode = insn_data[icode].operand[1].mode;
35625
35626 /* For little-endian, don't use vpkuwum and vpkuhum if the
35627 underlying vector type is not V4SI and V8HI, respectively.
35628 For example, using vpkuwum with a V8HI picks up the even
35629 halfwords (BE numbering) when the even halfwords (LE
35630 numbering) are what we need. */
35631 if (!BYTES_BIG_ENDIAN
35632 && icode == CODE_FOR_altivec_vpkuwum_direct
35633 && ((GET_CODE (op0) == REG
35634 && GET_MODE (op0) != V4SImode)
35635 || (GET_CODE (op0) == SUBREG
35636 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35637 continue;
35638 if (!BYTES_BIG_ENDIAN
35639 && icode == CODE_FOR_altivec_vpkuhum_direct
35640 && ((GET_CODE (op0) == REG
35641 && GET_MODE (op0) != V8HImode)
35642 || (GET_CODE (op0) == SUBREG
35643 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35644 continue;
35645
35646 /* For little-endian, the two input operands must be swapped
35647 (or swapped back) to ensure proper right-to-left numbering
35648 from 0 to 2N-1. */
35649 if (swapped ^ !BYTES_BIG_ENDIAN)
35650 std::swap (op0, op1);
35651 if (imode != V16QImode)
35652 {
35653 op0 = gen_lowpart (imode, op0);
35654 op1 = gen_lowpart (imode, op1);
35655 }
35656 if (omode == V16QImode)
35657 x = target;
35658 else
35659 x = gen_reg_rtx (omode);
35660 emit_insn (GEN_FCN (icode) (x, op0, op1));
35661 if (omode != V16QImode)
35662 emit_move_insn (target, gen_lowpart (V16QImode, x));
35663 return true;
35664 }
35665 }
35666
35667 if (!BYTES_BIG_ENDIAN)
35668 {
35669 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35670 return true;
35671 }
35672
35673 return false;
35674 }
35675
35676 /* Expand a VSX Permute Doubleword constant permutation.
35677 Return true if we match an efficient implementation. */
35678
35679 static bool
35680 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35681 unsigned char perm0, unsigned char perm1)
35682 {
35683 rtx x;
35684
35685 /* If both selectors come from the same operand, fold to single op. */
35686 if ((perm0 & 2) == (perm1 & 2))
35687 {
35688 if (perm0 & 2)
35689 op0 = op1;
35690 else
35691 op1 = op0;
35692 }
35693 /* If both operands are equal, fold to simpler permutation. */
35694 if (rtx_equal_p (op0, op1))
35695 {
35696 perm0 = perm0 & 1;
35697 perm1 = (perm1 & 1) + 2;
35698 }
35699 /* If the first selector comes from the second operand, swap. */
35700 else if (perm0 & 2)
35701 {
35702 if (perm1 & 2)
35703 return false;
35704 perm0 -= 2;
35705 perm1 += 2;
35706 std::swap (op0, op1);
35707 }
35708 /* If the second selector does not come from the second operand, fail. */
35709 else if ((perm1 & 2) == 0)
35710 return false;
35711
35712 /* Success! */
35713 if (target != NULL)
35714 {
35715 machine_mode vmode, dmode;
35716 rtvec v;
35717
35718 vmode = GET_MODE (target);
35719 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35720 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35721 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35722 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35723 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35724 emit_insn (gen_rtx_SET (target, x));
35725 }
35726 return true;
35727 }
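
/* For example, perm0 == 1 and perm1 == 2 selects element 1 of op0 and
   element 0 of op1 from the four-element concatenation of op0 and op1,
   which maps onto a single xxpermdi.  */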
35728
35729 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35730
35731 static bool
35732 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35733 rtx op1, const vec_perm_indices &sel)
35734 {
35735 bool testing_p = !target;
35736
35737 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35738 if (TARGET_ALTIVEC && testing_p)
35739 return true;
35740
35741 /* Check for ps_merge* or xxpermdi insns. */
35742 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35743 {
35744 if (testing_p)
35745 {
35746 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35747 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35748 }
35749 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35750 return true;
35751 }
35752
35753 if (TARGET_ALTIVEC)
35754 {
35755 /* Force the target-independent code to lower to V16QImode. */
35756 if (vmode != V16QImode)
35757 return false;
35758 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35759 return true;
35760 }
35761
35762 return false;
35763 }
35764
35765 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35766 OP0 and OP1 are the input vectors and TARGET is the output vector.
35767 PERM specifies the constant permutation vector. */
35768
35769 static void
35770 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35771 machine_mode vmode, const vec_perm_builder &perm)
35772 {
35773 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35774 if (x != target)
35775 emit_move_insn (target, x);
35776 }
35777
35778 /* Expand an extract even operation. */
35779
35780 void
35781 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35782 {
35783 machine_mode vmode = GET_MODE (target);
35784 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35785 vec_perm_builder perm (nelt, nelt, 1);
35786
35787 for (i = 0; i < nelt; i++)
35788 perm.quick_push (i * 2);
35789
35790 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35791 }
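
/* For example, for V4SImode this builds the selector {0, 2, 4, 6},
   i.e. the even elements of the double-wide concatenation of op0 and
   op1.  */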
35792
35793 /* Expand a vector interleave operation. */
35794
35795 void
35796 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35797 {
35798 machine_mode vmode = GET_MODE (target);
35799 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35800 vec_perm_builder perm (nelt, nelt, 1);
35801
35802 high = (highp ? 0 : nelt / 2);
35803 for (i = 0; i < nelt / 2; i++)
35804 {
35805 perm.quick_push (i + high);
35806 perm.quick_push (i + nelt + high);
35807 }
35808
35809 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35810 }
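
/* For example, for V4SImode with HIGHP this builds {0, 4, 1, 5},
   interleaving the first halves of op0 and op1; with !HIGHP it builds
   {2, 6, 3, 7} for the second halves.  */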
35811
35812 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
35813 void
35814 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35815 {
35816 HOST_WIDE_INT hwi_scale (scale);
35817 REAL_VALUE_TYPE r_pow;
35818 rtvec v = rtvec_alloc (2);
35819 rtx elt;
35820 rtx scale_vec = gen_reg_rtx (V2DFmode);
35821 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35822 elt = const_double_from_real_value (r_pow, DFmode);
35823 RTVEC_ELT (v, 0) = elt;
35824 RTVEC_ELT (v, 1) = elt;
35825 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35826 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35827 }
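
/* For example, SCALE == 3 splats 2.0**3 == 8.0 into both lanes, so
   tgt = {src[0] * 8.0, src[1] * 8.0}.  */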
35828
35829 /* Return an RTX representing where to find the function value of a
35830 function returning MODE. */
35831 static rtx
35832 rs6000_complex_function_value (machine_mode mode)
35833 {
35834 unsigned int regno;
35835 rtx r1, r2;
35836 machine_mode inner = GET_MODE_INNER (mode);
35837 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35838
35839 if (TARGET_FLOAT128_TYPE
35840 && (mode == KCmode
35841 || (mode == TCmode && TARGET_IEEEQUAD)))
35842 regno = ALTIVEC_ARG_RETURN;
35843
35844 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35845 regno = FP_ARG_RETURN;
35846
35847 else
35848 {
35849 regno = GP_ARG_RETURN;
35850
35851 /* 32-bit is OK since it'll go in r3/r4. */
35852 if (TARGET_32BIT && inner_bytes >= 4)
35853 return gen_rtx_REG (mode, regno);
35854 }
35855
35856 if (inner_bytes >= 8)
35857 return gen_rtx_REG (mode, regno);
35858
35859 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35860 const0_rtx);
35861 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35862 GEN_INT (inner_bytes));
35863 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35864 }
35865
35866 /* Return an rtx describing a return value of MODE as a PARALLEL
35867 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35868 stride REG_STRIDE. */
35869
35870 static rtx
35871 rs6000_parallel_return (machine_mode mode,
35872 int n_elts, machine_mode elt_mode,
35873 unsigned int regno, unsigned int reg_stride)
35874 {
35875 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35876
35877 int i;
35878 for (i = 0; i < n_elts; i++)
35879 {
35880 rtx r = gen_rtx_REG (elt_mode, regno);
35881 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35882 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35883 regno += reg_stride;
35884 }
35885
35886 return par;
35887 }
35888
35889 /* Target hook for TARGET_FUNCTION_VALUE.
35890
35891 An integer value is in r3 and a floating-point value is in fp1,
35892 unless -msoft-float. */
35893
35894 static rtx
35895 rs6000_function_value (const_tree valtype,
35896 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35897 bool outgoing ATTRIBUTE_UNUSED)
35898 {
35899 machine_mode mode;
35900 unsigned int regno;
35901 machine_mode elt_mode;
35902 int n_elts;
35903
35904 /* Special handling for structs in darwin64. */
35905 if (TARGET_MACHO
35906 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35907 {
35908 CUMULATIVE_ARGS valcum;
35909 rtx valret;
35910
35911 valcum.words = 0;
35912 valcum.fregno = FP_ARG_MIN_REG;
35913 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35914 /* Do a trial code generation as if this were going to be passed as
35915 an argument; if any part goes in memory, we return NULL. */
35916 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35917 if (valret)
35918 return valret;
35919 /* Otherwise fall through to standard ABI rules. */
35920 }
35921
35922 mode = TYPE_MODE (valtype);
35923
35924 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35925 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35926 {
35927 int first_reg, n_regs;
35928
35929 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35930 {
35931 /* _Decimal128 must use even/odd register pairs. */
35932 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35933 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35934 }
35935 else
35936 {
35937 first_reg = ALTIVEC_ARG_RETURN;
35938 n_regs = 1;
35939 }
35940
35941 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35942 }
35943
35944 /* Some return value types need to be split in -mpowerpc64, 32-bit ABI. */
35945 if (TARGET_32BIT && TARGET_POWERPC64)
35946 switch (mode)
35947 {
35948 default:
35949 break;
35950 case E_DImode:
35951 case E_SCmode:
35952 case E_DCmode:
35953 case E_TCmode:
35954 int count = GET_MODE_SIZE (mode) / 4;
35955 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35956 }
35957
35958 if ((INTEGRAL_TYPE_P (valtype)
35959 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35960 || POINTER_TYPE_P (valtype))
35961 mode = TARGET_32BIT ? SImode : DImode;
35962
35963 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35964 /* _Decimal128 must use an even/odd register pair. */
35965 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35966 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35967 && !FLOAT128_VECTOR_P (mode))
35968 regno = FP_ARG_RETURN;
35969 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35970 && targetm.calls.split_complex_arg)
35971 return rs6000_complex_function_value (mode);
35972 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35973 return register is used in both cases, and we won't see V2DImode/V2DFmode
35974 for pure altivec, combine the two cases. */
35975 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35976 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35977 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35978 regno = ALTIVEC_ARG_RETURN;
35979 else
35980 regno = GP_ARG_RETURN;
35981
35982 return gen_rtx_REG (mode, regno);
35983 }
35984
35985 /* Define how to find the value returned by a library function
35986 assuming the value has mode MODE. */
35987 rtx
35988 rs6000_libcall_value (machine_mode mode)
35989 {
35990 unsigned int regno;
35991
35992 /* Long long return values need to be split in -mpowerpc64, 32-bit ABI. */
35993 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35994 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35995
35996 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35997 /* _Decimal128 must use an even/odd register pair. */
35998 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35999 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
36000 regno = FP_ARG_RETURN;
36001 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36002 return register is used in both cases, and we won't see V2DImode/V2DFmode
36003 for pure altivec, combine the two cases. */
36004 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36005 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36006 regno = ALTIVEC_ARG_RETURN;
36007 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36008 return rs6000_complex_function_value (mode);
36009 else
36010 regno = GP_ARG_RETURN;
36011
36012 return gen_rtx_REG (mode, regno);
36013 }
36014
36015 /* Compute register pressure classes. We implement the target hook to avoid
36016 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
36017 lead to incorrect estimates of the number of available registers and therefore
36018 increased register pressure/spill. */
36019 static int
36020 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36021 {
36022 int n;
36023
36024 n = 0;
36025 pressure_classes[n++] = GENERAL_REGS;
36026 if (TARGET_VSX)
36027 pressure_classes[n++] = VSX_REGS;
36028 else
36029 {
36030 if (TARGET_ALTIVEC)
36031 pressure_classes[n++] = ALTIVEC_REGS;
36032 if (TARGET_HARD_FLOAT)
36033 pressure_classes[n++] = FLOAT_REGS;
36034 }
36035 pressure_classes[n++] = CR_REGS;
36036 pressure_classes[n++] = SPECIAL_REGS;
36037
36038 return n;
36039 }
36040
36041 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36042 Frame pointer elimination is automatically handled.
36043
36044 For the RS/6000, if frame pointer elimination is being done, we would like
36045 to convert ap into fp, not sp.
36046
36047 We need r30 if -mminimal-toc was specified, and there are constant pool
36048 references. */
36049
36050 static bool
36051 rs6000_can_eliminate (const int from, const int to)
36052 {
36053 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36054 ? ! frame_pointer_needed
36055 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36056 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36057 || constant_pool_empty_p ()
36058 : true);
36059 }
36060
36061 /* Define the offset between two registers, FROM to be eliminated and its
36062 replacement TO, at the start of a routine. */
36063 HOST_WIDE_INT
36064 rs6000_initial_elimination_offset (int from, int to)
36065 {
36066 rs6000_stack_t *info = rs6000_stack_info ();
36067 HOST_WIDE_INT offset;
36068
36069 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36070 offset = info->push_p ? 0 : -info->total_size;
36071 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36072 {
36073 offset = info->push_p ? 0 : -info->total_size;
36074 if (FRAME_GROWS_DOWNWARD)
36075 offset += info->fixed_size + info->vars_size + info->parm_size;
36076 }
36077 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36078 offset = FRAME_GROWS_DOWNWARD
36079 ? info->fixed_size + info->vars_size + info->parm_size
36080 : 0;
36081 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36082 offset = info->total_size;
36083 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36084 offset = info->push_p ? info->total_size : 0;
36085 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36086 offset = 0;
36087 else
36088 gcc_unreachable ();
36089
36090 return offset;
36091 }
36092
36093 /* Fill in sizes of registers used by unwinder. */
36094
36095 static void
36096 rs6000_init_dwarf_reg_sizes_extra (tree address)
36097 {
36098 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36099 {
36100 int i;
36101 machine_mode mode = TYPE_MODE (char_type_node);
36102 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36103 rtx mem = gen_rtx_MEM (BLKmode, addr);
36104 rtx value = gen_int_mode (16, mode);
36105
36106 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36107 The unwinder still needs to know the size of Altivec registers. */
36108
36109 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36110 {
36111 int column = DWARF_REG_TO_UNWIND_COLUMN
36112 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36113 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36114
36115 emit_move_insn (adjust_address (mem, mode, offset), value);
36116 }
36117 }
36118 }
36119
36120 /* Map internal gcc register numbers to debug format register numbers.
36121 FORMAT specifies the type of debug register number to use:
36122 0 -- debug information, except for frame-related sections
36123 1 -- DWARF .debug_frame section
36124 2 -- DWARF .eh_frame section */
36125
36126 unsigned int
36127 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36128 {
36129 /* Except for the above, we use the internal number for non-DWARF
36130 debug information, and also for .eh_frame. */
36131 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36132 return regno;
36133
36134 /* On some platforms, we use the standard DWARF register
36135 numbering for .debug_info and .debug_frame. */
36136 #ifdef RS6000_USE_DWARF_NUMBERING
36137 if (regno <= 63)
36138 return regno;
36139 if (regno == LR_REGNO)
36140 return 108;
36141 if (regno == CTR_REGNO)
36142 return 109;
36143 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36144 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36145 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36146 to the DWARF reg for CR. */
36147 if (format == 1 && regno == CR2_REGNO)
36148 return 64;
36149 if (CR_REGNO_P (regno))
36150 return regno - CR0_REGNO + 86;
36151 if (regno == CA_REGNO)
36152 return 101; /* XER */
36153 if (ALTIVEC_REGNO_P (regno))
36154 return regno - FIRST_ALTIVEC_REGNO + 1124;
36155 if (regno == VRSAVE_REGNO)
36156 return 356;
36157 if (regno == VSCR_REGNO)
36158 return 67;
36159 #endif
36160 return regno;
36161 }
36162
36163 /* Target hook for eh_return_filter_mode. */
36164 static scalar_int_mode
36165 rs6000_eh_return_filter_mode (void)
36166 {
36167 return TARGET_32BIT ? SImode : word_mode;
36168 }
36169
36170 /* Target hook for translate_mode_attribute. */
36171 static machine_mode
36172 rs6000_translate_mode_attribute (machine_mode mode)
36173 {
36174 if ((FLOAT128_IEEE_P (mode)
36175 && ieee128_float_type_node == long_double_type_node)
36176 || (FLOAT128_IBM_P (mode)
36177 && ibm128_float_type_node == long_double_type_node))
36178 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36179 return mode;
36180 }
36181
36182 /* Target hook for scalar_mode_supported_p. */
36183 static bool
36184 rs6000_scalar_mode_supported_p (scalar_mode mode)
36185 {
36186 /* -m32 does not support TImode. This is the default, from
36187 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36188 same ABI as for -m32. But default_scalar_mode_supported_p allows
36189 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36190 for -mpowerpc64. */
36191 if (TARGET_32BIT && mode == TImode)
36192 return false;
36193
36194 if (DECIMAL_FLOAT_MODE_P (mode))
36195 return default_decimal_float_supported_p ();
36196 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36197 return true;
36198 else
36199 return default_scalar_mode_supported_p (mode);
36200 }
36201
36202 /* Target hook for vector_mode_supported_p. */
36203 static bool
36204 rs6000_vector_mode_supported_p (machine_mode mode)
36205 {
36206 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36207 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36208 double-double. */
36209 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36210 return true;
36211
36212 else
36213 return false;
36214 }
36215
36216 /* Target hook for floatn_mode. */
36217 static opt_scalar_float_mode
36218 rs6000_floatn_mode (int n, bool extended)
36219 {
36220 if (extended)
36221 {
36222 switch (n)
36223 {
36224 case 32:
36225 return DFmode;
36226
36227 case 64:
36228 if (TARGET_FLOAT128_TYPE)
36229 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36230 else
36231 return opt_scalar_float_mode ();
36232
36233 case 128:
36234 return opt_scalar_float_mode ();
36235
36236 default:
36237 /* Those are the only valid _FloatNx types. */
36238 gcc_unreachable ();
36239 }
36240 }
36241 else
36242 {
36243 switch (n)
36244 {
36245 case 32:
36246 return SFmode;
36247
36248 case 64:
36249 return DFmode;
36250
36251 case 128:
36252 if (TARGET_FLOAT128_TYPE)
36253 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36254 else
36255 return opt_scalar_float_mode ();
36256
36257 default:
36258 return opt_scalar_float_mode ();
36259 }
36260 }
36261
36262 }
36263
36264 /* Target hook for c_mode_for_suffix. */
36265 static machine_mode
36266 rs6000_c_mode_for_suffix (char suffix)
36267 {
36268 if (TARGET_FLOAT128_TYPE)
36269 {
36270 if (suffix == 'q' || suffix == 'Q')
36271 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36272
36273 /* At the moment, we are not defining a suffix for IBM extended double.
36274 If/when the default for -mabi=ieeelongdouble is changed, and we want
36275 to support __ibm128 constants in legacy library code, we may need to
36276 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36277 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36278 __float80 constants. */
36279 }
36280
36281 return VOIDmode;
36282 }
36283
36284 /* Target hook for invalid_arg_for_unprototyped_fn. */
36285 static const char *
36286 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36287 {
36288 return (!rs6000_darwin64_abi
36289 && typelist == 0
36290 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36291 && (funcdecl == NULL_TREE
36292 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36293 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36294 ? N_("AltiVec argument passed to unprototyped function")
36295 : NULL;
36296 }
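
/* For illustration, the diagnostic above fires for a sketch like this
   ("bar" is a hypothetical unprototyped function):

     extern int bar ();          // no prototype
     vector int v = { 0, 1, 2, 3 };
     int x = bar (v);            // AltiVec argument warning

   since without a prototype there is no way to know the callee expects
   an AltiVec register argument.  */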
36297
36298 /* For TARGET_SECURE_PLT 32-bit PIC code we can avoid the PIC register
36299 setup by using the __stack_chk_fail_local hidden function instead of
36300 calling __stack_chk_fail directly. Otherwise it is better to call
36301 __stack_chk_fail directly. */
36302
36303 static tree ATTRIBUTE_UNUSED
36304 rs6000_stack_protect_fail (void)
36305 {
36306 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36307 ? default_hidden_stack_protect_fail ()
36308 : default_external_stack_protect_fail ();
36309 }
36310
36311 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36312
36313 #if TARGET_ELF
36314 static unsigned HOST_WIDE_INT
36315 rs6000_asan_shadow_offset (void)
36316 {
36317 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36318 }
36319 #endif
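
/* For illustration (a sketch of the standard ASAN mapping): libasan
   translates an application address A to a shadow address as
   (A >> 3) + offset, so the values above give (A >> 3) + (1ULL << 41)
   for 64-bit code and (A >> 3) + (1 << 29) for 32-bit code.  */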
36320 \f
36321 /* Mask options that we want to support inside of attribute((target)) and
36322 #pragma GCC target operations. Note, we do not include things like
36323 64/32-bit, endianness, hard/soft floating point, etc. that would have
36324 different calling sequences. */
36325
36326 struct rs6000_opt_mask {
36327 const char *name; /* option name */
36328 HOST_WIDE_INT mask; /* mask to set */
36329 bool invert; /* invert sense of mask */
36330 bool valid_target; /* option is a target option */
36331 };
36332
36333 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36334 {
36335 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36336 { "cmpb", OPTION_MASK_CMPB, false, true },
36337 { "crypto", OPTION_MASK_CRYPTO, false, true },
36338 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36339 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36340 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36341 false, true },
36342 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36343 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36344 { "fprnd", OPTION_MASK_FPRND, false, true },
36345 { "hard-dfp", OPTION_MASK_DFP, false, true },
36346 { "htm", OPTION_MASK_HTM, false, true },
36347 { "isel", OPTION_MASK_ISEL, false, true },
36348 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36349 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36350 { "modulo", OPTION_MASK_MODULO, false, true },
36351 { "mulhw", OPTION_MASK_MULHW, false, true },
36352 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36353 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36354 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36355 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36356 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36357 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36358 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36359 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36360 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36361 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36362 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36363 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36364 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36365 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36366 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36367 { "string", 0, false, true },
36368 { "update", OPTION_MASK_NO_UPDATE, true , true },
36369 { "vsx", OPTION_MASK_VSX, false, true },
36370 #ifdef OPTION_MASK_64BIT
36371 #if TARGET_AIX_OS
36372 { "aix64", OPTION_MASK_64BIT, false, false },
36373 { "aix32", OPTION_MASK_64BIT, true, false },
36374 #else
36375 { "64", OPTION_MASK_64BIT, false, false },
36376 { "32", OPTION_MASK_64BIT, true, false },
36377 #endif
36378 #endif
36379 #ifdef OPTION_MASK_EABI
36380 { "eabi", OPTION_MASK_EABI, false, false },
36381 #endif
36382 #ifdef OPTION_MASK_LITTLE_ENDIAN
36383 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36384 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36385 #endif
36386 #ifdef OPTION_MASK_RELOCATABLE
36387 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36388 #endif
36389 #ifdef OPTION_MASK_STRICT_ALIGN
36390 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36391 #endif
36392 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36393 { "string", 0, false, false },
36394 };
36395
36396 /* Builtin mask mapping for printing the flags. */
36397 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36398 {
36399 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36400 { "vsx", RS6000_BTM_VSX, false, false },
36401 { "fre", RS6000_BTM_FRE, false, false },
36402 { "fres", RS6000_BTM_FRES, false, false },
36403 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36404 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36405 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36406 { "cell", RS6000_BTM_CELL, false, false },
36407 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36408 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36409 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36410 { "crypto", RS6000_BTM_CRYPTO, false, false },
36411 { "htm", RS6000_BTM_HTM, false, false },
36412 { "hard-dfp", RS6000_BTM_DFP, false, false },
36413 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36414 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36415 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36416 { "float128", RS6000_BTM_FLOAT128, false, false },
36417 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36418 };
36419
36420 /* Option variables that we want to support inside attribute((target)) and
36421 #pragma GCC target operations. */
36422
36423 struct rs6000_opt_var {
36424 const char *name; /* option name */
36425 size_t global_offset; /* offset of the option in global_options. */
36426 size_t target_offset; /* offset of the option in target options. */
36427 };
36428
36429 static struct rs6000_opt_var const rs6000_opt_vars[] =
36430 {
36431 { "friz",
36432 offsetof (struct gcc_options, x_TARGET_FRIZ),
36433 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36434 { "avoid-indexed-addresses",
36435 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36436 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36437 { "longcall",
36438 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36439 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36440 { "optimize-swaps",
36441 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36442 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36443 { "allow-movmisalign",
36444 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36445 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36446 { "sched-groups",
36447 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36448 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36449 { "always-hint",
36450 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36451 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36452 { "align-branch-targets",
36453 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36454 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36455 { "tls-markers",
36456 offsetof (struct gcc_options, x_tls_markers),
36457 offsetof (struct cl_target_option, x_tls_markers), },
36458 { "sched-prolog",
36459 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36460 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36461 { "sched-epilog",
36462 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36463 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36464 { "speculate-indirect-jumps",
36465 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36466 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36467 };
36468
36469 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36470 parsing. Return true if there were no errors. */
36471
36472 static bool
36473 rs6000_inner_target_options (tree args, bool attr_p)
36474 {
36475 bool ret = true;
36476
36477 if (args == NULL_TREE)
36478 ;
36479
36480 else if (TREE_CODE (args) == STRING_CST)
36481 {
36482 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36483 char *q;
36484
36485 while ((q = strtok (p, ",")) != NULL)
36486 {
36487 bool error_p = false;
36488 bool not_valid_p = false;
36489 const char *cpu_opt = NULL;
36490
36491 p = NULL;
36492 if (strncmp (q, "cpu=", 4) == 0)
36493 {
36494 int cpu_index = rs6000_cpu_name_lookup (q+4);
36495 if (cpu_index >= 0)
36496 rs6000_cpu_index = cpu_index;
36497 else
36498 {
36499 error_p = true;
36500 cpu_opt = q+4;
36501 }
36502 }
36503 else if (strncmp (q, "tune=", 5) == 0)
36504 {
36505 int tune_index = rs6000_cpu_name_lookup (q+5);
36506 if (tune_index >= 0)
36507 rs6000_tune_index = tune_index;
36508 else
36509 {
36510 error_p = true;
36511 cpu_opt = q+5;
36512 }
36513 }
36514 else
36515 {
36516 size_t i;
36517 bool invert = false;
36518 char *r = q;
36519
36520 error_p = true;
36521 if (strncmp (r, "no-", 3) == 0)
36522 {
36523 invert = true;
36524 r += 3;
36525 }
36526
36527 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36528 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36529 {
36530 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36531
36532 if (!rs6000_opt_masks[i].valid_target)
36533 not_valid_p = true;
36534 else
36535 {
36536 error_p = false;
36537 rs6000_isa_flags_explicit |= mask;
36538
36539 /* VSX needs altivec, so -mvsx automagically sets
36540 altivec and disables -mavoid-indexed-addresses. */
36541 if (!invert)
36542 {
36543 if (mask == OPTION_MASK_VSX)
36544 {
36545 mask |= OPTION_MASK_ALTIVEC;
36546 TARGET_AVOID_XFORM = 0;
36547 }
36548 }
36549
36550 if (rs6000_opt_masks[i].invert)
36551 invert = !invert;
36552
36553 if (invert)
36554 rs6000_isa_flags &= ~mask;
36555 else
36556 rs6000_isa_flags |= mask;
36557 }
36558 break;
36559 }
36560
36561 if (error_p && !not_valid_p)
36562 {
36563 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36564 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36565 {
36566 size_t j = rs6000_opt_vars[i].global_offset;
36567 *((int *) ((char *)&global_options + j)) = !invert;
36568 error_p = false;
36569 not_valid_p = false;
36570 break;
36571 }
36572 }
36573 }
36574
36575 if (error_p)
36576 {
36577 const char *eprefix, *esuffix;
36578
36579 ret = false;
36580 if (attr_p)
36581 {
36582 eprefix = "__attribute__((__target__(";
36583 esuffix = ")))";
36584 }
36585 else
36586 {
36587 eprefix = "#pragma GCC target ";
36588 esuffix = "";
36589 }
36590
36591 if (cpu_opt)
36592 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36593 q, esuffix);
36594 else if (not_valid_p)
36595 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36596 else
36597 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36598 }
36599 }
36600 }
36601
36602 else if (TREE_CODE (args) == TREE_LIST)
36603 {
36604 do
36605 {
36606 tree value = TREE_VALUE (args);
36607 if (value)
36608 {
36609 bool ret2 = rs6000_inner_target_options (value, attr_p);
36610 if (!ret2)
36611 ret = false;
36612 }
36613 args = TREE_CHAIN (args);
36614 }
36615 while (args != NULL_TREE);
36616 }
36617
36618 else
36619 {
36620 error ("attribute %<target%> argument not a string");
36621 return false;
36622 }
36623
36624 return ret;
36625 }
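
/* For illustration, the strings parsed above come from constructs like
   these (a usage sketch; "foo" is a hypothetical function):

     __attribute__ ((__target__ ("cpu=power9,no-vsx")))
     void foo (void);

     #pragma GCC target ("altivec,avoid-indexed-addresses")

   Each comma-separated token is "cpu=<name>", "tune=<name>", a mask
   option from rs6000_opt_masks (optionally prefixed with "no-"), or a
   variable option from rs6000_opt_vars.  */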
36626
36627 /* Print out the target options as a list for -mdebug=target. */
36628
36629 static void
36630 rs6000_debug_target_options (tree args, const char *prefix)
36631 {
36632 if (args == NULL_TREE)
36633 fprintf (stderr, "%s<NULL>", prefix);
36634
36635 else if (TREE_CODE (args) == STRING_CST)
36636 {
36637 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36638 char *q;
36639
36640 while ((q = strtok (p, ",")) != NULL)
36641 {
36642 p = NULL;
36643 fprintf (stderr, "%s\"%s\"", prefix, q);
36644 prefix = ", ";
36645 }
36646 }
36647
36648 else if (TREE_CODE (args) == TREE_LIST)
36649 {
36650 do
36651 {
36652 tree value = TREE_VALUE (args);
36653 if (value)
36654 {
36655 rs6000_debug_target_options (value, prefix);
36656 prefix = ", ";
36657 }
36658 args = TREE_CHAIN (args);
36659 }
36660 while (args != NULL_TREE);
36661 }
36662
36663 else
36664 gcc_unreachable ();
36665
36666 return;
36667 }
36668
36669 \f
36670 /* Hook to validate attribute((target("..."))). */
36671
36672 static bool
36673 rs6000_valid_attribute_p (tree fndecl,
36674 tree ARG_UNUSED (name),
36675 tree args,
36676 int flags)
36677 {
36678 struct cl_target_option cur_target;
36679 bool ret;
36680 tree old_optimize;
36681 tree new_target, new_optimize;
36682 tree func_optimize;
36683
36684 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36685
36686 if (TARGET_DEBUG_TARGET)
36687 {
36688 tree tname = DECL_NAME (fndecl);
36689 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36690 if (tname)
36691 fprintf (stderr, "function: %.*s\n",
36692 (int) IDENTIFIER_LENGTH (tname),
36693 IDENTIFIER_POINTER (tname));
36694 else
36695 fprintf (stderr, "function: unknown\n");
36696
36697 fprintf (stderr, "args:");
36698 rs6000_debug_target_options (args, " ");
36699 fprintf (stderr, "\n");
36700
36701 if (flags)
36702 fprintf (stderr, "flags: 0x%x\n", flags);
36703
36704 fprintf (stderr, "--------------------\n");
36705 }
36706
36707 /* attribute((target("default"))) does nothing, beyond
36708 affecting multi-versioning. */
36709 if (TREE_VALUE (args)
36710 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36711 && TREE_CHAIN (args) == NULL_TREE
36712 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36713 return true;
36714
36715 old_optimize = build_optimization_node (&global_options);
36716 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36717
36718 /* If the function changed the optimization levels as well as setting target
36719 options, start with the optimizations specified. */
36720 if (func_optimize && func_optimize != old_optimize)
36721 cl_optimization_restore (&global_options,
36722 TREE_OPTIMIZATION (func_optimize));
36723
36724 /* The target attributes may also change some optimization flags, so update
36725 the optimization options if necessary. */
36726 cl_target_option_save (&cur_target, &global_options);
36727 rs6000_cpu_index = rs6000_tune_index = -1;
36728 ret = rs6000_inner_target_options (args, true);
36729
36730 /* Set up any additional state. */
36731 if (ret)
36732 {
36733 ret = rs6000_option_override_internal (false);
36734 new_target = build_target_option_node (&global_options);
36735 }
36736 else
36737 new_target = NULL;
36738
36739 new_optimize = build_optimization_node (&global_options);
36740
36741 if (!new_target)
36742 ret = false;
36743
36744 else if (fndecl)
36745 {
36746 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36747
36748 if (old_optimize != new_optimize)
36749 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36750 }
36751
36752 cl_target_option_restore (&global_options, &cur_target);
36753
36754 if (old_optimize != new_optimize)
36755 cl_optimization_restore (&global_options,
36756 TREE_OPTIMIZATION (old_optimize));
36757
36758 return ret;
36759 }
36760
36761 \f
36762 /* Hook to validate the current #pragma GCC target and set the state, and
36763 update the macros based on what was changed. If ARGS is NULL, then
36764 POP_TARGET is used to reset the options. */
36765
36766 bool
36767 rs6000_pragma_target_parse (tree args, tree pop_target)
36768 {
36769 tree prev_tree = build_target_option_node (&global_options);
36770 tree cur_tree;
36771 struct cl_target_option *prev_opt, *cur_opt;
36772 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36773 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36774
36775 if (TARGET_DEBUG_TARGET)
36776 {
36777 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36778 fprintf (stderr, "args:");
36779 rs6000_debug_target_options (args, " ");
36780 fprintf (stderr, "\n");
36781
36782 if (pop_target)
36783 {
36784 fprintf (stderr, "pop_target:\n");
36785 debug_tree (pop_target);
36786 }
36787 else
36788 fprintf (stderr, "pop_target: <NULL>\n");
36789
36790 fprintf (stderr, "--------------------\n");
36791 }
36792
36793 if (! args)
36794 {
36795 cur_tree = ((pop_target)
36796 ? pop_target
36797 : target_option_default_node);
36798 cl_target_option_restore (&global_options,
36799 TREE_TARGET_OPTION (cur_tree));
36800 }
36801 else
36802 {
36803 rs6000_cpu_index = rs6000_tune_index = -1;
36804 if (!rs6000_inner_target_options (args, false)
36805 || !rs6000_option_override_internal (false)
36806 || (cur_tree = build_target_option_node (&global_options))
36807 == NULL_TREE)
36808 {
36809 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36810 fprintf (stderr, "invalid pragma\n");
36811
36812 return false;
36813 }
36814 }
36815
36816 target_option_current_node = cur_tree;
36817 rs6000_activate_target_options (target_option_current_node);
36818
36819 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36820 change the macros that are defined. */
36821 if (rs6000_target_modify_macros_ptr)
36822 {
36823 prev_opt = TREE_TARGET_OPTION (prev_tree);
36824 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36825 prev_flags = prev_opt->x_rs6000_isa_flags;
36826
36827 cur_opt = TREE_TARGET_OPTION (cur_tree);
36828 cur_flags = cur_opt->x_rs6000_isa_flags;
36829 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36830
36831 diff_bumask = (prev_bumask ^ cur_bumask);
36832 diff_flags = (prev_flags ^ cur_flags);
36833
36834 if ((diff_flags != 0) || (diff_bumask != 0))
36835 {
36836 /* Delete old macros. */
36837 rs6000_target_modify_macros_ptr (false,
36838 prev_flags & diff_flags,
36839 prev_bumask & diff_bumask);
36840
36841 /* Define new macros. */
36842 rs6000_target_modify_macros_ptr (true,
36843 cur_flags & diff_flags,
36844 cur_bumask & diff_bumask);
36845 }
36846 }
36847
36848 return true;
36849 }
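
/* For illustration, a typical sequence this hook handles (a usage
   sketch):

     #pragma GCC push_options
     #pragma GCC target ("vsx")
       ... code compiled with VSX enabled ...
     #pragma GCC pop_options

   On the pop, ARGS is NULL and POP_TARGET carries the saved options
   node; predefined macros such as __VSX__ are re-synchronized through
   the modify-macros callback above.  */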
36850
36851 \f
36852 /* Remember the last target of rs6000_set_current_function. */
36853 static GTY(()) tree rs6000_previous_fndecl;
36854
36855 /* Restore target's globals from NEW_TREE and invalidate the
36856 rs6000_previous_fndecl cache. */
36857
36858 void
36859 rs6000_activate_target_options (tree new_tree)
36860 {
36861 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36862 if (TREE_TARGET_GLOBALS (new_tree))
36863 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36864 else if (new_tree == target_option_default_node)
36865 restore_target_globals (&default_target_globals);
36866 else
36867 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36868 rs6000_previous_fndecl = NULL_TREE;
36869 }
36870
36871 /* Establish appropriate back-end context for processing the function
36872 FNDECL. The argument might be NULL to indicate processing at top
36873 level, outside of any function scope. */
36874 static void
36875 rs6000_set_current_function (tree fndecl)
36876 {
36877 if (TARGET_DEBUG_TARGET)
36878 {
36879 fprintf (stderr, "\n==================== rs6000_set_current_function");
36880
36881 if (fndecl)
36882 fprintf (stderr, ", fndecl %s (%p)",
36883 (DECL_NAME (fndecl)
36884 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36885 : "<unknown>"), (void *)fndecl);
36886
36887 if (rs6000_previous_fndecl)
36888 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36889
36890 fprintf (stderr, "\n");
36891 }
36892
36893 /* Only change the context if the function changes. This hook is called
36894 several times in the course of compiling a function, and we don't want to
36895 slow things down too much or call target_reinit when it isn't safe. */
36896 if (fndecl == rs6000_previous_fndecl)
36897 return;
36898
36899 tree old_tree;
36900 if (rs6000_previous_fndecl == NULL_TREE)
36901 old_tree = target_option_current_node;
36902 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36903 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36904 else
36905 old_tree = target_option_default_node;
36906
36907 tree new_tree;
36908 if (fndecl == NULL_TREE)
36909 {
36910 if (old_tree != target_option_current_node)
36911 new_tree = target_option_current_node;
36912 else
36913 new_tree = NULL_TREE;
36914 }
36915 else
36916 {
36917 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36918 if (new_tree == NULL_TREE)
36919 new_tree = target_option_default_node;
36920 }
36921
36922 if (TARGET_DEBUG_TARGET)
36923 {
36924 if (new_tree)
36925 {
36926 fprintf (stderr, "\nnew fndecl target specific options:\n");
36927 debug_tree (new_tree);
36928 }
36929
36930 if (old_tree)
36931 {
36932 fprintf (stderr, "\nold fndecl target specific options:\n");
36933 debug_tree (old_tree);
36934 }
36935
36936 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36937 fprintf (stderr, "--------------------\n");
36938 }
36939
36940 if (new_tree && old_tree != new_tree)
36941 rs6000_activate_target_options (new_tree);
36942
36943 if (fndecl)
36944 rs6000_previous_fndecl = fndecl;
36945 }
36946
36947 \f
36948 /* Save the current options */
36949
36950 static void
36951 rs6000_function_specific_save (struct cl_target_option *ptr,
36952 struct gcc_options *opts)
36953 {
36954 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36955 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36956 }
36957
36958 /* Restore the current options */
36959
36960 static void
36961 rs6000_function_specific_restore (struct gcc_options *opts,
36962 struct cl_target_option *ptr)
36964 {
36965 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36966 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36967 (void) rs6000_option_override_internal (false);
36968 }
36969
36970 /* Print the current options */
36971
36972 static void
36973 rs6000_function_specific_print (FILE *file, int indent,
36974 struct cl_target_option *ptr)
36975 {
36976 rs6000_print_isa_options (file, indent, "Isa options set",
36977 ptr->x_rs6000_isa_flags);
36978
36979 rs6000_print_isa_options (file, indent, "Isa options explicit",
36980 ptr->x_rs6000_isa_flags_explicit);
36981 }
36982
36983 /* Helper function to print the current isa or misc options on a line. */
36984
36985 static void
36986 rs6000_print_options_internal (FILE *file,
36987 int indent,
36988 const char *string,
36989 HOST_WIDE_INT flags,
36990 const char *prefix,
36991 const struct rs6000_opt_mask *opts,
36992 size_t num_elements)
36993 {
36994 size_t i;
36995 size_t start_column = 0;
36996 size_t cur_column;
36997 size_t max_column = 120;
36998 size_t prefix_len = strlen (prefix);
36999 size_t comma_len = 0;
37000 const char *comma = "";
37001
37002 if (indent)
37003 start_column += fprintf (file, "%*s", indent, "");
37004
37005 if (!flags)
37006 {
37007 fprintf (file, DEBUG_FMT_S, string, "<none>");
37008 return;
37009 }
37010
37011 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
37012
37013 /* Print the various mask options. */
37014 cur_column = start_column;
37015 for (i = 0; i < num_elements; i++)
37016 {
37017 bool invert = opts[i].invert;
37018 const char *name = opts[i].name;
37019 const char *no_str = "";
37020 HOST_WIDE_INT mask = opts[i].mask;
37021 size_t len = comma_len + prefix_len + strlen (name);
37022
37023 if (!invert)
37024 {
37025 if ((flags & mask) == 0)
37026 {
37027 no_str = "no-";
37028 len += sizeof ("no-") - 1;
37029 }
37030
37031 flags &= ~mask;
37032 }
37033
37034 else
37035 {
37036 if ((flags & mask) != 0)
37037 {
37038 no_str = "no-";
37039 len += sizeof ("no-") - 1;
37040 }
37041
37042 flags |= mask;
37043 }
37044
37045 cur_column += len;
37046 if (cur_column > max_column)
37047 {
37048 fprintf (file, ", \\\n%*s", (int)start_column, "");
37049 cur_column = start_column + len;
37050 comma = "";
37051 }
37052
37053 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37054 comma = ", ";
37055 comma_len = sizeof (", ") - 1;
37056 }
37057
37058 fputs ("\n", file);
37059 }
37060
37061 /* Helper function to print the current isa options on a line. */
37062
37063 static void
37064 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37065 HOST_WIDE_INT flags)
37066 {
37067 rs6000_print_options_internal (file, indent, string, flags, "-m",
37068 &rs6000_opt_masks[0],
37069 ARRAY_SIZE (rs6000_opt_masks));
37070 }
37071
37072 static void
37073 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37074 HOST_WIDE_INT flags)
37075 {
37076 rs6000_print_options_internal (file, indent, string, flags, "",
37077 &rs6000_builtin_mask_names[0],
37078 ARRAY_SIZE (rs6000_builtin_mask_names));
37079 }
37080
37081 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37082 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37083 -mupper-regs-df, etc.).
37084
37085 If the user used -mno-power8-vector, we need to turn off all of the implicit
37086 ISA 2.07 and 3.0 options that relate to the vector unit.
37087
37088 If the user used -mno-power9-vector, we need to turn off all of the implicit
37089 ISA 3.0 options that relate to the vector unit.
37090
37091 This function does not handle explicit options such as the user specifying
37092 -mdirect-move. These are handled in rs6000_option_override_internal, and
37093 the appropriate error is given if needed.
37094
37095 We return a mask of all of the implicit options that should not be enabled
37096 by default. */
37097
37098 static HOST_WIDE_INT
37099 rs6000_disable_incompatible_switches (void)
37100 {
37101 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37102 size_t i, j;
37103
37104 static const struct {
37105 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37106 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37107 const char *const name; /* name of the switch. */
37108 } flags[] = {
37109 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37110 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37111 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37112 };
37113
37114 for (i = 0; i < ARRAY_SIZE (flags); i++)
37115 {
37116 HOST_WIDE_INT no_flag = flags[i].no_flag;
37117
37118 if ((rs6000_isa_flags & no_flag) == 0
37119 && (rs6000_isa_flags_explicit & no_flag) != 0)
37120 {
37121 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37122 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37123 & rs6000_isa_flags
37124 & dep_flags);
37125
37126 if (set_flags)
37127 {
37128 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37129 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37130 {
37131 set_flags &= ~rs6000_opt_masks[j].mask;
37132 error ("%<-mno-%s%> turns off %<-m%s%>",
37133 flags[i].name,
37134 rs6000_opt_masks[j].name);
37135 }
37136
37137 gcc_assert (!set_flags);
37138 }
37139
37140 rs6000_isa_flags &= ~dep_flags;
37141 ignore_masks |= no_flag | dep_flags;
37142 }
37143 }
37144
37145 return ignore_masks;
37146 }
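
/* For illustration (a command-line sketch): compiling with
   "-mpower9-vector -mno-vsx" reaches the loop above and reports
   "-mno-vsx turns off -mpower9-vector", then clears every implicit
   VSX-dependent mask so the defaults cannot quietly re-enable them.  */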
37147
37148 \f
37149 /* Helper function for printing the function name when debugging. */
37150
37151 static const char *
37152 get_decl_name (tree fn)
37153 {
37154 tree name;
37155
37156 if (!fn)
37157 return "<null>";
37158
37159 name = DECL_NAME (fn);
37160 if (!name)
37161 return "<no-name>";
37162
37163 return IDENTIFIER_POINTER (name);
37164 }
37165
37166 /* Return the clone id of the target we are compiling code for in a target
37167 clone. The clone id ranges from 0 (default) to CLONE_MAX-1 and gives the
37168 position in the priority list for the target clones (ordered from lowest
37169 to highest priority). */
37170
37171 static int
37172 rs6000_clone_priority (tree fndecl)
37173 {
37174 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37175 HOST_WIDE_INT isa_masks;
37176 int ret = CLONE_DEFAULT;
37177 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37178 const char *attrs_str = NULL;
37179
37180 attrs = TREE_VALUE (TREE_VALUE (attrs));
37181 attrs_str = TREE_STRING_POINTER (attrs);
37182
37183 /* Return priority zero for the default function. Otherwise return the
37184 highest clone priority whose ISA mask the function's flags satisfy. */
37185 if (strcmp (attrs_str, "default") != 0)
37186 {
37187 if (fn_opts == NULL_TREE)
37188 fn_opts = target_option_default_node;
37189
37190 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37191 isa_masks = rs6000_isa_flags;
37192 else
37193 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37194
37195 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37196 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37197 break;
37198 }
37199
37200 if (TARGET_DEBUG_TARGET)
37201 fprintf (stderr, "rs6000_clone_priority (%s) => %d\n",
37202 get_decl_name (fndecl), ret);
37203
37204 return ret;
37205 }
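
/* For illustration (a usage sketch; "foo" is hypothetical):

     __attribute__ ((target_clones ("cpu=power9,cpu=power8,default")))
     int foo (void);

   creates one clone per version; the default clone gets priority 0 and
   each other clone gets the highest CLONE_* index whose ISA mask its
   target flags include, so the power9 clone outranks the power8 one at
   dispatch time.  */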
37206
37207 /* This compares the priority of target features in function DECL1 and DECL2.
37208 It returns a positive value if DECL1 has higher priority, a negative value
37209 if DECL2 has higher priority, and 0 if they are the same. Note, priorities are
37210 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37211
37212 static int
37213 rs6000_compare_version_priority (tree decl1, tree decl2)
37214 {
37215 int priority1 = rs6000_clone_priority (decl1);
37216 int priority2 = rs6000_clone_priority (decl2);
37217 int ret = priority1 - priority2;
37218
37219 if (TARGET_DEBUG_TARGET)
37220 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37221 get_decl_name (decl1), get_decl_name (decl2), ret);
37222
37223 return ret;
37224 }
37225
37226 /* Make a dispatcher declaration for the multi-versioned function DECL.
37227 Calls to the DECL function will be replaced with calls to the dispatcher
37228 by the front-end. Returns the decl of the dispatcher function. */
37229
37230 static tree
37231 rs6000_get_function_versions_dispatcher (void *decl)
37232 {
37233 tree fn = (tree) decl;
37234 struct cgraph_node *node = NULL;
37235 struct cgraph_node *default_node = NULL;
37236 struct cgraph_function_version_info *node_v = NULL;
37237 struct cgraph_function_version_info *first_v = NULL;
37238
37239 tree dispatch_decl = NULL;
37240
37241 struct cgraph_function_version_info *default_version_info = NULL;
37242 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37243
37244 if (TARGET_DEBUG_TARGET)
37245 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37246 get_decl_name (fn));
37247
37248 node = cgraph_node::get (fn);
37249 gcc_assert (node != NULL);
37250
37251 node_v = node->function_version ();
37252 gcc_assert (node_v != NULL);
37253
37254 if (node_v->dispatcher_resolver != NULL)
37255 return node_v->dispatcher_resolver;
37256
37257 /* Find the default version and make it the first node. */
37258 first_v = node_v;
37259 /* Go to the beginning of the chain. */
37260 while (first_v->prev != NULL)
37261 first_v = first_v->prev;
37262
37263 default_version_info = first_v;
37264 while (default_version_info != NULL)
37265 {
37266 const tree decl2 = default_version_info->this_node->decl;
37267 if (is_function_default_version (decl2))
37268 break;
37269 default_version_info = default_version_info->next;
37270 }
37271
37272 /* If there is no default node, just return NULL. */
37273 if (default_version_info == NULL)
37274 return NULL;
37275
37276 /* Make default info the first node. */
37277 if (first_v != default_version_info)
37278 {
37279 default_version_info->prev->next = default_version_info->next;
37280 if (default_version_info->next)
37281 default_version_info->next->prev = default_version_info->prev;
37282 first_v->prev = default_version_info;
37283 default_version_info->next = first_v;
37284 default_version_info->prev = NULL;
37285 }
37286
37287 default_node = default_version_info->this_node;
37288
37289 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37290 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37291 "target_clones attribute needs GLIBC (2.23 and newer) that "
37292 "exports hardware capability bits");
37293 #else
37294
37295 if (targetm.has_ifunc_p ())
37296 {
37297 struct cgraph_function_version_info *it_v = NULL;
37298 struct cgraph_node *dispatcher_node = NULL;
37299 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37300
37301 /* Right now, the dispatching is done via ifunc. */
37302 dispatch_decl = make_dispatcher_decl (default_node->decl);
37303
37304 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37305 gcc_assert (dispatcher_node != NULL);
37306 dispatcher_node->dispatcher_function = 1;
37307 dispatcher_version_info
37308 = dispatcher_node->insert_new_function_version ();
37309 dispatcher_version_info->next = default_version_info;
37310 dispatcher_node->definition = 1;
37311
37312 /* Set the dispatcher for all the versions. */
37313 it_v = default_version_info;
37314 while (it_v != NULL)
37315 {
37316 it_v->dispatcher_resolver = dispatch_decl;
37317 it_v = it_v->next;
37318 }
37319 }
37320 else
37321 {
37322 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37323 "multiversioning needs ifunc which is not supported "
37324 "on this target");
37325 }
37326 #endif
37327
37328 return dispatch_decl;
37329 }
37330
37331 /* Make the resolver function decl to dispatch the versions of a multi-
37332 versioned function, DEFAULT_DECL. Create an empty basic block in the
37333 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37334 function. */
37335
37336 static tree
37337 make_resolver_func (const tree default_decl,
37338 const tree dispatch_decl,
37339 basic_block *empty_bb)
37340 {
37341 /* Make the resolver function static. The resolver function returns
37342 void *. */
37343 tree decl_name = clone_function_name (default_decl, "resolver");
37344 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37345 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37346 tree decl = build_fn_decl (resolver_name, type);
37347 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37348
37349 DECL_NAME (decl) = decl_name;
37350 TREE_USED (decl) = 1;
37351 DECL_ARTIFICIAL (decl) = 1;
37352 DECL_IGNORED_P (decl) = 0;
37353 TREE_PUBLIC (decl) = 0;
37354 DECL_UNINLINABLE (decl) = 1;
37355
37356 /* Resolver is not external, body is generated. */
37357 DECL_EXTERNAL (decl) = 0;
37358 DECL_EXTERNAL (dispatch_decl) = 0;
37359
37360 DECL_CONTEXT (decl) = NULL_TREE;
37361 DECL_INITIAL (decl) = make_node (BLOCK);
37362 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37363
37364 /* Build result decl and add to function_decl. */
37365 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37366 DECL_ARTIFICIAL (t) = 1;
37367 DECL_IGNORED_P (t) = 1;
37368 DECL_RESULT (decl) = t;
37369
37370 gimplify_function_tree (decl);
37371 push_cfun (DECL_STRUCT_FUNCTION (decl));
37372 *empty_bb = init_lowered_empty_function (decl, false,
37373 profile_count::uninitialized ());
37374
37375 cgraph_node::add_new_function (decl, true);
37376 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37377
37378 pop_cfun ();
37379
37380 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37381 DECL_ATTRIBUTES (dispatch_decl)
37382 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37383
37384 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37385
37386 return decl;
37387 }
37388
37389 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37390 return a pointer to VERSION_DECL if we are running on a machine that
37391 supports the index CLONE_ISA hardware architecture bits. This function will
37392 be called during version dispatch to decide which function version to
37393 execute. It returns the basic block at the end, to which more conditions
37394 can be added. */
37395
37396 static basic_block
37397 add_condition_to_bb (tree function_decl, tree version_decl,
37398 int clone_isa, basic_block new_bb)
37399 {
37400 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37401
37402 gcc_assert (new_bb != NULL);
37403 gimple_seq gseq = bb_seq (new_bb);
37404
37406 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37407 build_fold_addr_expr (version_decl));
37408 tree result_var = create_tmp_var (ptr_type_node);
37409 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37410 gimple *return_stmt = gimple_build_return (result_var);
37411
37412 if (clone_isa == CLONE_DEFAULT)
37413 {
37414 gimple_seq_add_stmt (&gseq, convert_stmt);
37415 gimple_seq_add_stmt (&gseq, return_stmt);
37416 set_bb_seq (new_bb, gseq);
37417 gimple_set_bb (convert_stmt, new_bb);
37418 gimple_set_bb (return_stmt, new_bb);
37419 pop_cfun ();
37420 return new_bb;
37421 }
37422
37423 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37424 tree cond_var = create_tmp_var (bool_int_type_node);
37425 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37426 const char *arg_str = rs6000_clone_map[clone_isa].name;
37427 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37428 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37429 gimple_call_set_lhs (call_cond_stmt, cond_var);
37430
37431 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37432 gimple_set_bb (call_cond_stmt, new_bb);
37433 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37434
37435 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37436 NULL_TREE, NULL_TREE);
37437 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37438 gimple_set_bb (if_else_stmt, new_bb);
37439 gimple_seq_add_stmt (&gseq, if_else_stmt);
37440
37441 gimple_seq_add_stmt (&gseq, convert_stmt);
37442 gimple_seq_add_stmt (&gseq, return_stmt);
37443 set_bb_seq (new_bb, gseq);
37444
37445 basic_block bb1 = new_bb;
37446 edge e12 = split_block (bb1, if_else_stmt);
37447 basic_block bb2 = e12->dest;
37448 e12->flags &= ~EDGE_FALLTHRU;
37449 e12->flags |= EDGE_TRUE_VALUE;
37450
37451 edge e23 = split_block (bb2, return_stmt);
37452 gimple_set_bb (convert_stmt, bb2);
37453 gimple_set_bb (return_stmt, bb2);
37454
37455 basic_block bb3 = e23->dest;
37456 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37457
37458 remove_edge (e23);
37459 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37460
37461 pop_cfun ();
37462 return bb3;
37463 }
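
/* For illustration, the resolver body built above ends up with roughly
   this shape (a GIMPLE-level sketch; foo.power9 and foo.default are
   hypothetical version decls):

     cond = __builtin_cpu_supports ("arch_3_00");
     if (cond != 0)
       return (void *) &foo.power9;
     ...                                   // further version tests
     return (void *) &foo.default;         // CLONE_DEFAULT, no test

   Each call to this function appends one such test and returns the
   fall-through block for the next version.  */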
37464
37465 /* This function generates the dispatch function for multi-versioned functions.
37466 DISPATCH_DECL is the function which will contain the dispatch logic.
37467 FNDECLS is the vector of function choices for dispatch.
37468 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37469 code is generated. */
37470
37471 static int
37472 dispatch_function_versions (tree dispatch_decl,
37473 void *fndecls_p,
37474 basic_block *empty_bb)
37475 {
37476 int ix;
37477 tree ele;
37478 vec<tree> *fndecls;
37479 tree clones[CLONE_MAX];
37480
37481 if (TARGET_DEBUG_TARGET)
37482 fputs ("dispatch_function_versions, top\n", stderr);
37483
37484 gcc_assert (dispatch_decl != NULL
37485 && fndecls_p != NULL
37486 && empty_bb != NULL);
37487
37488 /* fndecls_p is actually a vector. */
37489 fndecls = static_cast<vec<tree> *> (fndecls_p);
37490
37491 /* At least one more version other than the default. */
37492 gcc_assert (fndecls->length () >= 2);
37493
37494 /* The first version in the vector is the default decl. */
37495 memset ((void *) clones, '\0', sizeof (clones));
37496 clones[CLONE_DEFAULT] = (*fndecls)[0];
37497
37498 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37499 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37500 __builtin_cpu_supports ensures that the TCB fields are set up by requiring
37501 a recent glibc. If we ever need to call __builtin_cpu_init, we would need
37502 to insert the code here to do the call. */
37503
37504 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37505 {
37506 int priority = rs6000_clone_priority (ele);
37507 if (!clones[priority])
37508 clones[priority] = ele;
37509 }
37510
37511 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37512 if (clones[ix])
37513 {
37514 if (TARGET_DEBUG_TARGET)
37515 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37516 ix, get_decl_name (clones[ix]));
37517
37518 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37519 *empty_bb);
37520 }
37521
37522 return 0;
37523 }
37524
37525 /* Generate the dispatching code body to dispatch multi-versioned function
37526 DECL. The target hook is called to process the "target" attributes and
37527 provide the code to dispatch the right function at run-time. NODE points
37528 to the dispatcher decl whose body will be created. */
37529
37530 static tree
37531 rs6000_generate_version_dispatcher_body (void *node_p)
37532 {
37533 tree resolver;
37534 basic_block empty_bb;
37535 struct cgraph_node *node = (cgraph_node *) node_p;
37536 struct cgraph_function_version_info *ninfo = node->function_version ();
37537
37538 if (ninfo->dispatcher_resolver)
37539 return ninfo->dispatcher_resolver;
37540
37541 /* node is going to be an alias, so remove the finalized bit. */
37542 node->definition = false;
37543
37544 /* The first version in the chain corresponds to the default version. */
37545 ninfo->dispatcher_resolver = resolver
37546 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37547
37548 if (TARGET_DEBUG_TARGET)
37549 fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
37550 get_decl_name (resolver));
37551
37552 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37553 auto_vec<tree, 2> fn_ver_vec;
37554
37555 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37556 vinfo;
37557 vinfo = vinfo->next)
37558 {
37559 struct cgraph_node *version = vinfo->this_node;
37560 /* Check for virtual functions here again, as by this time it should
37561 have been determined if this function needs a vtable index or
37562 not. This happens for methods in derived classes that override
37563 virtual methods in base classes but are not explicitly marked as
37564 virtual. */
37565 if (DECL_VINDEX (version->decl))
37566 sorry ("Virtual function multiversioning not supported");
37567
37568 fn_ver_vec.safe_push (version->decl);
37569 }
37570
37571 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37572 cgraph_edge::rebuild_edges ();
37573 pop_cfun ();
37574 return resolver;
37575 }
37576
37577 \f
37578 /* Hook to determine if one function can safely inline another. */
37579
37580 static bool
37581 rs6000_can_inline_p (tree caller, tree callee)
37582 {
37583 bool ret = false;
37584 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37585 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37586
37587 /* If callee has no option attributes, then it is ok to inline. */
37588 if (!callee_tree)
37589 ret = true;
37590
37591 /* If caller has no option attributes, but callee does then it is not ok to
37592 inline. */
37593 else if (!caller_tree)
37594 ret = false;
37595
37596 else
37597 {
37598 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37599 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37600
37601 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37602 can inline an altivec function but a non-vsx function can't inline a
37603 vsx function. */
37604 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37605 == callee_opts->x_rs6000_isa_flags)
37606 ret = true;
37607 }
37608
37609 if (TARGET_DEBUG_TARGET)
37610 fprintf (stderr, "rs6000_can_inline_p: caller %s, callee %s, %s inline\n",
37611 get_decl_name (caller), get_decl_name (callee),
37612 (ret ? "can" : "cannot"));
37613
37614 return ret;
37615 }
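
/* For illustration (a sketch): a caller built with -mcpu=power9, whose
   ISA flags include VSX and Altivec, may inline a callee declared as

     __attribute__ ((target ("altivec"))) int helper (void);

   because the callee's flags are a subset of the caller's; inlining in
   the other direction is rejected, since the power9 flag set is not
   contained in the altivec-only set.  */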
37616 \f
37617 /* Allocate a stack temp and fix up the address so it meets the particular
37618 memory requirements (either offsettable or REG+REG addressing). */
37619
37620 rtx
37621 rs6000_allocate_stack_temp (machine_mode mode,
37622 bool offsettable_p,
37623 bool reg_reg_p)
37624 {
37625 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37626 rtx addr = XEXP (stack, 0);
37627 int strict_p = reload_completed;
37628
37629 if (!legitimate_indirect_address_p (addr, strict_p))
37630 {
37631 if (offsettable_p
37632 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37633 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37634
37635 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37636 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37637 }
37638
37639 return stack;
37640 }
37641
37642 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37643 convert to such a form to deal with memory reference instructions
37644 like STFIWX and LDBRX that only take reg+reg addressing. */
37645
37646 rtx
37647 rs6000_force_indexed_or_indirect_mem (rtx x)
37648 {
37649 machine_mode mode = GET_MODE (x);
37650
37651 gcc_assert (MEM_P (x));
37652 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37653 {
37654 rtx addr = XEXP (x, 0);
37655 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37656 {
37657 rtx reg = XEXP (addr, 0);
37658 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37659 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37660 gcc_assert (REG_P (reg));
37661 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37662 addr = reg;
37663 }
37664 else if (GET_CODE (addr) == PRE_MODIFY)
37665 {
37666 rtx reg = XEXP (addr, 0);
37667 rtx expr = XEXP (addr, 1);
37668 gcc_assert (REG_P (reg));
37669 gcc_assert (GET_CODE (expr) == PLUS);
37670 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37671 addr = reg;
37672 }
37673
37674 x = replace_equiv_address (x, force_reg (Pmode, addr));
37675 }
37676
37677 return x;
37678 }
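
/* For illustration (an RTL-level sketch): a pre-increment reference

     (mem (pre_inc (reg R)))

   is rewritten by emitting "R = R + size" and then using (mem (reg R)),
   which satisfies the reg or reg+reg addressing that STFIWX and LDBRX
   require.  */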
37679
37680 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37681
37682 On the RS/6000, all integer constants are acceptable, though most won't
37683 be valid for particular insns. Only easy FP constants are acceptable. */
37684
37685 static bool
37686 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37687 {
37688 if (TARGET_ELF && tls_referenced_p (x))
37689 return false;
37690
37691 if (CONST_DOUBLE_P (x))
37692 return easy_fp_constant (x, mode);
37693
37694 if (GET_CODE (x) == CONST_VECTOR)
37695 return easy_vector_constant (x, mode);
37696
37697 return true;
37698 }
37699
37700 \f
37701 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37702
37703 static bool
37704 chain_already_loaded (rtx_insn *last)
37705 {
37706 for (; last != NULL; last = PREV_INSN (last))
37707 {
37708 if (NONJUMP_INSN_P (last))
37709 {
37710 rtx patt = PATTERN (last);
37711
37712 if (GET_CODE (patt) == SET)
37713 {
37714 rtx lhs = XEXP (patt, 0);
37715
37716 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37717 return true;
37718 }
37719 }
37720 }
37721 return false;
37722 }
37723
37724 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37725
37726 void
37727 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37728 {
37729 rtx func = func_desc;
37730 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37731 rtx toc_load = NULL_RTX;
37732 rtx toc_restore = NULL_RTX;
37733 rtx func_addr;
37734 rtx abi_reg = NULL_RTX;
37735 rtx call[4];
37736 int n_call;
37737 rtx insn;
37738
37739 if (global_tlsarg)
37740 tlsarg = global_tlsarg;
37741
37742 /* Handle longcall attributes. */
37743 if ((INTVAL (cookie) & CALL_LONG) != 0
37744 && GET_CODE (func_desc) == SYMBOL_REF)
37745 func = rs6000_longcall_ref (func_desc, tlsarg);
37746
37747 /* Handle indirect calls. */
37748 if (GET_CODE (func) != SYMBOL_REF
37749 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37750 {
37751 /* Save the TOC into its reserved slot before the call,
37752 and prepare to restore it after the call. */
37753 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37754 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37755 gen_rtvec (1, stack_toc_offset),
37756 UNSPEC_TOCSLOT);
37757 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37758
37759 /* Can we optimize saving the TOC in the prologue or
37760 do we need to do it at every call? */
37761 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37762 cfun->machine->save_toc_in_prologue = true;
37763 else
37764 {
37765 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37766 rtx stack_toc_mem = gen_frame_mem (Pmode,
37767 gen_rtx_PLUS (Pmode, stack_ptr,
37768 stack_toc_offset));
37769 MEM_VOLATILE_P (stack_toc_mem) = 1;
37770 if (HAVE_AS_PLTSEQ
37771 && TARGET_TLS_MARKERS
37772 && DEFAULT_ABI == ABI_ELFv2
37773 && GET_CODE (func_desc) == SYMBOL_REF)
37774 {
37775 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37776 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37777 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37778 }
37779 else
37780 emit_move_insn (stack_toc_mem, toc_reg);
37781 }
37782
37783 if (DEFAULT_ABI == ABI_ELFv2)
37784 {
37785 /* A function pointer in the ELFv2 ABI is just a plain address, but
37786 the ABI requires it to be loaded into r12 before the call. */
37787 func_addr = gen_rtx_REG (Pmode, 12);
37788 if (!rtx_equal_p (func_addr, func))
37789 emit_move_insn (func_addr, func);
37790 abi_reg = func_addr;
37791 /* Indirect calls via CTR are strongly preferred over indirect
37792 calls via LR, so move the address there. Needed to mark
37793 this insn for linker plt sequence editing too. */
37794 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37795 if (HAVE_AS_PLTSEQ
37796 && TARGET_TLS_MARKERS
37797 && GET_CODE (func_desc) == SYMBOL_REF)
37798 {
37799 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
37800 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37801 emit_insn (gen_rtx_SET (func_addr, mark_func));
37802 v = gen_rtvec (2, func_addr, func_desc);
37803 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37804 }
37805 else
37806 emit_move_insn (func_addr, abi_reg);
37807 }
37808 else
37809 {
37810 /* A function pointer under AIX is a pointer to a data area whose
37811 first word contains the actual address of the function, whose
37812 second word contains a pointer to its TOC, and whose third word
37813 contains a value to place in the static chain register (r11).
37814 Note that if we load the static chain, our "trampoline" need
37815 not have any executable code. */
37816
37817 /* Load up address of the actual function. */
37818 func = force_reg (Pmode, func);
37819 func_addr = gen_reg_rtx (Pmode);
37820 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
37821
37822 /* Indirect calls via CTR are strongly preferred over indirect
37823 calls via LR, so move the address there. */
37824 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
37825 emit_move_insn (ctr_reg, func_addr);
37826 func_addr = ctr_reg;
37827
37828 /* Prepare to load the TOC of the called function. Note that the
37829 TOC load must happen immediately before the actual call so
37830 that unwinding the TOC registers works correctly. See the
37831 comment in frob_update_context. */
37832 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37833 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37834 gen_rtx_PLUS (Pmode, func,
37835 func_toc_offset));
37836 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37837
37838 /* If we have a static chain, load it up. But, if the call was
37839 originally direct, the 3rd word has not been written since no
37840 trampoline has been built, so we ought not to load it, lest we
37841 override a static chain value. */
37842 if (!(GET_CODE (func_desc) == SYMBOL_REF
37843 && SYMBOL_REF_FUNCTION_P (func_desc))
37844 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37845 && !chain_already_loaded (get_current_sequence ()->next->last))
37846 {
37847 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37848 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37849 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37850 gen_rtx_PLUS (Pmode, func,
37851 func_sc_offset));
37852 emit_move_insn (sc_reg, func_sc_mem);
37853 abi_reg = sc_reg;
37854 }
37855 }
37856 }
37857 else
37858 {
37859 /* Direct calls use the TOC: for local calls, the callee will
37860 assume the TOC register is set; for non-local calls, the
37861 PLT stub needs the TOC register. */
37862 abi_reg = toc_reg;
37863 func_addr = func;
37864 }
37865
37866 /* Create the call. */
37867 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37868 if (value != NULL_RTX)
37869 call[0] = gen_rtx_SET (value, call[0]);
37870 n_call = 1;
37871
37872 if (toc_load)
37873 call[n_call++] = toc_load;
37874 if (toc_restore)
37875 call[n_call++] = toc_restore;
37876
37877 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37878
37879 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37880 insn = emit_call_insn (insn);
37881
37882 /* Mention all registers defined by the ABI to hold information
37883 as uses in CALL_INSN_FUNCTION_USAGE. */
37884 if (abi_reg)
37885 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37886 }
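
/* For illustration, the AIX/ELFv1 function descriptor dereferenced in
   the indirect path above can be pictured as (a sketch, not a type
   used in this file):

     struct func_desc
     {
       void *code;          // actual function address -> CTR
       void *toc;           // callee's TOC pointer -> r2
       void *static_chain;  // -> r11 when needed
     };

   The TOC load is kept immediately before the call so that unwinding
   can restore the TOC register, and r11 is loaded only when the callee
   may be a nested function.  */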
37887
37888 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37889
37890 void
37891 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37892 {
37893 rtx call[2];
37894 rtx insn;
37895
37896 gcc_assert (INTVAL (cookie) == 0);
37897
37898 if (global_tlsarg)
37899 tlsarg = global_tlsarg;
37900
37901 /* Create the call. */
37902 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
37903 if (value != NULL_RTX)
37904 call[0] = gen_rtx_SET (value, call[0]);
37905
37906 call[1] = simple_return_rtx;
37907
37908 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37909 insn = emit_call_insn (insn);
37910
37911 /* Note use of the TOC register. */
37912 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37913 }
37914
37915 /* Expand code to perform a call under the SYSV4 ABI. */
37916
37917 void
37918 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37919 {
37920 rtx func = func_desc;
37921 rtx func_addr;
37922 rtx call[3];
37923 rtx insn;
37924 rtx abi_reg = NULL_RTX;
37925
37926 if (global_tlsarg)
37927 tlsarg = global_tlsarg;
37928
37929 /* Handle longcall attributes. */
37930 if ((INTVAL (cookie) & CALL_LONG) != 0
37931 && GET_CODE (func_desc) == SYMBOL_REF)
37932 {
37933 func = rs6000_longcall_ref (func_desc, tlsarg);
37934 /* If the longcall was implemented using PLT16 relocs, then r11
37935 needs to be valid at the call for lazy linking. */
37936 if (HAVE_AS_PLTSEQ
37937 && TARGET_TLS_MARKERS)
37938 abi_reg = func;
37939 }
37940
37941 /* Handle indirect calls. */
37942 if (GET_CODE (func) != SYMBOL_REF)
37943 {
37944 func = force_reg (Pmode, func);
37945
37946 /* Indirect calls via CTR are strongly preferred over indirect
37947 calls via LR, so move the address there. Needed to mark
37948 this insn for linker plt sequence editing too. */
37949 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37950 if (HAVE_AS_PLTSEQ
37951 && TARGET_TLS_MARKERS
37952 && GET_CODE (func_desc) == SYMBOL_REF)
37953 {
37954 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37955 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37956 emit_insn (gen_rtx_SET (func_addr, mark_func));
37957 v = gen_rtvec (2, func_addr, func_desc);
37958 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37959 }
37960 else
37961 emit_move_insn (func_addr, func);
37962 }
37963 else
37964 func_addr = func;
37965
37966 /* Create the call. */
37967 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37968 if (value != NULL_RTX)
37969 call[0] = gen_rtx_SET (value, call[0]);
37970
37971 call[1] = gen_rtx_USE (VOIDmode, cookie);
37972 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37973
37974 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
37975 insn = emit_call_insn (insn);
37976 if (abi_reg)
37977 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37978 }
37979
37980 /* Expand code to perform a sibling call under the SysV4 ABI. */
37981
37982 void
37983 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37984 {
37985 rtx func = func_desc;
37986 rtx func_addr;
37987 rtx call[3];
37988 rtx insn;
37989 rtx abi_reg = NULL_RTX;
37990
37991 if (global_tlsarg)
37992 tlsarg = global_tlsarg;
37993
37994 /* Handle longcall attributes. */
37995 if ((INTVAL (cookie) & CALL_LONG) != 0
37996 && GET_CODE (func_desc) == SYMBOL_REF)
37997 {
37998 func = rs6000_longcall_ref (func_desc, tlsarg);
37999 /* If the longcall was implemented using PLT16 relocs, then r11
38000 needs to be valid at the call for lazy linking. */
38001 if (HAVE_AS_PLTSEQ
38002 && TARGET_TLS_MARKERS)
38003 abi_reg = func;
38004 }
38005
38006 /* Handle indirect calls. */
38007 if (GET_CODE (func) != SYMBOL_REF)
38008 {
38009 func = force_reg (Pmode, func);
38010
38011 /* Indirect sibcalls must go via CTR. Needed to mark
38012 this insn for linker plt sequence editing too. */
38013 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38014 if (HAVE_AS_PLTSEQ
38015 && TARGET_TLS_MARKERS
38016 && GET_CODE (func_desc) == SYMBOL_REF)
38017 {
38018 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38019 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38020 emit_insn (gen_rtx_SET (func_addr, mark_func));
38021 v = gen_rtvec (2, func_addr, func_desc);
38022 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38023 }
38024 else
38025 emit_move_insn (func_addr, func);
38026 }
38027 else
38028 func_addr = func;
38029
38030 /* Create the call. */
38031 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38032 if (value != NULL_RTX)
38033 call[0] = gen_rtx_SET (value, call[0]);
38034
38035 call[1] = gen_rtx_USE (VOIDmode, cookie);
38036 call[2] = simple_return_rtx;
38037
38038 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38039 insn = emit_call_insn (insn);
38040 if (abi_reg)
38041 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38042 }
38043
38044 #if TARGET_MACHO
38045
38046 /* Expand code to perform a call under the Darwin ABI.
Modulo handling of -mlongcall, this is much the same as sysv.
If/when the longcall optimisation is removed, we could drop this
38049 code and use the sysv case (taking care to avoid the tls stuff).
38050
38051 We can use this for sibcalls too, if needed. */
38052
38053 void
38054 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
38055 rtx cookie, bool sibcall)
38056 {
38057 rtx func = func_desc;
38058 rtx func_addr;
38059 rtx call[3];
38060 rtx insn;
38061 int cookie_val = INTVAL (cookie);
38062 bool make_island = false;
38063
/* Handle longcall attributes; there are two cases for Darwin:
38065 1) Newer linkers are capable of synthesising any branch islands needed.
38066 2) We need a helper branch island synthesised by the compiler.
38067 The second case has mostly been retired and we don't use it for m64.
In fact, it is an optimisation; we could just indirect as sysv does,
but we keep it this way for backwards compatibility for now.
38070 If we're going to use this, then we need to keep the CALL_LONG bit set,
38071 so that we can pick up the special insn form later. */
38072 if ((cookie_val & CALL_LONG) != 0
38073 && GET_CODE (func_desc) == SYMBOL_REF)
38074 {
38075 if (darwin_emit_branch_islands && TARGET_32BIT)
38076 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
38077 else
38078 {
38079 /* The linker is capable of doing this, but the user explicitly
38080 asked for -mlongcall, so we'll do the 'normal' version. */
38081 func = rs6000_longcall_ref (func_desc, NULL_RTX);
38082 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
38083 }
38084 }
38085
38086 /* Handle indirect calls. */
38087 if (GET_CODE (func) != SYMBOL_REF)
38088 {
38089 func = force_reg (Pmode, func);
38090
38091 /* Indirect calls via CTR are strongly preferred over indirect
38092 calls via LR, and are required for indirect sibcalls, so move
38093 the address there. */
38094 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38095 emit_move_insn (func_addr, func);
38096 }
38097 else
38098 func_addr = func;
38099
38100 /* Create the call. */
38101 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38102 if (value != NULL_RTX)
38103 call[0] = gen_rtx_SET (value, call[0]);
38104
38105 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
38106
38107 if (sibcall)
38108 call[2] = simple_return_rtx;
38109 else
38110 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38111
38112 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38113 insn = emit_call_insn (insn);
/* Now that we have the debug info in the insn, we can set up the branch island
38115 if we're using one. */
38116 if (make_island)
38117 {
38118 tree funname = get_identifier (XSTR (func_desc, 0));
38119
38120 if (no_previous_def (funname))
38121 {
38122 rtx label_rtx = gen_label_rtx ();
38123 char *label_buf, temp_buf[256];
38124 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
38125 CODE_LABEL_NUMBER (label_rtx));
38126 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
38127 tree labelname = get_identifier (label_buf);
38128 add_compiler_branch_island (labelname, funname,
38129 insn_line ((const rtx_insn*)insn));
38130 }
38131 }
38132 }
38133 #endif
38134
38135 void
38136 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38137 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38138 {
38139 #if TARGET_MACHO
38140 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
38141 #else
gcc_unreachable ();
38143 #endif
38144 }
38145
38146
38147 void
38148 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38149 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38150 {
38151 #if TARGET_MACHO
38152 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
38153 #else
gcc_unreachable ();
38155 #endif
38156 }
38157
38158
38159 /* Return whether we need to always update the saved TOC pointer when we update
38160 the stack pointer. */
38161
38162 static bool
38163 rs6000_save_toc_in_prologue_p (void)
38164 {
38165 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38166 }
38167
38168 #ifdef HAVE_GAS_HIDDEN
38169 # define USE_HIDDEN_LINKONCE 1
38170 #else
38171 # define USE_HIDDEN_LINKONCE 0
38172 #endif
38173
38174 /* Fills in the label name that should be used for a 476 link stack thunk. */
38175
38176 void
38177 get_ppc476_thunk_name (char name[32])
38178 {
38179 gcc_assert (TARGET_LINK_STACK);
38180
38181 if (USE_HIDDEN_LINKONCE)
38182 sprintf (name, "__ppc476.get_thunk");
38183 else
38184 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38185 }
38186
38187 /* This function emits the simple thunk routine that is used to preserve
38188 the link stack on the 476 cpu. */
38189
38190 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38191 static void
38192 rs6000_code_end (void)
38193 {
38194 char name[32];
38195 tree decl;
38196
38197 if (!TARGET_LINK_STACK)
38198 return;
38199
38200 get_ppc476_thunk_name (name);
38201
38202 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38203 build_function_type_list (void_type_node, NULL_TREE));
38204 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38205 NULL_TREE, void_type_node);
38206 TREE_PUBLIC (decl) = 1;
38207 TREE_STATIC (decl) = 1;
38208
38209 #if RS6000_WEAK
38210 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38211 {
38212 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38213 targetm.asm_out.unique_section (decl, 0);
38214 switch_to_section (get_named_section (decl, NULL, 0));
38215 DECL_WEAK (decl) = 1;
38216 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38217 targetm.asm_out.globalize_label (asm_out_file, name);
38218 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38219 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38220 }
38221 else
38222 #endif
38223 {
38224 switch_to_section (text_section);
38225 ASM_OUTPUT_LABEL (asm_out_file, name);
38226 }
38227
38228 DECL_INITIAL (decl) = make_node (BLOCK);
38229 current_function_decl = decl;
38230 allocate_struct_function (decl, false);
38231 init_function_start (decl);
38232 first_function_block_is_cold = false;
38233 /* Make sure unwind info is emitted for the thunk if needed. */
38234 final_start_function (emit_barrier (), asm_out_file, 1);
38235
38236 fputs ("\tblr\n", asm_out_file);
38237
38238 final_end_function ();
38239 init_insn_lengths ();
38240 free_after_compilation (cfun);
38241 set_cfun (NULL);
38242 current_function_decl = NULL;
38243 }
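/* Schematically, the output of rs6000_code_end with USE_HIDDEN_LINKONCE
   is just (directives abbreviated; the exact form depends on the target
   assembler):

        .weak   __ppc476.get_thunk
        .hidden __ppc476.get_thunk
   __ppc476.get_thunk:
        blr

   A caller obtains its own address with "bl __ppc476.get_thunk; mflr rN":
   the bl pushes an entry onto the 476 link stack and the thunk's blr pops
   it again, so the link stack stays balanced, unlike the usual
   bcl 20,31,$+4 trick.  */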
38244
38245 /* Add r30 to hard reg set if the prologue sets it up and it is not
38246 pic_offset_table_rtx. */
38247
38248 static void
38249 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38250 {
38251 if (!TARGET_SINGLE_PIC_BASE
38252 && TARGET_TOC
38253 && TARGET_MINIMAL_TOC
38254 && !constant_pool_empty_p ())
38255 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38256 if (cfun->machine->split_stack_argp_used)
38257 add_to_hard_reg_set (&set->set, Pmode, 12);
38258
38259 /* Make sure the hard reg set doesn't include r2, which was possibly added
38260 via PIC_OFFSET_TABLE_REGNUM. */
38261 if (TARGET_TOC)
38262 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38263 }
38264
38265 \f
38266 /* Helper function for rs6000_split_logical to emit a logical instruction after
splitting the operation into single GPR registers.
38268
38269 DEST is the destination register.
38270 OP1 and OP2 are the input source registers.
38271 CODE is the base operation (AND, IOR, XOR, NOT).
38272 MODE is the machine mode.
38273 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38274 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38275 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38276
38277 static void
38278 rs6000_split_logical_inner (rtx dest,
38279 rtx op1,
38280 rtx op2,
38281 enum rtx_code code,
38282 machine_mode mode,
38283 bool complement_final_p,
38284 bool complement_op1_p,
38285 bool complement_op2_p)
38286 {
38287 rtx bool_rtx;
38288
38289 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38290 if (op2 && GET_CODE (op2) == CONST_INT
38291 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38292 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38293 {
38294 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38295 HOST_WIDE_INT value = INTVAL (op2) & mask;
38296
38297 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38298 if (code == AND)
38299 {
38300 if (value == 0)
38301 {
38302 emit_insn (gen_rtx_SET (dest, const0_rtx));
38303 return;
38304 }
38305
38306 else if (value == mask)
38307 {
38308 if (!rtx_equal_p (dest, op1))
38309 emit_insn (gen_rtx_SET (dest, op1));
38310 return;
38311 }
38312 }
38313
38314 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
into separate ORI/ORIS or XORI/XORIS instructions. */
38316 else if (code == IOR || code == XOR)
38317 {
38318 if (value == 0)
38319 {
38320 if (!rtx_equal_p (dest, op1))
38321 emit_insn (gen_rtx_SET (dest, op1));
38322 return;
38323 }
38324 }
38325 }
38326
38327 if (code == AND && mode == SImode
38328 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38329 {
38330 emit_insn (gen_andsi3 (dest, op1, op2));
38331 return;
38332 }
38333
38334 if (complement_op1_p)
38335 op1 = gen_rtx_NOT (mode, op1);
38336
38337 if (complement_op2_p)
38338 op2 = gen_rtx_NOT (mode, op2);
38339
38340 /* For canonical RTL, if only one arm is inverted it is the first. */
38341 if (!complement_op1_p && complement_op2_p)
38342 std::swap (op1, op2);
38343
38344 bool_rtx = ((code == NOT)
38345 ? gen_rtx_NOT (mode, op1)
38346 : gen_rtx_fmt_ee (code, mode, op1, op2));
38347
38348 if (complement_final_p)
38349 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38350
38351 emit_insn (gen_rtx_SET (dest, bool_rtx));
38352 }
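/* A sketch of the canonicalization above: with CODE == AND and
   COMPLEMENT_OP2_P set, the operands are swapped so that the emitted RTL
   is

     (set (reg:SI 3) (and:SI (not:SI (reg:SI 5)) (reg:SI 4)))

   putting the inverted arm first, which is what the andc-style patterns
   expect.  */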
38353
38354 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38355 operations are split immediately during RTL generation to allow for more
38356 optimizations of the AND/IOR/XOR.
38357
38358 OPERANDS is an array containing the destination and two input operands.
38359 CODE is the base operation (AND, IOR, XOR, NOT).
If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38366
38367 static void
38368 rs6000_split_logical_di (rtx operands[3],
38369 enum rtx_code code,
38370 bool complement_final_p,
38371 bool complement_op1_p,
38372 bool complement_op2_p)
38373 {
38374 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38375 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38376 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38377 enum hi_lo { hi = 0, lo = 1 };
38378 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38379 size_t i;
38380
38381 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38382 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38383 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38384 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38385
38386 if (code == NOT)
38387 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38388 else
38389 {
38390 if (GET_CODE (operands[2]) != CONST_INT)
38391 {
38392 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38393 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38394 }
38395 else
38396 {
38397 HOST_WIDE_INT value = INTVAL (operands[2]);
38398 HOST_WIDE_INT value_hi_lo[2];
38399
38400 gcc_assert (!complement_final_p);
38401 gcc_assert (!complement_op1_p);
38402 gcc_assert (!complement_op2_p);
38403
38404 value_hi_lo[hi] = value >> 32;
38405 value_hi_lo[lo] = value & lower_32bits;
38406
38407 for (i = 0; i < 2; i++)
38408 {
38409 HOST_WIDE_INT sub_value = value_hi_lo[i];
38410
38411 if (sub_value & sign_bit)
38412 sub_value |= upper_32bits;
38413
38414 op2_hi_lo[i] = GEN_INT (sub_value);
38415
38416 /* If this is an AND instruction, check to see if we need to load
38417 the value in a register. */
38418 if (code == AND && sub_value != -1 && sub_value != 0
38419 && !and_operand (op2_hi_lo[i], SImode))
38420 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38421 }
38422 }
38423 }
38424
38425 for (i = 0; i < 2; i++)
38426 {
38427 /* Split large IOR/XOR operations. */
38428 if ((code == IOR || code == XOR)
38429 && GET_CODE (op2_hi_lo[i]) == CONST_INT
38430 && !complement_final_p
38431 && !complement_op1_p
38432 && !complement_op2_p
38433 && !logical_const_operand (op2_hi_lo[i], SImode))
38434 {
38435 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38436 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38437 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38438 rtx tmp = gen_reg_rtx (SImode);
38439
38440 /* Make sure the constant is sign extended. */
38441 if ((hi_16bits & sign_bit) != 0)
38442 hi_16bits |= upper_32bits;
38443
38444 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38445 code, SImode, false, false, false);
38446
38447 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38448 code, SImode, false, false, false);
38449 }
38450 else
38451 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38452 code, SImode, complement_final_p,
38453 complement_op1_p, complement_op2_p);
38454 }
38455
38456 return;
38457 }
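/* Worked example (a sketch, register numbers illustrative): on a 32-bit
   target,

     (set (reg:DI 30) (ior:DI (reg:DI 30) (const_int 0x12345678)))

   is split into two SImode halves.  The high half is an IOR with 0 and
   emits nothing; the low half is not a 16-bit logical constant, so it is
   further split through a temporary, eventually assembling to

        oris 31,31,0x1234
        ori  31,31,0x5678  */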
38458
38459 /* Split the insns that make up boolean operations operating on multiple GPR
38460 registers. The boolean MD patterns ensure that the inputs either are
38461 exactly the same as the output registers, or there is no overlap.
38462
38463 OPERANDS is an array containing the destination and two input operands.
38464 CODE is the base operation (AND, IOR, XOR, NOT).
38465 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38466 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38467 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38468
38469 void
38470 rs6000_split_logical (rtx operands[3],
38471 enum rtx_code code,
38472 bool complement_final_p,
38473 bool complement_op1_p,
38474 bool complement_op2_p)
38475 {
38476 machine_mode mode = GET_MODE (operands[0]);
38477 machine_mode sub_mode;
38478 rtx op0, op1, op2;
38479 int sub_size, regno0, regno1, nregs, i;
38480
38481 /* If this is DImode, use the specialized version that can run before
38482 register allocation. */
38483 if (mode == DImode && !TARGET_POWERPC64)
38484 {
38485 rs6000_split_logical_di (operands, code, complement_final_p,
38486 complement_op1_p, complement_op2_p);
38487 return;
38488 }
38489
38490 op0 = operands[0];
38491 op1 = operands[1];
38492 op2 = (code == NOT) ? NULL_RTX : operands[2];
38493 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38494 sub_size = GET_MODE_SIZE (sub_mode);
38495 regno0 = REGNO (op0);
38496 regno1 = REGNO (op1);
38497
38498 gcc_assert (reload_completed);
38499 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38500 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38501
38502 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38503 gcc_assert (nregs > 1);
38504
38505 if (op2 && REG_P (op2))
38506 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38507
38508 for (i = 0; i < nregs; i++)
38509 {
38510 int offset = i * sub_size;
38511 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38512 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38513 rtx sub_op2 = ((code == NOT)
38514 ? NULL_RTX
38515 : simplify_subreg (sub_mode, op2, mode, offset));
38516
38517 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38518 complement_final_p, complement_op1_p,
38519 complement_op2_p);
38520 }
38521
38522 return;
38523 }
38524
38525 \f
/* Return true if the peephole2 can combine an addis instruction with a
load that has an offset, forming a pair that can be fused together on a
power8. */
38529
38530 bool
38531 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38532 rtx addis_value, /* addis value. */
38533 rtx target, /* target register that is loaded. */
38534 rtx mem) /* bottom part of the memory addr. */
38535 {
38536 rtx addr;
38537 rtx base_reg;
38538
38539 /* Validate arguments. */
38540 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38541 return false;
38542
38543 if (!base_reg_operand (target, GET_MODE (target)))
38544 return false;
38545
38546 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38547 return false;
38548
38549 /* Allow sign/zero extension. */
38550 if (GET_CODE (mem) == ZERO_EXTEND
38551 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38552 mem = XEXP (mem, 0);
38553
38554 if (!MEM_P (mem))
38555 return false;
38556
38557 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38558 return false;
38559
38560 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38561 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38562 return false;
38563
38564 /* Validate that the register used to load the high value is either the
38565 register being loaded, or we can safely replace its use.
38566
38567 This function is only called from the peephole2 pass and we assume that
there are 2 instructions in the peephole (addis and load), so we check
that the target register is not used in the memory address and that the
register holding the addis result is dead after the peephole.
38571 if (REGNO (addis_reg) != REGNO (target))
38572 {
38573 if (reg_mentioned_p (target, mem))
38574 return false;
38575
38576 if (!peep2_reg_dead_p (2, addis_reg))
38577 return false;
38578
38579 /* If the target register being loaded is the stack pointer, we must
38580 avoid loading any other value into it, even temporarily. */
38581 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38582 return false;
38583 }
38584
38585 base_reg = XEXP (addr, 0);
38586 return REGNO (addis_reg) == REGNO (base_reg);
38587 }
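/* The peephole2 using this predicate matches a two-insn sequence of the
   shape (a sketch):

     (set (reg:DI 9) (high:DI (unspec [(symbol_ref) (reg 2)] UNSPEC_TOCREL)))
     (set (reg:DI 10) (mem:DI (lo_sum:DI (reg:DI 9) (unspec ...))))

   i.e. an addis feeding a D-form load, where the addis result is used
   only as the base of the load and (if distinct from the load target) is
   dead afterwards.  */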
38588
38589 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38590 sequence. We adjust the addis register to use the target register. If the
load sign extends, we change it into a zero-extending load followed by an
explicit sign extension, since the fusion only covers zero-extending
loads.
38594
38595 The operands are:
38596 operands[0] register set with addis (to be replaced with target)
38597 operands[1] value set via addis
38598 operands[2] target register being loaded
38599 operands[3] D-form memory reference using operands[0]. */
38600
38601 void
38602 expand_fusion_gpr_load (rtx *operands)
38603 {
38604 rtx addis_value = operands[1];
38605 rtx target = operands[2];
38606 rtx orig_mem = operands[3];
38607 rtx new_addr, new_mem, orig_addr, offset;
38608 enum rtx_code plus_or_lo_sum;
38609 machine_mode target_mode = GET_MODE (target);
38610 machine_mode extend_mode = target_mode;
38611 machine_mode ptr_mode = Pmode;
38612 enum rtx_code extend = UNKNOWN;
38613
38614 if (GET_CODE (orig_mem) == ZERO_EXTEND
38615 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38616 {
38617 extend = GET_CODE (orig_mem);
38618 orig_mem = XEXP (orig_mem, 0);
38619 target_mode = GET_MODE (orig_mem);
38620 }
38621
38622 gcc_assert (MEM_P (orig_mem));
38623
38624 orig_addr = XEXP (orig_mem, 0);
38625 plus_or_lo_sum = GET_CODE (orig_addr);
38626 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38627
38628 offset = XEXP (orig_addr, 1);
38629 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38630 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38631
38632 if (extend != UNKNOWN)
38633 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38634
38635 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38636 UNSPEC_FUSION_GPR);
38637 emit_insn (gen_rtx_SET (target, new_mem));
38638
38639 if (extend == SIGN_EXTEND)
38640 {
38641 int sub_off = ((BYTES_BIG_ENDIAN)
38642 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38643 : 0);
38644 rtx sign_reg
38645 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38646
38647 emit_insn (gen_rtx_SET (target,
38648 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38649 }
38650
38651 return;
38652 }
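/* For example (a sketch): a fused sign-extending halfword load is
   rewritten here as a zero-extending load plus an explicit extension,
   because power8 only fuses the zero-extending forms; the eventual
   output is along the lines of

        addis 9,2,sym@toc@ha
        lhz   9,sym@toc@l(9)
        extsh 9,9  */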
38653
38654 /* Emit the addis instruction that will be part of a fused instruction
38655 sequence. */
38656
38657 void
38658 emit_fusion_addis (rtx target, rtx addis_value)
38659 {
38660 rtx fuse_ops[10];
38661 const char *addis_str = NULL;
38662
38663 /* Emit the addis instruction. */
38664 fuse_ops[0] = target;
38665 if (satisfies_constraint_L (addis_value))
38666 {
38667 fuse_ops[1] = addis_value;
38668 addis_str = "lis %0,%v1";
38669 }
38670
38671 else if (GET_CODE (addis_value) == PLUS)
38672 {
38673 rtx op0 = XEXP (addis_value, 0);
38674 rtx op1 = XEXP (addis_value, 1);
38675
38676 if (REG_P (op0) && CONST_INT_P (op1)
38677 && satisfies_constraint_L (op1))
38678 {
38679 fuse_ops[1] = op0;
38680 fuse_ops[2] = op1;
38681 addis_str = "addis %0,%1,%v2";
38682 }
38683 }
38684
38685 else if (GET_CODE (addis_value) == HIGH)
38686 {
38687 rtx value = XEXP (addis_value, 0);
38688 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38689 {
38690 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38691 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38692 if (TARGET_ELF)
38693 addis_str = "addis %0,%2,%1@toc@ha";
38694
38695 else if (TARGET_XCOFF)
38696 addis_str = "addis %0,%1@u(%2)";
38697
38698 else
38699 gcc_unreachable ();
38700 }
38701
38702 else if (GET_CODE (value) == PLUS)
38703 {
38704 rtx op0 = XEXP (value, 0);
38705 rtx op1 = XEXP (value, 1);
38706
38707 if (GET_CODE (op0) == UNSPEC
38708 && XINT (op0, 1) == UNSPEC_TOCREL
38709 && CONST_INT_P (op1))
38710 {
38711 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38712 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38713 fuse_ops[3] = op1;
38714 if (TARGET_ELF)
38715 addis_str = "addis %0,%2,%1+%3@toc@ha";
38716
38717 else if (TARGET_XCOFF)
38718 addis_str = "addis %0,%1+%3@u(%2)";
38719
38720 else
38721 gcc_unreachable ();
38722 }
38723 }
38724
38725 else if (satisfies_constraint_L (value))
38726 {
38727 fuse_ops[1] = value;
38728 addis_str = "lis %0,%v1";
38729 }
38730
38731 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38732 {
38733 fuse_ops[1] = value;
38734 addis_str = "lis %0,%1@ha";
38735 }
38736 }
38737
38738 if (!addis_str)
38739 fatal_insn ("Could not generate addis value for fusion", addis_value);
38740
38741 output_asm_insn (addis_str, fuse_ops);
38742 }
38743
38744 /* Emit a D-form load or store instruction that is the second instruction
38745 of a fusion sequence. */
38746
38747 static void
38748 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38749 {
38750 rtx fuse_ops[10];
38751 char insn_template[80];
38752
38753 fuse_ops[0] = load_reg;
38754 fuse_ops[1] = addis_reg;
38755
38756 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38757 {
38758 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38759 fuse_ops[2] = offset;
38760 output_asm_insn (insn_template, fuse_ops);
38761 }
38762
38763 else if (GET_CODE (offset) == UNSPEC
38764 && XINT (offset, 1) == UNSPEC_TOCREL)
38765 {
38766 if (TARGET_ELF)
38767 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38768
38769 else if (TARGET_XCOFF)
38770 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38771
38772 else
38773 gcc_unreachable ();
38774
38775 fuse_ops[2] = XVECEXP (offset, 0, 0);
38776 output_asm_insn (insn_template, fuse_ops);
38777 }
38778
38779 else if (GET_CODE (offset) == PLUS
38780 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38781 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38782 && CONST_INT_P (XEXP (offset, 1)))
38783 {
38784 rtx tocrel_unspec = XEXP (offset, 0);
38785 if (TARGET_ELF)
38786 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38787
38788 else if (TARGET_XCOFF)
38789 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38790
38791 else
38792 gcc_unreachable ();
38793
38794 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38795 fuse_ops[3] = XEXP (offset, 1);
38796 output_asm_insn (insn_template, fuse_ops);
38797 }
38798
38799 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38800 {
38801 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38802
38803 fuse_ops[2] = offset;
38804 output_asm_insn (insn_template, fuse_ops);
38805 }
38806
38807 else
38808 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38809
38810 return;
38811 }
38812
38813 /* Given an address, convert it into the addis and load offset parts. Addresses
38814 created during the peephole2 process look like:
38815 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38816 (unspec [(...)] UNSPEC_TOCREL)) */
38817
38818 static void
38819 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38820 {
38821 rtx hi, lo;
38822
38823 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38824 {
38825 hi = XEXP (addr, 0);
38826 lo = XEXP (addr, 1);
38827 }
38828 else
38829 gcc_unreachable ();
38830
38831 *p_hi = hi;
38832 *p_lo = lo;
38833 }
38834
/* Return a string to fuse an addis instruction with a GPR load into the
same register that the addis set up. The address that is used
38837 is the logical address that was formed during peephole2:
38838 (lo_sum (high) (low-part))
38839
38840 The code is complicated, so we call output_asm_insn directly, and just
38841 return "". */
38842
38843 const char *
38844 emit_fusion_gpr_load (rtx target, rtx mem)
38845 {
38846 rtx addis_value;
38847 rtx addr;
38848 rtx load_offset;
38849 const char *load_str = NULL;
38850 machine_mode mode;
38851
38852 if (GET_CODE (mem) == ZERO_EXTEND)
38853 mem = XEXP (mem, 0);
38854
38855 gcc_assert (REG_P (target) && MEM_P (mem));
38856
38857 addr = XEXP (mem, 0);
38858 fusion_split_address (addr, &addis_value, &load_offset);
38859
38860 /* Now emit the load instruction to the same register. */
38861 mode = GET_MODE (mem);
38862 switch (mode)
38863 {
38864 case E_QImode:
38865 load_str = "lbz";
38866 break;
38867
38868 case E_HImode:
38869 load_str = "lhz";
38870 break;
38871
38872 case E_SImode:
38873 case E_SFmode:
38874 load_str = "lwz";
38875 break;
38876
38877 case E_DImode:
38878 case E_DFmode:
38879 gcc_assert (TARGET_POWERPC64);
38880 load_str = "ld";
38881 break;
38882
38883 default:
38884 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38885 }
38886
38887 /* Emit the addis instruction. */
38888 emit_fusion_addis (target, addis_value);
38889
38890 /* Emit the D-form load instruction. */
38891 emit_fusion_load (target, target, load_offset, load_str);
38892
38893 return "";
38894 }
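/* For a TOC-relative SImode load, the two calls above therefore print
   something like (symbol and register illustrative):

        addis 9,2,.LC0@toc@ha
        lwz   9,.LC0@toc@l(9)

   with the addis target, the load base and the load target all the same
   register, as the power8 fusion requires.  */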
38895 \f
38896
38897 #ifdef RS6000_GLIBC_ATOMIC_FENV
38898 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38899 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38900 #endif
38901
38902 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38903
38904 static void
38905 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38906 {
38907 if (!TARGET_HARD_FLOAT)
38908 {
38909 #ifdef RS6000_GLIBC_ATOMIC_FENV
38910 if (atomic_hold_decl == NULL_TREE)
38911 {
38912 atomic_hold_decl
38913 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38914 get_identifier ("__atomic_feholdexcept"),
38915 build_function_type_list (void_type_node,
38916 double_ptr_type_node,
38917 NULL_TREE));
38918 TREE_PUBLIC (atomic_hold_decl) = 1;
38919 DECL_EXTERNAL (atomic_hold_decl) = 1;
38920 }
38921
38922 if (atomic_clear_decl == NULL_TREE)
38923 {
38924 atomic_clear_decl
38925 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38926 get_identifier ("__atomic_feclearexcept"),
38927 build_function_type_list (void_type_node,
38928 NULL_TREE));
38929 TREE_PUBLIC (atomic_clear_decl) = 1;
38930 DECL_EXTERNAL (atomic_clear_decl) = 1;
38931 }
38932
38933 tree const_double = build_qualified_type (double_type_node,
38934 TYPE_QUAL_CONST);
38935 tree const_double_ptr = build_pointer_type (const_double);
38936 if (atomic_update_decl == NULL_TREE)
38937 {
38938 atomic_update_decl
38939 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38940 get_identifier ("__atomic_feupdateenv"),
38941 build_function_type_list (void_type_node,
38942 const_double_ptr,
38943 NULL_TREE));
38944 TREE_PUBLIC (atomic_update_decl) = 1;
38945 DECL_EXTERNAL (atomic_update_decl) = 1;
38946 }
38947
38948 tree fenv_var = create_tmp_var_raw (double_type_node);
38949 TREE_ADDRESSABLE (fenv_var) = 1;
38950 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38951
38952 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38953 *clear = build_call_expr (atomic_clear_decl, 0);
38954 *update = build_call_expr (atomic_update_decl, 1,
38955 fold_convert (const_double_ptr, fenv_addr));
38956 #endif
38957 return;
38958 }
38959
38960 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38961 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38962 tree call_mffs = build_call_expr (mffs, 0);
38963
38964 /* Generates the equivalent of feholdexcept (&fenv_var)
38965
38966 *fenv_var = __builtin_mffs ();
38967 double fenv_hold;
38968 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
38969 __builtin_mtfsf (0xff, fenv_hold); */
38970
38971 /* Mask to clear everything except for the rounding modes and non-IEEE
38972 arithmetic flag. */
38973 const unsigned HOST_WIDE_INT hold_exception_mask =
38974 HOST_WIDE_INT_C (0xffffffff00000007);
38975
38976 tree fenv_var = create_tmp_var_raw (double_type_node);
38977
38978 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38979
38980 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38981 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38982 build_int_cst (uint64_type_node,
38983 hold_exception_mask));
38984
38985 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38986 fenv_llu_and);
38987
38988 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38989 build_int_cst (unsigned_type_node, 0xff),
38990 fenv_hold_mtfsf);
38991
38992 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38993
38994 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38995
38996 double fenv_clear = __builtin_mffs ();
*(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38998 __builtin_mtfsf (0xff, fenv_clear); */
38999
/* Mask to clear the entire lower word of the mffs result (the FPSCR
image), i.e. all exception flags, enables and control bits. */
39002 const unsigned HOST_WIDE_INT clear_exception_mask =
39003 HOST_WIDE_INT_C (0xffffffff00000000);
39004
39005 tree fenv_clear = create_tmp_var_raw (double_type_node);
39006
39007 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39008
39009 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39010 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39011 fenv_clean_llu,
39012 build_int_cst (uint64_type_node,
39013 clear_exception_mask));
39014
39015 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39016 fenv_clear_llu_and);
39017
39018 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39019 build_int_cst (unsigned_type_node, 0xff),
39020 fenv_clear_mtfsf);
39021
39022 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39023
39024 /* Generates the equivalent of feupdateenv (&fenv_var)
39025
39026 double old_fenv = __builtin_mffs ();
39027 double fenv_update;
*(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
(*(uint64_t*)fenv_var & 0x1ff80fff);
39030 __builtin_mtfsf (0xff, fenv_update); */
39031
39032 const unsigned HOST_WIDE_INT update_exception_mask =
39033 HOST_WIDE_INT_C (0xffffffff1fffff00);
39034 const unsigned HOST_WIDE_INT new_exception_mask =
39035 HOST_WIDE_INT_C (0x1ff80fff);
39036
39037 tree old_fenv = create_tmp_var_raw (double_type_node);
39038 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39039
39040 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39041 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39042 build_int_cst (uint64_type_node,
39043 update_exception_mask));
39044
39045 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39046 build_int_cst (uint64_type_node,
39047 new_exception_mask));
39048
39049 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39050 old_llu_and, new_llu_and);
39051
39052 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39053 new_llu_mask);
39054
39055 tree update_mtfsf = build_call_expr (mtfsf, 2,
39056 build_int_cst (unsigned_type_node, 0xff),
39057 fenv_update_mtfsf);
39058
39059 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39060 }
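/* A sketch (pseudo-code, not the exact middle-end expansion) of how the
   three sequences built above are used around a C11 atomic floating
   point assignment:

     hold;                  -- save the FP environment, mask traps
     retry:
       load, compute, compare-and-swap;
       if the swap failed:  clear;  -- discard speculative exceptions
                            goto retry;
     update;                -- restore the environment and merge in the
                               exceptions the successful pass raised  */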
39061
39062 void
39063 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39064 {
39065 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39066
39067 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39068 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39069
39070 /* The destination of the vmrgew instruction layout is:
rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39073 vmrgew instruction will be correct. */
39074 if (BYTES_BIG_ENDIAN)
39075 {
39076 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39077 GEN_INT (0)));
39078 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39079 GEN_INT (3)));
39080 }
39081 else
39082 {
39083 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39084 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39085 }
39086
39087 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39088 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39089
39090 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39091 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39092
39093 if (BYTES_BIG_ENDIAN)
39094 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39095 else
39096 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39097 }
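/* Net effect of the routine above, element-wise (a sketch):

     dst[0] = (float) src1[0];
     dst[1] = (float) src1[1];
     dst[2] = (float) src2[0];
     dst[3] = (float) src2[1];

   The xxpermdi/vmrgew dance exists because xvcvdpsp leaves its results
   in the even word slots of the V4SF output.  */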
39098
39099 void
39100 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39101 {
39102 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39103
39104 rtx_tmp0 = gen_reg_rtx (V2DImode);
39105 rtx_tmp1 = gen_reg_rtx (V2DImode);
39106
39107 /* The destination of the vmrgew instruction layout is:
rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39110 vmrgew instruction will be correct. */
39111 if (BYTES_BIG_ENDIAN)
39112 {
39113 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39114 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39115 }
39116 else
39117 {
39118 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39119 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39120 }
39121
39122 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39123 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39124
39125 if (signed_convert)
39126 {
39127 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39128 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39129 }
39130 else
39131 {
39132 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39133 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39134 }
39135
39136 if (BYTES_BIG_ENDIAN)
39137 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39138 else
39139 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39140 }
39141
39142 void
39143 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39144 rtx src2)
39145 {
39146 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39147
39148 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39149 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39150
39151 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39152 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39153
39154 rtx_tmp2 = gen_reg_rtx (V4SImode);
39155 rtx_tmp3 = gen_reg_rtx (V4SImode);
39156
39157 if (signed_convert)
39158 {
39159 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39160 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39161 }
39162 else
39163 {
39164 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39165 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39166 }
39167
39168 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39169 }
39170
39171 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39172
39173 static bool
39174 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39175 optimization_type opt_type)
39176 {
39177 switch (op)
39178 {
39179 case rsqrt_optab:
39180 return (opt_type == OPTIMIZE_FOR_SPEED
39181 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39182
39183 default:
39184 return true;
39185 }
39186 }
39187
39188 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39189
39190 static HOST_WIDE_INT
39191 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39192 {
39193 if (TREE_CODE (exp) == STRING_CST
39194 && (STRICT_ALIGNMENT || !optimize_size))
39195 return MAX (align, BITS_PER_WORD);
39196 return align;
39197 }
39198
39199 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39200
39201 static HOST_WIDE_INT
39202 rs6000_starting_frame_offset (void)
39203 {
39204 if (FRAME_GROWS_DOWNWARD)
39205 return 0;
39206 return RS6000_STARTING_FRAME_OFFSET;
39207 }
39208 \f
39209
39210 /* Create an alias for a mangled name where we have changed the mangling (in
39211 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39212 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
39213
39214 #if TARGET_ELF && RS6000_WEAK
39215 static void
39216 rs6000_globalize_decl_name (FILE * stream, tree decl)
39217 {
39218 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39219
39220 targetm.asm_out.globalize_label (stream, name);
39221
39222 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39223 {
39224 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39225 const char *old_name;
39226
39227 ieee128_mangling_gcc_8_1 = true;
39228 lang_hooks.set_decl_assembler_name (decl);
39229 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39230 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39231 ieee128_mangling_gcc_8_1 = false;
39232
39233 if (strcmp (name, old_name) != 0)
39234 {
39235 fprintf (stream, "\t.weak %s\n", old_name);
39236 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39237 }
39238 }
39239 }
39240 #endif
39241
39242 \f
/* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
39244 function names from <foo>l to <foo>f128 if the default long double type is
39245 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39246 include file switches the names on systems that support long double as IEEE
39247 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39248 In the future, glibc will export names like __ieee128_sinf128 and we can
39249 switch to using those instead of using sinf128, which pollutes the user's
39250 namespace.
39251
39252 This will switch the names for Fortran math functions as well (which doesn't
39253 use math.h). However, Fortran needs other changes to the compiler and
39254 library before you can switch the real*16 type at compile time.
39255
39256 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39257 only do this if the default is that long double is IBM extended double, and
39258 the user asked for IEEE 128-bit. */
39259
39260 static tree
39261 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39262 {
39263 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
&& TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39265 {
39266 size_t len = IDENTIFIER_LENGTH (id);
39267 const char *name = IDENTIFIER_POINTER (id);
39268
39269 if (name[len - 1] == 'l')
39270 {
39271 bool uses_ieee128_p = false;
39272 tree type = TREE_TYPE (decl);
39273 machine_mode ret_mode = TYPE_MODE (type);
39274
/* See if the function returns an IEEE 128-bit floating point type or
39276 complex type. */
39277 if (ret_mode == TFmode || ret_mode == TCmode)
39278 uses_ieee128_p = true;
39279 else
39280 {
39281 function_args_iterator args_iter;
39282 tree arg;
39283
/* See if the function passes an IEEE 128-bit floating point type
39285 or complex type. */
39286 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39287 {
39288 machine_mode arg_mode = TYPE_MODE (arg);
39289 if (arg_mode == TFmode || arg_mode == TCmode)
39290 {
39291 uses_ieee128_p = true;
39292 break;
39293 }
39294 }
39295 }
39296
39297 /* If we passed or returned an IEEE 128-bit floating point type,
39298 change the name. */
39299 if (uses_ieee128_p)
39300 {
39301 char *name2 = (char *) alloca (len + 4);
39302 memcpy (name2, name, len - 1);
39303 strcpy (name2 + len - 1, "f128");
39304 id = get_identifier (name2);
39305 }
39306 }
39307 }
39308
39309 return id;
39310 }
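/* For example (a sketch): when the target defaults to IBM extended double
   but the user compiles with -mabi=ieeelongdouble, a call to
   __builtin_sinl (TFmode return) has its assembler name rewritten here
   from "sinl" to "sinf128", matching the renaming math.h performs for
   direct calls.  */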
39311
39312 \f
39313 struct gcc_target targetm = TARGET_INITIALIZER;
39314
39315 #include "gt-rs6000.h"