1 /* Subroutines used for code generation on IBM RS/6000.
2 Copyright (C) 1991-2018 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #define IN_TARGET_CODE 1
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "memmodel.h"
30 #include "gimple.h"
31 #include "cfghooks.h"
32 #include "cfgloop.h"
33 #include "df.h"
34 #include "tm_p.h"
35 #include "stringpool.h"
36 #include "expmed.h"
37 #include "optabs.h"
38 #include "regs.h"
39 #include "ira.h"
40 #include "recog.h"
41 #include "cgraph.h"
42 #include "diagnostic-core.h"
43 #include "insn-attr.h"
44 #include "flags.h"
45 #include "alias.h"
46 #include "fold-const.h"
47 #include "attribs.h"
48 #include "stor-layout.h"
49 #include "calls.h"
50 #include "print-tree.h"
51 #include "varasm.h"
52 #include "explow.h"
53 #include "expr.h"
54 #include "output.h"
55 #include "dbxout.h"
56 #include "common/common-target.h"
57 #include "langhooks.h"
58 #include "reload.h"
59 #include "sched-int.h"
60 #include "gimplify.h"
61 #include "gimple-fold.h"
62 #include "gimple-iterator.h"
63 #include "gimple-ssa.h"
64 #include "gimple-walk.h"
65 #include "intl.h"
66 #include "params.h"
67 #include "tm-constrs.h"
68 #include "tree-vectorizer.h"
69 #include "target-globals.h"
70 #include "builtins.h"
71 #include "tree-vector-builder.h"
72 #include "context.h"
73 #include "tree-pass.h"
74 #include "except.h"
75 #if TARGET_XCOFF
76 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
77 #endif
78 #if TARGET_MACHO
79 #include "gstab.h" /* for N_SLINE */
80 #endif
81 #include "case-cfn-macros.h"
82 #include "ppc-auxv.h"
83 #include "tree-ssa-propagate.h"
84
85 /* This file should be included last. */
86 #include "target-def.h"
87
88 #ifndef TARGET_NO_PROTOTYPE
89 #define TARGET_NO_PROTOTYPE 0
90 #endif
91
92 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
93 systems will also set long double to be IEEE 128-bit. AIX and Darwin
94 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
95 those systems will not pick up this default. This needs to be after all
96 of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
97 properly defined. */
98 #ifndef TARGET_IEEEQUAD_DEFAULT
99 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
100 #define TARGET_IEEEQUAD_DEFAULT 1
101 #else
102 #define TARGET_IEEEQUAD_DEFAULT 0
103 #endif
104 #endif
105
106 static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);
107
108 /* Structure used to define the rs6000 stack.  */
109 typedef struct rs6000_stack {
110 int reload_completed; /* stack info won't change from here on */
111 int first_gp_reg_save; /* first callee saved GP register used */
112 int first_fp_reg_save; /* first callee saved FP register used */
113 int first_altivec_reg_save; /* first callee saved AltiVec register used */
114 int lr_save_p; /* true if the link reg needs to be saved */
115 int cr_save_p; /* true if the CR reg needs to be saved */
116 unsigned int vrsave_mask; /* mask of vec registers to save */
117 int push_p; /* true if we need to allocate stack space */
118 int calls_p; /* true if the function makes any calls */
119 int world_save_p; /* true if we're saving *everything*:
120 r13-r31, cr, f14-f31, vrsave, v20-v31 */
121 enum rs6000_abi abi; /* which ABI to use */
122 int gp_save_offset; /* offset to save GP regs from initial SP */
123 int fp_save_offset; /* offset to save FP regs from initial SP */
124 int altivec_save_offset; /* offset to save AltiVec regs from initial SP */
125 int lr_save_offset; /* offset to save LR from initial SP */
126 int cr_save_offset; /* offset to save CR from initial SP */
127 int vrsave_save_offset; /* offset to save VRSAVE from initial SP */
128 int varargs_save_offset; /* offset to save the varargs registers */
129 int ehrd_offset; /* offset to EH return data */
130 int ehcr_offset; /* offset to EH CR field data */
131 int reg_size; /* register size (4 or 8) */
132 HOST_WIDE_INT vars_size; /* variable save area size */
133 int parm_size; /* outgoing parameter size */
134 int save_size; /* save area size */
135 int fixed_size; /* fixed size of stack frame */
136 int gp_size; /* size of saved GP registers */
137 int fp_size; /* size of saved FP registers */
138 int altivec_size; /* size of saved AltiVec registers */
139 int cr_size; /* size to hold CR if not in fixed area */
140 int vrsave_size; /* size to hold VRSAVE */
141 int altivec_padding_size; /* size of altivec alignment padding */
142 HOST_WIDE_INT total_size; /* total bytes allocated for stack */
143 int savres_strategy;
144 } rs6000_stack_t;
145
146 /* A C structure for machine-specific, per-function data.
147 This is added to the cfun structure. */
148 typedef struct GTY(()) machine_function
149 {
150 /* Flags if __builtin_return_address (n) with n >= 1 was used. */
151 int ra_needs_full_frame;
152 /* Flags if __builtin_return_address (0) was used. */
153 int ra_need_lr;
154 /* Cache lr_save_p after expansion of builtin_eh_return. */
155 int lr_save_state;
156 /* Whether we need to save the TOC to the reserved stack location in the
157 function prologue. */
158 bool save_toc_in_prologue;
159 /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
160 varargs save area. */
161 HOST_WIDE_INT varargs_save_offset;
162 /* Alternative internal arg pointer for -fsplit-stack. */
163 rtx split_stack_arg_pointer;
164 bool split_stack_argp_used;
165 /* Flag if r2 setup is needed with ELFv2 ABI. */
166 bool r2_setup_needed;
167 /* The number of components we use for separate shrink-wrapping. */
168 int n_components;
169 /* The components already handled by separate shrink-wrapping, which should
170 not be considered by the prologue and epilogue. */
171 bool gpr_is_wrapped_separately[32];
172 bool fpr_is_wrapped_separately[32];
173 bool lr_is_wrapped_separately;
174 bool toc_is_wrapped_separately;
175 } machine_function;
176
177 /* Support targetm.vectorize.builtin_mask_for_load. */
178 static GTY(()) tree altivec_builtin_mask_for_load;
179
180 /* Set to nonzero once AIX common-mode calls have been defined. */
181 static GTY(()) int common_mode_defined;
182
183 /* Label number of the label created for -mrelocatable, to call so we can
184    get the address of the GOT section.  */
185 static int rs6000_pic_labelno;
186
187 #ifdef USING_ELFOS_H
188 /* Counter for labels which are to be placed in .fixup. */
189 int fixuplabelno = 0;
190 #endif
191
192 /* Whether to use a variant of the AIX ABI for PowerPC64 Linux.  */
193 int dot_symbols;
194
195 /* Specify the machine mode that pointers have. After generation of rtl, the
196 compiler makes no further distinction between pointers and any other objects
197 of this machine mode. */
198 scalar_int_mode rs6000_pmode;
199
200 #if TARGET_ELF
201 /* Note whether IEEE 128-bit floating point was passed or returned, either as
202 the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
203 floating point. We changed the default C++ mangling for these types and we
204 may want to generate a weak alias of the old mangling (U10__float128) to the
205 new mangling (u9__ieee128). */
206 static bool rs6000_passes_ieee128;
207 #endif
208
209 /* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
210 name used in current releases (i.e. u9__ieee128). */
211 static bool ieee128_mangling_gcc_8_1;
212
213 /* Width in bits of a pointer. */
214 unsigned rs6000_pointer_size;
215
216 #ifdef HAVE_AS_GNU_ATTRIBUTE
217 # ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
218 # define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
219 # endif
220 /* Flag whether floating point values have been passed/returned.
221 Note that this doesn't say whether fprs are used, since the
222 Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
223 should be set for soft-float values passed in gprs and ieee128
224 values passed in vsx registers. */
225 static bool rs6000_passes_float;
226 static bool rs6000_passes_long_double;
227 /* Flag whether vector values have been passed/returned. */
228 static bool rs6000_passes_vector;
229 /* Flag whether small (<= 8 byte) structures have been returned. */
230 static bool rs6000_returns_struct;
231 #endif
232
233 /* Value is TRUE if register/mode pair is acceptable. */
234 static bool rs6000_hard_regno_mode_ok_p
235 [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
236
237 /* Maximum number of registers needed for a given register class and mode. */
238 unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];
239
240 /* How many registers are needed for a given register and mode. */
241 unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
242
243 /* Map register number to register class. */
244 enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];
245
246 static int dbg_cost_ctrl;
247
248 /* Built in types. */
249 tree rs6000_builtin_types[RS6000_BTI_MAX];
250 tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
251
252 /* Flag to say the TOC is initialized.  */
253 int toc_initialized, need_toc_init;
254 char toc_label_name[10];
255
256 /* Cached value of rs6000_variable_issue.  This is cached in the
257    rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
258 static short cached_can_issue_more;
259
260 static GTY(()) section *read_only_data_section;
261 static GTY(()) section *private_data_section;
262 static GTY(()) section *tls_data_section;
263 static GTY(()) section *tls_private_data_section;
264 static GTY(()) section *read_only_private_data_section;
265 static GTY(()) section *sdata2_section;
266 static GTY(()) section *toc_section;
267
268 struct builtin_description
269 {
270 const HOST_WIDE_INT mask;
271 const enum insn_code icode;
272 const char *const name;
273 const enum rs6000_builtins code;
274 };
275
276 /* Describe the vector unit used for modes. */
277 enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
278 enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];
279
280 /* Register classes for various constraints that are based on the target
281 switches. */
282 enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];
283
284 /* Describe the alignment of a vector. */
285 int rs6000_vector_align[NUM_MACHINE_MODES];
286
287 /* Map selected modes to types for builtins. */
288 static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];
289
290 /* What modes to automatically generate reciprocal divide estimate (fre) and
291 reciprocal sqrt (frsqrte) for. */
292 unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
293
294 /* Masks to determine which reciprocal estimate instructions to generate
295 automatically. */
296 enum rs6000_recip_mask {
297 RECIP_SF_DIV = 0x001, /* Use divide estimate.  */
298 RECIP_DF_DIV = 0x002,
299 RECIP_V4SF_DIV = 0x004,
300 RECIP_V2DF_DIV = 0x008,
301
302 RECIP_SF_RSQRT = 0x010, /* Use reciprocal sqrt estimate. */
303 RECIP_DF_RSQRT = 0x020,
304 RECIP_V4SF_RSQRT = 0x040,
305 RECIP_V2DF_RSQRT = 0x080,
306
307 /* Various combinations of flags for -mrecip=xxx.  */
308 RECIP_NONE = 0,
309 RECIP_ALL = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
310 | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
311 | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),
312
313 RECIP_HIGH_PRECISION = RECIP_ALL,
314
315 /* On low precision machines like the power5, don't enable double precision
316 reciprocal square root estimate, since it isn't accurate enough. */
317 RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
318 };
319
320 /* -mrecip options. */
321 static struct
322 {
323 const char *string; /* option name */
324 unsigned int mask; /* mask bits to set */
325 } recip_options[] = {
326 { "all", RECIP_ALL },
327 { "none", RECIP_NONE },
328 { "div", (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
329 | RECIP_V2DF_DIV) },
330 { "divf", (RECIP_SF_DIV | RECIP_V4SF_DIV) },
331 { "divd", (RECIP_DF_DIV | RECIP_V2DF_DIV) },
332 { "rsqrt", (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
333 | RECIP_V2DF_RSQRT) },
334 { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
335 { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
336 };
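
/* A minimal sketch (not from this file) of how a string such as
   "divf,rsqrtd" from -mrecip=<xxx> is folded into a mask using the
   recip_options table above; the real work happens in the option
   override code, and the helper name here is hypothetical.  */
#if 0
static unsigned int
example_parse_recip (const char *str)
{
  unsigned int mask = 0;
  while (*str)
    {
      size_t len = strcspn (str, ",");
      for (size_t i = 0; i < ARRAY_SIZE (recip_options); i++)
        if (strlen (recip_options[i].string) == len
            && strncmp (str, recip_options[i].string, len) == 0)
          {
            mask |= recip_options[i].mask;
            break;
          }
      str += len;
      if (*str == ',')
        str++;
    }
  return mask;
}
#endif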
337
338 /* Used by __builtin_cpu_is(), mapping from PLATFORM names to values. */
339 static const struct
340 {
341 const char *cpu;
342 unsigned int cpuid;
343 } cpu_is_info[] = {
344 { "power9", PPC_PLATFORM_POWER9 },
345 { "power8", PPC_PLATFORM_POWER8 },
346 { "power7", PPC_PLATFORM_POWER7 },
347 { "power6x", PPC_PLATFORM_POWER6X },
348 { "power6", PPC_PLATFORM_POWER6 },
349 { "power5+", PPC_PLATFORM_POWER5_PLUS },
350 { "power5", PPC_PLATFORM_POWER5 },
351 { "ppc970", PPC_PLATFORM_PPC970 },
352 { "power4", PPC_PLATFORM_POWER4 },
353 { "ppca2", PPC_PLATFORM_PPCA2 },
354 { "ppc476", PPC_PLATFORM_PPC476 },
355 { "ppc464", PPC_PLATFORM_PPC464 },
356 { "ppc440", PPC_PLATFORM_PPC440 },
357 { "ppc405", PPC_PLATFORM_PPC405 },
358 { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
359 };
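
/* Example (user code, not part of this file) of what the table above
   serves: __builtin_cpu_is compares its argument against the AT_PLATFORM
   string that glibc caches in the TCB.  The path-selection helpers are
   hypothetical.  */
#if 0
void
select_implementation (void)
{
  if (__builtin_cpu_is ("power9"))
    use_power9_path ();               /* hypothetical */
  else if (__builtin_cpu_is ("power8"))
    use_power8_path ();               /* hypothetical */
  else
    use_generic_path ();              /* hypothetical */
}
#endif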
360
361 /* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks. */
362 static const struct
363 {
364 const char *hwcap;
365 int mask;
366 unsigned int id;
367 } cpu_supports_info[] = {
368 /* AT_HWCAP masks. */
369 { "4xxmac", PPC_FEATURE_HAS_4xxMAC, 0 },
370 { "altivec", PPC_FEATURE_HAS_ALTIVEC, 0 },
371 { "arch_2_05", PPC_FEATURE_ARCH_2_05, 0 },
372 { "arch_2_06", PPC_FEATURE_ARCH_2_06, 0 },
373 { "archpmu", PPC_FEATURE_PERFMON_COMPAT, 0 },
374 { "booke", PPC_FEATURE_BOOKE, 0 },
375 { "cellbe", PPC_FEATURE_CELL_BE, 0 },
376 { "dfp", PPC_FEATURE_HAS_DFP, 0 },
377 { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE, 0 },
378 { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE, 0 },
379 { "fpu", PPC_FEATURE_HAS_FPU, 0 },
380 { "ic_snoop", PPC_FEATURE_ICACHE_SNOOP, 0 },
381 { "mmu", PPC_FEATURE_HAS_MMU, 0 },
382 { "notb", PPC_FEATURE_NO_TB, 0 },
383 { "pa6t", PPC_FEATURE_PA6T, 0 },
384 { "power4", PPC_FEATURE_POWER4, 0 },
385 { "power5", PPC_FEATURE_POWER5, 0 },
386 { "power5+", PPC_FEATURE_POWER5_PLUS, 0 },
387 { "power6x", PPC_FEATURE_POWER6_EXT, 0 },
388 { "ppc32", PPC_FEATURE_32, 0 },
389 { "ppc601", PPC_FEATURE_601_INSTR, 0 },
390 { "ppc64", PPC_FEATURE_64, 0 },
391 { "ppcle", PPC_FEATURE_PPC_LE, 0 },
392 { "smt", PPC_FEATURE_SMT, 0 },
393 { "spe", PPC_FEATURE_HAS_SPE, 0 },
394 { "true_le", PPC_FEATURE_TRUE_LE, 0 },
395 { "ucache", PPC_FEATURE_UNIFIED_CACHE, 0 },
396 { "vsx", PPC_FEATURE_HAS_VSX, 0 },
397
398 /* AT_HWCAP2 masks. */
399 { "arch_2_07", PPC_FEATURE2_ARCH_2_07, 1 },
400 { "dscr", PPC_FEATURE2_HAS_DSCR, 1 },
401 { "ebb", PPC_FEATURE2_HAS_EBB, 1 },
402 { "htm", PPC_FEATURE2_HAS_HTM, 1 },
403 { "htm-nosc", PPC_FEATURE2_HTM_NOSC, 1 },
404 { "htm-no-suspend", PPC_FEATURE2_HTM_NO_SUSPEND, 1 },
405 { "isel", PPC_FEATURE2_HAS_ISEL, 1 },
406 { "tar", PPC_FEATURE2_HAS_TAR, 1 },
407 { "vcrypto", PPC_FEATURE2_HAS_VEC_CRYPTO, 1 },
408 { "arch_3_00", PPC_FEATURE2_ARCH_3_00, 1 },
409 { "ieee128", PPC_FEATURE2_HAS_IEEE128, 1 },
410 { "darn", PPC_FEATURE2_DARN, 1 },
411 { "scv", PPC_FEATURE2_SCV, 1 }
412 };
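
/* Likewise, __builtin_cpu_supports tests one of the AT_HWCAP/AT_HWCAP2
   feature bits named above.  A sketch of typical user code (the VSX
   helper is hypothetical):  */
#if 0
void
copy_doubles (double *dst, const double *src, int n)
{
  if (__builtin_cpu_supports ("vsx"))
    copy_doubles_vsx (dst, src, n);   /* hypothetical VSX fast path */
  else
    for (int i = 0; i < n; i++)
      dst[i] = src[i];
}
#endif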
413
414 /* On PowerPC, we have a limited number of target clones that we care about,
415    which means we can use an array to hold the options, rather than having more
416 elaborate data structures to identify each possible variation. Order the
417 clones from the default to the highest ISA. */
418 enum {
419 CLONE_DEFAULT = 0, /* default clone. */
420 CLONE_ISA_2_05, /* ISA 2.05 (power6). */
421 CLONE_ISA_2_06, /* ISA 2.06 (power7). */
422 CLONE_ISA_2_07, /* ISA 2.07 (power8). */
423 CLONE_ISA_3_00, /* ISA 3.00 (power9). */
424 CLONE_MAX
425 };
426
427 /* Map compiler ISA bits into HWCAP names. */
428 struct clone_map {
429 HOST_WIDE_INT isa_mask; /* rs6000_isa mask */
430 const char *name; /* name to use in __builtin_cpu_supports. */
431 };
432
433 static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
434 { 0, "" }, /* Default options. */
435 { OPTION_MASK_CMPB, "arch_2_05" }, /* ISA 2.05 (power6). */
436 { OPTION_MASK_POPCNTD, "arch_2_06" }, /* ISA 2.06 (power7). */
437 { OPTION_MASK_P8_VECTOR, "arch_2_07" }, /* ISA 2.07 (power8). */
438 { OPTION_MASK_P9_VECTOR, "arch_3_00" }, /* ISA 3.00 (power9). */
439 };
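
/* The clone map above backs the target_clones function attribute.  A
   hedged user-level example: GCC emits one clone per named option plus
   an ifunc resolver that picks a clone at load time via the CPU
   builtins above.  */
#if 0
__attribute__ ((target_clones ("cpu=power9", "cpu=power8", "default")))
double
dot_product (const double *a, const double *b, int n)
{
  double sum = 0.0;
  for (int i = 0; i < n; i++)
    sum += a[i] * b[i];
  return sum;
}
#endif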
440
441
442 /* Newer LIBCs explicitly export this symbol to declare that they provide
443 the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. We emit a
444 reference to this symbol whenever we expand a CPU builtin, so that
445 we never link against an old LIBC. */
446 const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";
447
448 /* True if we have expanded a CPU builtin. */
449 bool cpu_builtin_p;
450
451 /* Pointer to function (in rs6000-c.c) that can define or undefine target
452 macros that have changed. Languages that don't support the preprocessor
453 don't link in rs6000-c.c, so we can't call it directly. */
454 void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
455
456 /* Simplify register classes into simpler classifications.  We assume
457 GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
458 check for standard register classes (gpr/floating/altivec/vsx) and
459 floating/vector classes (float/altivec/vsx). */
460
461 enum rs6000_reg_type {
462 NO_REG_TYPE,
463 PSEUDO_REG_TYPE,
464 GPR_REG_TYPE,
465 VSX_REG_TYPE,
466 ALTIVEC_REG_TYPE,
467 FPR_REG_TYPE,
468 SPR_REG_TYPE,
469 CR_REG_TYPE
470 };
471
472 /* Map register class to register type. */
473 static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];
474
475 /* First/last register type for the 'normal' register types (i.e. general
476 purpose, floating point, altivec, and VSX registers). */
477 #define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)
478
479 #define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
480
481
482 /* Register classes we care about for secondary reload or when checking for
483    a legitimate address.  We only need to worry about GPR, FPR, and Altivec
484    registers here, along with an ANY field that is the OR of the 3 register classes.  */
485
486 enum rs6000_reload_reg_type {
487 RELOAD_REG_GPR, /* General purpose registers. */
488 RELOAD_REG_FPR, /* Traditional floating point regs. */
489 RELOAD_REG_VMX, /* Altivec (VMX) registers. */
490 RELOAD_REG_ANY, /* OR of GPR, FPR, Altivec masks. */
491 N_RELOAD_REG
492 };
493
494 /* For setting up register classes, loop through the 3 register classes mapping
495 into real registers, and skip the ANY class, which is just an OR of the
496 bits. */
497 #define FIRST_RELOAD_REG_CLASS RELOAD_REG_GPR
498 #define LAST_RELOAD_REG_CLASS RELOAD_REG_VMX
499
500 /* Map reload register type to a register in the register class. */
501 struct reload_reg_map_type {
502 const char *name; /* Register class name. */
503 int reg; /* Register in the register class. */
504 };
505
506 static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
507 { "Gpr", FIRST_GPR_REGNO }, /* RELOAD_REG_GPR. */
508 { "Fpr", FIRST_FPR_REGNO }, /* RELOAD_REG_FPR. */
509 { "VMX", FIRST_ALTIVEC_REGNO }, /* RELOAD_REG_VMX. */
510 { "Any", -1 }, /* RELOAD_REG_ANY. */
511 };
512
513 /* Mask bits for each register class, indexed per mode. Historically the
514    compiler has been more restrictive about which types can do PRE_MODIFY
515    instead of PRE_INC and PRE_DEC, so keep track of separate bits for these two.  */
516 typedef unsigned char addr_mask_type;
517
518 #define RELOAD_REG_VALID 0x01 /* Mode valid in register.  */
519 #define RELOAD_REG_MULTIPLE 0x02 /* Mode takes multiple registers. */
520 #define RELOAD_REG_INDEXED 0x04 /* Reg+reg addressing. */
521 #define RELOAD_REG_OFFSET 0x08 /* Reg+offset addressing. */
522 #define RELOAD_REG_PRE_INCDEC 0x10 /* PRE_INC/PRE_DEC valid. */
523 #define RELOAD_REG_PRE_MODIFY 0x20 /* PRE_MODIFY valid. */
524 #define RELOAD_REG_AND_M16 0x40 /* AND -16 addressing. */
525 #define RELOAD_REG_QUAD_OFFSET 0x80 /* quad offset is limited. */
526
527 /* Masks of the valid addressing modes, based on the register type.  */
528 struct rs6000_reg_addr {
529 enum insn_code reload_load; /* INSN to reload for loading. */
530 enum insn_code reload_store; /* INSN to reload for storing. */
531 enum insn_code reload_fpr_gpr; /* INSN to move from FPR to GPR. */
532 enum insn_code reload_gpr_vsx; /* INSN to move from GPR to VSX. */
533 enum insn_code reload_vsx_gpr; /* INSN to move from VSX to GPR. */
534 addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks. */
535 bool scalar_in_vmx_p; /* Scalar value can go in VMX. */
536 };
537
538 static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
539
540 /* Helper function to say whether a mode supports PRE_INC or PRE_DEC. */
541 static inline bool
542 mode_supports_pre_incdec_p (machine_mode mode)
543 {
544 return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
545 != 0);
546 }
547
548 /* Helper function to say whether a mode supports PRE_MODIFY. */
549 static inline bool
550 mode_supports_pre_modify_p (machine_mode mode)
551 {
552 return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
553 != 0);
554 }
555
556 /* Return true if we have D-form addressing in altivec registers. */
557 static inline bool
558 mode_supports_vmx_dform (machine_mode mode)
559 {
560 return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
561 }
562
563 /* Return true if we have D-form addressing in VSX registers. This addressing
564 is more limited than normal d-form addressing in that the offset must be
565 aligned on a 16-byte boundary. */
566 static inline bool
567 mode_supports_dq_form (machine_mode mode)
568 {
569 return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
570 != 0);
571 }
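
/* Illustrative sketch (not from this file) of how the predicates above
   are typically consulted when validating an auto-increment address for
   a mode; the function name is hypothetical.  */
#if 0
static bool
example_valid_pre_inc_p (machine_mode mode, rtx addr)
{
  /* PRE_INC is only acceptable if some register class supports it for
     this mode.  */
  return GET_CODE (addr) == PRE_INC && mode_supports_pre_incdec_p (mode);
}
#endif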
572
573 /* Given that there exists at least one variable that is set (produced)
574 by OUT_INSN and read (consumed) by IN_INSN, return true iff
575 IN_INSN represents one or more memory store operations and none of
576 the variables set by OUT_INSN is used by IN_INSN as the address of a
577 store operation. If either IN_INSN or OUT_INSN does not represent
578 a "single" RTL SET expression (as loosely defined by the
579 implementation of the single_set function) or a PARALLEL with only
580 SETs, CLOBBERs, and USEs inside, this function returns false.
581
582 This rs6000-specific version of store_data_bypass_p checks for
583 certain conditions that result in assertion failures (and internal
584 compiler errors) in the generic store_data_bypass_p function and
585 returns false rather than calling store_data_bypass_p if one of the
586 problematic conditions is detected. */
587
588 int
589 rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
590 {
591 rtx out_set, in_set;
592 rtx out_pat, in_pat;
593 rtx out_exp, in_exp;
594 int i, j;
595
596 in_set = single_set (in_insn);
597 if (in_set)
598 {
599 if (MEM_P (SET_DEST (in_set)))
600 {
601 out_set = single_set (out_insn);
602 if (!out_set)
603 {
604 out_pat = PATTERN (out_insn);
605 if (GET_CODE (out_pat) == PARALLEL)
606 {
607 for (i = 0; i < XVECLEN (out_pat, 0); i++)
608 {
609 out_exp = XVECEXP (out_pat, 0, i);
610 if ((GET_CODE (out_exp) == CLOBBER)
611 || (GET_CODE (out_exp) == USE))
612 continue;
613 else if (GET_CODE (out_exp) != SET)
614 return false;
615 }
616 }
617 }
618 }
619 }
620 else
621 {
622 in_pat = PATTERN (in_insn);
623 if (GET_CODE (in_pat) != PARALLEL)
624 return false;
625
626 for (i = 0; i < XVECLEN (in_pat, 0); i++)
627 {
628 in_exp = XVECEXP (in_pat, 0, i);
629 if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
630 continue;
631 else if (GET_CODE (in_exp) != SET)
632 return false;
633
634 if (MEM_P (SET_DEST (in_exp)))
635 {
636 out_set = single_set (out_insn);
637 if (!out_set)
638 {
639 out_pat = PATTERN (out_insn);
640 if (GET_CODE (out_pat) != PARALLEL)
641 return false;
642 for (j = 0; j < XVECLEN (out_pat, 0); j++)
643 {
644 out_exp = XVECEXP (out_pat, 0, j);
645 if ((GET_CODE (out_exp) == CLOBBER)
646 || (GET_CODE (out_exp) == USE))
647 continue;
648 else if (GET_CODE (out_exp) != SET)
649 return false;
650 }
651 }
652 }
653 }
654 }
655 return store_data_bypass_p (out_insn, in_insn);
656 }
657
658 \f
659 /* Processor costs (relative to an add) */
660
661 const struct processor_costs *rs6000_cost;
662
663 /* Instruction size costs on 32-bit processors.  */
664 static const
665 struct processor_costs size32_cost = {
666 COSTS_N_INSNS (1), /* mulsi */
667 COSTS_N_INSNS (1), /* mulsi_const */
668 COSTS_N_INSNS (1), /* mulsi_const9 */
669 COSTS_N_INSNS (1), /* muldi */
670 COSTS_N_INSNS (1), /* divsi */
671 COSTS_N_INSNS (1), /* divdi */
672 COSTS_N_INSNS (1), /* fp */
673 COSTS_N_INSNS (1), /* dmul */
674 COSTS_N_INSNS (1), /* sdiv */
675 COSTS_N_INSNS (1), /* ddiv */
676 32, /* cache line size */
677 0, /* l1 cache */
678 0, /* l2 cache */
679 0, /* streams */
680 0, /* SF->DF convert */
681 };
682
683 /* Instruction size costs on 64-bit processors.  */
684 static const
685 struct processor_costs size64_cost = {
686 COSTS_N_INSNS (1), /* mulsi */
687 COSTS_N_INSNS (1), /* mulsi_const */
688 COSTS_N_INSNS (1), /* mulsi_const9 */
689 COSTS_N_INSNS (1), /* muldi */
690 COSTS_N_INSNS (1), /* divsi */
691 COSTS_N_INSNS (1), /* divdi */
692 COSTS_N_INSNS (1), /* fp */
693 COSTS_N_INSNS (1), /* dmul */
694 COSTS_N_INSNS (1), /* sdiv */
695 COSTS_N_INSNS (1), /* ddiv */
696 128, /* cache line size */
697 0, /* l1 cache */
698 0, /* l2 cache */
699 0, /* streams */
700 0, /* SF->DF convert */
701 };
702
703 /* Instruction costs on RS64A processors. */
704 static const
705 struct processor_costs rs64a_cost = {
706 COSTS_N_INSNS (20), /* mulsi */
707 COSTS_N_INSNS (12), /* mulsi_const */
708 COSTS_N_INSNS (8), /* mulsi_const9 */
709 COSTS_N_INSNS (34), /* muldi */
710 COSTS_N_INSNS (65), /* divsi */
711 COSTS_N_INSNS (67), /* divdi */
712 COSTS_N_INSNS (4), /* fp */
713 COSTS_N_INSNS (4), /* dmul */
714 COSTS_N_INSNS (31), /* sdiv */
715 COSTS_N_INSNS (31), /* ddiv */
716 128, /* cache line size */
717 128, /* l1 cache */
718 2048, /* l2 cache */
719 1, /* streams */
720 0, /* SF->DF convert */
721 };
722
723 /* Instruction costs on MPCCORE processors. */
724 static const
725 struct processor_costs mpccore_cost = {
726 COSTS_N_INSNS (2), /* mulsi */
727 COSTS_N_INSNS (2), /* mulsi_const */
728 COSTS_N_INSNS (2), /* mulsi_const9 */
729 COSTS_N_INSNS (2), /* muldi */
730 COSTS_N_INSNS (6), /* divsi */
731 COSTS_N_INSNS (6), /* divdi */
732 COSTS_N_INSNS (4), /* fp */
733 COSTS_N_INSNS (5), /* dmul */
734 COSTS_N_INSNS (10), /* sdiv */
735 COSTS_N_INSNS (17), /* ddiv */
736 32, /* cache line size */
737 4, /* l1 cache */
738 16, /* l2 cache */
739 1, /* streams */
740 0, /* SF->DF convert */
741 };
742
743 /* Instruction costs on PPC403 processors. */
744 static const
745 struct processor_costs ppc403_cost = {
746 COSTS_N_INSNS (4), /* mulsi */
747 COSTS_N_INSNS (4), /* mulsi_const */
748 COSTS_N_INSNS (4), /* mulsi_const9 */
749 COSTS_N_INSNS (4), /* muldi */
750 COSTS_N_INSNS (33), /* divsi */
751 COSTS_N_INSNS (33), /* divdi */
752 COSTS_N_INSNS (11), /* fp */
753 COSTS_N_INSNS (11), /* dmul */
754 COSTS_N_INSNS (11), /* sdiv */
755 COSTS_N_INSNS (11), /* ddiv */
756 32, /* cache line size */
757 4, /* l1 cache */
758 16, /* l2 cache */
759 1, /* streams */
760 0, /* SF->DF convert */
761 };
762
763 /* Instruction costs on PPC405 processors. */
764 static const
765 struct processor_costs ppc405_cost = {
766 COSTS_N_INSNS (5), /* mulsi */
767 COSTS_N_INSNS (4), /* mulsi_const */
768 COSTS_N_INSNS (3), /* mulsi_const9 */
769 COSTS_N_INSNS (5), /* muldi */
770 COSTS_N_INSNS (35), /* divsi */
771 COSTS_N_INSNS (35), /* divdi */
772 COSTS_N_INSNS (11), /* fp */
773 COSTS_N_INSNS (11), /* dmul */
774 COSTS_N_INSNS (11), /* sdiv */
775 COSTS_N_INSNS (11), /* ddiv */
776 32, /* cache line size */
777 16, /* l1 cache */
778 128, /* l2 cache */
779 1, /* streams */
780 0, /* SF->DF convert */
781 };
782
783 /* Instruction costs on PPC440 processors. */
784 static const
785 struct processor_costs ppc440_cost = {
786 COSTS_N_INSNS (3), /* mulsi */
787 COSTS_N_INSNS (2), /* mulsi_const */
788 COSTS_N_INSNS (2), /* mulsi_const9 */
789 COSTS_N_INSNS (3), /* muldi */
790 COSTS_N_INSNS (34), /* divsi */
791 COSTS_N_INSNS (34), /* divdi */
792 COSTS_N_INSNS (5), /* fp */
793 COSTS_N_INSNS (5), /* dmul */
794 COSTS_N_INSNS (19), /* sdiv */
795 COSTS_N_INSNS (33), /* ddiv */
796 32, /* cache line size */
797 32, /* l1 cache */
798 256, /* l2 cache */
799 1, /* streams */
800 0, /* SF->DF convert */
801 };
802
803 /* Instruction costs on PPC476 processors. */
804 static const
805 struct processor_costs ppc476_cost = {
806 COSTS_N_INSNS (4), /* mulsi */
807 COSTS_N_INSNS (4), /* mulsi_const */
808 COSTS_N_INSNS (4), /* mulsi_const9 */
809 COSTS_N_INSNS (4), /* muldi */
810 COSTS_N_INSNS (11), /* divsi */
811 COSTS_N_INSNS (11), /* divdi */
812 COSTS_N_INSNS (6), /* fp */
813 COSTS_N_INSNS (6), /* dmul */
814 COSTS_N_INSNS (19), /* sdiv */
815 COSTS_N_INSNS (33), /* ddiv */
816 32, /* l1 cache line size */
817 32, /* l1 cache */
818 512, /* l2 cache */
819 1, /* streams */
820 0, /* SF->DF convert */
821 };
822
823 /* Instruction costs on PPC601 processors. */
824 static const
825 struct processor_costs ppc601_cost = {
826 COSTS_N_INSNS (5), /* mulsi */
827 COSTS_N_INSNS (5), /* mulsi_const */
828 COSTS_N_INSNS (5), /* mulsi_const9 */
829 COSTS_N_INSNS (5), /* muldi */
830 COSTS_N_INSNS (36), /* divsi */
831 COSTS_N_INSNS (36), /* divdi */
832 COSTS_N_INSNS (4), /* fp */
833 COSTS_N_INSNS (5), /* dmul */
834 COSTS_N_INSNS (17), /* sdiv */
835 COSTS_N_INSNS (31), /* ddiv */
836 32, /* cache line size */
837 32, /* l1 cache */
838 256, /* l2 cache */
839 1, /* streams */
840 0, /* SF->DF convert */
841 };
842
843 /* Instruction costs on PPC603 processors. */
844 static const
845 struct processor_costs ppc603_cost = {
846 COSTS_N_INSNS (5), /* mulsi */
847 COSTS_N_INSNS (3), /* mulsi_const */
848 COSTS_N_INSNS (2), /* mulsi_const9 */
849 COSTS_N_INSNS (5), /* muldi */
850 COSTS_N_INSNS (37), /* divsi */
851 COSTS_N_INSNS (37), /* divdi */
852 COSTS_N_INSNS (3), /* fp */
853 COSTS_N_INSNS (4), /* dmul */
854 COSTS_N_INSNS (18), /* sdiv */
855 COSTS_N_INSNS (33), /* ddiv */
856 32, /* cache line size */
857 8, /* l1 cache */
858 64, /* l2 cache */
859 1, /* streams */
860 0, /* SF->DF convert */
861 };
862
863 /* Instruction costs on PPC604 processors. */
864 static const
865 struct processor_costs ppc604_cost = {
866 COSTS_N_INSNS (4), /* mulsi */
867 COSTS_N_INSNS (4), /* mulsi_const */
868 COSTS_N_INSNS (4), /* mulsi_const9 */
869 COSTS_N_INSNS (4), /* muldi */
870 COSTS_N_INSNS (20), /* divsi */
871 COSTS_N_INSNS (20), /* divdi */
872 COSTS_N_INSNS (3), /* fp */
873 COSTS_N_INSNS (3), /* dmul */
874 COSTS_N_INSNS (18), /* sdiv */
875 COSTS_N_INSNS (32), /* ddiv */
876 32, /* cache line size */
877 16, /* l1 cache */
878 512, /* l2 cache */
879 1, /* streams */
880 0, /* SF->DF convert */
881 };
882
883 /* Instruction costs on PPC604e processors. */
884 static const
885 struct processor_costs ppc604e_cost = {
886 COSTS_N_INSNS (2), /* mulsi */
887 COSTS_N_INSNS (2), /* mulsi_const */
888 COSTS_N_INSNS (2), /* mulsi_const9 */
889 COSTS_N_INSNS (2), /* muldi */
890 COSTS_N_INSNS (20), /* divsi */
891 COSTS_N_INSNS (20), /* divdi */
892 COSTS_N_INSNS (3), /* fp */
893 COSTS_N_INSNS (3), /* dmul */
894 COSTS_N_INSNS (18), /* sdiv */
895 COSTS_N_INSNS (32), /* ddiv */
896 32, /* cache line size */
897 32, /* l1 cache */
898 1024, /* l2 cache */
899 1, /* streams */
900 0, /* SF->DF convert */
901 };
902
903 /* Instruction costs on PPC620 processors. */
904 static const
905 struct processor_costs ppc620_cost = {
906 COSTS_N_INSNS (5), /* mulsi */
907 COSTS_N_INSNS (4), /* mulsi_const */
908 COSTS_N_INSNS (3), /* mulsi_const9 */
909 COSTS_N_INSNS (7), /* muldi */
910 COSTS_N_INSNS (21), /* divsi */
911 COSTS_N_INSNS (37), /* divdi */
912 COSTS_N_INSNS (3), /* fp */
913 COSTS_N_INSNS (3), /* dmul */
914 COSTS_N_INSNS (18), /* sdiv */
915 COSTS_N_INSNS (32), /* ddiv */
916 128, /* cache line size */
917 32, /* l1 cache */
918 1024, /* l2 cache */
919 1, /* streams */
920 0, /* SF->DF convert */
921 };
922
923 /* Instruction costs on PPC630 processors. */
924 static const
925 struct processor_costs ppc630_cost = {
926 COSTS_N_INSNS (5), /* mulsi */
927 COSTS_N_INSNS (4), /* mulsi_const */
928 COSTS_N_INSNS (3), /* mulsi_const9 */
929 COSTS_N_INSNS (7), /* muldi */
930 COSTS_N_INSNS (21), /* divsi */
931 COSTS_N_INSNS (37), /* divdi */
932 COSTS_N_INSNS (3), /* fp */
933 COSTS_N_INSNS (3), /* dmul */
934 COSTS_N_INSNS (17), /* sdiv */
935 COSTS_N_INSNS (21), /* ddiv */
936 128, /* cache line size */
937 64, /* l1 cache */
938 1024, /* l2 cache */
939 1, /* streams */
940 0, /* SF->DF convert */
941 };
942
943 /* Instruction costs on Cell processor. */
944 /* COSTS_N_INSNS (1) ~ one add. */
945 static const
946 struct processor_costs ppccell_cost = {
947 COSTS_N_INSNS (9/2)+2, /* mulsi */
948 COSTS_N_INSNS (6/2), /* mulsi_const */
949 COSTS_N_INSNS (6/2), /* mulsi_const9 */
950 COSTS_N_INSNS (15/2)+2, /* muldi */
951 COSTS_N_INSNS (38/2), /* divsi */
952 COSTS_N_INSNS (70/2), /* divdi */
953 COSTS_N_INSNS (10/2), /* fp */
954 COSTS_N_INSNS (10/2), /* dmul */
955 COSTS_N_INSNS (74/2), /* sdiv */
956 COSTS_N_INSNS (74/2), /* ddiv */
957 128, /* cache line size */
958 32, /* l1 cache */
959 512, /* l2 cache */
960 6, /* streams */
961 0, /* SF->DF convert */
962 };
963
964 /* Instruction costs on PPC750 and PPC7400 processors. */
965 static const
966 struct processor_costs ppc750_cost = {
967 COSTS_N_INSNS (5), /* mulsi */
968 COSTS_N_INSNS (3), /* mulsi_const */
969 COSTS_N_INSNS (2), /* mulsi_const9 */
970 COSTS_N_INSNS (5), /* muldi */
971 COSTS_N_INSNS (17), /* divsi */
972 COSTS_N_INSNS (17), /* divdi */
973 COSTS_N_INSNS (3), /* fp */
974 COSTS_N_INSNS (3), /* dmul */
975 COSTS_N_INSNS (17), /* sdiv */
976 COSTS_N_INSNS (31), /* ddiv */
977 32, /* cache line size */
978 32, /* l1 cache */
979 512, /* l2 cache */
980 1, /* streams */
981 0, /* SF->DF convert */
982 };
983
984 /* Instruction costs on PPC7450 processors. */
985 static const
986 struct processor_costs ppc7450_cost = {
987 COSTS_N_INSNS (4), /* mulsi */
988 COSTS_N_INSNS (3), /* mulsi_const */
989 COSTS_N_INSNS (3), /* mulsi_const9 */
990 COSTS_N_INSNS (4), /* muldi */
991 COSTS_N_INSNS (23), /* divsi */
992 COSTS_N_INSNS (23), /* divdi */
993 COSTS_N_INSNS (5), /* fp */
994 COSTS_N_INSNS (5), /* dmul */
995 COSTS_N_INSNS (21), /* sdiv */
996 COSTS_N_INSNS (35), /* ddiv */
997 32, /* cache line size */
998 32, /* l1 cache */
999 1024, /* l2 cache */
1000 1, /* streams */
1001 0, /* SF->DF convert */
1002 };
1003
1004 /* Instruction costs on PPC8540 processors. */
1005 static const
1006 struct processor_costs ppc8540_cost = {
1007 COSTS_N_INSNS (4), /* mulsi */
1008 COSTS_N_INSNS (4), /* mulsi_const */
1009 COSTS_N_INSNS (4), /* mulsi_const9 */
1010 COSTS_N_INSNS (4), /* muldi */
1011 COSTS_N_INSNS (19), /* divsi */
1012 COSTS_N_INSNS (19), /* divdi */
1013 COSTS_N_INSNS (4), /* fp */
1014 COSTS_N_INSNS (4), /* dmul */
1015 COSTS_N_INSNS (29), /* sdiv */
1016 COSTS_N_INSNS (29), /* ddiv */
1017 32, /* cache line size */
1018 32, /* l1 cache */
1019 256, /* l2 cache */
1020 1, /* prefetch streams */
1021 0, /* SF->DF convert */
1022 };
1023
1024 /* Instruction costs on E300C2 and E300C3 cores. */
1025 static const
1026 struct processor_costs ppce300c2c3_cost = {
1027 COSTS_N_INSNS (4), /* mulsi */
1028 COSTS_N_INSNS (4), /* mulsi_const */
1029 COSTS_N_INSNS (4), /* mulsi_const9 */
1030 COSTS_N_INSNS (4), /* muldi */
1031 COSTS_N_INSNS (19), /* divsi */
1032 COSTS_N_INSNS (19), /* divdi */
1033 COSTS_N_INSNS (3), /* fp */
1034 COSTS_N_INSNS (4), /* dmul */
1035 COSTS_N_INSNS (18), /* sdiv */
1036 COSTS_N_INSNS (33), /* ddiv */
1037 32, /* cache line size */
1038 16, /* l1 cache */
1039 16, /* l2 cache */
1040 1, /* prefetch streams */
1041 0, /* SF->DF convert */
1042 };
1043
1044 /* Instruction costs on PPCE500MC processors. */
1045 static const
1046 struct processor_costs ppce500mc_cost = {
1047 COSTS_N_INSNS (4), /* mulsi */
1048 COSTS_N_INSNS (4), /* mulsi_const */
1049 COSTS_N_INSNS (4), /* mulsi_const9 */
1050 COSTS_N_INSNS (4), /* muldi */
1051 COSTS_N_INSNS (14), /* divsi */
1052 COSTS_N_INSNS (14), /* divdi */
1053 COSTS_N_INSNS (8), /* fp */
1054 COSTS_N_INSNS (10), /* dmul */
1055 COSTS_N_INSNS (36), /* sdiv */
1056 COSTS_N_INSNS (66), /* ddiv */
1057 64, /* cache line size */
1058 32, /* l1 cache */
1059 128, /* l2 cache */
1060 1, /* prefetch streams */
1061 0, /* SF->DF convert */
1062 };
1063
1064 /* Instruction costs on PPCE500MC64 processors. */
1065 static const
1066 struct processor_costs ppce500mc64_cost = {
1067 COSTS_N_INSNS (4), /* mulsi */
1068 COSTS_N_INSNS (4), /* mulsi_const */
1069 COSTS_N_INSNS (4), /* mulsi_const9 */
1070 COSTS_N_INSNS (4), /* muldi */
1071 COSTS_N_INSNS (14), /* divsi */
1072 COSTS_N_INSNS (14), /* divdi */
1073 COSTS_N_INSNS (4), /* fp */
1074 COSTS_N_INSNS (10), /* dmul */
1075 COSTS_N_INSNS (36), /* sdiv */
1076 COSTS_N_INSNS (66), /* ddiv */
1077 64, /* cache line size */
1078 32, /* l1 cache */
1079 128, /* l2 cache */
1080 1, /* prefetch streams */
1081 0, /* SF->DF convert */
1082 };
1083
1084 /* Instruction costs on PPCE5500 processors. */
1085 static const
1086 struct processor_costs ppce5500_cost = {
1087 COSTS_N_INSNS (5), /* mulsi */
1088 COSTS_N_INSNS (5), /* mulsi_const */
1089 COSTS_N_INSNS (4), /* mulsi_const9 */
1090 COSTS_N_INSNS (5), /* muldi */
1091 COSTS_N_INSNS (14), /* divsi */
1092 COSTS_N_INSNS (14), /* divdi */
1093 COSTS_N_INSNS (7), /* fp */
1094 COSTS_N_INSNS (10), /* dmul */
1095 COSTS_N_INSNS (36), /* sdiv */
1096 COSTS_N_INSNS (66), /* ddiv */
1097 64, /* cache line size */
1098 32, /* l1 cache */
1099 128, /* l2 cache */
1100 1, /* prefetch streams */
1101 0, /* SF->DF convert */
1102 };
1103
1104 /* Instruction costs on PPCE6500 processors. */
1105 static const
1106 struct processor_costs ppce6500_cost = {
1107 COSTS_N_INSNS (5), /* mulsi */
1108 COSTS_N_INSNS (5), /* mulsi_const */
1109 COSTS_N_INSNS (4), /* mulsi_const9 */
1110 COSTS_N_INSNS (5), /* muldi */
1111 COSTS_N_INSNS (14), /* divsi */
1112 COSTS_N_INSNS (14), /* divdi */
1113 COSTS_N_INSNS (7), /* fp */
1114 COSTS_N_INSNS (10), /* dmul */
1115 COSTS_N_INSNS (36), /* sdiv */
1116 COSTS_N_INSNS (66), /* ddiv */
1117 64, /* cache line size */
1118 32, /* l1 cache */
1119 128, /* l2 cache */
1120 1, /* prefetch streams */
1121 0, /* SF->DF convert */
1122 };
1123
1124 /* Instruction costs on AppliedMicro Titan processors. */
1125 static const
1126 struct processor_costs titan_cost = {
1127 COSTS_N_INSNS (5), /* mulsi */
1128 COSTS_N_INSNS (5), /* mulsi_const */
1129 COSTS_N_INSNS (5), /* mulsi_const9 */
1130 COSTS_N_INSNS (5), /* muldi */
1131 COSTS_N_INSNS (18), /* divsi */
1132 COSTS_N_INSNS (18), /* divdi */
1133 COSTS_N_INSNS (10), /* fp */
1134 COSTS_N_INSNS (10), /* dmul */
1135 COSTS_N_INSNS (46), /* sdiv */
1136 COSTS_N_INSNS (72), /* ddiv */
1137 32, /* cache line size */
1138 32, /* l1 cache */
1139 512, /* l2 cache */
1140 1, /* prefetch streams */
1141 0, /* SF->DF convert */
1142 };
1143
1144 /* Instruction costs on POWER4 and POWER5 processors. */
1145 static const
1146 struct processor_costs power4_cost = {
1147 COSTS_N_INSNS (3), /* mulsi */
1148 COSTS_N_INSNS (2), /* mulsi_const */
1149 COSTS_N_INSNS (2), /* mulsi_const9 */
1150 COSTS_N_INSNS (4), /* muldi */
1151 COSTS_N_INSNS (18), /* divsi */
1152 COSTS_N_INSNS (34), /* divdi */
1153 COSTS_N_INSNS (3), /* fp */
1154 COSTS_N_INSNS (3), /* dmul */
1155 COSTS_N_INSNS (17), /* sdiv */
1156 COSTS_N_INSNS (17), /* ddiv */
1157 128, /* cache line size */
1158 32, /* l1 cache */
1159 1024, /* l2 cache */
1160 8, /* prefetch streams */
1161 0, /* SF->DF convert */
1162 };
1163
1164 /* Instruction costs on POWER6 processors. */
1165 static const
1166 struct processor_costs power6_cost = {
1167 COSTS_N_INSNS (8), /* mulsi */
1168 COSTS_N_INSNS (8), /* mulsi_const */
1169 COSTS_N_INSNS (8), /* mulsi_const9 */
1170 COSTS_N_INSNS (8), /* muldi */
1171 COSTS_N_INSNS (22), /* divsi */
1172 COSTS_N_INSNS (28), /* divdi */
1173 COSTS_N_INSNS (3), /* fp */
1174 COSTS_N_INSNS (3), /* dmul */
1175 COSTS_N_INSNS (13), /* sdiv */
1176 COSTS_N_INSNS (16), /* ddiv */
1177 128, /* cache line size */
1178 64, /* l1 cache */
1179 2048, /* l2 cache */
1180 16, /* prefetch streams */
1181 0, /* SF->DF convert */
1182 };
1183
1184 /* Instruction costs on POWER7 processors. */
1185 static const
1186 struct processor_costs power7_cost = {
1187 COSTS_N_INSNS (2), /* mulsi */
1188 COSTS_N_INSNS (2), /* mulsi_const */
1189 COSTS_N_INSNS (2), /* mulsi_const9 */
1190 COSTS_N_INSNS (2), /* muldi */
1191 COSTS_N_INSNS (18), /* divsi */
1192 COSTS_N_INSNS (34), /* divdi */
1193 COSTS_N_INSNS (3), /* fp */
1194 COSTS_N_INSNS (3), /* dmul */
1195 COSTS_N_INSNS (13), /* sdiv */
1196 COSTS_N_INSNS (16), /* ddiv */
1197 128, /* cache line size */
1198 32, /* l1 cache */
1199 256, /* l2 cache */
1200 12, /* prefetch streams */
1201 COSTS_N_INSNS (3), /* SF->DF convert */
1202 };
1203
1204 /* Instruction costs on POWER8 processors. */
1205 static const
1206 struct processor_costs power8_cost = {
1207 COSTS_N_INSNS (3), /* mulsi */
1208 COSTS_N_INSNS (3), /* mulsi_const */
1209 COSTS_N_INSNS (3), /* mulsi_const9 */
1210 COSTS_N_INSNS (3), /* muldi */
1211 COSTS_N_INSNS (19), /* divsi */
1212 COSTS_N_INSNS (35), /* divdi */
1213 COSTS_N_INSNS (3), /* fp */
1214 COSTS_N_INSNS (3), /* dmul */
1215 COSTS_N_INSNS (14), /* sdiv */
1216 COSTS_N_INSNS (17), /* ddiv */
1217 128, /* cache line size */
1218 32, /* l1 cache */
1219 256, /* l2 cache */
1220 12, /* prefetch streams */
1221 COSTS_N_INSNS (3), /* SF->DF convert */
1222 };
1223
1224 /* Instruction costs on POWER9 processors. */
1225 static const
1226 struct processor_costs power9_cost = {
1227 COSTS_N_INSNS (3), /* mulsi */
1228 COSTS_N_INSNS (3), /* mulsi_const */
1229 COSTS_N_INSNS (3), /* mulsi_const9 */
1230 COSTS_N_INSNS (3), /* muldi */
1231 COSTS_N_INSNS (8), /* divsi */
1232 COSTS_N_INSNS (12), /* divdi */
1233 COSTS_N_INSNS (3), /* fp */
1234 COSTS_N_INSNS (3), /* dmul */
1235 COSTS_N_INSNS (13), /* sdiv */
1236 COSTS_N_INSNS (18), /* ddiv */
1237 128, /* cache line size */
1238 32, /* l1 cache */
1239 512, /* l2 cache */
1240 8, /* prefetch streams */
1241 COSTS_N_INSNS (3), /* SF->DF convert */
1242 };
1243
1244 /* Instruction costs on POWER A2 processors. */
1245 static const
1246 struct processor_costs ppca2_cost = {
1247 COSTS_N_INSNS (16), /* mulsi */
1248 COSTS_N_INSNS (16), /* mulsi_const */
1249 COSTS_N_INSNS (16), /* mulsi_const9 */
1250 COSTS_N_INSNS (16), /* muldi */
1251 COSTS_N_INSNS (22), /* divsi */
1252 COSTS_N_INSNS (28), /* divdi */
1253 COSTS_N_INSNS (3), /* fp */
1254 COSTS_N_INSNS (3), /* dmul */
1255 COSTS_N_INSNS (59), /* sdiv */
1256 COSTS_N_INSNS (72), /* ddiv */
1257 64, /* cache line size */
1258 16, /* l1 cache */
1259 2048, /* l2 cache */
1260 16, /* prefetch streams */
1261 0, /* SF->DF convert */
1262 };
1263
1264 \f
1265 /* Table that classifies rs6000 builtin functions (pure, const, etc.). */
1266 #undef RS6000_BUILTIN_0
1267 #undef RS6000_BUILTIN_1
1268 #undef RS6000_BUILTIN_2
1269 #undef RS6000_BUILTIN_3
1270 #undef RS6000_BUILTIN_A
1271 #undef RS6000_BUILTIN_D
1272 #undef RS6000_BUILTIN_H
1273 #undef RS6000_BUILTIN_P
1274 #undef RS6000_BUILTIN_X
1275
1276 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
1277 { NAME, ICODE, MASK, ATTR },
1278
1279 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
1280 { NAME, ICODE, MASK, ATTR },
1281
1282 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
1283 { NAME, ICODE, MASK, ATTR },
1284
1285 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
1286 { NAME, ICODE, MASK, ATTR },
1287
1288 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
1289 { NAME, ICODE, MASK, ATTR },
1290
1291 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
1292 { NAME, ICODE, MASK, ATTR },
1293
1294 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
1295 { NAME, ICODE, MASK, ATTR },
1296
1297 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
1298 { NAME, ICODE, MASK, ATTR },
1299
1300 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
1301 { NAME, ICODE, MASK, ATTR },
1302
1303 struct rs6000_builtin_info_type {
1304 const char *name;
1305 const enum insn_code icode;
1306 const HOST_WIDE_INT mask;
1307 const unsigned attr;
1308 };
1309
1310 static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
1311 {
1312 #include "rs6000-builtin.def"
1313 };
1314
1315 #undef RS6000_BUILTIN_0
1316 #undef RS6000_BUILTIN_1
1317 #undef RS6000_BUILTIN_2
1318 #undef RS6000_BUILTIN_3
1319 #undef RS6000_BUILTIN_A
1320 #undef RS6000_BUILTIN_D
1321 #undef RS6000_BUILTIN_H
1322 #undef RS6000_BUILTIN_P
1323 #undef RS6000_BUILTIN_X
1324
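
/* The RS6000_BUILTIN_* dance above is the classic "X macro" technique:
   rs6000-builtin.def invokes one macro per builtin, and each client
   redefines those macros to pull out the fields it wants before
   including the .def file.  A generic sketch of the pattern (the file
   and names here are illustrative, not from this port):  */
#if 0
/* colors.def contains lines like:
     COLOR (RED, "red")
     COLOR (GREEN, "green")  */

#define COLOR(ENUM, NAME) COLOR_##ENUM,
enum color {
#include "colors.def"
  COLOR_MAX
};
#undef COLOR

#define COLOR(ENUM, NAME) NAME,
static const char *color_names[] = {
#include "colors.def"
};
#undef COLOR
#endif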
1325 /* Support for -mveclibabi=<xxx> to control which vector library to use. */
1326 static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);
1327
1328 \f
1329 static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
1330 static struct machine_function * rs6000_init_machine_status (void);
1331 static int rs6000_ra_ever_killed (void);
1332 static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
1333 static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
1334 static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
1335 static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
1336 static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
1337 static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
1338 static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
1339 static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
1340 bool);
1341 static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
1342 unsigned int);
1343 static bool is_microcoded_insn (rtx_insn *);
1344 static bool is_nonpipeline_insn (rtx_insn *);
1345 static bool is_cracked_insn (rtx_insn *);
1346 static bool is_load_insn (rtx, rtx *);
1347 static bool is_store_insn (rtx, rtx *);
1348 static bool set_to_load_agen (rtx_insn *,rtx_insn *);
1349 static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
1350 static bool insn_must_be_first_in_group (rtx_insn *);
1351 static bool insn_must_be_last_in_group (rtx_insn *);
1352 static void altivec_init_builtins (void);
1353 static tree builtin_function_type (machine_mode, machine_mode,
1354 machine_mode, machine_mode,
1355 enum rs6000_builtins, const char *name);
1356 static void rs6000_common_init_builtins (void);
1357 static void htm_init_builtins (void);
1358 static rs6000_stack_t *rs6000_stack_info (void);
1359 static void is_altivec_return_reg (rtx, void *);
1360 int easy_vector_constant (rtx, machine_mode);
1361 static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
1362 static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
1363 static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
1364 bool, bool);
1365 #if TARGET_MACHO
1366 static void macho_branch_islands (void);
1367 #endif
1368 static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
1369 int, int *);
1370 static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
1371 int, int, int *);
1372 static bool rs6000_mode_dependent_address (const_rtx);
1373 static bool rs6000_debug_mode_dependent_address (const_rtx);
1374 static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
1375 static enum reg_class rs6000_secondary_reload_class (enum reg_class,
1376 machine_mode, rtx);
1377 static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
1378 machine_mode,
1379 rtx);
1380 static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
1381 static enum reg_class rs6000_debug_preferred_reload_class (rtx,
1382 enum reg_class);
1383 static bool rs6000_debug_secondary_memory_needed (machine_mode,
1384 reg_class_t,
1385 reg_class_t);
1386 static bool rs6000_debug_can_change_mode_class (machine_mode,
1387 machine_mode,
1388 reg_class_t);
1389 static bool rs6000_save_toc_in_prologue_p (void);
1390 static rtx rs6000_internal_arg_pointer (void);
1391
1392 rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
1393 int, int *)
1394 = rs6000_legitimize_reload_address;
1395
1396 static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
1397 = rs6000_mode_dependent_address;
1398
1399 enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
1400 machine_mode, rtx)
1401 = rs6000_secondary_reload_class;
1402
1403 enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
1404 = rs6000_preferred_reload_class;
1405
1406 const int INSN_NOT_AVAILABLE = -1;
1407
1408 static void rs6000_print_isa_options (FILE *, int, const char *,
1409 HOST_WIDE_INT);
1410 static void rs6000_print_builtin_options (FILE *, int, const char *,
1411 HOST_WIDE_INT);
1412 static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);
1413
1414 static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
1415 static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
1416 enum rs6000_reg_type,
1417 machine_mode,
1418 secondary_reload_info *,
1419 bool);
1420 rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
1421 static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
1422 static tree rs6000_fold_builtin (tree, int, tree *, bool);
1423
1424 /* Hash table stuff for keeping track of TOC entries. */
1425
1426 struct GTY((for_user)) toc_hash_struct
1427 {
1428 /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
1429 ASM_OUTPUT_SPECIAL_POOL_ENTRY_P. */
1430 rtx key;
1431 machine_mode key_mode;
1432 int labelno;
1433 };
1434
1435 struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
1436 {
1437 static hashval_t hash (toc_hash_struct *);
1438 static bool equal (toc_hash_struct *, toc_hash_struct *);
1439 };
1440
1441 static GTY (()) hash_table<toc_hasher> *toc_hash_table;
1442
1443 /* Hash table to keep track of the argument types for builtin functions. */
1444
1445 struct GTY((for_user)) builtin_hash_struct
1446 {
1447 tree type;
1448 machine_mode mode[4]; /* return value + 3 arguments. */
1449 unsigned char uns_p[4]; /* and whether the types are unsigned. */
1450 };
1451
1452 struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
1453 {
1454 static hashval_t hash (builtin_hash_struct *);
1455 static bool equal (builtin_hash_struct *, builtin_hash_struct *);
1456 };
1457
1458 static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
1459
1460 \f
1461 /* Default register names. */
1462 char rs6000_reg_names[][8] =
1463 {
1464 "0", "1", "2", "3", "4", "5", "6", "7",
1465 "8", "9", "10", "11", "12", "13", "14", "15",
1466 "16", "17", "18", "19", "20", "21", "22", "23",
1467 "24", "25", "26", "27", "28", "29", "30", "31",
1468 "0", "1", "2", "3", "4", "5", "6", "7",
1469 "8", "9", "10", "11", "12", "13", "14", "15",
1470 "16", "17", "18", "19", "20", "21", "22", "23",
1471 "24", "25", "26", "27", "28", "29", "30", "31",
1472 "mq", "lr", "ctr","ap",
1473 "0", "1", "2", "3", "4", "5", "6", "7",
1474 "ca",
1475 /* AltiVec registers. */
1476 "0", "1", "2", "3", "4", "5", "6", "7",
1477 "8", "9", "10", "11", "12", "13", "14", "15",
1478 "16", "17", "18", "19", "20", "21", "22", "23",
1479 "24", "25", "26", "27", "28", "29", "30", "31",
1480 "vrsave", "vscr",
1481 /* Soft frame pointer. */
1482 "sfp",
1483 /* HTM SPR registers. */
1484 "tfhar", "tfiar", "texasr"
1485 };
1486
1487 #ifdef TARGET_REGNAMES
1488 static const char alt_reg_names[][8] =
1489 {
1490 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
1491 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
1492 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
1493 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
1494 "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
1495 "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
1496 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
1497 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
1498 "mq", "lr", "ctr", "ap",
1499 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
1500 "ca",
1501 /* AltiVec registers. */
1502 "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
1503 "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
1504 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
1505 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
1506 "vrsave", "vscr",
1507 /* Soft frame pointer. */
1508 "sfp",
1509 /* HTM SPR registers. */
1510 "tfhar", "tfiar", "texasr"
1511 };
1512 #endif
1513
1514 /* Table of valid machine attributes. */
1515
1516 static const struct attribute_spec rs6000_attribute_table[] =
1517 {
1518 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
1519 affects_type_identity, handler, exclude } */
1520 { "altivec", 1, 1, false, true, false, false,
1521 rs6000_handle_altivec_attribute, NULL },
1522 { "longcall", 0, 0, false, true, true, false,
1523 rs6000_handle_longcall_attribute, NULL },
1524 { "shortcall", 0, 0, false, true, true, false,
1525 rs6000_handle_longcall_attribute, NULL },
1526 { "ms_struct", 0, 0, false, false, false, false,
1527 rs6000_handle_struct_attribute, NULL },
1528 { "gcc_struct", 0, 0, false, false, false, false,
1529 rs6000_handle_struct_attribute, NULL },
1530 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1531 SUBTARGET_ATTRIBUTE_TABLE,
1532 #endif
1533 { NULL, 0, 0, false, false, false, false, NULL, NULL }
1534 };
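
/* Example uses (user code, not part of this file) of attributes
   registered in the table above:  */
#if 0
/* Always call through a pointer, so the callee may live anywhere in the
   address space.  */
void far_away_function (void) __attribute__ ((longcall));

/* Lay out the structure with Microsoft rules instead of the native
   ones.  */
struct ms_layout { char c; int i; } __attribute__ ((ms_struct));
#endif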
1535 \f
1536 #ifndef TARGET_PROFILE_KERNEL
1537 #define TARGET_PROFILE_KERNEL 0
1538 #endif
1539
1540 /* The VRSAVE bitmask puts bit %v0 as the most significant bit. */
1541 #define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
1542 \f
1543 /* Initialize the GCC target structure. */
1544 #undef TARGET_ATTRIBUTE_TABLE
1545 #define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
1546 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
1547 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
1548 #undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
1549 #define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p
1550
1551 #undef TARGET_ASM_ALIGNED_DI_OP
1552 #define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
1553
1554 /* Default unaligned ops are only provided for ELF. Find the ops needed
1555 for non-ELF systems. */
1556 #ifndef OBJECT_FORMAT_ELF
1557 #if TARGET_XCOFF
1558 /* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
1559 64-bit targets. */
1560 #undef TARGET_ASM_UNALIGNED_HI_OP
1561 #define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
1562 #undef TARGET_ASM_UNALIGNED_SI_OP
1563 #define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
1564 #undef TARGET_ASM_UNALIGNED_DI_OP
1565 #define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
1566 #else
1567 /* For Darwin. */
1568 #undef TARGET_ASM_UNALIGNED_HI_OP
1569 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
1570 #undef TARGET_ASM_UNALIGNED_SI_OP
1571 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
1572 #undef TARGET_ASM_UNALIGNED_DI_OP
1573 #define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
1574 #undef TARGET_ASM_ALIGNED_DI_OP
1575 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
1576 #endif
1577 #endif
1578
1579 /* This hook deals with fixups for relocatable code and DI-mode objects
1580 in 64-bit code. */
1581 #undef TARGET_ASM_INTEGER
1582 #define TARGET_ASM_INTEGER rs6000_assemble_integer
1583
1584 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
1585 #undef TARGET_ASM_ASSEMBLE_VISIBILITY
1586 #define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
1587 #endif
1588
1589 #undef TARGET_SET_UP_BY_PROLOGUE
1590 #define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue
1591
1592 #undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
1593 #define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
1594 #undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
1595 #define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
1596 #undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
1597 #define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
1598 #undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
1599 #define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
1600 #undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
1601 #define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
1602 #undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
1603 #define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components
1604
1605 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1606 #define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry
1607
1608 #undef TARGET_INTERNAL_ARG_POINTER
1609 #define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer
1610
1611 #undef TARGET_HAVE_TLS
1612 #define TARGET_HAVE_TLS HAVE_AS_TLS
1613
1614 #undef TARGET_CANNOT_FORCE_CONST_MEM
1615 #define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem
1616
1617 #undef TARGET_DELEGITIMIZE_ADDRESS
1618 #define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address
1619
1620 #undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
1621 #define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p
1622
1623 #undef TARGET_LEGITIMATE_COMBINED_INSN
1624 #define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn
1625
1626 #undef TARGET_ASM_FUNCTION_PROLOGUE
1627 #define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
1628 #undef TARGET_ASM_FUNCTION_EPILOGUE
1629 #define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
1630
1631 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
1632 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra
1633
1634 #undef TARGET_LEGITIMIZE_ADDRESS
1635 #define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address
1636
1637 #undef TARGET_SCHED_VARIABLE_ISSUE
1638 #define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
1639
1640 #undef TARGET_SCHED_ISSUE_RATE
1641 #define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
1642 #undef TARGET_SCHED_ADJUST_COST
1643 #define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
1644 #undef TARGET_SCHED_ADJUST_PRIORITY
1645 #define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
1646 #undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
1647 #define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
1648 #undef TARGET_SCHED_INIT
1649 #define TARGET_SCHED_INIT rs6000_sched_init
1650 #undef TARGET_SCHED_FINISH
1651 #define TARGET_SCHED_FINISH rs6000_sched_finish
1652 #undef TARGET_SCHED_REORDER
1653 #define TARGET_SCHED_REORDER rs6000_sched_reorder
1654 #undef TARGET_SCHED_REORDER2
1655 #define TARGET_SCHED_REORDER2 rs6000_sched_reorder2
1656
1657 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1658 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
1659
1660 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
1661 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
1662
1663 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
1664 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
1665 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
1666 #define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
1667 #undef TARGET_SCHED_SET_SCHED_CONTEXT
1668 #define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
1669 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
1670 #define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1671
1672 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1673 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1674
1675 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1676 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1677 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1678 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1679 rs6000_builtin_support_vector_misalignment
1680 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1681 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1682 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1683 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1684 rs6000_builtin_vectorization_cost
1685 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1686 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1687 rs6000_preferred_simd_mode
1688 #undef TARGET_VECTORIZE_INIT_COST
1689 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1690 #undef TARGET_VECTORIZE_ADD_STMT_COST
1691 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1692 #undef TARGET_VECTORIZE_FINISH_COST
1693 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1694 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1695 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1696
1697 #undef TARGET_INIT_BUILTINS
1698 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1699 #undef TARGET_BUILTIN_DECL
1700 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1701
1702 #undef TARGET_FOLD_BUILTIN
1703 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1704 #undef TARGET_GIMPLE_FOLD_BUILTIN
1705 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1706
1707 #undef TARGET_EXPAND_BUILTIN
1708 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1709
1710 #undef TARGET_MANGLE_TYPE
1711 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1712
1713 #undef TARGET_INIT_LIBFUNCS
1714 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1715
1716 #if TARGET_MACHO
1717 #undef TARGET_BINDS_LOCAL_P
1718 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1719 #endif
1720
1721 #undef TARGET_MS_BITFIELD_LAYOUT_P
1722 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1723
1724 #undef TARGET_ASM_OUTPUT_MI_THUNK
1725 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1726
1727 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1728 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1729
1730 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1731 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1732
1733 #undef TARGET_REGISTER_MOVE_COST
1734 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1735 #undef TARGET_MEMORY_MOVE_COST
1736 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1737 #undef TARGET_CANNOT_COPY_INSN_P
1738 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1739 #undef TARGET_RTX_COSTS
1740 #define TARGET_RTX_COSTS rs6000_rtx_costs
1741 #undef TARGET_ADDRESS_COST
1742 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1743 #undef TARGET_INSN_COST
1744 #define TARGET_INSN_COST rs6000_insn_cost
1745
1746 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1747 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1748
1749 #undef TARGET_PROMOTE_FUNCTION_MODE
1750 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1751
1752 #undef TARGET_RETURN_IN_MEMORY
1753 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1754
1755 #undef TARGET_RETURN_IN_MSB
1756 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1757
1758 #undef TARGET_SETUP_INCOMING_VARARGS
1759 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1760
1761 /* Always strict argument naming on rs6000. */
1762 #undef TARGET_STRICT_ARGUMENT_NAMING
1763 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1764 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1765 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1766 #undef TARGET_SPLIT_COMPLEX_ARG
1767 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1768 #undef TARGET_MUST_PASS_IN_STACK
1769 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1770 #undef TARGET_PASS_BY_REFERENCE
1771 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1772 #undef TARGET_ARG_PARTIAL_BYTES
1773 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1774 #undef TARGET_FUNCTION_ARG_ADVANCE
1775 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1776 #undef TARGET_FUNCTION_ARG
1777 #define TARGET_FUNCTION_ARG rs6000_function_arg
1778 #undef TARGET_FUNCTION_ARG_PADDING
1779 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1780 #undef TARGET_FUNCTION_ARG_BOUNDARY
1781 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1782
1783 #undef TARGET_BUILD_BUILTIN_VA_LIST
1784 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1785
1786 #undef TARGET_EXPAND_BUILTIN_VA_START
1787 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1788
1789 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1790 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1791
1792 #undef TARGET_EH_RETURN_FILTER_MODE
1793 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1794
1795 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1796 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1797
1798 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1799 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1800
1801 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1802 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1803
1804 #undef TARGET_FLOATN_MODE
1805 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1806
1807 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1808 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1809
1810 #undef TARGET_MD_ASM_ADJUST
1811 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1812
1813 #undef TARGET_OPTION_OVERRIDE
1814 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1815
1816 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1817 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1818 rs6000_builtin_vectorized_function
1819
1820 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1821 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1822 rs6000_builtin_md_vectorized_function
1823
1824 #undef TARGET_STACK_PROTECT_GUARD
1825 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1826
1827 #if !TARGET_MACHO
1828 #undef TARGET_STACK_PROTECT_FAIL
1829 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1830 #endif
1831
1832 #ifdef HAVE_AS_TLS
1833 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1834 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1835 #endif
1836
1837 /* Use a 32-bit anchor range. This leads to sequences like:
1838
1839 addis tmp,anchor,high
1840 add dest,tmp,low
1841
1842 where tmp itself acts as an anchor, and can be shared between
1843 accesses to the same 64k page. */
1844 #undef TARGET_MIN_ANCHOR_OFFSET
1845 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1846 #undef TARGET_MAX_ANCHOR_OFFSET
1847 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1848 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1849 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1850 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1851 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1852
1853 #undef TARGET_BUILTIN_RECIPROCAL
1854 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1855
1856 #undef TARGET_SECONDARY_RELOAD
1857 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1858 #undef TARGET_SECONDARY_MEMORY_NEEDED
1859 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1860 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1861 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1862
1863 #undef TARGET_LEGITIMATE_ADDRESS_P
1864 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1865
1866 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1867 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1868
1869 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1870 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1871
1872 #undef TARGET_CAN_ELIMINATE
1873 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1874
1875 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1876 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1877
1878 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1879 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1880
1881 #undef TARGET_TRAMPOLINE_INIT
1882 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1883
1884 #undef TARGET_FUNCTION_VALUE
1885 #define TARGET_FUNCTION_VALUE rs6000_function_value
1886
1887 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1888 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1889
1890 #undef TARGET_OPTION_SAVE
1891 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1892
1893 #undef TARGET_OPTION_RESTORE
1894 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1895
1896 #undef TARGET_OPTION_PRINT
1897 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1898
1899 #undef TARGET_CAN_INLINE_P
1900 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1901
1902 #undef TARGET_SET_CURRENT_FUNCTION
1903 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1904
1905 #undef TARGET_LEGITIMATE_CONSTANT_P
1906 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1907
1908 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1909 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1910
1911 #undef TARGET_CAN_USE_DOLOOP_P
1912 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1913
1914 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1915 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1916
1917 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1918 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1919 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1920 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1921 #undef TARGET_UNWIND_WORD_MODE
1922 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1923
1924 #undef TARGET_OFFLOAD_OPTIONS
1925 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1926
1927 #undef TARGET_C_MODE_FOR_SUFFIX
1928 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1929
1930 #undef TARGET_INVALID_BINARY_OP
1931 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1932
1933 #undef TARGET_OPTAB_SUPPORTED_P
1934 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1935
1936 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1937 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1938
1939 #undef TARGET_COMPARE_VERSION_PRIORITY
1940 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1941
1942 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1943 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1944 rs6000_generate_version_dispatcher_body
1945
1946 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1947 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1948 rs6000_get_function_versions_dispatcher
1949
1950 #undef TARGET_OPTION_FUNCTION_VERSIONS
1951 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1952
1953 #undef TARGET_HARD_REGNO_NREGS
1954 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1955 #undef TARGET_HARD_REGNO_MODE_OK
1956 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1957
1958 #undef TARGET_MODES_TIEABLE_P
1959 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1960
1961 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1962 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1963 rs6000_hard_regno_call_part_clobbered
1964
1965 #undef TARGET_SLOW_UNALIGNED_ACCESS
1966 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1967
1968 #undef TARGET_CAN_CHANGE_MODE_CLASS
1969 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1970
1971 #undef TARGET_CONSTANT_ALIGNMENT
1972 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1973
1974 #undef TARGET_STARTING_FRAME_OFFSET
1975 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1976
1977 #if TARGET_ELF && RS6000_WEAK
1978 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1979 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1980 #endif
1981
1982 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1983 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1984
1985 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1986 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1987 \f
1988
1989 /* Processor table. */
1990 struct rs6000_ptt
1991 {
1992 const char *const name; /* Canonical processor name. */
1993 const enum processor_type processor; /* Processor type enum value. */
1994 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1995 };
1996
1997 static struct rs6000_ptt const processor_target_table[] =
1998 {
1999 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
2000 #include "rs6000-cpus.def"
2001 #undef RS6000_CPU
2002 };
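/* A sketch of the expansion above: each RS6000_CPU (NAME, CPU, FLAGS) line
   in rs6000-cpus.def becomes one table entry, so (illustrative; the exact
   flag names live in rs6000-cpus.def)
       RS6000_CPU ("power8", PROCESSOR_POWER8, ...)
   turns into
       { "power8", PROCESSOR_POWER8, ... },
   keeping this table in sync with the single list of supported CPUs.  */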
2003
2004 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2005 name is invalid. */
2006
2007 static int
2008 rs6000_cpu_name_lookup (const char *name)
2009 {
2010 size_t i;
2011
2012 if (name != NULL)
2013 {
2014 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2015 if (! strcmp (name, processor_target_table[i].name))
2016 return (int)i;
2017 }
2018
2019 return -1;
2020 }
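/* Usage sketch (illustrative): rs6000_cpu_name_lookup ("power8") returns
   the index of the "power8" row in processor_target_table, while a name
   not present in rs6000-cpus.def returns -1.  */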
2021
2022 \f
2023 /* Return number of consecutive hard regs needed starting at reg REGNO
2024 to hold something of mode MODE.
2025 This is ordinarily the length in words of a value of mode MODE
2026 but can be less for certain modes in special long registers.
2027
2028 POWER and PowerPC GPRs hold 32 bits worth;
 2029 	   PowerPC64 GPRs and FPRs hold 64 bits worth. */
2030
2031 static int
2032 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2033 {
2034 unsigned HOST_WIDE_INT reg_size;
2035
2036 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2037 128-bit floating point that can go in vector registers, which has VSX
2038 memory addressing. */
2039 if (FP_REGNO_P (regno))
2040 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2041 ? UNITS_PER_VSX_WORD
2042 : UNITS_PER_FP_WORD);
2043
2044 else if (ALTIVEC_REGNO_P (regno))
2045 reg_size = UNITS_PER_ALTIVEC_WORD;
2046
2047 else
2048 reg_size = UNITS_PER_WORD;
2049
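  /* Round up: e.g. a 16-byte vector mode held in 8-byte FP registers needs
     (16 + 8 - 1) / 8 = 2 consecutive registers.  */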
2050 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2051 }
2052
2053 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2054 MODE. */
2055 static int
2056 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2057 {
2058 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2059
2060 if (COMPLEX_MODE_P (mode))
2061 mode = GET_MODE_INNER (mode);
2062
 2063 	  /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
 2064 	     register combinations, and we use PTImode for those operations.  Don't
 2065 	     allow quad words in the argument or frame pointer registers, just
 2066 	     registers 0..31. */
2067 if (mode == PTImode)
2068 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2069 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2070 && ((regno & 1) == 0));
2071
 2072 	  /* The VSX registers that overlap the FPR registers are larger than the
 2073 	     FPRs on non-VSX implementations.  Don't allow an item to be split between
 2074 	     an FP register and an Altivec register.  Allow TImode in all VSX registers
 2075 	     if the user asked for it. */
2076 if (TARGET_VSX && VSX_REGNO_P (regno)
2077 && (VECTOR_MEM_VSX_P (mode)
2078 || FLOAT128_VECTOR_P (mode)
2079 || reg_addr[mode].scalar_in_vmx_p
2080 || mode == TImode
2081 || (TARGET_VADDUQM && mode == V1TImode)))
2082 {
2083 if (FP_REGNO_P (regno))
2084 return FP_REGNO_P (last_regno);
2085
2086 if (ALTIVEC_REGNO_P (regno))
2087 {
2088 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2089 return 0;
2090
2091 return ALTIVEC_REGNO_P (last_regno);
2092 }
2093 }
2094
2095 /* The GPRs can hold any mode, but values bigger than one register
2096 cannot go past R31. */
2097 if (INT_REGNO_P (regno))
2098 return INT_REGNO_P (last_regno);
2099
 2100 	  /* The float registers (except for VSX vector modes) can only hold floating
 2101 	     modes and DImode, plus the small integer modes handled just below. */
2102 if (FP_REGNO_P (regno))
2103 {
2104 if (FLOAT128_VECTOR_P (mode))
2105 return false;
2106
2107 if (SCALAR_FLOAT_MODE_P (mode)
2108 && (mode != TDmode || (regno % 2) == 0)
2109 && FP_REGNO_P (last_regno))
2110 return 1;
2111
2112 if (GET_MODE_CLASS (mode) == MODE_INT)
2113 {
 2114 	      if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2115 return 1;
2116
2117 if (TARGET_P8_VECTOR && (mode == SImode))
2118 return 1;
2119
2120 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2121 return 1;
2122 }
2123
2124 return 0;
2125 }
2126
 2127 	  /* The CR registers can only hold CC modes. */
2128 if (CR_REGNO_P (regno))
2129 return GET_MODE_CLASS (mode) == MODE_CC;
2130
2131 if (CA_REGNO_P (regno))
2132 return mode == Pmode || mode == SImode;
2133
 2134 	  /* AltiVec modes can go only in AltiVec registers. */
2135 if (ALTIVEC_REGNO_P (regno))
2136 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2137 || mode == V1TImode);
2138
 2139 	  /* We cannot put non-VSX TImode or PTImode anywhere except in the general
 2140 	     registers, and the value must fit within the register set. */
2141
2142 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2143 }
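/* Worked example of the TDmode restriction above: 128-bit decimal float
   occupies an even/odd FPR pair, so TDmode starting at an odd-numbered FPR
   is rejected while the same mode at an even-numbered FPR is accepted.  */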
2144
2145 /* Implement TARGET_HARD_REGNO_NREGS. */
2146
2147 static unsigned int
2148 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2149 {
2150 return rs6000_hard_regno_nregs[mode][regno];
2151 }
2152
2153 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2154
2155 static bool
2156 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2157 {
2158 return rs6000_hard_regno_mode_ok_p[mode][regno];
2159 }
2160
2161 /* Implement TARGET_MODES_TIEABLE_P.
2162
2163 PTImode cannot tie with other modes because PTImode is restricted to even
2164 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2165 57744).
2166
2167 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2168 128-bit floating point on VSX systems ties with other vectors. */
2169
2170 static bool
2171 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2172 {
2173 if (mode1 == PTImode)
2174 return mode2 == PTImode;
2175 if (mode2 == PTImode)
2176 return false;
2177
2178 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2179 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2180 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2181 return false;
2182
2183 if (SCALAR_FLOAT_MODE_P (mode1))
2184 return SCALAR_FLOAT_MODE_P (mode2);
2185 if (SCALAR_FLOAT_MODE_P (mode2))
2186 return false;
2187
2188 if (GET_MODE_CLASS (mode1) == MODE_CC)
2189 return GET_MODE_CLASS (mode2) == MODE_CC;
2190 if (GET_MODE_CLASS (mode2) == MODE_CC)
2191 return false;
2192
2193 return true;
2194 }
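/* Illustrative results: V4SImode ties with V2DFmode (both are Altivec/VSX
   vector modes) and SFmode ties with DFmode (both scalar float), but TImode
   never ties with PTImode because of the even-GPR restriction above.  */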
2195
2196 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2197
2198 static bool
2199 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2200 {
2201 if (TARGET_32BIT
2202 && TARGET_POWERPC64
2203 && GET_MODE_SIZE (mode) > 4
2204 && INT_REGNO_P (regno))
2205 return true;
2206
2207 if (TARGET_VSX
2208 && FP_REGNO_P (regno)
2209 && GET_MODE_SIZE (mode) > 8
2210 && !FLOAT128_2REG_P (mode))
2211 return true;
2212
2213 return false;
2214 }
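/* E.g. with -m32 -mpowerpc64, a DImode value fits in one 64-bit GPR, but
   the 32-bit ABI only preserves the low 32 bits of that register across
   calls, so the first test above reports it as partially clobbered.  */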
2215
2216 /* Print interesting facts about registers. */
2217 static void
2218 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2219 {
2220 int r, m;
2221
2222 for (r = first_regno; r <= last_regno; ++r)
2223 {
2224 const char *comma = "";
2225 int len;
2226
2227 if (first_regno == last_regno)
2228 fprintf (stderr, "%s:\t", reg_name);
2229 else
2230 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2231
2232 len = 8;
2233 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2234 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2235 {
2236 if (len > 70)
2237 {
2238 fprintf (stderr, ",\n\t");
2239 len = 8;
2240 comma = "";
2241 }
2242
2243 if (rs6000_hard_regno_nregs[m][r] > 1)
2244 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2245 rs6000_hard_regno_nregs[m][r]);
2246 else
2247 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2248
2249 comma = ", ";
2250 }
2251
2252 if (call_used_regs[r])
2253 {
2254 if (len > 70)
2255 {
2256 fprintf (stderr, ",\n\t");
2257 len = 8;
2258 comma = "";
2259 }
2260
2261 len += fprintf (stderr, "%s%s", comma, "call-used");
2262 comma = ", ";
2263 }
2264
2265 if (fixed_regs[r])
2266 {
2267 if (len > 70)
2268 {
2269 fprintf (stderr, ",\n\t");
2270 len = 8;
2271 comma = "";
2272 }
2273
2274 len += fprintf (stderr, "%s%s", comma, "fixed");
2275 comma = ", ";
2276 }
2277
2278 if (len > 70)
2279 {
2280 fprintf (stderr, ",\n\t");
2281 comma = "";
2282 }
2283
2284 len += fprintf (stderr, "%sreg-class = %s", comma,
2285 reg_class_names[(int)rs6000_regno_regclass[r]]);
2286 comma = ", ";
2287
2288 if (len > 70)
2289 {
2290 fprintf (stderr, ",\n\t");
2291 comma = "";
2292 }
2293
2294 fprintf (stderr, "%sregno = %d\n", comma, r);
2295 }
2296 }
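/* A typical line (illustrative only; the exact mode list depends on the
   configuration) looks like
       gr0:    SI, DI/2, ..., call-used, reg-class = GENERAL_REGS, regno = 0
   where a "/N" suffix marks modes needing N > 1 hard registers.  */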
2297
2298 static const char *
2299 rs6000_debug_vector_unit (enum rs6000_vector v)
2300 {
2301 const char *ret;
2302
2303 switch (v)
2304 {
2305 case VECTOR_NONE: ret = "none"; break;
2306 case VECTOR_ALTIVEC: ret = "altivec"; break;
2307 case VECTOR_VSX: ret = "vsx"; break;
2308 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2309 default: ret = "unknown"; break;
2310 }
2311
2312 return ret;
2313 }
2314
2315 /* Inner function printing just the address mask for a particular reload
2316 register class. */
2317 DEBUG_FUNCTION char *
2318 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2319 {
2320 static char ret[8];
2321 char *p = ret;
2322
2323 if ((mask & RELOAD_REG_VALID) != 0)
2324 *p++ = 'v';
2325 else if (keep_spaces)
2326 *p++ = ' ';
2327
2328 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2329 *p++ = 'm';
2330 else if (keep_spaces)
2331 *p++ = ' ';
2332
2333 if ((mask & RELOAD_REG_INDEXED) != 0)
2334 *p++ = 'i';
2335 else if (keep_spaces)
2336 *p++ = ' ';
2337
2338 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2339 *p++ = 'O';
2340 else if ((mask & RELOAD_REG_OFFSET) != 0)
2341 *p++ = 'o';
2342 else if (keep_spaces)
2343 *p++ = ' ';
2344
2345 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2346 *p++ = '+';
2347 else if (keep_spaces)
2348 *p++ = ' ';
2349
2350 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2351 *p++ = '+';
2352 else if (keep_spaces)
2353 *p++ = ' ';
2354
2355 if ((mask & RELOAD_REG_AND_M16) != 0)
2356 *p++ = '&';
2357 else if (keep_spaces)
2358 *p++ = ' ';
2359
2360 *p = '\0';
2361
2362 return ret;
2363 }
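/* Sample output (illustrative): a mask of RELOAD_REG_VALID
   | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET prints as "v io   " when
   KEEP_SPACES is set, and as "vio" when it is not.  */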
2364
 2365 	/* Print the address masks in a human readable fashion. */
2366 DEBUG_FUNCTION void
2367 rs6000_debug_print_mode (ssize_t m)
2368 {
2369 ssize_t rc;
2370 int spaces = 0;
2371
2372 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2373 for (rc = 0; rc < N_RELOAD_REG; rc++)
2374 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2375 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2376
2377 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2378 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2379 {
2380 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2381 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2382 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2383 spaces = 0;
2384 }
2385 else
2386 spaces += sizeof (" Reload=sl") - 1;
2387
2388 if (reg_addr[m].scalar_in_vmx_p)
2389 {
2390 fprintf (stderr, "%*s Upper=y", spaces, "");
2391 spaces = 0;
2392 }
2393 else
2394 spaces += sizeof (" Upper=y") - 1;
2395
2396 if (rs6000_vector_unit[m] != VECTOR_NONE
2397 || rs6000_vector_mem[m] != VECTOR_NONE)
2398 {
2399 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2400 spaces, "",
2401 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2402 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2403 }
2404
2405 fputs ("\n", stderr);
2406 }
2407
2408 #define DEBUG_FMT_ID "%-32s= "
2409 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2410 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2411 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
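/* For instance, fprintf (stderr, DEBUG_FMT_S, "cmodel", "medium") prints
   the label left-justified in a 32-column field:
       cmodel                          = medium
   which keeps the -mdebug=reg report aligned.  */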
2412
2413 /* Print various interesting information with -mdebug=reg. */
2414 static void
2415 rs6000_debug_reg_global (void)
2416 {
2417 static const char *const tf[2] = { "false", "true" };
2418 const char *nl = (const char *)0;
2419 int m;
2420 size_t m1, m2, v;
2421 char costly_num[20];
2422 char nop_num[20];
2423 char flags_buffer[40];
2424 const char *costly_str;
2425 const char *nop_str;
2426 const char *trace_str;
2427 const char *abi_str;
2428 const char *cmodel_str;
2429 struct cl_target_option cl_opts;
2430
2431 /* Modes we want tieable information on. */
2432 static const machine_mode print_tieable_modes[] = {
2433 QImode,
2434 HImode,
2435 SImode,
2436 DImode,
2437 TImode,
2438 PTImode,
2439 SFmode,
2440 DFmode,
2441 TFmode,
2442 IFmode,
2443 KFmode,
2444 SDmode,
2445 DDmode,
2446 TDmode,
2447 V16QImode,
2448 V8HImode,
2449 V4SImode,
2450 V2DImode,
2451 V1TImode,
2452 V32QImode,
2453 V16HImode,
2454 V8SImode,
2455 V4DImode,
2456 V2TImode,
2457 V4SFmode,
2458 V2DFmode,
2459 V8SFmode,
2460 V4DFmode,
2461 CCmode,
2462 CCUNSmode,
2463 CCEQmode,
2464 };
2465
2466 /* Virtual regs we are interested in. */
 2467 	  static const struct {
2468 int regno; /* register number. */
2469 const char *name; /* register name. */
2470 } virtual_regs[] = {
2471 { STACK_POINTER_REGNUM, "stack pointer:" },
2472 { TOC_REGNUM, "toc: " },
2473 { STATIC_CHAIN_REGNUM, "static chain: " },
2474 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2475 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2476 { ARG_POINTER_REGNUM, "arg pointer: " },
2477 { FRAME_POINTER_REGNUM, "frame pointer:" },
2478 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2479 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2480 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2481 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2482 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2483 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2484 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
 2485 	    { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2486 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2487 };
2488
2489 fputs ("\nHard register information:\n", stderr);
2490 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2491 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2492 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2493 LAST_ALTIVEC_REGNO,
2494 "vs");
2495 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2496 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2497 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2498 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2499 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2500 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2501
2502 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2503 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2504 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2505
2506 fprintf (stderr,
2507 "\n"
2508 "d reg_class = %s\n"
2509 "f reg_class = %s\n"
2510 "v reg_class = %s\n"
2511 "wa reg_class = %s\n"
2512 "wb reg_class = %s\n"
2513 "wd reg_class = %s\n"
2514 "we reg_class = %s\n"
2515 "wf reg_class = %s\n"
2516 "wg reg_class = %s\n"
2517 "wh reg_class = %s\n"
2518 "wi reg_class = %s\n"
2519 "wj reg_class = %s\n"
2520 "wk reg_class = %s\n"
2521 "wl reg_class = %s\n"
2522 "wm reg_class = %s\n"
2523 "wo reg_class = %s\n"
2524 "wp reg_class = %s\n"
2525 "wq reg_class = %s\n"
2526 "wr reg_class = %s\n"
2527 "ws reg_class = %s\n"
2528 "wt reg_class = %s\n"
2529 "wu reg_class = %s\n"
2530 "wv reg_class = %s\n"
2531 "ww reg_class = %s\n"
2532 "wx reg_class = %s\n"
2533 "wy reg_class = %s\n"
2534 "wz reg_class = %s\n"
2535 "wA reg_class = %s\n"
2536 "wH reg_class = %s\n"
2537 "wI reg_class = %s\n"
2538 "wJ reg_class = %s\n"
2539 "wK reg_class = %s\n"
2540 "\n",
2541 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2544 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2573
2574 nl = "\n";
2575 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2576 rs6000_debug_print_mode (m);
2577
2578 fputs ("\n", stderr);
2579
2580 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2581 {
2582 machine_mode mode1 = print_tieable_modes[m1];
2583 bool first_time = true;
2584
2585 nl = (const char *)0;
2586 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2587 {
2588 machine_mode mode2 = print_tieable_modes[m2];
2589 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2590 {
2591 if (first_time)
2592 {
2593 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2594 nl = "\n";
2595 first_time = false;
2596 }
2597
2598 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2599 }
2600 }
2601
2602 if (!first_time)
2603 fputs ("\n", stderr);
2604 }
2605
2606 if (nl)
2607 fputs (nl, stderr);
2608
2609 if (rs6000_recip_control)
2610 {
2611 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2612
2613 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2614 if (rs6000_recip_bits[m])
2615 {
2616 fprintf (stderr,
2617 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2618 GET_MODE_NAME (m),
2619 (RS6000_RECIP_AUTO_RE_P (m)
2620 ? "auto"
2621 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2622 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2623 ? "auto"
2624 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2625 }
2626
2627 fputs ("\n", stderr);
2628 }
2629
2630 if (rs6000_cpu_index >= 0)
2631 {
2632 const char *name = processor_target_table[rs6000_cpu_index].name;
2633 HOST_WIDE_INT flags
2634 = processor_target_table[rs6000_cpu_index].target_enable;
2635
2636 sprintf (flags_buffer, "-mcpu=%s flags", name);
2637 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2638 }
2639 else
2640 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2641
2642 if (rs6000_tune_index >= 0)
2643 {
2644 const char *name = processor_target_table[rs6000_tune_index].name;
2645 HOST_WIDE_INT flags
2646 = processor_target_table[rs6000_tune_index].target_enable;
2647
2648 sprintf (flags_buffer, "-mtune=%s flags", name);
2649 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2650 }
2651 else
2652 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2653
2654 cl_target_option_save (&cl_opts, &global_options);
2655 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2656 rs6000_isa_flags);
2657
2658 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2659 rs6000_isa_flags_explicit);
2660
2661 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2662 rs6000_builtin_mask);
2663
2664 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2665
2666 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2667 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2668
2669 switch (rs6000_sched_costly_dep)
2670 {
2671 case max_dep_latency:
2672 costly_str = "max_dep_latency";
2673 break;
2674
2675 case no_dep_costly:
2676 costly_str = "no_dep_costly";
2677 break;
2678
2679 case all_deps_costly:
2680 costly_str = "all_deps_costly";
2681 break;
2682
2683 case true_store_to_load_dep_costly:
2684 costly_str = "true_store_to_load_dep_costly";
2685 break;
2686
2687 case store_to_load_dep_costly:
2688 costly_str = "store_to_load_dep_costly";
2689 break;
2690
2691 default:
2692 costly_str = costly_num;
2693 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2694 break;
2695 }
2696
2697 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2698
2699 switch (rs6000_sched_insert_nops)
2700 {
2701 case sched_finish_regroup_exact:
2702 nop_str = "sched_finish_regroup_exact";
2703 break;
2704
2705 case sched_finish_pad_groups:
2706 nop_str = "sched_finish_pad_groups";
2707 break;
2708
2709 case sched_finish_none:
2710 nop_str = "sched_finish_none";
2711 break;
2712
2713 default:
2714 nop_str = nop_num;
2715 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2716 break;
2717 }
2718
2719 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2720
2721 switch (rs6000_sdata)
2722 {
2723 default:
2724 case SDATA_NONE:
2725 break;
2726
2727 case SDATA_DATA:
2728 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2729 break;
2730
2731 case SDATA_SYSV:
2732 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2733 break;
2734
2735 case SDATA_EABI:
2736 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2737 break;
2738
2739 }
2740
2741 switch (rs6000_traceback)
2742 {
2743 case traceback_default: trace_str = "default"; break;
2744 case traceback_none: trace_str = "none"; break;
2745 case traceback_part: trace_str = "part"; break;
2746 case traceback_full: trace_str = "full"; break;
2747 default: trace_str = "unknown"; break;
2748 }
2749
2750 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2751
2752 switch (rs6000_current_cmodel)
2753 {
2754 case CMODEL_SMALL: cmodel_str = "small"; break;
2755 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2756 case CMODEL_LARGE: cmodel_str = "large"; break;
2757 default: cmodel_str = "unknown"; break;
2758 }
2759
2760 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2761
2762 switch (rs6000_current_abi)
2763 {
2764 case ABI_NONE: abi_str = "none"; break;
2765 case ABI_AIX: abi_str = "aix"; break;
2766 case ABI_ELFv2: abi_str = "ELFv2"; break;
2767 case ABI_V4: abi_str = "V4"; break;
2768 case ABI_DARWIN: abi_str = "darwin"; break;
2769 default: abi_str = "unknown"; break;
2770 }
2771
2772 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2773
2774 if (rs6000_altivec_abi)
2775 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2776
2777 if (rs6000_darwin64_abi)
2778 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2779
2780 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2781 (TARGET_SOFT_FLOAT ? "true" : "false"));
2782
2783 if (TARGET_LINK_STACK)
2784 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2785
2786 if (TARGET_P8_FUSION)
2787 {
2788 char options[80];
2789
2790 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2791 if (TARGET_P8_FUSION_SIGN)
2792 strcat (options, ", sign");
2793
2794 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2795 }
2796
2797 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2798 TARGET_SECURE_PLT ? "secure" : "bss");
2799 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2800 aix_struct_return ? "aix" : "sysv");
2801 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2802 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2803 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2804 tf[!!rs6000_align_branch_targets]);
2805 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2806 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2807 rs6000_long_double_type_size);
2808 if (rs6000_long_double_type_size > 64)
2809 {
2810 fprintf (stderr, DEBUG_FMT_S, "long double type",
2811 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2812 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2813 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2814 }
2815 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2816 (int)rs6000_sched_restricted_insns_priority);
2817 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2818 (int)END_BUILTINS);
2819 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2820 (int)RS6000_BUILTIN_COUNT);
2821
2822 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2823 (int)TARGET_FLOAT128_ENABLE_TYPE);
2824
2825 if (TARGET_VSX)
2826 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2827 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2828
2829 if (TARGET_DIRECT_MOVE_128)
2830 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2831 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2832 }
2833
2834 \f
 2835 	/* Update the addr mask bits in reg_addr to help the secondary reload and
 2836 	   legitimate address support figure out the appropriate addressing to
 2837 	   use. */
2838
2839 static void
2840 rs6000_setup_reg_addr_masks (void)
2841 {
2842 ssize_t rc, reg, m, nregs;
2843 addr_mask_type any_addr_mask, addr_mask;
2844
2845 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2846 {
2847 machine_mode m2 = (machine_mode) m;
2848 bool complex_p = false;
2849 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2850 size_t msize;
2851
2852 if (COMPLEX_MODE_P (m2))
2853 {
2854 complex_p = true;
2855 m2 = GET_MODE_INNER (m2);
2856 }
2857
2858 msize = GET_MODE_SIZE (m2);
2859
2860 /* SDmode is special in that we want to access it only via REG+REG
2861 addressing on power7 and above, since we want to use the LFIWZX and
2862 STFIWZX instructions to load it. */
2863 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2864
2865 any_addr_mask = 0;
2866 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2867 {
2868 addr_mask = 0;
2869 reg = reload_reg_map[rc].reg;
2870
2871 /* Can mode values go in the GPR/FPR/Altivec registers? */
2872 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2873 {
2874 bool small_int_vsx_p = (small_int_p
2875 && (rc == RELOAD_REG_FPR
2876 || rc == RELOAD_REG_VMX));
2877
2878 nregs = rs6000_hard_regno_nregs[m][reg];
2879 addr_mask |= RELOAD_REG_VALID;
2880
2881 /* Indicate if the mode takes more than 1 physical register. If
2882 it takes a single register, indicate it can do REG+REG
2883 addressing. Small integers in VSX registers can only do
2884 REG+REG addressing. */
2885 if (small_int_vsx_p)
2886 addr_mask |= RELOAD_REG_INDEXED;
2887 else if (nregs > 1 || m == BLKmode || complex_p)
2888 addr_mask |= RELOAD_REG_MULTIPLE;
2889 else
2890 addr_mask |= RELOAD_REG_INDEXED;
2891
2892 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2893 addressing. If we allow scalars into Altivec registers,
2894 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2895
2896 For VSX systems, we don't allow update addressing for
2897 DFmode/SFmode if those registers can go in both the
2898 traditional floating point registers and Altivec registers.
2899 The load/store instructions for the Altivec registers do not
2900 have update forms. If we allowed update addressing, it seems
2901 to break IV-OPT code using floating point if the index type is
2902 int instead of long (PR target/81550 and target/84042). */
2903
2904 if (TARGET_UPDATE
2905 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2906 && msize <= 8
2907 && !VECTOR_MODE_P (m2)
2908 && !FLOAT128_VECTOR_P (m2)
2909 && !complex_p
2910 && (m != E_DFmode || !TARGET_VSX)
2911 && (m != E_SFmode || !TARGET_P8_VECTOR)
2912 && !small_int_vsx_p)
2913 {
2914 addr_mask |= RELOAD_REG_PRE_INCDEC;
2915
2916 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2917 we don't allow PRE_MODIFY for some multi-register
2918 operations. */
2919 switch (m)
2920 {
2921 default:
2922 addr_mask |= RELOAD_REG_PRE_MODIFY;
2923 break;
2924
2925 case E_DImode:
2926 if (TARGET_POWERPC64)
2927 addr_mask |= RELOAD_REG_PRE_MODIFY;
2928 break;
2929
2930 case E_DFmode:
2931 case E_DDmode:
2932 if (TARGET_HARD_FLOAT)
2933 addr_mask |= RELOAD_REG_PRE_MODIFY;
2934 break;
2935 }
2936 }
2937 }
2938
2939 /* GPR and FPR registers can do REG+OFFSET addressing, except
2940 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
 2941 	     for 64-bit scalars and 32-bit SFmode to Altivec registers. */
2942 if ((addr_mask != 0) && !indexed_only_p
2943 && msize <= 8
2944 && (rc == RELOAD_REG_GPR
2945 || ((msize == 8 || m2 == SFmode)
2946 && (rc == RELOAD_REG_FPR
2947 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2948 addr_mask |= RELOAD_REG_OFFSET;
2949
 2950 	  /* VSX registers can do REG+OFFSET addressing if ISA 3.0
 2951 	     instructions are enabled. The offset for 128-bit VSX registers is
 2952 	     only 12 bits. While GPRs can handle the full offset range, VSX
 2953 	     registers can only handle the restricted range. */
2954 else if ((addr_mask != 0) && !indexed_only_p
2955 && msize == 16 && TARGET_P9_VECTOR
2956 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2957 || (m2 == TImode && TARGET_VSX)))
2958 {
2959 addr_mask |= RELOAD_REG_OFFSET;
2960 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2961 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2962 }
2963
2964 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2965 addressing on 128-bit types. */
2966 if (rc == RELOAD_REG_VMX && msize == 16
2967 && (addr_mask & RELOAD_REG_VALID) != 0)
2968 addr_mask |= RELOAD_REG_AND_M16;
2969
2970 reg_addr[m].addr_mask[rc] = addr_mask;
2971 any_addr_mask |= addr_mask;
2972 }
2973
2974 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2975 }
2976 }
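/* A concrete reading of the loop above (illustrative): a 16-byte vector
   mode in the GPRs spans multiple registers, so its GPR mask has
   RELOAD_REG_VALID | RELOAD_REG_MULTIPLE rather than RELOAD_REG_INDEXED,
   while the same mode in the VMX registers additionally gets
   RELOAD_REG_AND_M16 for the (REG & -16) Altivec addressing forms.  */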
2977
2978 \f
2979 /* Initialize the various global tables that are based on register size. */
2980 static void
2981 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2982 {
2983 ssize_t r, m, c;
2984 int align64;
2985 int align32;
2986
2987 /* Precalculate REGNO_REG_CLASS. */
2988 rs6000_regno_regclass[0] = GENERAL_REGS;
2989 for (r = 1; r < 32; ++r)
2990 rs6000_regno_regclass[r] = BASE_REGS;
2991
2992 for (r = 32; r < 64; ++r)
2993 rs6000_regno_regclass[r] = FLOAT_REGS;
2994
2995 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2996 rs6000_regno_regclass[r] = NO_REGS;
2997
2998 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2999 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3000
3001 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3002 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3003 rs6000_regno_regclass[r] = CR_REGS;
3004
3005 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3006 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3007 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3008 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3009 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3010 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3011 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3012 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3013 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3014 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3015
 3016 	  /* Precalculate the mapping from register class to simpler reload register
 3017 	     class. We don't need all of the register classes that are combinations of
 3018 	     different classes, just the simple ones that have constraint letters. */
3019 for (c = 0; c < N_REG_CLASSES; c++)
3020 reg_class_to_reg_type[c] = NO_REG_TYPE;
3021
3022 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3023 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3024 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3025 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3026 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3027 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3028 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3029 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3030 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3031 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3032
3033 if (TARGET_VSX)
3034 {
3035 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3036 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3037 }
3038 else
3039 {
3040 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3041 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3042 }
3043
 3044 	  /* Precalculate the valid memory formats as well as the vector information;
 3045 	     this must be set up before the rs6000_hard_regno_nregs_internal calls
 3046 	     below. */
3047 gcc_assert ((int)VECTOR_NONE == 0);
3048 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3049 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
3050
3051 gcc_assert ((int)CODE_FOR_nothing == 0);
3052 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3053
3054 gcc_assert ((int)NO_REGS == 0);
3055 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3056
 3057 	  /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
 3058 	     controls whether the compiler assumes native alignment or still uses 128-bit alignment. */
3059 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3060 {
3061 align64 = 64;
3062 align32 = 32;
3063 }
3064 else
3065 {
3066 align64 = 128;
3067 align32 = 128;
3068 }
3069
3070 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3071 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3072 if (TARGET_FLOAT128_TYPE)
3073 {
3074 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3075 rs6000_vector_align[KFmode] = 128;
3076
3077 if (FLOAT128_IEEE_P (TFmode))
3078 {
3079 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3080 rs6000_vector_align[TFmode] = 128;
3081 }
3082 }
3083
3084 /* V2DF mode, VSX only. */
3085 if (TARGET_VSX)
3086 {
3087 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3088 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3089 rs6000_vector_align[V2DFmode] = align64;
3090 }
3091
3092 /* V4SF mode, either VSX or Altivec. */
3093 if (TARGET_VSX)
3094 {
3095 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3096 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3097 rs6000_vector_align[V4SFmode] = align32;
3098 }
3099 else if (TARGET_ALTIVEC)
3100 {
3101 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3102 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3103 rs6000_vector_align[V4SFmode] = align32;
3104 }
3105
 3106 	  /* V16QImode, V8HImode, V4SImode are Altivec only, but the loads and stores
 3107 	     may use VSX instructions. */
3108 if (TARGET_ALTIVEC)
3109 {
3110 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3111 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3112 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3113 rs6000_vector_align[V4SImode] = align32;
3114 rs6000_vector_align[V8HImode] = align32;
3115 rs6000_vector_align[V16QImode] = align32;
3116
3117 if (TARGET_VSX)
3118 {
3119 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3120 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3121 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3122 }
3123 else
3124 {
3125 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3126 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3127 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3128 }
3129 }
3130
 3131 	  /* V2DImode: full arithmetic support requires ISA 2.07. Allow it under VSX
 3132 	     to do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3133 if (TARGET_VSX)
3134 {
3135 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3136 rs6000_vector_unit[V2DImode]
3137 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3138 rs6000_vector_align[V2DImode] = align64;
3139
3140 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3141 rs6000_vector_unit[V1TImode]
3142 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3143 rs6000_vector_align[V1TImode] = 128;
3144 }
3145
3146 /* DFmode, see if we want to use the VSX unit. Memory is handled
3147 differently, so don't set rs6000_vector_mem. */
3148 if (TARGET_VSX)
3149 {
3150 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3151 rs6000_vector_align[DFmode] = 64;
3152 }
3153
3154 /* SFmode, see if we want to use the VSX unit. */
3155 if (TARGET_P8_VECTOR)
3156 {
3157 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3158 rs6000_vector_align[SFmode] = 32;
3159 }
3160
 3161 	  /* Allow TImode in VSX registers and set the VSX memory macros. */
3162 if (TARGET_VSX)
3163 {
3164 rs6000_vector_mem[TImode] = VECTOR_VSX;
3165 rs6000_vector_align[TImode] = align64;
3166 }
3167
3168 /* Register class constraints for the constraints that depend on compile
3169 switches. When the VSX code was added, different constraints were added
3170 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
 3171 	     of the VSX registers are used. The register classes for scalar floating
 3172 	     point types are set based on whether we allow that type into the upper
 3173 	     (Altivec) registers. GCC has register classes to target the Altivec
 3174 	     registers for load/store operations, so it can select a VSX memory
 3175 	     operation instead of the traditional floating point operation. The
3176 constraints are:
3177
3178 d - Register class to use with traditional DFmode instructions.
3179 f - Register class to use with traditional SFmode instructions.
3180 v - Altivec register.
3181 wa - Any VSX register.
3182 wc - Reserved to represent individual CR bits (used in LLVM).
3183 wd - Preferred register class for V2DFmode.
3184 wf - Preferred register class for V4SFmode.
3185 wg - Float register for power6x move insns.
3186 wh - FP register for direct move instructions.
3187 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3188 wj - FP or VSX register to hold 64-bit integers for direct moves.
3189 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3190 wl - Float register if we can do 32-bit signed int loads.
3191 wm - VSX register for ISA 2.07 direct move operations.
3192 wn - always NO_REGS.
3193 wr - GPR if 64-bit mode is permitted.
3194 ws - Register class to do ISA 2.06 DF operations.
3195 wt - VSX register for TImode in VSX registers.
3196 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3197 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3198 ww - Register class to do SF conversions in with VSX operations.
3199 wx - Float register if we can do 32-bit int stores.
3200 wy - Register class to do ISA 2.07 SF operations.
3201 wz - Float register if we can do 32-bit unsigned int loads.
3202 wH - Altivec register if SImode is allowed in VSX registers.
3203 wI - VSX register if SImode is allowed in VSX registers.
3204 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3205 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
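/* A hedged sketch of how these letters are consumed (illustrative, not a
   real rs6000.md excerpt): a machine description operand such as

       (match_operand:DF 1 "vsx_register_operand" "ws")

   resolves "ws" through rs6000_constraints, yielding VSX_REGS or NO_REGS
   depending on the settings below, so one pattern serves both VSX and
   non-VSX configurations.  */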
3206
3207 if (TARGET_HARD_FLOAT)
3208 {
3209 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3210 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3211 }
3212
3213 if (TARGET_VSX)
3214 {
3215 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3216 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3217 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3218 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3219 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3220 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3221 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3222 }
3223
3224 /* Add conditional constraints based on various options, to allow us to
3225 collapse multiple insn patterns. */
3226 if (TARGET_ALTIVEC)
3227 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3228
3229 if (TARGET_MFPGPR) /* DFmode */
3230 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3231
3232 if (TARGET_LFIWAX)
3233 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3234
3235 if (TARGET_DIRECT_MOVE)
3236 {
3237 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3238 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3239 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3240 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3241 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3242 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3243 }
3244
3245 if (TARGET_POWERPC64)
3246 {
3247 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3248 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3249 }
3250
3251 if (TARGET_P8_VECTOR) /* SFmode */
3252 {
3253 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3254 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3255 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3256 }
3257 else if (TARGET_VSX)
3258 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3259
3260 if (TARGET_STFIWX)
3261 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3262
3263 if (TARGET_LFIWZX)
3264 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3265
3266 if (TARGET_FLOAT128_TYPE)
3267 {
3268 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3269 if (FLOAT128_IEEE_P (TFmode))
3270 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3271 }
3272
3273 if (TARGET_P9_VECTOR)
3274 {
3275 /* Support for new D-form instructions. */
3276 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3277
3278 /* Support for ISA 3.0 (power9) vectors. */
3279 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3280 }
3281
3282 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3283 if (TARGET_DIRECT_MOVE_128)
3284 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3285
3286 /* Support small integers in VSX registers. */
3287 if (TARGET_P8_VECTOR)
3288 {
3289 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3290 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3291 if (TARGET_P9_VECTOR)
3292 {
3293 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3294 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3295 }
3296 }
3297
3298 /* Set up the reload helper and direct move functions. */
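/* The reload_<mode>_<ptr> helpers give the register allocator a pattern
   to fall back on when a memory address is not directly valid for the
   mode being reloaded (e.g. a reg+offset address for a vector mode that
   only supports reg+reg); the helper legitimizes the address through a
   scratch register.  The _di/_si suffix selects the 64-bit or 32-bit
   pointer mode.  */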
3299 if (TARGET_VSX || TARGET_ALTIVEC)
3300 {
3301 if (TARGET_64BIT)
3302 {
3303 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3304 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3305 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3306 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3307 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3308 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3309 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3310 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3311 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3312 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3313 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3314 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3315 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3316 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3317 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3318 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3319 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3320 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3321 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3322 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3323
3324 if (FLOAT128_VECTOR_P (KFmode))
3325 {
3326 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3327 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3328 }
3329
3330 if (FLOAT128_VECTOR_P (TFmode))
3331 {
3332 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3333 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3334 }
3335
3336 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3337 available. */
3338 if (TARGET_NO_SDMODE_STACK)
3339 {
3340 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3341 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3342 }
3343
3344 if (TARGET_VSX)
3345 {
3346 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3347 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3348 }
3349
3350 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3351 {
3352 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3353 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3354 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3355 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3356 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3357 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3358 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3359 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3360 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3361
3362 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3363 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3364 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3365 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3366 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3367 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3368 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3369 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3370 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3371
3372 if (FLOAT128_VECTOR_P (KFmode))
3373 {
3374 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3375 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3376 }
3377
3378 if (FLOAT128_VECTOR_P (TFmode))
3379 {
3380 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3381 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3382 }
3383 }
3384 }
3385 else
3386 {
3387 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3388 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3389 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3390 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3391 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3392 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3393 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3394 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3395 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3396 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3397 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3398 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3399 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3400 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3401 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3402 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3403 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3404 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3405 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3406 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3407
3408 if (FLOAT128_VECTOR_P (KFmode))
3409 {
3410 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3411 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3412 }
3413
3414 if (FLOAT128_IEEE_P (TFmode))
3415 {
3416 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3417 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3418 }
3419
3420 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3421 available. */
3422 if (TARGET_NO_SDMODE_STACK)
3423 {
3424 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3425 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3426 }
3427
3428 if (TARGET_VSX)
3429 {
3430 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3431 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3432 }
3433
3434 if (TARGET_DIRECT_MOVE)
3435 {
3436 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3437 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3438 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3439 }
3440 }
3441
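/* scalar_in_vmx_p marks scalar modes that may be kept in the upper
   (traditional Altivec) half of the VSX register file, so that direct
   moves and scalar floating point operations can avoid bouncing values
   through memory.  */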
3442 reg_addr[DFmode].scalar_in_vmx_p = true;
3443 reg_addr[DImode].scalar_in_vmx_p = true;
3444
3445 if (TARGET_P8_VECTOR)
3446 {
3447 reg_addr[SFmode].scalar_in_vmx_p = true;
3448 reg_addr[SImode].scalar_in_vmx_p = true;
3449
3450 if (TARGET_P9_VECTOR)
3451 {
3452 reg_addr[HImode].scalar_in_vmx_p = true;
3453 reg_addr[QImode].scalar_in_vmx_p = true;
3454 }
3455 }
3456 }
3457
3458 /* Precalculate HARD_REGNO_NREGS. */
3459 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3460 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3461 rs6000_hard_regno_nregs[m][r]
3462 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
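/* For instance, on a 32-bit target DImode occupies two GPRs but only a
   single floating point register.  */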
3463
3464 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3465 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3466 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3467 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3468 rs6000_hard_regno_mode_ok_p[m][r] = true;
3469
3470 /* Precalculate CLASS_MAX_NREGS sizes. */
3471 for (c = 0; c < LIM_REG_CLASSES; ++c)
3472 {
3473 int reg_size;
3474
3475 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3476 reg_size = UNITS_PER_VSX_WORD;
3477
3478 else if (c == ALTIVEC_REGS)
3479 reg_size = UNITS_PER_ALTIVEC_WORD;
3480
3481 else if (c == FLOAT_REGS)
3482 reg_size = UNITS_PER_FP_WORD;
3483
3484 else
3485 reg_size = UNITS_PER_WORD;
3486
3487 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3488 {
3489 machine_mode m2 = (machine_mode)m;
3490 int reg_size2 = reg_size;
3491
3492 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3493 in VSX. */
3494 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3495 reg_size2 = UNITS_PER_FP_WORD;
3496
3497 rs6000_class_max_nregs[m][c]
3498 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3499 }
3500 }
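/* For example, IBM 128-bit floating point in a VSX register class
   yields (16 + 8 - 1) / 8 = 2 registers because of the FLOAT128_2REG_P
   adjustment above, even though a single VSX register is 16 bytes
   wide.  */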
3501
3502 /* Calculate which modes to automatically generate code to use the
3503 reciprocal divide and square root instructions. In the future, possibly
3504 automatically generate the instructions even if the user did not specify
3505 -mrecip. The older machines' double precision reciprocal sqrt estimate is
3506 not accurate enough. */
3507 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3508 if (TARGET_FRES)
3509 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3510 if (TARGET_FRE)
3511 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3512 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3513 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3514 if (VECTOR_UNIT_VSX_P (V2DFmode))
3515 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3516
3517 if (TARGET_FRSQRTES)
3518 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3519 if (TARGET_FRSQRTE)
3520 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3521 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3522 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3523 if (VECTOR_UNIT_VSX_P (V2DFmode))
3524 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
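/* With -mrecip and the fast-math flags checked below, a division such
   as x / y can then be expanded as x * fre (y), refined by
   Newton-Raphson steps, instead of a full-latency divide.  */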
3525
3526 if (rs6000_recip_control)
3527 {
3528 if (!flag_finite_math_only)
3529 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3530 "-ffast-math");
3531 if (flag_trapping_math)
3532 warning (0, "%qs requires %qs or %qs", "-mrecip",
3533 "-fno-trapping-math", "-ffast-math");
3534 if (!flag_reciprocal_math)
3535 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3536 "-ffast-math");
3537 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3538 {
3539 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3540 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3541 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3542
3543 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3544 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3545 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3546
3547 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3548 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3549 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3550
3551 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3552 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3553 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3554
3555 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3556 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3557 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3558
3559 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3560 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3561 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3562
3563 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3564 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3565 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3566
3567 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3568 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3569 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3570 }
3571 }
3572
3573 /* Update the addr mask bits in reg_addr to help secondary reload and the
3574 legitimate address support figure out the appropriate addressing to
3575 use. */
3576 rs6000_setup_reg_addr_masks ();
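/* E.g. after this call, reg_addr[DFmode] records that FPR accesses
   allow both reg+offset and indexed (reg+reg) forms, while
   Altivec-style accesses allow only indexed addressing; this is an
   illustrative summary of the mask contents, not an exhaustive one.  */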
3577
3578 if (global_init_p || TARGET_DEBUG_TARGET)
3579 {
3580 if (TARGET_DEBUG_REG)
3581 rs6000_debug_reg_global ();
3582
3583 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3584 fprintf (stderr,
3585 "SImode variable mult cost = %d\n"
3586 "SImode constant mult cost = %d\n"
3587 "SImode short constant mult cost = %d\n"
3588 "DImode multiplication cost = %d\n"
3589 "SImode division cost = %d\n"
3590 "DImode division cost = %d\n"
3591 "Simple fp operation cost = %d\n"
3592 "DFmode multiplication cost = %d\n"
3593 "SFmode division cost = %d\n"
3594 "DFmode division cost = %d\n"
3595 "cache line size = %d\n"
3596 "l1 cache size = %d\n"
3597 "l2 cache size = %d\n"
3598 "simultaneous prefetches = %d\n"
3599 "\n",
3600 rs6000_cost->mulsi,
3601 rs6000_cost->mulsi_const,
3602 rs6000_cost->mulsi_const9,
3603 rs6000_cost->muldi,
3604 rs6000_cost->divsi,
3605 rs6000_cost->divdi,
3606 rs6000_cost->fp,
3607 rs6000_cost->dmul,
3608 rs6000_cost->sdiv,
3609 rs6000_cost->ddiv,
3610 rs6000_cost->cache_line_size,
3611 rs6000_cost->l1_cache_size,
3612 rs6000_cost->l2_cache_size,
3613 rs6000_cost->simultaneous_prefetches);
3614 }
3615 }
3616
3617 #if TARGET_MACHO
3618 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3619
3620 static void
3621 darwin_rs6000_override_options (void)
3622 {
3623 /* The Darwin ABI always includes AltiVec; it cannot (validly) be turned
3624 off. */
3625 rs6000_altivec_abi = 1;
3626 TARGET_ALTIVEC_VRSAVE = 1;
3627 rs6000_current_abi = ABI_DARWIN;
3628
3629 if (DEFAULT_ABI == ABI_DARWIN
3630 && TARGET_64BIT)
3631 darwin_one_byte_bool = 1;
3632
3633 if (TARGET_64BIT && ! TARGET_POWERPC64)
3634 {
3635 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3636 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3637 }
3638 if (flag_mkernel)
3639 {
3640 rs6000_default_long_calls = 1;
3641 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3642 }
3643
3644 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3645 Altivec. */
3646 if (!flag_mkernel && !flag_apple_kext
3647 && TARGET_64BIT
3648 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3649 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3650
3651 /* Unless the user (not the configurer) has explicitly overridden
3652 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3653 G4 unless targeting the kernel. */
3654 if (!flag_mkernel
3655 && !flag_apple_kext
3656 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3657 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3658 && ! global_options_set.x_rs6000_cpu_index)
3659 {
3660 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3661 }
3662 }
3663 #endif
3664
3665 /* If not otherwise specified by a target, make 'long double' equivalent to
3666 'double'. */
3667
3668 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3669 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3670 #endif
3671
3672 /* Return the builtin mask of the various options used that could affect which
3673 builtins were used. In the past we used target_flags, but we've run out of
3674 bits, and some options are no longer in target_flags. */
3675
3676 HOST_WIDE_INT
3677 rs6000_builtin_mask_calculate (void)
3678 {
3679 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3680 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3681 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3682 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3683 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3684 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3685 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3686 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3687 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3688 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3689 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3690 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3691 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3692 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3693 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3694 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3695 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3696 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3697 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3698 | ((TARGET_LONG_DOUBLE_128
3699 && TARGET_HARD_FLOAT
3700 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3701 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3702 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3703 }
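/* For example, compiling with -mcpu=power8 sets (among others)
   RS6000_BTM_ALTIVEC, RS6000_BTM_VSX and RS6000_BTM_P8_VECTOR in this
   mask, which in turn gates which built-in functions are made
   available.  */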
3704
3705 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3706 to clobber the XER[CA] bit because clobbering that bit without telling
3707 the compiler worked just fine with versions of GCC before GCC 5, and
3708 breaking a lot of older code in ways that are hard to track down is
3709 not such a great idea. */
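/* For example, an asm statement such as

     asm ("addic %0,%1,1" : "=r" (x) : "r" (y));

   modifies XER[CA]; the implicit clobber added below keeps such code
   correct without requiring the user to spell out the carry bit.  */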
3710
3711 static rtx_insn *
3712 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3713 vec<const char *> &/*constraints*/,
3714 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3715 {
3716 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3717 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3718 return NULL;
3719 }
3720
3721 /* Override command line options.
3722
3723 Combine build-specific configuration information with options
3724 specified on the command line to set various state variables which
3725 influence code generation, optimization, and expansion of built-in
3726 functions. Assure that command-line configuration preferences are
3727 compatible with each other and with the build configuration; issue
3728 warnings while adjusting configuration or error messages while
3729 rejecting configuration.
3730
3731 Upon entry to this function:
3732
3733 This function is called once at the beginning of
3734 compilation, and then again at the start and end of compiling
3735 each section of code that has a different configuration, as
3736 indicated, for example, by adding the
3737
3738 __attribute__((__target__("cpu=power9")))
3739
3740 qualifier to a function definition or, for example, by bracketing
3741 code between
3742
3743 #pragma GCC target("altivec")
3744
3745 and
3746
3747 #pragma GCC reset_options
3748
3749 directives. Parameter global_init_p is true for the initial
3750 invocation, which initializes global variables, and false for all
3751 subsequent invocations.
3752
3753
3754 Various global state information is assumed to be valid. This
3755 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3756 default CPU specified at build configure time, TARGET_DEFAULT,
3757 representing the default set of option flags for the default
3758 target, and global_options_set.x_rs6000_isa_flags, representing
3759 which options were requested on the command line.
3760
3761 Upon return from this function:
3762
3763 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3764 was set by name on the command line. Additionally, if certain
3765 attributes are automatically enabled or disabled by this function
3766 in order to assure compatibility between options and
3767 configuration, the flags associated with those attributes are
3768 also set. By setting these "explicit bits", we avoid the risk
3769 that other code might accidentally overwrite these particular
3770 attributes with "default values".
3771
3772 The various bits of rs6000_isa_flags are set to indicate the
3773 target options that have been selected for the most current
3774 compilation efforts. This has the effect of also turning on the
3775 associated TARGET_XXX values since these are macros which are
3776 generally defined to test the corresponding bit of the
3777 rs6000_isa_flags variable.
3778
3779 The variable rs6000_builtin_mask is set to represent the target
3780 options for the most current compilation efforts, consistent with
3781 the current contents of rs6000_isa_flags. This variable controls
3782 expansion of built-in functions.
3783
3784 Various other global variables and fields of global structures
3785 (over 50 in all) are initialized to reflect the desired options
3786 for the most current compilation efforts. */
3787
3788 static bool
3789 rs6000_option_override_internal (bool global_init_p)
3790 {
3791 bool ret = true;
3792
3793 HOST_WIDE_INT set_masks;
3794 HOST_WIDE_INT ignore_masks;
3795 int cpu_index = -1;
3796 int tune_index;
3797 struct cl_target_option *main_target_opt
3798 = ((global_init_p || target_option_default_node == NULL)
3799 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3800
3801 /* Print defaults. */
3802 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3803 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3804
3805 /* Remember the explicit arguments. */
3806 if (global_init_p)
3807 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3808
3809 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3810 library functions, so warn about it. The flag may be useful for
3811 performance studies from time to time though, so don't disable it
3812 entirely. */
3813 if (global_options_set.x_rs6000_alignment_flags
3814 && rs6000_alignment_flags == MASK_ALIGN_POWER
3815 && DEFAULT_ABI == ABI_DARWIN
3816 && TARGET_64BIT)
3817 warning (0, "%qs is not supported for 64-bit Darwin;"
3818 " it is incompatible with the installed C and C++ libraries",
3819 "-malign-power");
3820
3821 /* Numerous experiments show that IRA-based loop pressure
3822 calculation works better for RTL loop invariant motion on targets
3823 with enough (>= 32) registers. It is an expensive optimization,
3824 so it is enabled only when optimizing for peak performance. */
3825 if (optimize >= 3 && global_init_p
3826 && !global_options_set.x_flag_ira_loop_pressure)
3827 flag_ira_loop_pressure = 1;
3828
3829 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3830 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3831 options were already specified. */
3832 if (flag_sanitize & SANITIZE_USER_ADDRESS
3833 && !global_options_set.x_flag_asynchronous_unwind_tables)
3834 flag_asynchronous_unwind_tables = 1;
3835
3836 /* Set the pointer size. */
3837 if (TARGET_64BIT)
3838 {
3839 rs6000_pmode = DImode;
3840 rs6000_pointer_size = 64;
3841 }
3842 else
3843 {
3844 rs6000_pmode = SImode;
3845 rs6000_pointer_size = 32;
3846 }
3847
3848 /* Some OSs don't support saving the high part of 64-bit registers on context
3849 switch. Other OSs don't support saving Altivec registers. On those OSs,
3850 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3851 if the user wants either, the user must explicitly specify them and we
3852 won't interfere with the user's specification. */
3853
3854 set_masks = POWERPC_MASKS;
3855 #ifdef OS_MISSING_POWERPC64
3856 if (OS_MISSING_POWERPC64)
3857 set_masks &= ~OPTION_MASK_POWERPC64;
3858 #endif
3859 #ifdef OS_MISSING_ALTIVEC
3860 if (OS_MISSING_ALTIVEC)
3861 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3862 | OTHER_VSX_VECTOR_MASKS);
3863 #endif
3864
3865 /* Don't let the processor default override explicitly given options. */
3866 set_masks &= ~rs6000_isa_flags_explicit;
3867
3868 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3869 the cpu in a target attribute or pragma, but did not specify a tuning
3870 option, use the cpu for the tuning option rather than the option specified
3871 with -mtune on the command line. Process a '--with-cpu' configuration
3872 request as an implicit -mcpu. */
3873 if (rs6000_cpu_index >= 0)
3874 cpu_index = rs6000_cpu_index;
3875 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3876 cpu_index = main_target_opt->x_rs6000_cpu_index;
3877 else if (OPTION_TARGET_CPU_DEFAULT)
3878 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3879
3880 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3881 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3882 with those from the cpu, except for options that were explicitly set. If
3883 we don't have a cpu, do not override the target bits set in
3884 TARGET_DEFAULT. */
3885 if (cpu_index >= 0)
3886 {
3887 rs6000_cpu_index = cpu_index;
3888 rs6000_isa_flags &= ~set_masks;
3889 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3890 & set_masks);
3891 }
3892 else
3893 {
3894 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3895 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3896 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
3897 to using rs6000_isa_flags, we need to do the initialization here.
3898
3899 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3900 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3901 HOST_WIDE_INT flags;
3902 if (TARGET_DEFAULT)
3903 flags = TARGET_DEFAULT;
3904 else
3905 {
3906 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3907 const char *default_cpu = (!TARGET_POWERPC64
3908 ? "powerpc"
3909 : (BYTES_BIG_ENDIAN
3910 ? "powerpc64"
3911 : "powerpc64le"));
3912 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3913 flags = processor_target_table[default_cpu_index].target_enable;
3914 }
3915 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3916 }
3917
3918 if (rs6000_tune_index >= 0)
3919 tune_index = rs6000_tune_index;
3920 else if (cpu_index >= 0)
3921 rs6000_tune_index = tune_index = cpu_index;
3922 else
3923 {
3924 size_t i;
3925 enum processor_type tune_proc
3926 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3927
3928 tune_index = -1;
3929 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3930 if (processor_target_table[i].processor == tune_proc)
3931 {
3932 tune_index = i;
3933 break;
3934 }
3935 }
3936
3937 if (cpu_index >= 0)
3938 rs6000_cpu = processor_target_table[cpu_index].processor;
3939 else
3940 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3941
3942 gcc_assert (tune_index >= 0);
3943 rs6000_tune = processor_target_table[tune_index].processor;
3944
3945 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3946 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3947 || rs6000_cpu == PROCESSOR_PPCE5500)
3948 {
3949 if (TARGET_ALTIVEC)
3950 error ("AltiVec not supported in this target");
3951 }
3952
3953 /* If we are optimizing big endian systems for space, use the load/store
3954 multiple instructions. */
3955 if (BYTES_BIG_ENDIAN && optimize_size)
3956 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3957
3958 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3959 because the hardware doesn't support the instructions used in little
3960 endian mode, and using them causes an alignment trap. The 750 does not
3961 cause an alignment trap (except when the target is unaligned). */
3962
3963 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3964 {
3965 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3966 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3967 warning (0, "%qs is not supported on little endian systems",
3968 "-mmultiple");
3969 }
3970
3971 /* If little-endian, default to -mstrict-align on older processors.
3972 Testing for htm matches power8 and later. */
3973 if (!BYTES_BIG_ENDIAN
3974 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3975 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3976
3977 if (!rs6000_fold_gimple)
3978 fprintf (stderr,
3979 "gimple folding of rs6000 builtins has been disabled.\n");
3980
3981 /* Add some warnings for VSX. */
3982 if (TARGET_VSX)
3983 {
3984 const char *msg = NULL;
3985 if (!TARGET_HARD_FLOAT)
3986 {
3987 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3988 msg = N_("-mvsx requires hardware floating point");
3989 else
3990 {
3991 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3992 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3993 }
3994 }
3995 else if (TARGET_AVOID_XFORM > 0)
3996 msg = N_("-mvsx needs indexed addressing");
3997 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3998 & OPTION_MASK_ALTIVEC))
3999 {
4000 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4001 msg = N_("-mvsx and -mno-altivec are incompatible");
4002 else
4003 msg = N_("-mno-altivec disables vsx");
4004 }
4005
4006 if (msg)
4007 {
4008 warning (0, msg);
4009 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4010 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4011 }
4012 }
4013
4014 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4015 the -mcpu setting to enable options that conflict. */
4016 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4017 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4018 | OPTION_MASK_ALTIVEC
4019 | OPTION_MASK_VSX)) != 0)
4020 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4021 | OPTION_MASK_DIRECT_MOVE)
4022 & ~rs6000_isa_flags_explicit);
4023
4024 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4025 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4026
4027 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4028 off all of the options that depend on those flags. */
4029 ignore_masks = rs6000_disable_incompatible_switches ();
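/* For example, an explicit -mno-vsx puts every option that depends on
   VSX into ignore_masks, so the ISA cascade below cannot silently
   re-enable it.  */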
4030
4031 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4032 unless the user explicitly used the -mno-<option> to disable the code. */
4033 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4034 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4035 else if (TARGET_P9_MINMAX)
4036 {
4037 if (cpu_index >= 0)
4038 {
4039 if (cpu_index == PROCESSOR_POWER9)
4040 {
4041 /* Legacy behavior: allow -mcpu=power9 with certain
4042 capabilities explicitly disabled. */
4043 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4044 }
4045 else
4046 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4047 "for <xxx> less than power9", "-mcpu");
4048 }
4049 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4050 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4051 & rs6000_isa_flags_explicit))
4052 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4053 were explicitly cleared. */
4054 error ("%qs incompatible with explicitly disabled options",
4055 "-mpower9-minmax");
4056 else
4057 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4058 }
4059 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4060 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4061 else if (TARGET_VSX)
4062 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4063 else if (TARGET_POPCNTD)
4064 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4065 else if (TARGET_DFP)
4066 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4067 else if (TARGET_CMPB)
4068 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4069 else if (TARGET_FPRND)
4070 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4071 else if (TARGET_POPCNTB)
4072 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4073 else if (TARGET_ALTIVEC)
4074 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
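/* The net effect of the cascade above is that, e.g., plain -mvsx also
   turns on the ISA 2.06 server options (unless explicitly disabled),
   mirroring the first processor generation that implemented each
   feature.  */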
4075
4076 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4077 {
4078 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4079 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4080 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4081 }
4082
4083 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4084 {
4085 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4086 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4087 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4088 }
4089
4090 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4091 {
4092 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4093 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4094 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4095 }
4096
4097 if (TARGET_P8_VECTOR && !TARGET_VSX)
4098 {
4099 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4100 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4101 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4102 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4103 {
4104 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4105 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4106 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4107 }
4108 else
4109 {
4110 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4111 not explicit. */
4112 rs6000_isa_flags |= OPTION_MASK_VSX;
4113 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4114 }
4115 }
4116
4117 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4118 {
4119 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4120 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4121 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4122 }
4123
4124 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4125 silently turn off quad memory mode. */
4126 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4127 {
4128 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4129 warning (0, N_("-mquad-memory requires 64-bit mode"));
4130
4131 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4132 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4133
4134 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4135 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4136 }
4137
4138 /* Non-atomic quad memory load/store are disabled for little endian, since
4139 the words are reversed, but atomic operations can still be done by
4140 swapping the words. */
4141 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4142 {
4143 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4144 warning (0, N_("-mquad-memory is not available in little endian "
4145 "mode"));
4146
4147 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4148 }
4149
4150 /* Assume that if the user asked for normal quad memory instructions, they
4151 want the atomic versions as well, unless they explicitly told us not to
4152 use quad word atomic instructions. */
4153 if (TARGET_QUAD_MEMORY
4154 && !TARGET_QUAD_MEMORY_ATOMIC
4155 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4156 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4157
4158 /* If we can shrink-wrap the TOC register save separately, then use
4159 -msave-toc-indirect unless explicitly disabled. */
4160 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4161 && flag_shrink_wrap_separate
4162 && optimize_function_for_speed_p (cfun))
4163 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4164
4165 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4166 generating power8 instructions. */
4167 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4168 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4169 & OPTION_MASK_P8_FUSION);
4170
4171 /* Setting additional fusion flags turns on base fusion. */
4172 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4173 {
4174 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4175 {
4176 if (TARGET_P8_FUSION_SIGN)
4177 error ("%qs requires %qs", "-mpower8-fusion-sign",
4178 "-mpower8-fusion");
4179
4180 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4181 }
4182 else
4183 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4184 }
4185
4186 /* Power9 fusion is a superset of power8 fusion. */
4187 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4188 {
4189 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4190 {
4191 /* We prefer to not mention undocumented options in
4192 error messages. However, if users have managed to select
4193 power9-fusion without selecting power8-fusion, they
4194 already know about undocumented flags. */
4195 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4196 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4197 }
4198 else
4199 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4200 }
4201
4202 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4203 generating power9 instructions. */
4204 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4205 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4206 & OPTION_MASK_P9_FUSION);
4207
4208 /* Power8 does not fuse sign extended loads with the addis. If we are
4209 optimizing at high levels for speed, convert a sign extended load into a
4210 zero extending load, and an explicit sign extension. */
4211 if (TARGET_P8_FUSION
4212 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4213 && optimize_function_for_speed_p (cfun)
4214 && optimize >= 3)
4215 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4216
4217 /* ISA 3.0 vector instructions include ISA 2.07. */
4218 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4219 {
4220 /* We prefer to not mention undocumented options in
4221 error messages. However, if users have managed to select
4222 power9-vector without selecting power8-vector, they
4223 already know about undocumented flags. */
4224 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4225 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4226 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4227 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4228 {
4229 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4230 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4231 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4232 }
4233 else
4234 {
4235 /* OPTION_MASK_P9_VECTOR is explicit and
4236 OPTION_MASK_P8_VECTOR is not explicit. */
4237 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4238 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4239 }
4240 }
4241
4242 /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
4243 support. If we only have ISA 2.06 support, and the user did not specify
4244 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4245 but we don't enable the full vectorization support. */
4246 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4247 TARGET_ALLOW_MOVMISALIGN = 1;
4248
4249 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4250 {
4251 if (TARGET_ALLOW_MOVMISALIGN > 0
4252 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4253 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4254
4255 TARGET_ALLOW_MOVMISALIGN = 0;
4256 }
4257
4258 /* Determine when unaligned vector accesses are permitted, and when
4259 they are preferred over masked Altivec loads. Note that if
4260 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4261 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4262 not true. */
4263 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4264 {
4265 if (!TARGET_VSX)
4266 {
4267 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4268 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4269
4270 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4271 }
4272
4273 else if (!TARGET_ALLOW_MOVMISALIGN)
4274 {
4275 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4276 error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4277 "-mallow-movmisalign");
4278
4279 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4280 }
4281 }
4282
4283 /* Use long double size to select the appropriate long double. We use
4284 TYPE_PRECISION to differentiate the 3 different long double types. We map
4285 128 into the precision used for TFmode. */
4286 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4287 ? 64
4288 : FLOAT_PRECISION_TFmode);
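/* The three variants are 64-bit long double (same format as double),
   128-bit IBM extended (a pair of doubles), and 128-bit IEEE; the two
   128-bit forms have the same size but are told apart by
   TYPE_PRECISION.  */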
4289
4290 /* Set long double size before the IEEE 128-bit tests. */
4291 if (!global_options_set.x_rs6000_long_double_type_size)
4292 {
4293 if (main_target_opt != NULL
4294 && (main_target_opt->x_rs6000_long_double_type_size
4295 != default_long_double_size))
4296 error ("target attribute or pragma changes long double size");
4297 else
4298 rs6000_long_double_type_size = default_long_double_size;
4299 }
4300 else if (rs6000_long_double_type_size == 128)
4301 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4302
4303 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4304 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4305 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4306 those systems will not pick up this default. Warn if the user changes the
4307 default unless -Wno-psabi. */
4308 if (!global_options_set.x_rs6000_ieeequad)
4309 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4310
4311 else if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4312 {
4313 static bool warned_change_long_double;
4314 if (!warned_change_long_double)
4315 {
4316 warned_change_long_double = true;
4317 if (TARGET_IEEEQUAD)
4318 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4319 else
4320 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4321 }
4322 }
4323
4324 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4325 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4326 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4327 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4328 the keyword and the type. */
4329 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4330
4331 /* IEEE 128-bit floating point requires VSX support. */
4332 if (TARGET_FLOAT128_KEYWORD)
4333 {
4334 if (!TARGET_VSX)
4335 {
4336 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4337 error ("%qs requires VSX support", "-mfloat128");
4338
4339 TARGET_FLOAT128_TYPE = 0;
4340 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4341 | OPTION_MASK_FLOAT128_HW);
4342 }
4343 else if (!TARGET_FLOAT128_TYPE)
4344 {
4345 TARGET_FLOAT128_TYPE = 1;
4346 warning (0, "%qs may not be fully supported", "-mfloat128");
4347 }
4348 }
4349
4350 /* Enable the __float128 keyword under Linux by default. */
4351 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4352 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4353 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4354
4355 /* If we are supporting the float128 type and have full ISA 3.0 support,
4356 enable -mfloat128-hardware by default. However, don't enable the
4357 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4358 because sometimes the compiler wants to put things in an integer
4359 container, and if we don't have __int128 support, it is impossible. */
4360 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4361 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4362 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4363 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4364
4365 if (TARGET_FLOAT128_HW
4366 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4367 {
4368 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4369 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4370
4371 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4372 }
4373
4374 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4375 {
4376 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4377 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4378
4379 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4380 }
4381
4382 /* Print the options after updating the defaults. */
4383 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4384 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4385
4386 /* E500mc does "better" if we inline more aggressively. Respect the
4387 user's opinion, though. */
4388 if (rs6000_block_move_inline_limit == 0
4389 && (rs6000_tune == PROCESSOR_PPCE500MC
4390 || rs6000_tune == PROCESSOR_PPCE500MC64
4391 || rs6000_tune == PROCESSOR_PPCE5500
4392 || rs6000_tune == PROCESSOR_PPCE6500))
4393 rs6000_block_move_inline_limit = 128;
4394
4395 /* store_one_arg depends on expand_block_move to handle at least the
4396 size of reg_parm_stack_space. */
4397 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4398 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4399
4400 if (global_init_p)
4401 {
4402 /* If the appropriate debug option is enabled, replace the target hooks
4403 with debug versions that call the real version and then print
4404 debugging information. */
4405 if (TARGET_DEBUG_COST)
4406 {
4407 targetm.rtx_costs = rs6000_debug_rtx_costs;
4408 targetm.address_cost = rs6000_debug_address_cost;
4409 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4410 }
4411
4412 if (TARGET_DEBUG_ADDR)
4413 {
4414 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4415 targetm.legitimize_address = rs6000_debug_legitimize_address;
4416 rs6000_secondary_reload_class_ptr
4417 = rs6000_debug_secondary_reload_class;
4418 targetm.secondary_memory_needed
4419 = rs6000_debug_secondary_memory_needed;
4420 targetm.can_change_mode_class
4421 = rs6000_debug_can_change_mode_class;
4422 rs6000_preferred_reload_class_ptr
4423 = rs6000_debug_preferred_reload_class;
4424 rs6000_legitimize_reload_address_ptr
4425 = rs6000_debug_legitimize_reload_address;
4426 rs6000_mode_dependent_address_ptr
4427 = rs6000_debug_mode_dependent_address;
4428 }
4429
4430 if (rs6000_veclibabi_name)
4431 {
4432 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4433 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4434 else
4435 {
4436 error ("unknown vectorization library ABI type (%qs) for "
4437 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4438 ret = false;
4439 }
4440 }
4441 }
4442
4443 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4444 target attribute or pragma which automatically enables both options,
4445 unless the altivec ABI was set. This is set by default for 64-bit, but
4446 not for 32-bit. */
4447 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4448 {
4449 TARGET_FLOAT128_TYPE = 0;
4450 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4451 | OPTION_MASK_FLOAT128_KEYWORD)
4452 & ~rs6000_isa_flags_explicit);
4453 }
4454
4455 /* Enable Altivec ABI for AIX -maltivec. */
4456 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4457 {
4458 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4459 error ("target attribute or pragma changes AltiVec ABI");
4460 else
4461 rs6000_altivec_abi = 1;
4462 }
4463
4464 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4465 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4466 be explicitly overridden in either case. */
4467 if (TARGET_ELF)
4468 {
4469 if (!global_options_set.x_rs6000_altivec_abi
4470 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4471 {
4472 if (main_target_opt != NULL
4473 && !main_target_opt->x_rs6000_altivec_abi)
4474 error ("target attribute or pragma changes AltiVec ABI");
4475 else
4476 rs6000_altivec_abi = 1;
4477 }
4478 }
4479
4480 /* Set the Darwin64 ABI as the default for 64-bit Darwin.
4481 So far, the only darwin64 targets are also Mach-O. */
4482 if (TARGET_MACHO
4483 && DEFAULT_ABI == ABI_DARWIN
4484 && TARGET_64BIT)
4485 {
4486 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4487 error ("target attribute or pragma changes darwin64 ABI");
4488 else
4489 {
4490 rs6000_darwin64_abi = 1;
4491 /* Default to natural alignment, for better performance. */
4492 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4493 }
4494 }
4495
4496 /* Place FP constants in the constant pool instead of TOC
4497 if section anchors enabled. */
4498 if (flag_section_anchors
4499 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4500 TARGET_NO_FP_IN_TOC = 1;
4501
4502 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4503 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4504
4505 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4506 SUBTARGET_OVERRIDE_OPTIONS;
4507 #endif
4508 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4509 SUBSUBTARGET_OVERRIDE_OPTIONS;
4510 #endif
4511 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4512 SUB3TARGET_OVERRIDE_OPTIONS;
4513 #endif
4514
4515 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4516 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4517
4518 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4519 && rs6000_tune != PROCESSOR_POWER5
4520 && rs6000_tune != PROCESSOR_POWER6
4521 && rs6000_tune != PROCESSOR_POWER7
4522 && rs6000_tune != PROCESSOR_POWER8
4523 && rs6000_tune != PROCESSOR_POWER9
4524 && rs6000_tune != PROCESSOR_PPCA2
4525 && rs6000_tune != PROCESSOR_CELL
4526 && rs6000_tune != PROCESSOR_PPC476);
4527 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4528 || rs6000_tune == PROCESSOR_POWER5
4529 || rs6000_tune == PROCESSOR_POWER7
4530 || rs6000_tune == PROCESSOR_POWER8);
4531 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4532 || rs6000_tune == PROCESSOR_POWER5
4533 || rs6000_tune == PROCESSOR_POWER6
4534 || rs6000_tune == PROCESSOR_POWER7
4535 || rs6000_tune == PROCESSOR_POWER8
4536 || rs6000_tune == PROCESSOR_POWER9
4537 || rs6000_tune == PROCESSOR_PPCE500MC
4538 || rs6000_tune == PROCESSOR_PPCE500MC64
4539 || rs6000_tune == PROCESSOR_PPCE5500
4540 || rs6000_tune == PROCESSOR_PPCE6500);
4541
4542 /* Allow debug switches to override the above settings. These are set to -1
4543 in rs6000.opt to indicate the user hasn't directly set the switch. */
4544 if (TARGET_ALWAYS_HINT >= 0)
4545 rs6000_always_hint = TARGET_ALWAYS_HINT;
4546
4547 if (TARGET_SCHED_GROUPS >= 0)
4548 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4549
4550 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4551 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4552
4553 rs6000_sched_restricted_insns_priority
4554 = (rs6000_sched_groups ? 1 : 0);
4555
4556 /* Handle -msched-costly-dep option. */
4557 rs6000_sched_costly_dep
4558 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4559
4560 if (rs6000_sched_costly_dep_str)
4561 {
4562 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4563 rs6000_sched_costly_dep = no_dep_costly;
4564 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4565 rs6000_sched_costly_dep = all_deps_costly;
4566 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4567 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4568 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4569 rs6000_sched_costly_dep = store_to_load_dep_costly;
4570 else
4571 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4572 atoi (rs6000_sched_costly_dep_str));
4573 }
4574
4575 /* Handle -minsert-sched-nops option. */
4576 rs6000_sched_insert_nops
4577 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4578
4579 if (rs6000_sched_insert_nops_str)
4580 {
4581 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4582 rs6000_sched_insert_nops = sched_finish_none;
4583 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4584 rs6000_sched_insert_nops = sched_finish_pad_groups;
4585 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4586 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4587 else
4588 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4589 atoi (rs6000_sched_insert_nops_str));
4590 }
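/* For instance, -minsert-sched-nops=pad fills vacant issue slots in
   each dispatch group with nops, while regroup_exact inserts nops to
   force costly dependent insns into separate groups.  */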
4591
4592 /* Handle the stack protector options. */
4593 if (!global_options_set.x_rs6000_stack_protector_guard)
4594 #ifdef TARGET_THREAD_SSP_OFFSET
4595 rs6000_stack_protector_guard = SSP_TLS;
4596 #else
4597 rs6000_stack_protector_guard = SSP_GLOBAL;
4598 #endif
4599
4600 #ifdef TARGET_THREAD_SSP_OFFSET
4601 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4602 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4603 #endif
4604
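/* Parse a user-supplied guard offset, e.g.
   -mstack-protector-guard-offset=0x28 (the value is only an
   illustration); it must fit in a signed 16-bit displacement and, in
   64-bit mode, be a multiple of 4 so it is valid for the DS-form ld
   instruction.  */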
4605 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4606 {
4607 char *endp;
4608 const char *str = rs6000_stack_protector_guard_offset_str;
4609
4610 errno = 0;
4611 long offset = strtol (str, &endp, 0);
4612 if (!*str || *endp || errno)
4613 error ("%qs is not a valid number in %qs", str,
4614 "-mstack-protector-guard-offset=");
4615
4616 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4617 || (TARGET_64BIT && (offset & 3)))
4618 error ("%qs is not a valid offset in %qs", str,
4619 "-mstack-protector-guard-offset=");
4620
4621 rs6000_stack_protector_guard_offset = offset;
4622 }
4623
4624 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4625 {
4626 const char *str = rs6000_stack_protector_guard_reg_str;
4627 int reg = decode_reg_name (str);
4628
4629 if (!IN_RANGE (reg, 1, 31))
4630 error ("%qs is not a valid base register in %qs", str,
4631 "-mstack-protector-guard-reg=");
4632
4633 rs6000_stack_protector_guard_reg = reg;
4634 }
4635
4636 if (rs6000_stack_protector_guard == SSP_TLS
4637 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4638 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4639
4640 if (global_init_p)
4641 {
4642 #ifdef TARGET_REGNAMES
4643 /* If the user desires alternate register names, copy in the
4644 alternate names now. */
4645 if (TARGET_REGNAMES)
4646 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4647 #endif
4648
4649 /* Set aix_struct_return last, after the ABI is determined.
4650 If -maix-struct-return or -msvr4-struct-return was explicitly
4651 used, don't override with the ABI default. */
4652 if (!global_options_set.x_aix_struct_return)
4653 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4654
4655 #if 0
4656 /* IBM XL compiler defaults to unsigned bitfields. */
4657 if (TARGET_XL_COMPAT)
4658 flag_signed_bitfields = 0;
4659 #endif
4660
4661 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4662 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4663
4664 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4665
4666 /* We can only guarantee the availability of DI pseudo-ops when
4667 assembling for 64-bit targets. */
4668 if (!TARGET_64BIT)
4669 {
4670 targetm.asm_out.aligned_op.di = NULL;
4671 targetm.asm_out.unaligned_op.di = NULL;
4672 }
4673
4674
4675 /* Set branch target alignment, if not optimizing for size. */
4676 if (!optimize_size)
4677 {
4678 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
4679 aligned 8-byte to avoid misprediction by the branch predictor. */
4680 if (rs6000_tune == PROCESSOR_TITAN
4681 || rs6000_tune == PROCESSOR_CELL)
4682 {
4683 if (flag_align_functions && !str_align_functions)
4684 str_align_functions = "8";
4685 if (flag_align_jumps && !str_align_jumps)
4686 str_align_jumps = "8";
4687 if (flag_align_loops && !str_align_loops)
4688 str_align_loops = "8";
4689 }
4690 if (rs6000_align_branch_targets)
4691 {
4692 if (flag_align_functions && !str_align_functions)
4693 str_align_functions = "16";
4694 if (flag_align_jumps && !str_align_jumps)
4695 str_align_jumps = "16";
4696 if (flag_align_loops && !str_align_loops)
4697 {
4698 can_override_loop_align = 1;
4699 str_align_loops = "16";
4700 }
4701 }
4702
4703 if (flag_align_jumps && !str_align_jumps)
4704 str_align_jumps = "16";
4705 if (flag_align_loops && !str_align_loops)
4706 str_align_loops = "16";
4707 }
4708
4709 /* Arrange to save and restore machine status around nested functions. */
4710 init_machine_status = rs6000_init_machine_status;
4711
4712 /* We should always be splitting complex arguments, but we can't break
4713 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4714 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4715 targetm.calls.split_complex_arg = NULL;
4716
4717 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4718 if (DEFAULT_ABI == ABI_AIX)
4719 targetm.calls.custom_function_descriptors = 0;
4720 }
4721
4722 /* Initialize rs6000_cost with the appropriate target costs. */
4723 if (optimize_size)
4724 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4725 else
4726 switch (rs6000_tune)
4727 {
4728 case PROCESSOR_RS64A:
4729 rs6000_cost = &rs64a_cost;
4730 break;
4731
4732 case PROCESSOR_MPCCORE:
4733 rs6000_cost = &mpccore_cost;
4734 break;
4735
4736 case PROCESSOR_PPC403:
4737 rs6000_cost = &ppc403_cost;
4738 break;
4739
4740 case PROCESSOR_PPC405:
4741 rs6000_cost = &ppc405_cost;
4742 break;
4743
4744 case PROCESSOR_PPC440:
4745 rs6000_cost = &ppc440_cost;
4746 break;
4747
4748 case PROCESSOR_PPC476:
4749 rs6000_cost = &ppc476_cost;
4750 break;
4751
4752 case PROCESSOR_PPC601:
4753 rs6000_cost = &ppc601_cost;
4754 break;
4755
4756 case PROCESSOR_PPC603:
4757 rs6000_cost = &ppc603_cost;
4758 break;
4759
4760 case PROCESSOR_PPC604:
4761 rs6000_cost = &ppc604_cost;
4762 break;
4763
4764 case PROCESSOR_PPC604e:
4765 rs6000_cost = &ppc604e_cost;
4766 break;
4767
4768 case PROCESSOR_PPC620:
4769 rs6000_cost = &ppc620_cost;
4770 break;
4771
4772 case PROCESSOR_PPC630:
4773 rs6000_cost = &ppc630_cost;
4774 break;
4775
4776 case PROCESSOR_CELL:
4777 rs6000_cost = &ppccell_cost;
4778 break;
4779
4780 case PROCESSOR_PPC750:
4781 case PROCESSOR_PPC7400:
4782 rs6000_cost = &ppc750_cost;
4783 break;
4784
4785 case PROCESSOR_PPC7450:
4786 rs6000_cost = &ppc7450_cost;
4787 break;
4788
4789 case PROCESSOR_PPC8540:
4790 case PROCESSOR_PPC8548:
4791 rs6000_cost = &ppc8540_cost;
4792 break;
4793
4794 case PROCESSOR_PPCE300C2:
4795 case PROCESSOR_PPCE300C3:
4796 rs6000_cost = &ppce300c2c3_cost;
4797 break;
4798
4799 case PROCESSOR_PPCE500MC:
4800 rs6000_cost = &ppce500mc_cost;
4801 break;
4802
4803 case PROCESSOR_PPCE500MC64:
4804 rs6000_cost = &ppce500mc64_cost;
4805 break;
4806
4807 case PROCESSOR_PPCE5500:
4808 rs6000_cost = &ppce5500_cost;
4809 break;
4810
4811 case PROCESSOR_PPCE6500:
4812 rs6000_cost = &ppce6500_cost;
4813 break;
4814
4815 case PROCESSOR_TITAN:
4816 rs6000_cost = &titan_cost;
4817 break;
4818
4819 case PROCESSOR_POWER4:
4820 case PROCESSOR_POWER5:
4821 rs6000_cost = &power4_cost;
4822 break;
4823
4824 case PROCESSOR_POWER6:
4825 rs6000_cost = &power6_cost;
4826 break;
4827
4828 case PROCESSOR_POWER7:
4829 rs6000_cost = &power7_cost;
4830 break;
4831
4832 case PROCESSOR_POWER8:
4833 rs6000_cost = &power8_cost;
4834 break;
4835
4836 case PROCESSOR_POWER9:
4837 rs6000_cost = &power9_cost;
4838 break;
4839
4840 case PROCESSOR_PPCA2:
4841 rs6000_cost = &ppca2_cost;
4842 break;
4843
4844 default:
4845 gcc_unreachable ();
4846 }
4847
4848 if (global_init_p)
4849 {
4850 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4851 rs6000_cost->simultaneous_prefetches,
4852 global_options.x_param_values,
4853 global_options_set.x_param_values);
4854 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4855 global_options.x_param_values,
4856 global_options_set.x_param_values);
4857 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4858 rs6000_cost->cache_line_size,
4859 global_options.x_param_values,
4860 global_options_set.x_param_values);
4861 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4862 global_options.x_param_values,
4863 global_options_set.x_param_values);
4864
4865 /* Increase loop peeling limits based on performance analysis. */
4866 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4867 global_options.x_param_values,
4868 global_options_set.x_param_values);
4869 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4870 global_options.x_param_values,
4871 global_options_set.x_param_values);
4872
4873 /* Use the 'model' -fsched-pressure algorithm by default. */
4874 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4875 SCHED_PRESSURE_MODEL,
4876 global_options.x_param_values,
4877 global_options_set.x_param_values);
4878
4879 /* If using typedef char *va_list, signal that
4880 __builtin_va_start (&ap, 0) can be optimized to
4881 ap = __builtin_next_arg (0). */
4882 if (DEFAULT_ABI != ABI_V4)
4883 targetm.expand_builtin_va_start = NULL;
4884 }
4885
4886 /* If not explicitly specified via option, decide whether to generate indexed
4887 load/store instructions. A value of -1 indicates that the
4888 initial value of this variable has not been overwritten. During
4889 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4890 if (TARGET_AVOID_XFORM == -1)
4891 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4892 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4893 need indexed accesses and the type used is the scalar type of the element
4894 being loaded or stored. */
4895 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4896 && !TARGET_ALTIVEC);
4897
4898 /* Set the -mrecip options. */
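/* As an illustration of the syntax parsed below: "-mrecip=all,!rsqrtd"
   enables every reciprocal estimate except the double-precision
   reciprocal square root, since a leading '!' inverts the named mask;
   the recognized keywords come from the recip_options[] table.  */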
4899 if (rs6000_recip_name)
4900 {
4901 char *p = ASTRDUP (rs6000_recip_name);
4902 char *q;
4903 unsigned int mask, i;
4904 bool invert;
4905
4906 while ((q = strtok (p, ",")) != NULL)
4907 {
4908 p = NULL;
4909 if (*q == '!')
4910 {
4911 invert = true;
4912 q++;
4913 }
4914 else
4915 invert = false;
4916
4917 if (!strcmp (q, "default"))
4918 mask = ((TARGET_RECIP_PRECISION)
4919 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4920 else
4921 {
4922 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4923 if (!strcmp (q, recip_options[i].string))
4924 {
4925 mask = recip_options[i].mask;
4926 break;
4927 }
4928
4929 if (i == ARRAY_SIZE (recip_options))
4930 {
4931 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4932 invert = false;
4933 mask = 0;
4934 ret = false;
4935 }
4936 }
4937
4938 if (invert)
4939 rs6000_recip_control &= ~mask;
4940 else
4941 rs6000_recip_control |= mask;
4942 }
4943 }
4944
4945 /* Set the builtin mask from the various options that could affect which
4946 builtins are enabled. In the past we used target_flags, but we've run out
4947 of bits, and some options are no longer in target_flags. */
4948 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4949 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4950 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4951 rs6000_builtin_mask);
4952
4953 /* Initialize all of the registers. */
4954 rs6000_init_hard_regno_mode_ok (global_init_p);
4955
4956 /* Save the initial options in case the user uses function-specific options. */
4957 if (global_init_p)
4958 target_option_default_node = target_option_current_node
4959 = build_target_option_node (&global_options);
4960
4961 /* If not explicitly specified via option, decide whether to generate the
4962 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4963 if (TARGET_LINK_STACK == -1)
4964 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4965
4966 /* Deprecate use of -mno-speculate-indirect-jumps. */
4967 if (!rs6000_speculate_indirect_jumps)
4968 warning (0, "%qs is deprecated and not recommended in any circumstances",
4969 "-mno-speculate-indirect-jumps");
4970
4971 return ret;
4972 }
4973
4974 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4975 define the target cpu type. */
4976
4977 static void
4978 rs6000_option_override (void)
4979 {
4980 (void) rs6000_option_override_internal (true);
4981 }
4982
4983 \f
4984 /* Implement targetm.vectorize.builtin_mask_for_load. */
4985 static tree
4986 rs6000_builtin_mask_for_load (void)
4987 {
4988 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4989 if ((TARGET_ALTIVEC && !TARGET_VSX)
4990 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4991 return altivec_builtin_mask_for_load;
4992 else
4993 return 0;
4994 }
4995
4996 /* Implement LOOP_ALIGN. */
4997 align_flags
4998 rs6000_loop_align (rtx label)
4999 {
5000 basic_block bb;
5001 int ninsns;
5002
5003 /* Don't override loop alignment if -falign-loops was specified. */
5004 if (!can_override_loop_align)
5005 return align_loops;
5006
5007 bb = BLOCK_FOR_INSN (label);
5008 ninsns = num_loop_insns (bb->loop_father);
5009
5010 /* Align small loops to 32 bytes (align_flags (5) requests 2**5-byte alignment) to fit in an icache sector; otherwise return the default alignment. */
5011 if (ninsns > 4 && ninsns <= 8
5012 && (rs6000_tune == PROCESSOR_POWER4
5013 || rs6000_tune == PROCESSOR_POWER5
5014 || rs6000_tune == PROCESSOR_POWER6
5015 || rs6000_tune == PROCESSOR_POWER7
5016 || rs6000_tune == PROCESSOR_POWER8))
5017 return align_flags (5);
5018 else
5019 return align_loops;
5020 }
5021
5022 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5023 after applying N iterations. This routine does not determine how
5024 many iterations are required to reach the desired alignment. */
5025
5026 static bool
5027 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5028 {
5029 if (is_packed)
5030 return false;
5031
5032 if (TARGET_32BIT)
5033 {
5034 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5035 return true;
5036
5037 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5038 return true;
5039
5040 return false;
5041 }
5042 else
5043 {
5044 if (TARGET_MACHO)
5045 return false;
5046
5047 /* Assume that all other types are naturally aligned. CHECKME! */
5048 return true;
5049 }
5050 }
5051
5052 /* Return true if the vector misalignment factor is supported by the
5053 target. */
5054 static bool
5055 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5056 const_tree type,
5057 int misalignment,
5058 bool is_packed)
5059 {
5060 if (TARGET_VSX)
5061 {
5062 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5063 return true;
5064
5065 /* Return false if the movmisalign pattern is not supported for this mode. */
5066 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5067 return false;
5068
5069 if (misalignment == -1)
5070 {
5071 /* Misalignment factor is unknown at compile time but we know
5072 it's word aligned. */
5073 if (rs6000_vector_alignment_reachable (type, is_packed))
5074 {
5075 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5076
5077 if (element_size == 64 || element_size == 32)
5078 return true;
5079 }
5080
5081 return false;
5082 }
5083
5084 /* VSX supports word-aligned vectors. */
5085 if (misalignment % 4 == 0)
5086 return true;
5087 }
5088 return false;
5089 }
5090
5091 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5092 static int
5093 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5094 tree vectype, int misalign)
5095 {
5096 unsigned elements;
5097 tree elem_type;
5098
5099 switch (type_of_cost)
5100 {
5101 case scalar_stmt:
5102 case scalar_load:
5103 case scalar_store:
5104 case vector_stmt:
5105 case vector_load:
5106 case vector_store:
5107 case vec_to_scalar:
5108 case scalar_to_vec:
5109 case cond_branch_not_taken:
5110 return 1;
5111
5112 case vec_perm:
5113 if (TARGET_VSX)
5114 return 3;
5115 else
5116 return 1;
5117
5118 case vec_promote_demote:
5119 if (TARGET_VSX)
5120 return 4;
5121 else
5122 return 1;
5123
5124 case cond_branch_taken:
5125 return 3;
5126
5127 case unaligned_load:
5128 case vector_gather_load:
5129 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5130 return 1;
5131
5132 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5133 {
5134 elements = TYPE_VECTOR_SUBPARTS (vectype);
5135 if (elements == 2)
5136 /* Double word aligned. */
5137 return 2;
5138
5139 if (elements == 4)
5140 {
5141 switch (misalign)
5142 {
5143 case 8:
5144 /* Double word aligned. */
5145 return 2;
5146
5147 case -1:
5148 /* Unknown misalignment. */
5149 case 4:
5150 case 12:
5151 /* Word aligned. */
5152 return 22;
5153
5154 default:
5155 gcc_unreachable ();
5156 }
5157 }
5158 }
5159
5160 if (TARGET_ALTIVEC)
5161 /* Misaligned loads are not supported. */
5162 gcc_unreachable ();
5163
5164 return 2;
5165
5166 case unaligned_store:
5167 case vector_scatter_store:
5168 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5169 return 1;
5170
5171 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5172 {
5173 elements = TYPE_VECTOR_SUBPARTS (vectype);
5174 if (elements == 2)
5175 /* Double word aligned. */
5176 return 2;
5177
5178 if (elements == 4)
5179 {
5180 switch (misalign)
5181 {
5182 case 8:
5183 /* Double word aligned. */
5184 return 2;
5185
5186 case -1:
5187 /* Unknown misalignment. */
5188 case 4:
5189 case 12:
5190 /* Word aligned. */
5191 return 23;
5192
5193 default:
5194 gcc_unreachable ();
5195 }
5196 }
5197 }
5198
5199 if (TARGET_ALTIVEC)
5200 /* Misaligned stores are not supported. */
5201 gcc_unreachable ();
5202
5203 return 2;
5204
5205 case vec_construct:
5206 /* This is a rough approximation assuming non-constant elements
5207 constructed into a vector via element insertion. FIXME:
5208 vec_construct is not granular enough for uniformly good
5209 decisions. If the initialization is a splat, this is
5210 cheaper than we estimate. Improve this someday. */
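/* An illustrative instance of the heuristic below: a V4SImode
   construct is costed at 4 - 1 + 2 = 5 on a POWER9 target and
   4 - 1 + 5 = 8 otherwise.  */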
5211 elem_type = TREE_TYPE (vectype);
5212 /* 32-bit vectors loaded into registers are stored as double
5213 precision, so we need 2 permutes, 2 converts, and 1 merge
5214 to construct a vector of short floats from them. */
5215 if (SCALAR_FLOAT_TYPE_P (elem_type)
5216 && TYPE_PRECISION (elem_type) == 32)
5217 return 5;
5218 /* On POWER9, integer vector types are built up in GPRs and then
5219 use a direct move (2 cycles). For POWER8 this is even worse,
5220 as we need two direct moves and a merge, and the direct moves
5221 are five cycles. */
5222 else if (INTEGRAL_TYPE_P (elem_type))
5223 {
5224 if (TARGET_P9_VECTOR)
5225 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5226 else
5227 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5228 }
5229 else
5230 /* V2DFmode doesn't need a direct move. */
5231 return 2;
5232
5233 default:
5234 gcc_unreachable ();
5235 }
5236 }
5237
5238 /* Implement targetm.vectorize.preferred_simd_mode. */
5239
5240 static machine_mode
5241 rs6000_preferred_simd_mode (scalar_mode mode)
5242 {
5243 if (TARGET_VSX)
5244 switch (mode)
5245 {
5246 case E_DFmode:
5247 return V2DFmode;
5248 default:;
5249 }
5250 if (TARGET_ALTIVEC || TARGET_VSX)
5251 switch (mode)
5252 {
5253 case E_SFmode:
5254 return V4SFmode;
5255 case E_TImode:
5256 return V1TImode;
5257 case E_DImode:
5258 return V2DImode;
5259 case E_SImode:
5260 return V4SImode;
5261 case E_HImode:
5262 return V8HImode;
5263 case E_QImode:
5264 return V16QImode;
5265 default:;
5266 }
5267 return word_mode;
5268 }
5269
5270 typedef struct _rs6000_cost_data
5271 {
5272 struct loop *loop_info;
5273 unsigned cost[3];
5274 } rs6000_cost_data;
5275
5276 /* Test for likely overcommitment of vector hardware resources. If a
5277 loop iteration is relatively large, and too large a percentage of
5278 instructions in the loop are vectorized, the cost model may not
5279 adequately reflect delays from unavailable vector resources.
5280 Penalize the loop body cost for this case. */
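/* A worked example of the thresholds used below: with vec_cost == 90 and
   not_vec_cost == 10, density_pct is 90 (> 85) and the loop size is 100
   (> 70), so the vectorized body cost is scaled to 90 * 110 / 100 = 99.  */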
5281
5282 static void
5283 rs6000_density_test (rs6000_cost_data *data)
5284 {
5285 const int DENSITY_PCT_THRESHOLD = 85;
5286 const int DENSITY_SIZE_THRESHOLD = 70;
5287 const int DENSITY_PENALTY = 10;
5288 struct loop *loop = data->loop_info;
5289 basic_block *bbs = get_loop_body (loop);
5290 int nbbs = loop->num_nodes;
5291 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5292 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5293 int i, density_pct;
5294
5295 for (i = 0; i < nbbs; i++)
5296 {
5297 basic_block bb = bbs[i];
5298 gimple_stmt_iterator gsi;
5299
5300 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5301 {
5302 gimple *stmt = gsi_stmt (gsi);
5303 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5304
5305 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5306 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5307 not_vec_cost++;
5308 }
5309 }
5310
5311 free (bbs);
5312 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5313
5314 if (density_pct > DENSITY_PCT_THRESHOLD
5315 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5316 {
5317 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5318 if (dump_enabled_p ())
5319 dump_printf_loc (MSG_NOTE, vect_location,
5320 "density %d%%, cost %d exceeds threshold, penalizing "
5321 "loop body cost by %d%%", density_pct,
5322 vec_cost + not_vec_cost, DENSITY_PENALTY);
5323 }
5324 }
5325
5326 /* Implement targetm.vectorize.init_cost. */
5327
5328 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5329 instruction is needed by the vectorization. */
5330 static bool rs6000_vect_nonmem;
5331
5332 static void *
5333 rs6000_init_cost (struct loop *loop_info)
5334 {
5335 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5336 data->loop_info = loop_info;
5337 data->cost[vect_prologue] = 0;
5338 data->cost[vect_body] = 0;
5339 data->cost[vect_epilogue] = 0;
5340 rs6000_vect_nonmem = false;
5341 return data;
5342 }
5343
5344 /* Implement targetm.vectorize.add_stmt_cost. */
5345
5346 static unsigned
5347 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5348 struct _stmt_vec_info *stmt_info, int misalign,
5349 enum vect_cost_model_location where)
5350 {
5351 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5352 unsigned retval = 0;
5353
5354 if (flag_vect_cost_model)
5355 {
5356 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5357 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5358 misalign);
5359 /* Statements in an inner loop relative to the loop being
5360 vectorized are weighted more heavily. The value here is
5361 arbitrary and could potentially be improved with analysis. */
5362 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5363 count *= 50; /* FIXME. */
5364
5365 retval = (unsigned) (count * stmt_cost);
5366 cost_data->cost[where] += retval;
5367
5368 /* Check whether we're doing something other than just a copy loop.
5369 Not all such loops may be profitably vectorized; see
5370 rs6000_finish_cost. */
5371 if ((kind == vec_to_scalar || kind == vec_perm
5372 || kind == vec_promote_demote || kind == vec_construct
5373 || kind == scalar_to_vec)
5374 || (where == vect_body && kind == vector_stmt))
5375 rs6000_vect_nonmem = true;
5376 }
5377
5378 return retval;
5379 }
5380
5381 /* Implement targetm.vectorize.finish_cost. */
5382
5383 static void
5384 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5385 unsigned *body_cost, unsigned *epilogue_cost)
5386 {
5387 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5388
5389 if (cost_data->loop_info)
5390 rs6000_density_test (cost_data);
5391
5392 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5393 that require versioning for any reason. The vectorization is at
5394 best a wash inside the loop, and the versioning checks make
5395 profitability highly unlikely and potentially quite harmful. */
5396 if (cost_data->loop_info)
5397 {
5398 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5399 if (!rs6000_vect_nonmem
5400 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5401 && LOOP_REQUIRES_VERSIONING (vec_info))
5402 cost_data->cost[vect_body] += 10000;
5403 }
5404
5405 *prologue_cost = cost_data->cost[vect_prologue];
5406 *body_cost = cost_data->cost[vect_body];
5407 *epilogue_cost = cost_data->cost[vect_epilogue];
5408 }
5409
5410 /* Implement targetm.vectorize.destroy_cost_data. */
5411
5412 static void
5413 rs6000_destroy_cost_data (void *data)
5414 {
5415 free (data);
5416 }
5417
5418 /* Handler for the Mathematical Acceleration Subsystem (MASS) interface to a
5419 library with vectorized intrinsics. */
5420
5421 static tree
5422 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5423 tree type_in)
5424 {
5425 char name[32];
5426 const char *suffix = NULL;
5427 tree fntype, new_fndecl, bdecl = NULL_TREE;
5428 int n_args = 1;
5429 const char *bname;
5430 machine_mode el_mode, in_mode;
5431 int n, in_n;
5432
5433 /* Libmass is suitable for unsafe math only as it does not correctly support
5434 parts of IEEE with the required precision such as denormals. Only support
5435 it if we have VSX to use the simd d2 or f4 functions.
5436 XXX: Add variable length support. */
5437 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5438 return NULL_TREE;
5439
5440 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5441 n = TYPE_VECTOR_SUBPARTS (type_out);
5442 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5443 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5444 if (el_mode != in_mode
5445 || n != in_n)
5446 return NULL_TREE;
5447
5448 switch (fn)
5449 {
5450 CASE_CFN_ATAN2:
5451 CASE_CFN_HYPOT:
5452 CASE_CFN_POW:
5453 n_args = 2;
5454 gcc_fallthrough ();
5455
5456 CASE_CFN_ACOS:
5457 CASE_CFN_ACOSH:
5458 CASE_CFN_ASIN:
5459 CASE_CFN_ASINH:
5460 CASE_CFN_ATAN:
5461 CASE_CFN_ATANH:
5462 CASE_CFN_CBRT:
5463 CASE_CFN_COS:
5464 CASE_CFN_COSH:
5465 CASE_CFN_ERF:
5466 CASE_CFN_ERFC:
5467 CASE_CFN_EXP2:
5468 CASE_CFN_EXP:
5469 CASE_CFN_EXPM1:
5470 CASE_CFN_LGAMMA:
5471 CASE_CFN_LOG10:
5472 CASE_CFN_LOG1P:
5473 CASE_CFN_LOG2:
5474 CASE_CFN_LOG:
5475 CASE_CFN_SIN:
5476 CASE_CFN_SINH:
5477 CASE_CFN_SQRT:
5478 CASE_CFN_TAN:
5479 CASE_CFN_TANH:
5480 if (el_mode == DFmode && n == 2)
5481 {
5482 bdecl = mathfn_built_in (double_type_node, fn);
5483 suffix = "d2"; /* pow -> powd2 */
5484 }
5485 else if (el_mode == SFmode && n == 4)
5486 {
5487 bdecl = mathfn_built_in (float_type_node, fn);
5488 suffix = "4"; /* powf -> powf4 */
5489 }
5490 else
5491 return NULL_TREE;
5492 if (!bdecl)
5493 return NULL_TREE;
5494 break;
5495
5496 default:
5497 return NULL_TREE;
5498 }
5499
5500 gcc_assert (suffix != NULL);
5501 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5502 if (!bname)
5503 return NULL_TREE;
5504
5505 strcpy (name, bname + sizeof ("__builtin_") - 1);
5506 strcat (name, suffix);
5507
5508 if (n_args == 1)
5509 fntype = build_function_type_list (type_out, type_in, NULL);
5510 else if (n_args == 2)
5511 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5512 else
5513 gcc_unreachable ();
5514
5515 /* Build a function declaration for the vectorized function. */
5516 new_fndecl = build_decl (BUILTINS_LOCATION,
5517 FUNCTION_DECL, get_identifier (name), fntype);
5518 TREE_PUBLIC (new_fndecl) = 1;
5519 DECL_EXTERNAL (new_fndecl) = 1;
5520 DECL_IS_NOVOPS (new_fndecl) = 1;
5521 TREE_READONLY (new_fndecl) = 1;
5522
5523 return new_fndecl;
5524 }
5525
5526 /* Returns a function decl for a vectorized version of the builtin function
5527 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5528 if it is not available. */
5529
5530 static tree
5531 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5532 tree type_in)
5533 {
5534 machine_mode in_mode, out_mode;
5535 int in_n, out_n;
5536
5537 if (TARGET_DEBUG_BUILTIN)
5538 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5539 combined_fn_name (combined_fn (fn)),
5540 GET_MODE_NAME (TYPE_MODE (type_out)),
5541 GET_MODE_NAME (TYPE_MODE (type_in)));
5542
5543 if (TREE_CODE (type_out) != VECTOR_TYPE
5544 || TREE_CODE (type_in) != VECTOR_TYPE)
5545 return NULL_TREE;
5546
5547 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5548 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5549 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5550 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5551
5552 switch (fn)
5553 {
5554 CASE_CFN_COPYSIGN:
5555 if (VECTOR_UNIT_VSX_P (V2DFmode)
5556 && out_mode == DFmode && out_n == 2
5557 && in_mode == DFmode && in_n == 2)
5558 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5559 if (VECTOR_UNIT_VSX_P (V4SFmode)
5560 && out_mode == SFmode && out_n == 4
5561 && in_mode == SFmode && in_n == 4)
5562 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5563 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5564 && out_mode == SFmode && out_n == 4
5565 && in_mode == SFmode && in_n == 4)
5566 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5567 break;
5568 CASE_CFN_CEIL:
5569 if (VECTOR_UNIT_VSX_P (V2DFmode)
5570 && out_mode == DFmode && out_n == 2
5571 && in_mode == DFmode && in_n == 2)
5572 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5573 if (VECTOR_UNIT_VSX_P (V4SFmode)
5574 && out_mode == SFmode && out_n == 4
5575 && in_mode == SFmode && in_n == 4)
5576 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5577 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5578 && out_mode == SFmode && out_n == 4
5579 && in_mode == SFmode && in_n == 4)
5580 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5581 break;
5582 CASE_CFN_FLOOR:
5583 if (VECTOR_UNIT_VSX_P (V2DFmode)
5584 && out_mode == DFmode && out_n == 2
5585 && in_mode == DFmode && in_n == 2)
5586 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5587 if (VECTOR_UNIT_VSX_P (V4SFmode)
5588 && out_mode == SFmode && out_n == 4
5589 && in_mode == SFmode && in_n == 4)
5590 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5591 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5592 && out_mode == SFmode && out_n == 4
5593 && in_mode == SFmode && in_n == 4)
5594 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5595 break;
5596 CASE_CFN_FMA:
5597 if (VECTOR_UNIT_VSX_P (V2DFmode)
5598 && out_mode == DFmode && out_n == 2
5599 && in_mode == DFmode && in_n == 2)
5600 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5601 if (VECTOR_UNIT_VSX_P (V4SFmode)
5602 && out_mode == SFmode && out_n == 4
5603 && in_mode == SFmode && in_n == 4)
5604 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5605 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5606 && out_mode == SFmode && out_n == 4
5607 && in_mode == SFmode && in_n == 4)
5608 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5609 break;
5610 CASE_CFN_TRUNC:
5611 if (VECTOR_UNIT_VSX_P (V2DFmode)
5612 && out_mode == DFmode && out_n == 2
5613 && in_mode == DFmode && in_n == 2)
5614 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5615 if (VECTOR_UNIT_VSX_P (V4SFmode)
5616 && out_mode == SFmode && out_n == 4
5617 && in_mode == SFmode && in_n == 4)
5618 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5619 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5620 && out_mode == SFmode && out_n == 4
5621 && in_mode == SFmode && in_n == 4)
5622 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5623 break;
5624 CASE_CFN_NEARBYINT:
5625 if (VECTOR_UNIT_VSX_P (V2DFmode)
5626 && flag_unsafe_math_optimizations
5627 && out_mode == DFmode && out_n == 2
5628 && in_mode == DFmode && in_n == 2)
5629 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5630 if (VECTOR_UNIT_VSX_P (V4SFmode)
5631 && flag_unsafe_math_optimizations
5632 && out_mode == SFmode && out_n == 4
5633 && in_mode == SFmode && in_n == 4)
5634 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5635 break;
5636 CASE_CFN_RINT:
5637 if (VECTOR_UNIT_VSX_P (V2DFmode)
5638 && !flag_trapping_math
5639 && out_mode == DFmode && out_n == 2
5640 && in_mode == DFmode && in_n == 2)
5641 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5642 if (VECTOR_UNIT_VSX_P (V4SFmode)
5643 && !flag_trapping_math
5644 && out_mode == SFmode && out_n == 4
5645 && in_mode == SFmode && in_n == 4)
5646 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5647 break;
5648 default:
5649 break;
5650 }
5651
5652 /* Generate calls to libmass if appropriate. */
5653 if (rs6000_veclib_handler)
5654 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5655
5656 return NULL_TREE;
5657 }
5658
5659 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5660
5661 static tree
5662 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5663 tree type_in)
5664 {
5665 machine_mode in_mode, out_mode;
5666 int in_n, out_n;
5667
5668 if (TARGET_DEBUG_BUILTIN)
5669 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5670 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5671 GET_MODE_NAME (TYPE_MODE (type_out)),
5672 GET_MODE_NAME (TYPE_MODE (type_in)));
5673
5674 if (TREE_CODE (type_out) != VECTOR_TYPE
5675 || TREE_CODE (type_in) != VECTOR_TYPE)
5676 return NULL_TREE;
5677
5678 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5679 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5680 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5681 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5682
5683 enum rs6000_builtins fn
5684 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5685 switch (fn)
5686 {
5687 case RS6000_BUILTIN_RSQRTF:
5688 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5689 && out_mode == SFmode && out_n == 4
5690 && in_mode == SFmode && in_n == 4)
5691 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5692 break;
5693 case RS6000_BUILTIN_RSQRT:
5694 if (VECTOR_UNIT_VSX_P (V2DFmode)
5695 && out_mode == DFmode && out_n == 2
5696 && in_mode == DFmode && in_n == 2)
5697 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5698 break;
5699 case RS6000_BUILTIN_RECIPF:
5700 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5701 && out_mode == SFmode && out_n == 4
5702 && in_mode == SFmode && in_n == 4)
5703 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5704 break;
5705 case RS6000_BUILTIN_RECIP:
5706 if (VECTOR_UNIT_VSX_P (V2DFmode)
5707 && out_mode == DFmode && out_n == 2
5708 && in_mode == DFmode && in_n == 2)
5709 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5710 break;
5711 default:
5712 break;
5713 }
5714 return NULL_TREE;
5715 }
5716 \f
5717 /* Default CPU string for rs6000*_file_start functions. */
5718 static const char *rs6000_default_cpu;
5719
5720 /* Do anything needed at the start of the asm file. */
5721
5722 static void
5723 rs6000_file_start (void)
5724 {
5725 char buffer[80];
5726 const char *start = buffer;
5727 FILE *file = asm_out_file;
5728
5729 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5730
5731 default_file_start ();
5732
5733 if (flag_verbose_asm)
5734 {
5735 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5736
5737 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5738 {
5739 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5740 start = "";
5741 }
5742
5743 if (global_options_set.x_rs6000_cpu_index)
5744 {
5745 fprintf (file, "%s -mcpu=%s", start,
5746 processor_target_table[rs6000_cpu_index].name);
5747 start = "";
5748 }
5749
5750 if (global_options_set.x_rs6000_tune_index)
5751 {
5752 fprintf (file, "%s -mtune=%s", start,
5753 processor_target_table[rs6000_tune_index].name);
5754 start = "";
5755 }
5756
5757 if (PPC405_ERRATUM77)
5758 {
5759 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5760 start = "";
5761 }
5762
5763 #ifdef USING_ELFOS_H
5764 switch (rs6000_sdata)
5765 {
5766 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5767 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5768 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5769 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5770 }
5771
5772 if (rs6000_sdata && g_switch_value)
5773 {
5774 fprintf (file, "%s -G %d", start,
5775 g_switch_value);
5776 start = "";
5777 }
5778 #endif
5779
5780 if (*start == '\0')
5781 putc ('\n', file);
5782 }
5783
5784 #ifdef USING_ELFOS_H
5785 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5786 && !global_options_set.x_rs6000_cpu_index)
5787 {
5788 fputs ("\t.machine ", asm_out_file);
5789 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5790 fputs ("power9\n", asm_out_file);
5791 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5792 fputs ("power8\n", asm_out_file);
5793 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5794 fputs ("power7\n", asm_out_file);
5795 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5796 fputs ("power6\n", asm_out_file);
5797 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5798 fputs ("power5\n", asm_out_file);
5799 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5800 fputs ("power4\n", asm_out_file);
5801 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5802 fputs ("ppc64\n", asm_out_file);
5803 else
5804 fputs ("ppc\n", asm_out_file);
5805 }
5806 #endif
5807
5808 if (DEFAULT_ABI == ABI_ELFv2)
5809 fprintf (file, "\t.abiversion 2\n");
5810 }
5811
5812 \f
5813 /* Return nonzero if this function is known to have a null epilogue. */
5814
5815 int
5816 direct_return (void)
5817 {
5818 if (reload_completed)
5819 {
5820 rs6000_stack_t *info = rs6000_stack_info ();
5821
5822 if (info->first_gp_reg_save == 32
5823 && info->first_fp_reg_save == 64
5824 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5825 && ! info->lr_save_p
5826 && ! info->cr_save_p
5827 && info->vrsave_size == 0
5828 && ! info->push_p)
5829 return 1;
5830 }
5831
5832 return 0;
5833 }
5834
5835 /* Return the number of instructions it takes to form a constant in an
5836 integer register. */
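/* Illustrative counts for a 64-bit target: 0x7fff takes a single addi;
   0x12340000 takes a single addis; 0x12345678 takes two (addis + ori);
   and a full 64-bit constant such as 0x123456789abcdef0 is split into a
   high and a low 32-bit part, costing 2 + 2 + 1 = 5 instructions, the
   final one joining the two halves.  */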
5837
5838 int
5839 num_insns_constant_wide (HOST_WIDE_INT value)
5840 {
5841 /* Signed 16-bit constant loadable with a single addi. */
5842 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5843 return 1;
5844
5845 /* 32-bit constant with zero low 16 bits, loadable with a single addis. */
5846 else if ((value & 0xffff) == 0
5847 && (value >> 31 == -1 || value >> 31 == 0))
5848 return 1;
5849
5850 else if (TARGET_POWERPC64)
5851 {
5852 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5853 HOST_WIDE_INT high = value >> 31;
5854
5855 if (high == 0 || high == -1)
5856 return 2;
5857
5858 high >>= 1;
5859
5860 if (low == 0)
5861 return num_insns_constant_wide (high) + 1;
5862 else if (high == 0)
5863 return num_insns_constant_wide (low) + 1;
5864 else
5865 return (num_insns_constant_wide (high)
5866 + num_insns_constant_wide (low) + 1);
5867 }
5868
5869 else
5870 return 2;
5871 }
5872
5873 int
5874 num_insns_constant (rtx op, machine_mode mode)
5875 {
5876 HOST_WIDE_INT low, high;
5877
5878 switch (GET_CODE (op))
5879 {
5880 case CONST_INT:
5881 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
5882 && rs6000_is_valid_and_mask (op, mode))
5883 return 2;
5884 else
5885 return num_insns_constant_wide (INTVAL (op));
5886
5887 case CONST_WIDE_INT:
5888 {
5889 int i;
5890 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
5891 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5892 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
5893 return ins;
5894 }
5895
5896 case CONST_DOUBLE:
5897 if (mode == SFmode || mode == SDmode)
5898 {
5899 long l;
5900
5901 if (DECIMAL_FLOAT_MODE_P (mode))
5902 REAL_VALUE_TO_TARGET_DECIMAL32
5903 (*CONST_DOUBLE_REAL_VALUE (op), l);
5904 else
5905 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5906 return num_insns_constant_wide ((HOST_WIDE_INT) l);
5907 }
5908
5909 long l[2];
5910 if (DECIMAL_FLOAT_MODE_P (mode))
5911 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
5912 else
5913 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5914 high = l[WORDS_BIG_ENDIAN == 0];
5915 low = l[WORDS_BIG_ENDIAN != 0];
5916
5917 if (TARGET_32BIT)
5918 return (num_insns_constant_wide (low)
5919 + num_insns_constant_wide (high));
5920 else
5921 {
5922 if ((high == 0 && low >= 0)
5923 || (high == -1 && low < 0))
5924 return num_insns_constant_wide (low);
5925
5926 else if (rs6000_is_valid_and_mask (op, mode))
5927 return 2;
5928
5929 else if (low == 0)
5930 return num_insns_constant_wide (high) + 1;
5931
5932 else
5933 return (num_insns_constant_wide (high)
5934 + num_insns_constant_wide (low) + 1);
5935 }
5936
5937 default:
5938 gcc_unreachable ();
5939 }
5940 }
5941
5942 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5943 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5944 corresponding element of the vector, but for V4SFmode, the
5945 corresponding "float" is interpreted as an SImode integer. */
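/* For example, a V4SFmode element holding 1.0f is returned as 0x3f800000,
   its IEEE 754 single-precision bit pattern, which lets the splat
   analysis below treat float and integer vectors uniformly.  */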
5946
5947 HOST_WIDE_INT
5948 const_vector_elt_as_int (rtx op, unsigned int elt)
5949 {
5950 rtx tmp;
5951
5952 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5953 gcc_assert (GET_MODE (op) != V2DImode
5954 && GET_MODE (op) != V2DFmode);
5955
5956 tmp = CONST_VECTOR_ELT (op, elt);
5957 if (GET_MODE (op) == V4SFmode)
5958 tmp = gen_lowpart (SImode, tmp);
5959 return INTVAL (tmp);
5960 }
5961
5962 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5963 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5964 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5965 all items are set to the same value and contain COPIES replicas of the
5966 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5967 operand and the others are set to the value of the operand's msb. */
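/* Two illustrative matches: the V4SImode vector {0x00050005, 0x00050005,
   0x00050005, 0x00050005} is a vspltish of 5 viewed as words and is
   accepted with COPIES == 2; the V16QImode vector
   {0,0,0,5, 0,0,0,5, 0,0,0,5, 0,0,0,5} (big-endian element order) is a
   vspltisw of 5 viewed as bytes and is accepted with STEP == 4.  */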
5968
5969 static bool
5970 vspltis_constant (rtx op, unsigned step, unsigned copies)
5971 {
5972 machine_mode mode = GET_MODE (op);
5973 machine_mode inner = GET_MODE_INNER (mode);
5974
5975 unsigned i;
5976 unsigned nunits;
5977 unsigned bitsize;
5978 unsigned mask;
5979
5980 HOST_WIDE_INT val;
5981 HOST_WIDE_INT splat_val;
5982 HOST_WIDE_INT msb_val;
5983
5984 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5985 return false;
5986
5987 nunits = GET_MODE_NUNITS (mode);
5988 bitsize = GET_MODE_BITSIZE (inner);
5989 mask = GET_MODE_MASK (inner);
5990
5991 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5992 splat_val = val;
5993 msb_val = val >= 0 ? 0 : -1;
5994
5995 /* Construct the value to be splatted, if possible. If not, return 0. */
5996 for (i = 2; i <= copies; i *= 2)
5997 {
5998 HOST_WIDE_INT small_val;
5999 bitsize /= 2;
6000 small_val = splat_val >> bitsize;
6001 mask >>= bitsize;
6002 if (splat_val != ((HOST_WIDE_INT)
6003 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6004 | (small_val & mask)))
6005 return false;
6006 splat_val = small_val;
6007 }
6008
6009 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6010 if (EASY_VECTOR_15 (splat_val))
6011 ;
6012
6013 /* Also check if we can splat, and then add the result to itself. Do so if
6014 the value is positive, or if the splat instruction is using OP's mode;
6015 for splat_val < 0, the splat and the add should use the same mode. */
6016 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6017 && (splat_val >= 0 || (step == 1 && copies == 1)))
6018 ;
6019
6020 /* Also check if we are loading up the most significant bit, which can be done by
6021 loading up -1 and shifting the value left by -1 (vector shift counts are taken modulo the element width, so -1 acts as width - 1). */
6022 else if (EASY_VECTOR_MSB (splat_val, inner))
6023 ;
6024
6025 else
6026 return false;
6027
6028 /* Check if VAL is present in every STEP-th element, and the
6029 other elements are filled with its most significant bit. */
6030 for (i = 1; i < nunits; ++i)
6031 {
6032 HOST_WIDE_INT desired_val;
6033 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6034 if ((i & (step - 1)) == 0)
6035 desired_val = val;
6036 else
6037 desired_val = msb_val;
6038
6039 if (desired_val != const_vector_elt_as_int (op, elt))
6040 return false;
6041 }
6042
6043 return true;
6044 }
6045
6046 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6047 instruction, filling in the bottom elements with 0 or -1.
6048
6049 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6050 for the number of zeroes to shift in, or negative for the number of 0xff
6051 bytes to shift in.
6052
6053 OP is a CONST_VECTOR. */
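/* For example, the V4SImode constant {12, 0, 0, 0} (big-endian element
   order) is a vspltisw of 12 followed by a VSLDOI shifting in twelve
   zero bytes, so 12 is returned for it.  */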
6054
6055 int
6056 vspltis_shifted (rtx op)
6057 {
6058 machine_mode mode = GET_MODE (op);
6059 machine_mode inner = GET_MODE_INNER (mode);
6060
6061 unsigned i, j;
6062 unsigned nunits;
6063 unsigned mask;
6064
6065 HOST_WIDE_INT val;
6066
6067 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6068 return 0;
6069
6070 /* We need to create pseudo registers to do the shift, so don't recognize
6071 shift vector constants after reload. */
6072 if (!can_create_pseudo_p ())
6073 return 0;
6074
6075 nunits = GET_MODE_NUNITS (mode);
6076 mask = GET_MODE_MASK (inner);
6077
6078 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6079
6080 /* Check if the value can really be the operand of a vspltis[bhw]. */
6081 if (EASY_VECTOR_15 (val))
6082 ;
6083
6084 /* Also check if we are loading up the most significant bit, which can be done
6085 by loading up -1 and shifting the value left by -1. */
6086 else if (EASY_VECTOR_MSB (val, inner))
6087 ;
6088
6089 else
6090 return 0;
6091
6092 /* Check if VAL is present in every element until we find elements
6093 that are 0 or all 1 bits. */
6094 for (i = 1; i < nunits; ++i)
6095 {
6096 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6097 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6098
6099 /* If the value isn't the splat value, check for the remaining elements
6100 being 0/-1. */
6101 if (val != elt_val)
6102 {
6103 if (elt_val == 0)
6104 {
6105 for (j = i+1; j < nunits; ++j)
6106 {
6107 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6108 if (const_vector_elt_as_int (op, elt2) != 0)
6109 return 0;
6110 }
6111
6112 return (nunits - i) * GET_MODE_SIZE (inner);
6113 }
6114
6115 else if ((elt_val & mask) == mask)
6116 {
6117 for (j = i+1; j < nunits; ++j)
6118 {
6119 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6120 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6121 return 0;
6122 }
6123
6124 return -((nunits - i) * GET_MODE_SIZE (inner));
6125 }
6126
6127 else
6128 return 0;
6129 }
6130 }
6131
6132 /* If all elements are equal, we don't need to do VSLDOI. */
6133 return 0;
6134 }
6135
6136
6137 /* Return true if OP is of the given MODE and can be synthesized
6138 with a vspltisb, vspltish or vspltisw. */
6139
6140 bool
6141 easy_altivec_constant (rtx op, machine_mode mode)
6142 {
6143 unsigned step, copies;
6144
6145 if (mode == VOIDmode)
6146 mode = GET_MODE (op);
6147 else if (mode != GET_MODE (op))
6148 return false;
6149
6150 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6151 constants. */
6152 if (mode == V2DFmode)
6153 return zero_constant (op, mode);
6154
6155 else if (mode == V2DImode)
6156 {
6157 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6158 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6159 return false;
6160
6161 if (zero_constant (op, mode))
6162 return true;
6163
6164 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6165 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6166 return true;
6167
6168 return false;
6169 }
6170
6171 /* V1TImode is a special container for TImode. Ignore for now. */
6172 else if (mode == V1TImode)
6173 return false;
6174
6175 /* Start with a vspltisw. */
6176 step = GET_MODE_NUNITS (mode) / 4;
6177 copies = 1;
6178
6179 if (vspltis_constant (op, step, copies))
6180 return true;
6181
6182 /* Then try with a vspltish. */
6183 if (step == 1)
6184 copies <<= 1;
6185 else
6186 step >>= 1;
6187
6188 if (vspltis_constant (op, step, copies))
6189 return true;
6190
6191 /* And finally a vspltisb. */
6192 if (step == 1)
6193 copies <<= 1;
6194 else
6195 step >>= 1;
6196
6197 if (vspltis_constant (op, step, copies))
6198 return true;
6199
6200 if (vspltis_shifted (op) != 0)
6201 return true;
6202
6203 return false;
6204 }
6205
6206 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6207 result is OP. Abort if it is not possible. */
6208
6209 rtx
6210 gen_easy_altivec_constant (rtx op)
6211 {
6212 machine_mode mode = GET_MODE (op);
6213 int nunits = GET_MODE_NUNITS (mode);
6214 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6215 unsigned step = nunits / 4;
6216 unsigned copies = 1;
6217
6218 /* Start with a vspltisw. */
6219 if (vspltis_constant (op, step, copies))
6220 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6221
6222 /* Then try with a vspltish. */
6223 if (step == 1)
6224 copies <<= 1;
6225 else
6226 step >>= 1;
6227
6228 if (vspltis_constant (op, step, copies))
6229 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6230
6231 /* And finally a vspltisb. */
6232 if (step == 1)
6233 copies <<= 1;
6234 else
6235 step >>= 1;
6236
6237 if (vspltis_constant (op, step, copies))
6238 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6239
6240 gcc_unreachable ();
6241 }
6242
6243 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6244 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6245
6246 Return the number of instructions needed (1 or 2) in the location pointed
6247 to by NUM_INSNS_PTR.
6248 
6249 Return the constant byte that is being splatted via CONSTANT_PTR. */
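/* Some illustrative outcomes: a V16QImode splat of 10 needs one XXSPLTIB;
   a V4SImode splat of 100 needs two instructions (XXSPLTIB plus a sign
   extend); a V4SImode splat of 10 is rejected because a single VSPLTISW
   is preferable; and values outside [-128, 127] are always rejected.  */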
6250
6251 bool
6252 xxspltib_constant_p (rtx op,
6253 machine_mode mode,
6254 int *num_insns_ptr,
6255 int *constant_ptr)
6256 {
6257 size_t nunits = GET_MODE_NUNITS (mode);
6258 size_t i;
6259 HOST_WIDE_INT value;
6260 rtx element;
6261
6262 /* Set the returned values to out-of-bounds values. */
6263 *num_insns_ptr = -1;
6264 *constant_ptr = 256;
6265
6266 if (!TARGET_P9_VECTOR)
6267 return false;
6268
6269 if (mode == VOIDmode)
6270 mode = GET_MODE (op);
6271
6272 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6273 return false;
6274
6275 /* Handle (vec_duplicate <constant>). */
6276 if (GET_CODE (op) == VEC_DUPLICATE)
6277 {
6278 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6279 && mode != V2DImode)
6280 return false;
6281
6282 element = XEXP (op, 0);
6283 if (!CONST_INT_P (element))
6284 return false;
6285
6286 value = INTVAL (element);
6287 if (!IN_RANGE (value, -128, 127))
6288 return false;
6289 }
6290
6291 /* Handle (const_vector [...]). */
6292 else if (GET_CODE (op) == CONST_VECTOR)
6293 {
6294 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6295 && mode != V2DImode)
6296 return false;
6297
6298 element = CONST_VECTOR_ELT (op, 0);
6299 if (!CONST_INT_P (element))
6300 return false;
6301
6302 value = INTVAL (element);
6303 if (!IN_RANGE (value, -128, 127))
6304 return false;
6305
6306 for (i = 1; i < nunits; i++)
6307 {
6308 element = CONST_VECTOR_ELT (op, i);
6309 if (!CONST_INT_P (element))
6310 return false;
6311
6312 if (value != INTVAL (element))
6313 return false;
6314 }
6315 }
6316
6317 /* Handle integer constants being loaded into the upper part of the VSX
6318 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6319 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6320 else if (CONST_INT_P (op))
6321 {
6322 if (!SCALAR_INT_MODE_P (mode))
6323 return false;
6324
6325 value = INTVAL (op);
6326 if (!IN_RANGE (value, -128, 127))
6327 return false;
6328
6329 if (!IN_RANGE (value, -1, 0))
6330 {
6331 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6332 return false;
6333
6334 if (EASY_VECTOR_15 (value))
6335 return false;
6336 }
6337 }
6338
6339 else
6340 return false;
6341
6342 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6343 sign extend. Special case 0/-1 to allow getting any VSX register instead
6344 of an Altivec register. */
6345 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6346 && EASY_VECTOR_15 (value))
6347 return false;
6348
6349 /* Return # of instructions and the constant byte for XXSPLTIB. */
6350 if (mode == V16QImode)
6351 *num_insns_ptr = 1;
6352
6353 else if (IN_RANGE (value, -1, 0))
6354 *num_insns_ptr = 1;
6355
6356 else
6357 *num_insns_ptr = 2;
6358
6359 *constant_ptr = (int) value;
6360 return true;
6361 }
6362
6363 const char *
6364 output_vec_const_move (rtx *operands)
6365 {
6366 int shift;
6367 machine_mode mode;
6368 rtx dest, vec;
6369
6370 dest = operands[0];
6371 vec = operands[1];
6372 mode = GET_MODE (dest);
6373
6374 if (TARGET_VSX)
6375 {
6376 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6377 int xxspltib_value = 256;
6378 int num_insns = -1;
6379
6380 if (zero_constant (vec, mode))
6381 {
6382 if (TARGET_P9_VECTOR)
6383 return "xxspltib %x0,0";
6384
6385 else if (dest_vmx_p)
6386 return "vspltisw %0,0";
6387
6388 else
6389 return "xxlxor %x0,%x0,%x0";
6390 }
6391
6392 if (all_ones_constant (vec, mode))
6393 {
6394 if (TARGET_P9_VECTOR)
6395 return "xxspltib %x0,255";
6396
6397 else if (dest_vmx_p)
6398 return "vspltisw %0,-1";
6399
6400 else if (TARGET_P8_VECTOR)
6401 return "xxlorc %x0,%x0,%x0";
6402
6403 else
6404 gcc_unreachable ();
6405 }
6406
6407 if (TARGET_P9_VECTOR
6408 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6409 {
6410 if (num_insns == 1)
6411 {
6412 operands[2] = GEN_INT (xxspltib_value & 0xff);
6413 return "xxspltib %x0,%2";
6414 }
6415
6416 return "#";
6417 }
6418 }
6419
6420 if (TARGET_ALTIVEC)
6421 {
6422 rtx splat_vec;
6423
6424 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6425 if (zero_constant (vec, mode))
6426 return "vspltisw %0,0";
6427
6428 if (all_ones_constant (vec, mode))
6429 return "vspltisw %0,-1";
6430
6431 /* Do we need to construct a value using VSLDOI? */
6432 shift = vspltis_shifted (vec);
6433 if (shift != 0)
6434 return "#";
6435
6436 splat_vec = gen_easy_altivec_constant (vec);
6437 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6438 operands[1] = XEXP (splat_vec, 0);
6439 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6440 return "#";
6441
6442 switch (GET_MODE (splat_vec))
6443 {
6444 case E_V4SImode:
6445 return "vspltisw %0,%1";
6446
6447 case E_V8HImode:
6448 return "vspltish %0,%1";
6449
6450 case E_V16QImode:
6451 return "vspltisb %0,%1";
6452
6453 default:
6454 gcc_unreachable ();
6455 }
6456 }
6457
6458 gcc_unreachable ();
6459 }
6460
6461 /* Initialize vector TARGET to VALS. */
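/* In outline, the cases handled below are: all-constant inputs become a
   zero, a splat immediate, or a constant-pool load; V2DF/V2DI uses the
   VSX splat/concat patterns; V4SI/V4SF and small-element splats have
   dedicated fast paths; a single variable element is inserted with
   rs6000_expand_vector_set once the constant part is built; anything
   else is assembled in a stack temporary one element at a time.  */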
6462
6463 void
6464 rs6000_expand_vector_init (rtx target, rtx vals)
6465 {
6466 machine_mode mode = GET_MODE (target);
6467 machine_mode inner_mode = GET_MODE_INNER (mode);
6468 int n_elts = GET_MODE_NUNITS (mode);
6469 int n_var = 0, one_var = -1;
6470 bool all_same = true, all_const_zero = true;
6471 rtx x, mem;
6472 int i;
6473
6474 for (i = 0; i < n_elts; ++i)
6475 {
6476 x = XVECEXP (vals, 0, i);
6477 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6478 ++n_var, one_var = i;
6479 else if (x != CONST0_RTX (inner_mode))
6480 all_const_zero = false;
6481
6482 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6483 all_same = false;
6484 }
6485
6486 if (n_var == 0)
6487 {
6488 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6489 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6490 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6491 {
6492 /* Zero register. */
6493 emit_move_insn (target, CONST0_RTX (mode));
6494 return;
6495 }
6496 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6497 {
6498 /* Splat immediate. */
6499 emit_insn (gen_rtx_SET (target, const_vec));
6500 return;
6501 }
6502 else
6503 {
6504 /* Load from constant pool. */
6505 emit_move_insn (target, const_vec);
6506 return;
6507 }
6508 }
6509
6510 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6511 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6512 {
6513 rtx op[2];
6514 size_t i;
6515 size_t num_elements = all_same ? 1 : 2;
6516 for (i = 0; i < num_elements; i++)
6517 {
6518 op[i] = XVECEXP (vals, 0, i);
6519 /* Just in case there is a SUBREG with a smaller mode, do a
6520 conversion. */
6521 if (GET_MODE (op[i]) != inner_mode)
6522 {
6523 rtx tmp = gen_reg_rtx (inner_mode);
6524 convert_move (tmp, op[i], 0);
6525 op[i] = tmp;
6526 }
6527 /* Allow load with splat double word. */
6528 else if (MEM_P (op[i]))
6529 {
6530 if (!all_same)
6531 op[i] = force_reg (inner_mode, op[i]);
6532 }
6533 else if (!REG_P (op[i]))
6534 op[i] = force_reg (inner_mode, op[i]);
6535 }
6536
6537 if (all_same)
6538 {
6539 if (mode == V2DFmode)
6540 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6541 else
6542 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6543 }
6544 else
6545 {
6546 if (mode == V2DFmode)
6547 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6548 else
6549 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6550 }
6551 return;
6552 }
6553
6554 /* Special case initializing vector int if we are on 64-bit systems with
6555 direct move or we have the ISA 3.0 instructions. */
6556 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6557 && TARGET_DIRECT_MOVE_64BIT)
6558 {
6559 if (all_same)
6560 {
6561 rtx element0 = XVECEXP (vals, 0, 0);
6562 if (MEM_P (element0))
6563 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6564 else
6565 element0 = force_reg (SImode, element0);
6566
6567 if (TARGET_P9_VECTOR)
6568 emit_insn (gen_vsx_splat_v4si (target, element0));
6569 else
6570 {
6571 rtx tmp = gen_reg_rtx (DImode);
6572 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6573 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6574 }
6575 return;
6576 }
6577 else
6578 {
6579 rtx elements[4];
6580 size_t i;
6581
6582 for (i = 0; i < 4; i++)
6583 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6584
6585 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6586 elements[2], elements[3]));
6587 return;
6588 }
6589 }
6590
6591 /* With single-precision floating point on VSX, we know that internally
6592 single precision is actually represented as a double, so we either make
6593 two V2DF vectors and convert those vectors to single precision, or do one
6594 conversion and splat the result to the other elements. */
6595 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6596 {
6597 if (all_same)
6598 {
6599 rtx element0 = XVECEXP (vals, 0, 0);
6600
6601 if (TARGET_P9_VECTOR)
6602 {
6603 if (MEM_P (element0))
6604 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6605
6606 emit_insn (gen_vsx_splat_v4sf (target, element0));
6607 }
6608
6609 else
6610 {
6611 rtx freg = gen_reg_rtx (V4SFmode);
6612 rtx sreg = force_reg (SFmode, element0);
6613 rtx cvt = (TARGET_XSCVDPSPN
6614 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6615 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6616
6617 emit_insn (cvt);
6618 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6619 const0_rtx));
6620 }
6621 }
6622 else
6623 {
6624 rtx dbl_even = gen_reg_rtx (V2DFmode);
6625 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6626 rtx flt_even = gen_reg_rtx (V4SFmode);
6627 rtx flt_odd = gen_reg_rtx (V4SFmode);
6628 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6629 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6630 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6631 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6632
6633 /* Use VMRGEW if we can instead of doing a permute. */
6634 if (TARGET_P8_VECTOR)
6635 {
6636 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6637 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6638 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6639 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6640 if (BYTES_BIG_ENDIAN)
6641 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6642 else
6643 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6644 }
6645 else
6646 {
6647 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6648 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6649 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6650 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6651 rs6000_expand_extract_even (target, flt_even, flt_odd);
6652 }
6653 }
6654 return;
6655 }
6656
6657 /* Special case initializing vector short/char that are splats if we are on
6658 64-bit systems with direct move. */
6659 if (all_same && TARGET_DIRECT_MOVE_64BIT
6660 && (mode == V16QImode || mode == V8HImode))
6661 {
6662 rtx op0 = XVECEXP (vals, 0, 0);
6663 rtx di_tmp = gen_reg_rtx (DImode);
6664
6665 if (!REG_P (op0))
6666 op0 = force_reg (GET_MODE_INNER (mode), op0);
6667
6668 if (mode == V16QImode)
6669 {
6670 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6671 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6672 return;
6673 }
6674
6675 if (mode == V8HImode)
6676 {
6677 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6678 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6679 return;
6680 }
6681 }
6682
6683 /* Store value to stack temp. Load vector element. Splat. However, splat
6684 of 64-bit items is not supported on Altivec. */
6685 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6686 {
6687 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6688 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6689 XVECEXP (vals, 0, 0));
6690 x = gen_rtx_UNSPEC (VOIDmode,
6691 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6692 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6693 gen_rtvec (2,
6694 gen_rtx_SET (target, mem),
6695 x)));
6696 x = gen_rtx_VEC_SELECT (inner_mode, target,
6697 gen_rtx_PARALLEL (VOIDmode,
6698 gen_rtvec (1, const0_rtx)));
6699 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6700 return;
6701 }
6702
6703 /* One field is non-constant. Load constant then overwrite
6704 varying field. */
6705 if (n_var == 1)
6706 {
6707 rtx copy = copy_rtx (vals);
6708
6709 /* Load constant part of vector, substitute neighboring value for
6710 varying element. */
6711 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6712 rs6000_expand_vector_init (target, copy);
6713
6714 /* Insert variable. */
6715 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6716 return;
6717 }
6718
6719 /* Construct the vector in memory one field at a time
6720 and load the whole vector. */
6721 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6722 for (i = 0; i < n_elts; i++)
6723 emit_move_insn (adjust_address_nv (mem, inner_mode,
6724 i * GET_MODE_SIZE (inner_mode)),
6725 XVECEXP (vals, 0, i));
6726 emit_move_insn (target, mem);
6727 }
6728
6729 /* Set field ELT of TARGET to VAL. */
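/* In outline: use a VSX set pattern when one exists for this mode;
   otherwise store VAL to a stack temporary, load it into a vector
   register, and merge it into TARGET with a permute (VPERM, or VPERMR on
   ISA 3.0 little-endian) whose selector keeps every byte of TARGET
   except the bytes occupied by element ELT.  */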
6730
6731 void
6732 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6733 {
6734 machine_mode mode = GET_MODE (target);
6735 machine_mode inner_mode = GET_MODE_INNER (mode);
6736 rtx reg = gen_reg_rtx (mode);
6737 rtx mask, mem, x;
6738 int width = GET_MODE_SIZE (inner_mode);
6739 int i;
6740
6741 val = force_reg (GET_MODE (val), val);
6742
6743 if (VECTOR_MEM_VSX_P (mode))
6744 {
6745 rtx insn = NULL_RTX;
6746 rtx elt_rtx = GEN_INT (elt);
6747
6748 if (mode == V2DFmode)
6749 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6750
6751 else if (mode == V2DImode)
6752 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6753
6754 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6755 {
6756 if (mode == V4SImode)
6757 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6758 else if (mode == V8HImode)
6759 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6760 else if (mode == V16QImode)
6761 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6762 else if (mode == V4SFmode)
6763 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6764 }
6765
6766 if (insn)
6767 {
6768 emit_insn (insn);
6769 return;
6770 }
6771 }
6772
6773   /* Simplify setting single-element vectors like V1TImode.  */
6774 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6775 {
6776 emit_move_insn (target, gen_lowpart (mode, val));
6777 return;
6778 }
6779
6780 /* Load single variable value. */
6781 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6782 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6783 x = gen_rtx_UNSPEC (VOIDmode,
6784 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6785 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6786 gen_rtvec (2,
6787 gen_rtx_SET (reg, mem),
6788 x)));
6789
6790 /* Linear sequence. */
6791 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6792 for (i = 0; i < 16; ++i)
6793 XVECEXP (mask, 0, i) = GEN_INT (i);
6794
6795 /* Set permute mask to insert element into target. */
6796 for (i = 0; i < width; ++i)
6797 XVECEXP (mask, 0, elt*width + i)
6798 = GEN_INT (i + 0x10);
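  /* Editor's example: inserting element 1 of a V4SImode vector
     (ELT == 1, WIDTH == 4) yields the selector

	{ 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 12, 13, 14, 15 }

     so in the big-endian vperm below, result bytes 4..7 come from the
     second input REG (indexes 16..31 select the second input) while all
     other bytes are copied from TARGET; the little-endian paths adjust
     the operands accordingly.  */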
6799 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6800
6801 if (BYTES_BIG_ENDIAN)
6802 x = gen_rtx_UNSPEC (mode,
6803 gen_rtvec (3, target, reg,
6804 force_reg (V16QImode, x)),
6805 UNSPEC_VPERM);
6806 else
6807 {
6808 if (TARGET_P9_VECTOR)
6809 x = gen_rtx_UNSPEC (mode,
6810 gen_rtvec (3, reg, target,
6811 force_reg (V16QImode, x)),
6812 UNSPEC_VPERMR);
6813 else
6814 {
6815 /* Invert selector. We prefer to generate VNAND on P8 so
6816 that future fusion opportunities can kick in, but must
6817 generate VNOR elsewhere. */
6818 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6819 rtx iorx = (TARGET_P8_VECTOR
6820 ? gen_rtx_IOR (V16QImode, notx, notx)
6821 : gen_rtx_AND (V16QImode, notx, notx));
6822 rtx tmp = gen_reg_rtx (V16QImode);
6823 emit_insn (gen_rtx_SET (tmp, iorx));
6824
6825 /* Permute with operands reversed and adjusted selector. */
6826 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6827 UNSPEC_VPERM);
6828 }
6829 }
6830
6831 emit_insn (gen_rtx_SET (target, x));
6832 }
6833
6834 /* Extract field ELT from VEC into TARGET. */
6835
6836 void
6837 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6838 {
6839 machine_mode mode = GET_MODE (vec);
6840 machine_mode inner_mode = GET_MODE_INNER (mode);
6841 rtx mem;
6842
6843 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6844 {
6845 switch (mode)
6846 {
6847 default:
6848 break;
6849 case E_V1TImode:
6850 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
6851 emit_move_insn (target, gen_lowpart (TImode, vec));
6852	  return;
6853 case E_V2DFmode:
6854 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6855 return;
6856 case E_V2DImode:
6857 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6858 return;
6859 case E_V4SFmode:
6860 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6861 return;
6862 case E_V16QImode:
6863 if (TARGET_DIRECT_MOVE_64BIT)
6864 {
6865 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6866 return;
6867 }
6868 else
6869 break;
6870 case E_V8HImode:
6871 if (TARGET_DIRECT_MOVE_64BIT)
6872 {
6873 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6874 return;
6875 }
6876 else
6877 break;
6878 case E_V4SImode:
6879 if (TARGET_DIRECT_MOVE_64BIT)
6880 {
6881 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6882 return;
6883 }
6884 break;
6885 }
6886 }
6887 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6888 && TARGET_DIRECT_MOVE_64BIT)
6889 {
6890 if (GET_MODE (elt) != DImode)
6891 {
6892 rtx tmp = gen_reg_rtx (DImode);
6893 convert_move (tmp, elt, 0);
6894 elt = tmp;
6895 }
6896 else if (!REG_P (elt))
6897 elt = force_reg (DImode, elt);
6898
6899 switch (mode)
6900 {
6901 case E_V2DFmode:
6902 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6903 return;
6904
6905 case E_V2DImode:
6906 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6907 return;
6908
6909 case E_V4SFmode:
6910 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6911 return;
6912
6913 case E_V4SImode:
6914 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6915 return;
6916
6917 case E_V8HImode:
6918 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6919 return;
6920
6921 case E_V16QImode:
6922 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6923 return;
6924
6925 default:
6926 gcc_unreachable ();
6927 }
6928 }
6929
6930 gcc_assert (CONST_INT_P (elt));
6931
6932 /* Allocate mode-sized buffer. */
6933 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6934
6935 emit_move_insn (mem, vec);
6936
6937 /* Add offset to field within buffer matching vector element. */
6938 mem = adjust_address_nv (mem, inner_mode,
6939 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
6940
6941 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6942 }
6943
6944 /* Helper function to return the register number of an RTX.  */
6945 static inline int
6946 regno_or_subregno (rtx op)
6947 {
6948 if (REG_P (op))
6949 return REGNO (op);
6950 else if (SUBREG_P (op))
6951 return subreg_regno (op);
6952 else
6953 gcc_unreachable ();
6954 }
6955
6956 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6957 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6958    temporary (BASE_TMP) to fix up the address.  Return the new memory address
6959 that is valid for reads or writes to a given register (SCALAR_REG). */
6960
6961 rtx
6962 rs6000_adjust_vec_address (rtx scalar_reg,
6963 rtx mem,
6964 rtx element,
6965 rtx base_tmp,
6966 machine_mode scalar_mode)
6967 {
6968 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
6969 rtx addr = XEXP (mem, 0);
6970 rtx element_offset;
6971 rtx new_addr;
6972 bool valid_addr_p;
6973
6974 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
6975 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
6976
6977 /* Calculate what we need to add to the address to get the element
6978 address. */
6979 if (CONST_INT_P (element))
6980 element_offset = GEN_INT (INTVAL (element) * scalar_size);
6981 else
6982 {
6983 int byte_shift = exact_log2 (scalar_size);
6984 gcc_assert (byte_shift >= 0);
6985
6986 if (byte_shift == 0)
6987 element_offset = element;
6988
6989 else
6990 {
6991 if (TARGET_POWERPC64)
6992 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
6993 else
6994 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
6995
6996 element_offset = base_tmp;
6997 }
6998 }
6999
7000 /* Create the new address pointing to the element within the vector. If we
7001 are adding 0, we don't have to change the address. */
7002 if (element_offset == const0_rtx)
7003 new_addr = addr;
7004
7005 /* A simple indirect address can be converted into a reg + offset
7006 address. */
7007 else if (REG_P (addr) || SUBREG_P (addr))
7008 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7009
7010   /* Optimize D-FORM addresses with a constant offset and a constant element
7011      number, folding the element offset into the address directly.  */
7012 else if (GET_CODE (addr) == PLUS)
7013 {
7014 rtx op0 = XEXP (addr, 0);
7015 rtx op1 = XEXP (addr, 1);
7016 rtx insn;
7017
7018 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7019 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7020 {
7021 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7022 rtx offset_rtx = GEN_INT (offset);
7023
7024 if (IN_RANGE (offset, -32768, 32767)
7025 && (scalar_size < 8 || (offset & 0x3) == 0))
7026 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7027 else
7028 {
7029 emit_move_insn (base_tmp, offset_rtx);
7030 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7031 }
7032 }
7033 else
7034 {
7035 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7036 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7037
7038 /* Note, ADDI requires the register being added to be a base
7039 register. If the register was R0, load it up into the temporary
7040 and do the add. */
7041 if (op1_reg_p
7042 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7043 {
7044 insn = gen_add3_insn (base_tmp, op1, element_offset);
7045 gcc_assert (insn != NULL_RTX);
7046 emit_insn (insn);
7047 }
7048
7049 else if (ele_reg_p
7050 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7051 {
7052 insn = gen_add3_insn (base_tmp, element_offset, op1);
7053 gcc_assert (insn != NULL_RTX);
7054 emit_insn (insn);
7055 }
7056
7057 else
7058 {
7059 emit_move_insn (base_tmp, op1);
7060 emit_insn (gen_add2_insn (base_tmp, element_offset));
7061 }
7062
7063 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7064 }
7065 }
7066
7067 else
7068 {
7069 emit_move_insn (base_tmp, addr);
7070 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7071 }
7072
7073 /* If we have a PLUS, we need to see whether the particular register class
7074 allows for D-FORM or X-FORM addressing. */
7075 if (GET_CODE (new_addr) == PLUS)
7076 {
7077 rtx op1 = XEXP (new_addr, 1);
7078 addr_mask_type addr_mask;
7079 int scalar_regno = regno_or_subregno (scalar_reg);
7080
7081 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7082 if (INT_REGNO_P (scalar_regno))
7083 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7084
7085 else if (FP_REGNO_P (scalar_regno))
7086 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7087
7088 else if (ALTIVEC_REGNO_P (scalar_regno))
7089 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7090
7091 else
7092 gcc_unreachable ();
7093
7094 if (REG_P (op1) || SUBREG_P (op1))
7095 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7096 else
7097 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7098 }
7099
7100 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7101 valid_addr_p = true;
7102
7103 else
7104 valid_addr_p = false;
7105
7106 if (!valid_addr_p)
7107 {
7108 emit_move_insn (base_tmp, new_addr);
7109 new_addr = base_tmp;
7110 }
7111
7112 return change_address (mem, scalar_mode, new_addr);
7113 }
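/* Editor's example: given MEM = (mem:V4SI (plus (reg r3) (const_int 16))),
   ELEMENT = 2 and SCALAR_MODE = SImode, the element offset is 2 * 4 = 8
   bytes and the returned memory is

	(mem:SI (plus (reg r3) (const_int 24)))

   For a variable ELEMENT, the byte offset is instead computed into
   BASE_TMP with a shift and added to the base register.  */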
7114
7115 /* Split a variable vec_extract operation into the component instructions. */
7116
7117 void
7118 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7119 rtx tmp_altivec)
7120 {
7121 machine_mode mode = GET_MODE (src);
7122 machine_mode scalar_mode = GET_MODE (dest);
7123 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7124 int byte_shift = exact_log2 (scalar_size);
7125
7126 gcc_assert (byte_shift >= 0);
7127
7128 /* If we are given a memory address, optimize to load just the element. We
7129 don't have to adjust the vector element number on little endian
7130 systems. */
7131 if (MEM_P (src))
7132 {
7133 gcc_assert (REG_P (tmp_gpr));
7134 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7135 tmp_gpr, scalar_mode));
7136 return;
7137 }
7138
7139 else if (REG_P (src) || SUBREG_P (src))
7140 {
7141 int bit_shift = byte_shift + 3;
7142 rtx element2;
7143 int dest_regno = regno_or_subregno (dest);
7144 int src_regno = regno_or_subregno (src);
7145 int element_regno = regno_or_subregno (element);
7146
7147 gcc_assert (REG_P (tmp_gpr));
7148
7149 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7150 a general purpose register. */
7151 if (TARGET_P9_VECTOR
7152 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7153 && INT_REGNO_P (dest_regno)
7154 && ALTIVEC_REGNO_P (src_regno)
7155 && INT_REGNO_P (element_regno))
7156 {
7157 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7158 rtx element_si = gen_rtx_REG (SImode, element_regno);
7159
7160 if (mode == V16QImode)
7161 emit_insn (BYTES_BIG_ENDIAN
7162 ? gen_vextublx (dest_si, element_si, src)
7163 : gen_vextubrx (dest_si, element_si, src));
7164
7165 else if (mode == V8HImode)
7166 {
7167 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7168 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7169 emit_insn (BYTES_BIG_ENDIAN
7170 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7171 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7172 }
7173
7175 else
7176 {
7177 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7178 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7179 emit_insn (BYTES_BIG_ENDIAN
7180 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7181 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7182 }
7183
7184 return;
7185 }
7186
7188 gcc_assert (REG_P (tmp_altivec));
7189
7190 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7191 an XOR, otherwise we need to subtract. The shift amount is so VSLO
7192 will shift the element into the upper position (adding 3 to convert a
7193 byte shift into a bit shift). */
7194 if (scalar_size == 8)
7195 {
7196 if (!BYTES_BIG_ENDIAN)
7197 {
7198 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7199 element2 = tmp_gpr;
7200 }
7201 else
7202 element2 = element;
7203
7204 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7205 bit. */
7206 emit_insn (gen_rtx_SET (tmp_gpr,
7207 gen_rtx_AND (DImode,
7208 gen_rtx_ASHIFT (DImode,
7209 element2,
7210 GEN_INT (6)),
7211 GEN_INT (64))));
7212 }
7213 else
7214 {
7215 if (!BYTES_BIG_ENDIAN)
7216 {
7217 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7218
7219 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7220 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7221 element2 = tmp_gpr;
7222 }
7223 else
7224 element2 = element;
7225
7226 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7227 }
7228
7229 /* Get the value into the lower byte of the Altivec register where VSLO
7230 expects it. */
7231 if (TARGET_P9_VECTOR)
7232 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7233 else if (can_create_pseudo_p ())
7234 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7235 else
7236 {
7237 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7238 emit_move_insn (tmp_di, tmp_gpr);
7239 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7240 }
7241
7242 /* Do the VSLO to get the value into the final location. */
7243 switch (mode)
7244 {
7245 case E_V2DFmode:
7246 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7247 return;
7248
7249 case E_V2DImode:
7250 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7251 return;
7252
7253 case E_V4SFmode:
7254 {
7255 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7256 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7257 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7258 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7259 tmp_altivec));
7260
7261 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7262 return;
7263 }
7264
7265 case E_V4SImode:
7266 case E_V8HImode:
7267 case E_V16QImode:
7268 {
7269 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7270 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7271 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7272 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7273 tmp_altivec));
7274 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7275 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7276 GEN_INT (64 - (8 * scalar_size))));
7277 return;
7278 }
7279
7280 default:
7281 gcc_unreachable ();
7282 }
7283
7284 return;
7285 }
7286 else
7287 gcc_unreachable ();
7288 }
7289
7290 /* Return the alignment of TYPE.  Existing alignment is ALIGN.  HOW
7291    selects whether the alignment is ABI mandated, optional, or
7292    both ABI mandated and optional alignment.  */
7293
7294 unsigned int
7295 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7296 {
7297 if (how != align_opt)
7298 {
7299 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7300 align = 128;
7301 }
7302
7303 if (how != align_abi)
7304 {
7305 if (TREE_CODE (type) == ARRAY_TYPE
7306 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7307 {
7308 if (align < BITS_PER_WORD)
7309 align = BITS_PER_WORD;
7310 }
7311 }
7312
7313 return align;
7314 }
7315
7316 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7317 instructions simply ignore the low bits; VSX memory instructions
7318 are aligned to 4 or 8 bytes. */
7319
7320 static bool
7321 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7322 {
7323 return (STRICT_ALIGNMENT
7324 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7325 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7326 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7327 && (int) align < VECTOR_ALIGN (mode)))));
7328 }
7329
7330 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7331
7332 bool
7333 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7334 {
7335 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7336 {
7337 if (computed != 128)
7338 {
7339 static bool warned;
7340 if (!warned && warn_psabi)
7341 {
7342 warned = true;
7343 inform (input_location,
7344 "the layout of aggregates containing vectors with"
7345 " %d-byte alignment has changed in GCC 5",
7346 computed / BITS_PER_UNIT);
7347 }
7348 }
7349 /* In current GCC there is no special case. */
7350 return false;
7351 }
7352
7353 return false;
7354 }
7355
7356 /* AIX increases natural record alignment to doubleword if the first
7357 field is an FP double while the FP fields remain word aligned. */
7358
7359 unsigned int
7360 rs6000_special_round_type_align (tree type, unsigned int computed,
7361 unsigned int specified)
7362 {
7363 unsigned int align = MAX (computed, specified);
7364 tree field = TYPE_FIELDS (type);
7365
7366   /* Skip all non-field decls.  */
7367 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7368 field = DECL_CHAIN (field);
7369
7370 if (field != NULL && field != type)
7371 {
7372 type = TREE_TYPE (field);
7373 while (TREE_CODE (type) == ARRAY_TYPE)
7374 type = TREE_TYPE (type);
7375
7376 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7377 align = MAX (align, 64);
7378 }
7379
7380 return align;
7381 }
7382
7383 /* Darwin increases record alignment to the natural alignment of
7384 the first field. */
7385
7386 unsigned int
7387 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7388 unsigned int specified)
7389 {
7390 unsigned int align = MAX (computed, specified);
7391
7392 if (TYPE_PACKED (type))
7393 return align;
7394
7395 /* Find the first field, looking down into aggregates. */
7396 do {
7397 tree field = TYPE_FIELDS (type);
7398     /* Skip all non-field decls.  */
7399 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7400 field = DECL_CHAIN (field);
7401 if (! field)
7402 break;
7403 /* A packed field does not contribute any extra alignment. */
7404 if (DECL_PACKED (field))
7405 return align;
7406 type = TREE_TYPE (field);
7407 while (TREE_CODE (type) == ARRAY_TYPE)
7408 type = TREE_TYPE (type);
7409 } while (AGGREGATE_TYPE_P (type));
7410
7411 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7412 align = MAX (align, TYPE_ALIGN (type));
7413
7414 return align;
7415 }
7416
7417 /* Return 1 for an operand in small memory on V.4/eabi. */
7418
7419 int
7420 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7421 machine_mode mode ATTRIBUTE_UNUSED)
7422 {
7423 #if TARGET_ELF
7424 rtx sym_ref;
7425
7426 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7427 return 0;
7428
7429 if (DEFAULT_ABI != ABI_V4)
7430 return 0;
7431
7432 if (GET_CODE (op) == SYMBOL_REF)
7433 sym_ref = op;
7434
7435 else if (GET_CODE (op) != CONST
7436 || GET_CODE (XEXP (op, 0)) != PLUS
7437 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7438 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7439 return 0;
7440
7441 else
7442 {
7443 rtx sum = XEXP (op, 0);
7444 HOST_WIDE_INT summand;
7445
7446 /* We have to be careful here, because it is the referenced address
7447 that must be 32k from _SDA_BASE_, not just the symbol. */
7448 summand = INTVAL (XEXP (sum, 1));
7449 if (summand < 0 || summand > g_switch_value)
7450 return 0;
7451
7452 sym_ref = XEXP (sum, 0);
7453 }
7454
7455 return SYMBOL_REF_SMALL_P (sym_ref);
7456 #else
7457 return 0;
7458 #endif
7459 }
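/* Editor's example: an address such as

	(const (plus (symbol_ref ("s")) (const_int 40000)))

   is rejected above even if "s" itself is small data, because the
   referenced address, not just the symbol, must be within reach of
   _SDA_BASE_; a plain (symbol_ref ("s")) with SYMBOL_REF_SMALL_P set
   is accepted.  */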
7460
7461 /* Return true if either operand is a general purpose register. */
7462
7463 bool
7464 gpr_or_gpr_p (rtx op0, rtx op1)
7465 {
7466 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7467 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7468 }
7469
7470 /* Return true if this is a move direct operation between GPR registers and
7471 floating point/VSX registers. */
7472
7473 bool
7474 direct_move_p (rtx op0, rtx op1)
7475 {
7476 int regno0, regno1;
7477
7478 if (!REG_P (op0) || !REG_P (op1))
7479 return false;
7480
7481 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7482 return false;
7483
7484 regno0 = REGNO (op0);
7485 regno1 = REGNO (op1);
7486 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7487 return false;
7488
7489 if (INT_REGNO_P (regno0))
7490 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7491
7492 else if (INT_REGNO_P (regno1))
7493 {
7494 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7495 return true;
7496
7497 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7498 return true;
7499 }
7500
7501 return false;
7502 }
7503
7504 /* Return true if the OFFSET is valid for the quad address instructions that
7505 use d-form (register + offset) addressing. */
7506
7507 static inline bool
7508 quad_address_offset_p (HOST_WIDE_INT offset)
7509 {
7510 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7511 }
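/* Editor's note: e.g. offsets 0, 16 and -32768 satisfy this predicate,
   while 8 and 32764 (not multiples of 16) and 32768 (out of range) do
   not; such offsets have to use the indexed (X-form) instructions such
   as lxvx/stxvx instead.  */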
7512
7513 /* Return true if ADDR is an acceptable address for a quad memory
7514    operation of mode MODE (either LQ/STQ for general purpose registers, or
7515    LXV/STXV for vector registers under ISA 3.0).  STRICT selects strict
7516    address checking, i.e. only hard registers (or pseudos already assigned
7517    to hard registers) are accepted as base registers.  */
7518
7519 bool
7520 quad_address_p (rtx addr, machine_mode mode, bool strict)
7521 {
7522 rtx op0, op1;
7523
7524 if (GET_MODE_SIZE (mode) != 16)
7525 return false;
7526
7527 if (legitimate_indirect_address_p (addr, strict))
7528 return true;
7529
7530 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7531 return false;
7532
7533 if (GET_CODE (addr) != PLUS)
7534 return false;
7535
7536 op0 = XEXP (addr, 0);
7537 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7538 return false;
7539
7540 op1 = XEXP (addr, 1);
7541 if (!CONST_INT_P (op1))
7542 return false;
7543
7544 return quad_address_offset_p (INTVAL (op1));
7545 }
7546
7547 /* Return true if this is a load or store quad operation. This function does
7548 not handle the atomic quad memory instructions. */
7549
7550 bool
7551 quad_load_store_p (rtx op0, rtx op1)
7552 {
7553 bool ret;
7554
7555 if (!TARGET_QUAD_MEMORY)
7556 ret = false;
7557
7558 else if (REG_P (op0) && MEM_P (op1))
7559 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7560 && quad_memory_operand (op1, GET_MODE (op1))
7561 && !reg_overlap_mentioned_p (op0, op1));
7562
7563 else if (MEM_P (op0) && REG_P (op1))
7564 ret = (quad_memory_operand (op0, GET_MODE (op0))
7565 && quad_int_reg_operand (op1, GET_MODE (op1)));
7566
7567 else
7568 ret = false;
7569
7570 if (TARGET_DEBUG_ADDR)
7571 {
7572 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7573 ret ? "true" : "false");
7574 debug_rtx (gen_rtx_SET (op0, op1));
7575 }
7576
7577 return ret;
7578 }
7579
7580 /* Given an address, return a constant offset term if one exists. */
7581
7582 static rtx
7583 address_offset (rtx op)
7584 {
7585 if (GET_CODE (op) == PRE_INC
7586 || GET_CODE (op) == PRE_DEC)
7587 op = XEXP (op, 0);
7588 else if (GET_CODE (op) == PRE_MODIFY
7589 || GET_CODE (op) == LO_SUM)
7590 op = XEXP (op, 1);
7591
7592 if (GET_CODE (op) == CONST)
7593 op = XEXP (op, 0);
7594
7595 if (GET_CODE (op) == PLUS)
7596 op = XEXP (op, 1);
7597
7598 if (CONST_INT_P (op))
7599 return op;
7600
7601 return NULL_RTX;
7602 }
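/* Editor's examples of what address_offset extracts:

	(plus (reg r3) (const_int 8))                   --> (const_int 8)
	(lo_sum (reg r9)
		(const (plus (symbol_ref ("x"))
			     (const_int 4))))           --> (const_int 4)
	(reg r3)                                        --> NULL_RTX  */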
7603
7604 /* Return true if the MEM operand is a memory operand suitable for use
7605 with a (full width, possibly multiple) gpr load/store. On
7606 powerpc64 this means the offset must be divisible by 4.
7607 Implements 'Y' constraint.
7608
7609 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7610 a constraint function we know the operand has satisfied a suitable
7611 memory predicate. Also accept some odd rtl generated by reload
7612 (see rs6000_legitimize_reload_address for various forms). It is
7613 important that reload rtl be accepted by appropriate constraints
7614 but not by the operand predicate.
7615
7616 Offsetting a lo_sum should not be allowed, except where we know by
7617 alignment that a 32k boundary is not crossed, but see the ???
7618 comment in rs6000_legitimize_reload_address. Note that by
7619 "offsetting" here we mean a further offset to access parts of the
7620 MEM. It's fine to have a lo_sum where the inner address is offset
7621 from a sym, since the same sym+offset will appear in the high part
7622 of the address calculation. */
7623
7624 bool
7625 mem_operand_gpr (rtx op, machine_mode mode)
7626 {
7627 unsigned HOST_WIDE_INT offset;
7628 int extra;
7629 rtx addr = XEXP (op, 0);
7630
7631 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7632 if (TARGET_UPDATE
7633 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7634 && mode_supports_pre_incdec_p (mode)
7635 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7636 return true;
7637
7638 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7639 if (!rs6000_offsettable_memref_p (op, mode, false))
7640 return false;
7641
7642 op = address_offset (addr);
7643 if (op == NULL_RTX)
7644 return true;
7645
7646 offset = INTVAL (op);
7647 if (TARGET_POWERPC64 && (offset & 3) != 0)
7648 return false;
7649
7650 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7651 if (extra < 0)
7652 extra = 0;
7653
7654 if (GET_CODE (addr) == LO_SUM)
7655 /* For lo_sum addresses, we must allow any offset except one that
7656 causes a wrap, so test only the low 16 bits. */
7657 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7658
7659 return offset + 0x8000 < 0x10000u - extra;
7660 }
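/* Editor's worked example: on powerpc64 these accesses use DS-form
   ld/std, so an offset of 6 fails the (offset & 3) test above.  The
   final test also reserves room for multi-register accesses: with
   EXTRA == 8 (e.g. a 16-byte value), offset 32760 is rejected because
   the second doubleword would sit past 32767, while 32752 is fine.  */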
7661
7662 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7663 enforce an offset divisible by 4 even for 32-bit. */
7664
7665 bool
7666 mem_operand_ds_form (rtx op, machine_mode mode)
7667 {
7668 unsigned HOST_WIDE_INT offset;
7669 int extra;
7670 rtx addr = XEXP (op, 0);
7671
7672 if (!offsettable_address_p (false, mode, addr))
7673 return false;
7674
7675 op = address_offset (addr);
7676 if (op == NULL_RTX)
7677 return true;
7678
7679 offset = INTVAL (op);
7680 if ((offset & 3) != 0)
7681 return false;
7682
7683 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7684 if (extra < 0)
7685 extra = 0;
7686
7687 if (GET_CODE (addr) == LO_SUM)
7688 /* For lo_sum addresses, we must allow any offset except one that
7689 causes a wrap, so test only the low 16 bits. */
7690 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7691
7692 return offset + 0x8000 < 0x10000u - extra;
7693 }
7694 \f
7695 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7696
7697 static bool
7698 reg_offset_addressing_ok_p (machine_mode mode)
7699 {
7700 switch (mode)
7701 {
7702 case E_V16QImode:
7703 case E_V8HImode:
7704 case E_V4SFmode:
7705 case E_V4SImode:
7706 case E_V2DFmode:
7707 case E_V2DImode:
7708 case E_V1TImode:
7709 case E_TImode:
7710 case E_TFmode:
7711 case E_KFmode:
7712 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7713 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7714 a vector mode, if we want to use the VSX registers to move it around,
7715 we need to restrict ourselves to reg+reg addressing. Similarly for
7716 IEEE 128-bit floating point that is passed in a single vector
7717 register. */
7718 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7719 return mode_supports_dq_form (mode);
7720 break;
7721
7722 case E_SDmode:
7723 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7724 addressing for the LFIWZX and STFIWX instructions. */
7725 if (TARGET_NO_SDMODE_STACK)
7726 return false;
7727 break;
7728
7729 default:
7730 break;
7731 }
7732
7733 return true;
7734 }
7735
7736 static bool
7737 virtual_stack_registers_memory_p (rtx op)
7738 {
7739 int regnum;
7740
7741 if (GET_CODE (op) == REG)
7742 regnum = REGNO (op);
7743
7744 else if (GET_CODE (op) == PLUS
7745 && GET_CODE (XEXP (op, 0)) == REG
7746 && GET_CODE (XEXP (op, 1)) == CONST_INT)
7747 regnum = REGNO (XEXP (op, 0));
7748
7749 else
7750 return false;
7751
7752 return (regnum >= FIRST_VIRTUAL_REGISTER
7753 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7754 }
7755
7756 /* Return true if a MODE-sized memory access to OP plus OFFSET
7757    is known not to straddle a 32k boundary.  This function is used
7758 to determine whether -mcmodel=medium code can use TOC pointer
7759 relative addressing for OP. This means the alignment of the TOC
7760 pointer must also be taken into account, and unfortunately that is
7761 only 8 bytes. */
7762
7763 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7764 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7765 #endif
7766
7767 static bool
7768 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7769 machine_mode mode)
7770 {
7771 tree decl;
7772 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7773
7774 if (GET_CODE (op) != SYMBOL_REF)
7775 return false;
7776
7777   /* ISA 3.0 vector d-form addressing is restricted; don't allow
7778      SYMBOL_REF.  */
7779 if (mode_supports_dq_form (mode))
7780 return false;
7781
7782 dsize = GET_MODE_SIZE (mode);
7783 decl = SYMBOL_REF_DECL (op);
7784 if (!decl)
7785 {
7786 if (dsize == 0)
7787 return false;
7788
7789 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7790 replacing memory addresses with an anchor plus offset. We
7791 could find the decl by rummaging around in the block->objects
7792 VEC for the given offset but that seems like too much work. */
7793 dalign = BITS_PER_UNIT;
7794 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7795 && SYMBOL_REF_ANCHOR_P (op)
7796 && SYMBOL_REF_BLOCK (op) != NULL)
7797 {
7798 struct object_block *block = SYMBOL_REF_BLOCK (op);
7799
7800 dalign = block->alignment;
7801 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7802 }
7803 else if (CONSTANT_POOL_ADDRESS_P (op))
7804 {
7805 	  /* It would be nice to have get_pool_align ()...  */
7806 machine_mode cmode = get_pool_mode (op);
7807
7808 dalign = GET_MODE_ALIGNMENT (cmode);
7809 }
7810 }
7811 else if (DECL_P (decl))
7812 {
7813 dalign = DECL_ALIGN (decl);
7814
7815 if (dsize == 0)
7816 {
7817 /* Allow BLKmode when the entire object is known to not
7818 cross a 32k boundary. */
7819 if (!DECL_SIZE_UNIT (decl))
7820 return false;
7821
7822 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7823 return false;
7824
7825 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7826 if (dsize > 32768)
7827 return false;
7828
7829 dalign /= BITS_PER_UNIT;
7830 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7831 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7832 return dalign >= dsize;
7833 }
7834 }
7835 else
7836 gcc_unreachable ();
7837
7838 /* Find how many bits of the alignment we know for this access. */
7839 dalign /= BITS_PER_UNIT;
7840 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7841 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7842 mask = dalign - 1;
7843 lsb = offset & -offset;
7844 mask &= lsb - 1;
7845 dalign = mask + 1;
7846
7847 return dalign >= dsize;
7848 }
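/* Editor's worked example of the final computation above: with the TOC
   pointer alignment capped at 8 bytes, OFFSET == 20 gives lsb == 4, so
   only 4-byte alignment of the access is known and DSIZE == 8 fails;
   OFFSET == 16 gives lsb == 16, the known alignment stays at the 8-byte
   cap, and an 8-byte access is accepted.  */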
7849
7850 static bool
7851 constant_pool_expr_p (rtx op)
7852 {
7853 rtx base, offset;
7854
7855 split_const (op, &base, &offset);
7856 return (GET_CODE (base) == SYMBOL_REF
7857 && CONSTANT_POOL_ADDRESS_P (base)
7858 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7859 }
7860
7861 /* These are only used to pass through from print_operand/print_operand_address
7862 to rs6000_output_addr_const_extra over the intervening function
7863 output_addr_const which is not target code. */
7864 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7865
7866 /* Return true if OP is a toc pointer relative address (the output
7867 of create_TOC_reference). If STRICT, do not match non-split
7868 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7869 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7870 TOCREL_OFFSET_RET respectively. */
7871
7872 bool
7873 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7874 const_rtx *tocrel_offset_ret)
7875 {
7876 if (!TARGET_TOC)
7877 return false;
7878
7879 if (TARGET_CMODEL != CMODEL_SMALL)
7880 {
7881       /* When strict, ensure we have everything tidy.  */
7882 if (strict
7883 && !(GET_CODE (op) == LO_SUM
7884 && REG_P (XEXP (op, 0))
7885 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7886 return false;
7887
7888 /* When not strict, allow non-split TOC addresses and also allow
7889 (lo_sum (high ..)) TOC addresses created during reload. */
7890 if (GET_CODE (op) == LO_SUM)
7891 op = XEXP (op, 1);
7892 }
7893
7894 const_rtx tocrel_base = op;
7895 const_rtx tocrel_offset = const0_rtx;
7896
7897 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7898 {
7899 tocrel_base = XEXP (op, 0);
7900 tocrel_offset = XEXP (op, 1);
7901 }
7902
7903 if (tocrel_base_ret)
7904 *tocrel_base_ret = tocrel_base;
7905 if (tocrel_offset_ret)
7906 *tocrel_offset_ret = tocrel_offset;
7907
7908 return (GET_CODE (tocrel_base) == UNSPEC
7909 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7910 && REG_P (XVECEXP (tocrel_base, 0, 1))
7911 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7912 }
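/* Editor's sketch of the form matched above: a TOC-relative reference
   typically looks like

	(unspec:DI [(symbol_ref:DI ("*.LC0"))
		    (reg:DI 2)] UNSPEC_TOCREL)

   possibly wrapped in (plus ... (const_int N)) for an offset, and for
   -mcmodel=medium/large in a (lo_sum (reg) ...) once split.  */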
7913
7914 /* Return true if X is a constant pool address, and also for cmodel=medium
7915 if X is a toc-relative address known to be offsettable within MODE. */
7916
7917 bool
7918 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7919 bool strict)
7920 {
7921 const_rtx tocrel_base, tocrel_offset;
7922 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7923 && (TARGET_CMODEL != CMODEL_MEDIUM
7924 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7925 || mode == QImode
7926 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7927 INTVAL (tocrel_offset), mode)));
7928 }
7929
7930 static bool
7931 legitimate_small_data_p (machine_mode mode, rtx x)
7932 {
7933 return (DEFAULT_ABI == ABI_V4
7934 && !flag_pic && !TARGET_TOC
7935 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
7936 && small_data_operand (x, mode));
7937 }
7938
7939 bool
7940 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7941 bool strict, bool worst_case)
7942 {
7943 unsigned HOST_WIDE_INT offset;
7944 unsigned int extra;
7945
7946 if (GET_CODE (x) != PLUS)
7947 return false;
7948 if (!REG_P (XEXP (x, 0)))
7949 return false;
7950 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7951 return false;
7952 if (mode_supports_dq_form (mode))
7953 return quad_address_p (x, mode, strict);
7954 if (!reg_offset_addressing_ok_p (mode))
7955 return virtual_stack_registers_memory_p (x);
7956 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7957 return true;
7958 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
7959 return false;
7960
7961 offset = INTVAL (XEXP (x, 1));
7962 extra = 0;
7963 switch (mode)
7964 {
7965 case E_DFmode:
7966 case E_DDmode:
7967 case E_DImode:
7968 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7969 addressing. */
7970 if (VECTOR_MEM_VSX_P (mode))
7971 return false;
7972
7973 if (!worst_case)
7974 break;
7975 if (!TARGET_POWERPC64)
7976 extra = 4;
7977 else if (offset & 3)
7978 return false;
7979 break;
7980
7981 case E_TFmode:
7982 case E_IFmode:
7983 case E_KFmode:
7984 case E_TDmode:
7985 case E_TImode:
7986 case E_PTImode:
7987 extra = 8;
7988 if (!worst_case)
7989 break;
7990 if (!TARGET_POWERPC64)
7991 extra = 12;
7992 else if (offset & 3)
7993 return false;
7994 break;
7995
7996 default:
7997 break;
7998 }
7999
8000 offset += 0x8000;
8001 return offset < 0x10000 - extra;
8002 }
8003
8004 bool
8005 legitimate_indexed_address_p (rtx x, int strict)
8006 {
8007 rtx op0, op1;
8008
8009 if (GET_CODE (x) != PLUS)
8010 return false;
8011
8012 op0 = XEXP (x, 0);
8013 op1 = XEXP (x, 1);
8014
8015 return (REG_P (op0) && REG_P (op1)
8016 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8017 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8018 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8019 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8020 }
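/* Editor's example: (plus (reg r9) (reg r10)) is a legitimate indexed
   (X-form) address, matching instructions such as lwzx/stdx; either
   operand may serve as the base register.  */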
8021
8022 bool
8023 avoiding_indexed_address_p (machine_mode mode)
8024 {
8025 /* Avoid indexed addressing for modes that have non-indexed
8026 load/store instruction forms. */
8027 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8028 }
8029
8030 bool
8031 legitimate_indirect_address_p (rtx x, int strict)
8032 {
8033 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8034 }
8035
8036 bool
8037 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8038 {
8039 if (!TARGET_MACHO || !flag_pic
8040 || mode != SImode || GET_CODE (x) != MEM)
8041 return false;
8042 x = XEXP (x, 0);
8043
8044 if (GET_CODE (x) != LO_SUM)
8045 return false;
8046 if (GET_CODE (XEXP (x, 0)) != REG)
8047 return false;
8048 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8049 return false;
8050 x = XEXP (x, 1);
8051
8052 return CONSTANT_P (x);
8053 }
8054
8055 static bool
8056 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8057 {
8058 if (GET_CODE (x) != LO_SUM)
8059 return false;
8060 if (GET_CODE (XEXP (x, 0)) != REG)
8061 return false;
8062 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8063 return false;
8064   /* Quad word addresses are restricted, and we can't use LO_SUM.  */
8065 if (mode_supports_dq_form (mode))
8066 return false;
8067 x = XEXP (x, 1);
8068
8069 if (TARGET_ELF || TARGET_MACHO)
8070 {
8071 bool large_toc_ok;
8072
8073 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8074 return false;
8075       /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, since that hook usually
8076 	 calls push_reload, which is reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
8077 	 recognizes some LO_SUM addresses as valid although this
8078 	 function says the opposite.  In most cases LRA can generate
8079 	 correct code for address reloads through its own transformations,
8080 	 but it cannot manage some LO_SUM cases.  So we need to add code
8081 	 here, analogous to that in rs6000_legitimize_reload_address for
8082 	 LO_SUM, saying that some addresses are still valid.  */
8083 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8084 && small_toc_ref (x, VOIDmode));
8085 if (TARGET_TOC && ! large_toc_ok)
8086 return false;
8087 if (GET_MODE_NUNITS (mode) != 1)
8088 return false;
8089 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8090 && !(/* ??? Assume floating point reg based on mode? */
8091 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8092 return false;
8093
8094 return CONSTANT_P (x) || large_toc_ok;
8095 }
8096
8097 return false;
8098 }
8099
8101 /* Try machine-dependent ways of modifying an illegitimate address
8102 to be legitimate. If we find one, return the new, valid address.
8103 This is used from only one place: `memory_address' in explow.c.
8104
8105 OLDX is the address as it was before break_out_memory_refs was
8106 called. In some cases it is useful to look at this to decide what
8107 needs to be done.
8108
8109 It is always safe for this function to do nothing. It exists to
8110 recognize opportunities to optimize the output.
8111
8112 On RS/6000, first check for the sum of a register with a constant
8113 integer that is out of range. If so, generate code to add the
8114 constant with the low-order 16 bits masked to the register and force
8115 this result into another register (this can be done with `cau').
8116 Then generate an address of REG+(CONST&0xffff), allowing for the
8117 possibility of bit 16 being a one.
8118
8119    Then check for the sum of a register and something not constant; try to
8120    load the non-constant part into a register and return the sum.  */
8121
8122 static rtx
8123 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8124 machine_mode mode)
8125 {
8126 unsigned int extra;
8127
8128 if (!reg_offset_addressing_ok_p (mode)
8129 || mode_supports_dq_form (mode))
8130 {
8131 if (virtual_stack_registers_memory_p (x))
8132 return x;
8133
8134 /* In theory we should not be seeing addresses of the form reg+0,
8135 but just in case it is generated, optimize it away. */
8136 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8137 return force_reg (Pmode, XEXP (x, 0));
8138
8139 /* For TImode with load/store quad, restrict addresses to just a single
8140 pointer, so it works with both GPRs and VSX registers. */
8141 /* Make sure both operands are registers. */
8142 else if (GET_CODE (x) == PLUS
8143 && (mode != TImode || !TARGET_VSX))
8144 return gen_rtx_PLUS (Pmode,
8145 force_reg (Pmode, XEXP (x, 0)),
8146 force_reg (Pmode, XEXP (x, 1)));
8147 else
8148 return force_reg (Pmode, x);
8149 }
8150 if (GET_CODE (x) == SYMBOL_REF)
8151 {
8152 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8153 if (model != 0)
8154 return rs6000_legitimize_tls_address (x, model);
8155 }
8156
8157 extra = 0;
8158 switch (mode)
8159 {
8160 case E_TFmode:
8161 case E_TDmode:
8162 case E_TImode:
8163 case E_PTImode:
8164 case E_IFmode:
8165 case E_KFmode:
8166 /* As in legitimate_offset_address_p we do not assume
8167 worst-case. The mode here is just a hint as to the registers
8168 used. A TImode is usually in gprs, but may actually be in
8169 fprs. Leave worst-case scenario for reload to handle via
8170 insn constraints. PTImode is only GPRs. */
8171 extra = 8;
8172 break;
8173 default:
8174 break;
8175 }
8176
8177 if (GET_CODE (x) == PLUS
8178 && GET_CODE (XEXP (x, 0)) == REG
8179 && GET_CODE (XEXP (x, 1)) == CONST_INT
8180 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8181 >= 0x10000 - extra))
8182 {
8183 HOST_WIDE_INT high_int, low_int;
8184 rtx sum;
8185 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8186 if (low_int >= 0x8000 - extra)
8187 low_int = 0;
8188 high_int = INTVAL (XEXP (x, 1)) - low_int;
8189 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8190 GEN_INT (high_int)), 0);
8191 return plus_constant (Pmode, sum, low_int);
8192 }
8193 else if (GET_CODE (x) == PLUS
8194 && GET_CODE (XEXP (x, 0)) == REG
8195 && GET_CODE (XEXP (x, 1)) != CONST_INT
8196 && GET_MODE_NUNITS (mode) == 1
8197 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8198 || (/* ??? Assume floating point reg based on mode? */
8199 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8200 && !avoiding_indexed_address_p (mode))
8201 {
8202 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8203 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8204 }
8205 else if ((TARGET_ELF
8206 #if TARGET_MACHO
8207 || !MACHO_DYNAMIC_NO_PIC_P
8208 #endif
8209 )
8210 && TARGET_32BIT
8211 && TARGET_NO_TOC
8212 && ! flag_pic
8213 && GET_CODE (x) != CONST_INT
8214 && GET_CODE (x) != CONST_WIDE_INT
8215 && GET_CODE (x) != CONST_DOUBLE
8216 && CONSTANT_P (x)
8217 && GET_MODE_NUNITS (mode) == 1
8218 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8219 || (/* ??? Assume floating point reg based on mode? */
8220 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8221 {
8222 rtx reg = gen_reg_rtx (Pmode);
8223 if (TARGET_ELF)
8224 emit_insn (gen_elf_high (reg, x));
8225 else
8226 emit_insn (gen_macho_high (reg, x));
8227 return gen_rtx_LO_SUM (Pmode, reg, x);
8228 }
8229 else if (TARGET_TOC
8230 && GET_CODE (x) == SYMBOL_REF
8231 && constant_pool_expr_p (x)
8232 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8233 return create_TOC_reference (x, NULL_RTX);
8234 else
8235 return x;
8236 }
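/* Editor's worked example for the constant-splitting case above: for
   X == (plus (reg r3) (const_int 0x12348)), LOW_INT is 0x2348 and
   HIGH_INT is 0x10000, so an addis-style add of HIGH_INT is forced into
   a new register and the function returns

	(plus (reg tmp) (const_int 0x2348))

   which is a valid D-form offset address.  */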
8237
8238 /* Debug version of rs6000_legitimize_address. */
8239 static rtx
8240 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8241 {
8242 rtx ret;
8243 rtx_insn *insns;
8244
8245 start_sequence ();
8246 ret = rs6000_legitimize_address (x, oldx, mode);
8247 insns = get_insns ();
8248 end_sequence ();
8249
8250 if (ret != x)
8251 {
8252 fprintf (stderr,
8253 "\nrs6000_legitimize_address: mode %s, old code %s, "
8254 "new code %s, modified\n",
8255 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8256 GET_RTX_NAME (GET_CODE (ret)));
8257
8258 fprintf (stderr, "Original address:\n");
8259 debug_rtx (x);
8260
8261 fprintf (stderr, "oldx:\n");
8262 debug_rtx (oldx);
8263
8264 fprintf (stderr, "New address:\n");
8265 debug_rtx (ret);
8266
8267 if (insns)
8268 {
8269 fprintf (stderr, "Insns added:\n");
8270 debug_rtx_list (insns, 20);
8271 }
8272 }
8273 else
8274 {
8275 fprintf (stderr,
8276 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8277 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8278
8279 debug_rtx (x);
8280 }
8281
8282 if (insns)
8283 emit_insn (insns);
8284
8285 return ret;
8286 }
8287
8288 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8289 We need to emit DTP-relative relocations. */
8290
8291 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8292 static void
8293 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8294 {
8295 switch (size)
8296 {
8297 case 4:
8298 fputs ("\t.long\t", file);
8299 break;
8300 case 8:
8301 fputs (DOUBLE_INT_ASM_OP, file);
8302 break;
8303 default:
8304 gcc_unreachable ();
8305 }
8306 output_addr_const (file, x);
8307 if (TARGET_ELF)
8308 fputs ("@dtprel+0x8000", file);
8309 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8310 {
8311 switch (SYMBOL_REF_TLS_MODEL (x))
8312 {
8313 case 0:
8314 break;
8315 case TLS_MODEL_LOCAL_EXEC:
8316 fputs ("@le", file);
8317 break;
8318 case TLS_MODEL_INITIAL_EXEC:
8319 fputs ("@ie", file);
8320 break;
8321 case TLS_MODEL_GLOBAL_DYNAMIC:
8322 case TLS_MODEL_LOCAL_DYNAMIC:
8323 fputs ("@m", file);
8324 break;
8325 default:
8326 gcc_unreachable ();
8327 }
8328 }
8329 }
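/* Editor's note: e.g. for SIZE == 4 on ELF the function above emits

	.long	foo@dtprel+0x8000

   the 0x8000 bias matching the convention that DTP-relative offsets
   are biased by 0x8000 in the PowerPC TLS ABI.  */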
8330
8331 /* Return true if X is a symbol that refers to real (rather than emulated)
8332 TLS. */
8333
8334 static bool
8335 rs6000_real_tls_symbol_ref_p (rtx x)
8336 {
8337 return (GET_CODE (x) == SYMBOL_REF
8338 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8339 }
8340
8341 /* In the name of slightly smaller debug output, and to cater to
8342 general assembler lossage, recognize various UNSPEC sequences
8343 and turn them back into a direct symbol reference. */
8344
8345 static rtx
8346 rs6000_delegitimize_address (rtx orig_x)
8347 {
8348 rtx x, y, offset;
8349
8350 orig_x = delegitimize_mem_from_attrs (orig_x);
8351 x = orig_x;
8352 if (MEM_P (x))
8353 x = XEXP (x, 0);
8354
8355 y = x;
8356 if (TARGET_CMODEL != CMODEL_SMALL
8357 && GET_CODE (y) == LO_SUM)
8358 y = XEXP (y, 1);
8359
8360 offset = NULL_RTX;
8361 if (GET_CODE (y) == PLUS
8362 && GET_MODE (y) == Pmode
8363 && CONST_INT_P (XEXP (y, 1)))
8364 {
8365 offset = XEXP (y, 1);
8366 y = XEXP (y, 0);
8367 }
8368
8369 if (GET_CODE (y) == UNSPEC
8370 && XINT (y, 1) == UNSPEC_TOCREL)
8371 {
8372 y = XVECEXP (y, 0, 0);
8373
8374 #ifdef HAVE_AS_TLS
8375 /* Do not associate thread-local symbols with the original
8376 constant pool symbol. */
8377 if (TARGET_XCOFF
8378 && GET_CODE (y) == SYMBOL_REF
8379 && CONSTANT_POOL_ADDRESS_P (y)
8380 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8381 return orig_x;
8382 #endif
8383
8384 if (offset != NULL_RTX)
8385 y = gen_rtx_PLUS (Pmode, y, offset);
8386 if (!MEM_P (orig_x))
8387 return y;
8388 else
8389 return replace_equiv_address_nv (orig_x, y);
8390 }
8391
8392 if (TARGET_MACHO
8393 && GET_CODE (orig_x) == LO_SUM
8394 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8395 {
8396 y = XEXP (XEXP (orig_x, 1), 0);
8397 if (GET_CODE (y) == UNSPEC
8398 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8399 return XVECEXP (y, 0, 0);
8400 }
8401
8402 return orig_x;
8403 }
8404
8405 /* Return true if X shouldn't be emitted into the debug info.
8406 The linker doesn't like .toc section references from
8407 .debug_* sections, so reject .toc section symbols. */
8408
8409 static bool
8410 rs6000_const_not_ok_for_debug_p (rtx x)
8411 {
8412 if (GET_CODE (x) == UNSPEC)
8413 return true;
8414 if (GET_CODE (x) == SYMBOL_REF
8415 && CONSTANT_POOL_ADDRESS_P (x))
8416 {
8417 rtx c = get_pool_constant (x);
8418 machine_mode cmode = get_pool_mode (x);
8419 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8420 return true;
8421 }
8422
8423 return false;
8424 }
8425
8426 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8427
8428 static bool
8429 rs6000_legitimate_combined_insn (rtx_insn *insn)
8430 {
8431 int icode = INSN_CODE (insn);
8432
8433 /* Reject creating doloop insns. Combine should not be allowed
8434 to create these for a number of reasons:
8435 1) In a nested loop, if combine creates one of these in an
8436 outer loop and the register allocator happens to allocate ctr
8437 to the outer loop insn, then the inner loop can't use ctr.
8438 Inner loops ought to be more highly optimized.
8439 2) Combine often wants to create one of these from what was
8440 originally a three insn sequence, first combining the three
8441 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8442      allocated ctr, the splitter takes us back to the three insn
8443 sequence. It's better to stop combine at the two insn
8444 sequence.
8445      3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8446 insns, the register allocator sometimes uses floating point
8447 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8448 jump insn and output reloads are not implemented for jumps,
8449 the ctrsi/ctrdi splitters need to handle all possible cases.
8450 That's a pain, and it gets to be seriously difficult when a
8451 splitter that runs after reload needs memory to transfer from
8452 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8453 for the difficult case. It's better to not create problems
8454 in the first place. */
8455 if (icode != CODE_FOR_nothing
8456 && (icode == CODE_FOR_bdz_si
8457 || icode == CODE_FOR_bdz_di
8458 || icode == CODE_FOR_bdnz_si
8459 || icode == CODE_FOR_bdnz_di
8460 || icode == CODE_FOR_bdztf_si
8461 || icode == CODE_FOR_bdztf_di
8462 || icode == CODE_FOR_bdnztf_si
8463 || icode == CODE_FOR_bdnztf_di))
8464 return false;
8465
8466 return true;
8467 }
8468
8469 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8470
8471 static GTY(()) rtx rs6000_tls_symbol;
8472 static rtx
8473 rs6000_tls_get_addr (void)
8474 {
8475 if (!rs6000_tls_symbol)
8476 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8477
8478 return rs6000_tls_symbol;
8479 }
8480
8481 /* Construct the SYMBOL_REF for TLS GOT references. */
8482
8483 static GTY(()) rtx rs6000_got_symbol;
8484 static rtx
8485 rs6000_got_sym (void)
8486 {
8487 if (!rs6000_got_symbol)
8488 {
8489 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8490 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8491 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8492 }
8493
8494 return rs6000_got_symbol;
8495 }
8496
8497 /* AIX Thread-Local Address support. */
8498
8499 static rtx
8500 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8501 {
8502 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8503 const char *name;
8504 char *tlsname;
8505
8506 name = XSTR (addr, 0);
8507   /* Append the TLS CSECT qualifier, unless the symbol is already qualified
8508      or the symbol will be in the TLS private data section.  */
8509 if (name[strlen (name) - 1] != ']'
8510 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8511 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8512 {
8513 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8514 strcpy (tlsname, name);
8515 strcat (tlsname,
8516 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8517 tlsaddr = copy_rtx (addr);
8518 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8519 }
8520 else
8521 tlsaddr = addr;
8522
8523 /* Place addr into TOC constant pool. */
8524 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8525
8526 /* Output the TOC entry and create the MEM referencing the value. */
8527 if (constant_pool_expr_p (XEXP (sym, 0))
8528 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8529 {
8530 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8531 mem = gen_const_mem (Pmode, tocref);
8532 set_mem_alias_set (mem, get_TOC_alias_set ());
8533 }
8534 else
8535 return sym;
8536
8537 /* Use global-dynamic for local-dynamic. */
8538 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8539 || model == TLS_MODEL_LOCAL_DYNAMIC)
8540 {
8541 /* Create new TOC reference for @m symbol. */
8542 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8543 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8544 strcpy (tlsname, "*LCM");
8545 strcat (tlsname, name + 3);
8546 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8547 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8548 tocref = create_TOC_reference (modaddr, NULL_RTX);
8549 rtx modmem = gen_const_mem (Pmode, tocref);
8550 set_mem_alias_set (modmem, get_TOC_alias_set ());
8551
8552 rtx modreg = gen_reg_rtx (Pmode);
8553 emit_insn (gen_rtx_SET (modreg, modmem));
8554
8555 tmpreg = gen_reg_rtx (Pmode);
8556 emit_insn (gen_rtx_SET (tmpreg, mem));
8557
8558 dest = gen_reg_rtx (Pmode);
8559 if (TARGET_32BIT)
8560 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8561 else
8562 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8563 return dest;
8564 }
8565 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8566 else if (TARGET_32BIT)
8567 {
8568 tlsreg = gen_reg_rtx (SImode);
8569 emit_insn (gen_tls_get_tpointer (tlsreg));
8570 }
8571 else
8572 tlsreg = gen_rtx_REG (DImode, 13);
8573
8574 /* Load the TOC value into temporary register. */
8575 tmpreg = gen_reg_rtx (Pmode);
8576 emit_insn (gen_rtx_SET (tmpreg, mem));
8577 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8578 gen_rtx_MINUS (Pmode, addr, tlsreg));
8579
8580 /* Add TOC symbol value to TLS pointer. */
8581 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8582
8583 return dest;
8584 }
8585
8586 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8587 this (thread-local) address. */
8588
8589 static rtx
8590 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8591 {
8592 rtx dest, insn;
8593
8594 if (TARGET_XCOFF)
8595 return rs6000_legitimize_tls_address_aix (addr, model);
8596
8597 dest = gen_reg_rtx (Pmode);
8598 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8599 {
8600 rtx tlsreg;
8601
8602 if (TARGET_64BIT)
8603 {
8604 tlsreg = gen_rtx_REG (Pmode, 13);
8605 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8606 }
8607 else
8608 {
8609 tlsreg = gen_rtx_REG (Pmode, 2);
8610 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8611 }
8612 emit_insn (insn);
8613 }
8614 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8615 {
8616 rtx tlsreg, tmp;
8617
8618 tmp = gen_reg_rtx (Pmode);
8619 if (TARGET_64BIT)
8620 {
8621 tlsreg = gen_rtx_REG (Pmode, 13);
8622 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8623 }
8624 else
8625 {
8626 tlsreg = gen_rtx_REG (Pmode, 2);
8627 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8628 }
8629 emit_insn (insn);
8630 if (TARGET_64BIT)
8631 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8632 else
8633 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8634 emit_insn (insn);
8635 }
8636 else
8637 {
8638 rtx r3, got, tga, tmp1, tmp2, call_insn;
8639
8640 /* We currently use relocations like @got@tlsgd for tls, which
8641 means the linker will handle allocation of tls entries, placing
8642 them in the .got section. So use a pointer to the .got section,
8643 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8644 or to secondary GOT sections used by 32-bit -fPIC. */
8645 if (TARGET_64BIT)
8646 got = gen_rtx_REG (Pmode, 2);
8647 else
8648 {
8649 if (flag_pic == 1)
8650 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8651 else
8652 {
8653 rtx gsym = rs6000_got_sym ();
8654 got = gen_reg_rtx (Pmode);
8655 if (flag_pic == 0)
8656 rs6000_emit_move (got, gsym, Pmode);
8657 else
8658 {
8659 rtx mem, lab;
8660
8661 tmp1 = gen_reg_rtx (Pmode);
8662 tmp2 = gen_reg_rtx (Pmode);
8663 mem = gen_const_mem (Pmode, tmp1);
8664 lab = gen_label_rtx ();
8665 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8666 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8667 if (TARGET_LINK_STACK)
8668 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8669 emit_move_insn (tmp2, mem);
8670 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8671 set_unique_reg_note (last, REG_EQUAL, gsym);
8672 }
8673 }
8674 }
8675
8676 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8677 {
8678 tga = rs6000_tls_get_addr ();
8679 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8680 const0_rtx, Pmode);
8681
8682 r3 = gen_rtx_REG (Pmode, 3);
8683 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8684 {
8685 if (TARGET_64BIT)
8686 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
8687 else
8688 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
8689 }
8690 else if (DEFAULT_ABI == ABI_V4)
8691 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
8692 else
8693 gcc_unreachable ();
8694 call_insn = last_call_insn ();
8695 PATTERN (call_insn) = insn;
8696 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8697 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8698 pic_offset_table_rtx);
8699 }
8700 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8701 {
8702 tga = rs6000_tls_get_addr ();
8703 tmp1 = gen_reg_rtx (Pmode);
8704 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8705 const0_rtx, Pmode);
8706
8707 r3 = gen_rtx_REG (Pmode, 3);
8708 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8709 {
8710 if (TARGET_64BIT)
8711 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
8712 else
8713 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
8714 }
8715 else if (DEFAULT_ABI == ABI_V4)
8716 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
8717 else
8718 gcc_unreachable ();
8719 call_insn = last_call_insn ();
8720 PATTERN (call_insn) = insn;
8721 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8722 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8723 pic_offset_table_rtx);
8724
8725 if (rs6000_tls_size == 16)
8726 {
8727 if (TARGET_64BIT)
8728 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8729 else
8730 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8731 }
8732 else if (rs6000_tls_size == 32)
8733 {
8734 tmp2 = gen_reg_rtx (Pmode);
8735 if (TARGET_64BIT)
8736 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8737 else
8738 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8739 emit_insn (insn);
8740 if (TARGET_64BIT)
8741 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8742 else
8743 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8744 }
8745 else
8746 {
8747 tmp2 = gen_reg_rtx (Pmode);
8748 if (TARGET_64BIT)
8749 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8750 else
8751 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8752 emit_insn (insn);
8753 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8754 }
8755 emit_insn (insn);
8756 }
8757 else
8758 {
8759 /* IE, or 64-bit offset LE. */
8760 tmp2 = gen_reg_rtx (Pmode);
8761 if (TARGET_64BIT)
8762 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8763 else
8764 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8765 emit_insn (insn);
8766 if (TARGET_64BIT)
8767 insn = gen_tls_tls_64 (dest, tmp2, addr);
8768 else
8769 insn = gen_tls_tls_32 (dest, tmp2, addr);
8770 emit_insn (insn);
8771 }
8772 }
8773
8774 return dest;
8775 }
8776
8777 /* Only create the global variable for the stack protect guard if we are using
8778 the global flavor of that guard. */
8779 static tree
8780 rs6000_init_stack_protect_guard (void)
8781 {
8782 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8783 return default_stack_protect_guard ();
8784
8785 return NULL_TREE;
8786 }
8787
8788 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8789
8790 static bool
8791 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8792 {
8793 if (GET_CODE (x) == HIGH
8794 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8795 return true;
8796
8797 /* A TLS symbol in the TOC cannot contain a sum. */
8798 if (GET_CODE (x) == CONST
8799 && GET_CODE (XEXP (x, 0)) == PLUS
8800 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8801 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8802 return true;
8803
8804 /* Do not place an ELF TLS symbol in the constant pool. */
8805 return TARGET_ELF && tls_referenced_p (x);
8806 }
8807
8808 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8809 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8810 can be addressed relative to the toc pointer. */
8811
8812 static bool
8813 use_toc_relative_ref (rtx sym, machine_mode mode)
8814 {
8815 return ((constant_pool_expr_p (sym)
8816 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8817 get_pool_mode (sym)))
8818 || (TARGET_CMODEL == CMODEL_MEDIUM
8819 && SYMBOL_REF_LOCAL_P (sym)
8820 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8821 }
8822
8823 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8824 replace the input X, or the original X if no replacement is called for.
8825 The output parameter *WIN is 1 if the calling macro should goto WIN,
8826 0 if it should not.
8827
8828 For RS/6000, we wish to handle large displacements off a base
8829 register by splitting the addend across an addi/addis pair and the mem insn.
8830 This cuts the number of extra insns needed from 3 to 1.
8831
8832 On Darwin, we use this to generate code for floating point constants.
8833 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8834 The Darwin code is inside #if TARGET_MACHO because only then are the
8835 machopic_* functions defined. */
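/* A rough example of the displacement split performed below (register
   names are placeholders): for (plus (reg rB) (const_int 0x12345))
   with a DFmode access, the addend splits into high = 0x10000 and
   low = 0x2345, giving approximately

	addis rT,rB,0x1
	lfd   fD,0x2345(rT)

   rather than materializing the whole constant separately.  */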
8836 static rtx
8837 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8838 int opnum, int type,
8839 int ind_levels ATTRIBUTE_UNUSED, int *win)
8840 {
8841 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8842 bool quad_offset_p = mode_supports_dq_form (mode);
8843
8844 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8845 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8846 if (reg_offset_p
8847 && opnum == 1
8848 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8849 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8850 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8851 && TARGET_P9_VECTOR)
8852 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8853 && TARGET_P9_VECTOR)))
8854 reg_offset_p = false;
8855
8856 /* We must recognize output that we have already generated ourselves. */
8857 if (GET_CODE (x) == PLUS
8858 && GET_CODE (XEXP (x, 0)) == PLUS
8859 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8860 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8861 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8862 {
8863 if (TARGET_DEBUG_ADDR)
8864 {
8865 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8866 debug_rtx (x);
8867 }
8868 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8869 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8870 opnum, (enum reload_type) type);
8871 *win = 1;
8872 return x;
8873 }
8874
8875 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8876 if (GET_CODE (x) == LO_SUM
8877 && GET_CODE (XEXP (x, 0)) == HIGH)
8878 {
8879 if (TARGET_DEBUG_ADDR)
8880 {
8881 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8882 debug_rtx (x);
8883 }
8884 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8885 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8886 opnum, (enum reload_type) type);
8887 *win = 1;
8888 return x;
8889 }
8890
8891 #if TARGET_MACHO
8892 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8893 && GET_CODE (x) == LO_SUM
8894 && GET_CODE (XEXP (x, 0)) == PLUS
8895 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8896 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8897 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8898 && machopic_operand_p (XEXP (x, 1)))
8899 {
8900 /* Result of previous invocation of this function on Darwin
8901 floating point constant. */
8902 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8903 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8904 opnum, (enum reload_type) type);
8905 *win = 1;
8906 return x;
8907 }
8908 #endif
8909
8910 if (TARGET_CMODEL != CMODEL_SMALL
8911 && reg_offset_p
8912 && !quad_offset_p
8913 && small_toc_ref (x, VOIDmode))
8914 {
8915 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8916 x = gen_rtx_LO_SUM (Pmode, hi, x);
8917 if (TARGET_DEBUG_ADDR)
8918 {
8919 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8920 debug_rtx (x);
8921 }
8922 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8923 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8924 opnum, (enum reload_type) type);
8925 *win = 1;
8926 return x;
8927 }
8928
8929 if (GET_CODE (x) == PLUS
8930 && REG_P (XEXP (x, 0))
8931 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
8932 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
8933 && CONST_INT_P (XEXP (x, 1))
8934 && reg_offset_p
8935 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
8936 {
8937 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
8938 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
8939 HOST_WIDE_INT high
8940 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
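      /* The xor/subtract idiom above sign-extends the low 16 bits of VAL
	 into LOW (range [-0x8000, 0x7fff]) and rounds the remainder into
	 HIGH so that HIGH + LOW == VAL whenever VAL fits in 32 bits; for
	 example, val = 0x12348000 gives low = -0x8000, high = 0x12350000.  */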
8941
8942 /* Check for 32-bit overflow or quad addresses with one of the
8943 four least significant bits set. */
8944 if (high + low != val
8945 || (quad_offset_p && (low & 0xf)))
8946 {
8947 *win = 0;
8948 return x;
8949 }
8950
8951 /* Reload the high part into a base reg; leave the low part
8952 in the mem directly. */
8953
8954 x = gen_rtx_PLUS (GET_MODE (x),
8955 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8956 GEN_INT (high)),
8957 GEN_INT (low));
8958
8959 if (TARGET_DEBUG_ADDR)
8960 {
8961 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
8962 debug_rtx (x);
8963 }
8964 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8965 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8966 opnum, (enum reload_type) type);
8967 *win = 1;
8968 return x;
8969 }
8970
8971 if (GET_CODE (x) == SYMBOL_REF
8972 && reg_offset_p
8973 && !quad_offset_p
8974 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
8975 #if TARGET_MACHO
8976 && DEFAULT_ABI == ABI_DARWIN
8977 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
8978 && machopic_symbol_defined_p (x)
8979 #else
8980 && DEFAULT_ABI == ABI_V4
8981 && !flag_pic
8982 #endif
8983 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
8984 The same goes for DImode without 64-bit gprs and DFmode and DDmode
8985 without fprs.
8986 ??? Assume floating point reg based on mode? This assumption is
8987 violated by e.g. a powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
8988 where reload ends up doing a DFmode load of a constant from
8989 mem using two gprs. Unfortunately, at this point reload
8990 hasn't yet selected regs so poking around in reload data
8991 won't help and even if we could figure out the regs reliably,
8992 we'd still want to allow this transformation when the mem is
8993 naturally aligned. Since we say the address is good here, we
8994 can't disable offsets from LO_SUMs in mem_operand_gpr.
8995 FIXME: Allow offset from lo_sum for other modes too, when
8996 mem is sufficiently aligned.
8997
8998 Also disallow this if the type can go in VMX/Altivec registers, since
8999 those registers do not have d-form (reg+offset) address modes. */
9000 && !reg_addr[mode].scalar_in_vmx_p
9001 && mode != TFmode
9002 && mode != TDmode
9003 && mode != IFmode
9004 && mode != KFmode
9005 && (mode != TImode || !TARGET_VSX)
9006 && mode != PTImode
9007 && (mode != DImode || TARGET_POWERPC64)
9008 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9009 || TARGET_HARD_FLOAT))
9010 {
9011 #if TARGET_MACHO
9012 if (flag_pic)
9013 {
9014 rtx offset = machopic_gen_offset (x);
9015 x = gen_rtx_LO_SUM (GET_MODE (x),
9016 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9017 gen_rtx_HIGH (Pmode, offset)), offset);
9018 }
9019 else
9020 #endif
9021 x = gen_rtx_LO_SUM (GET_MODE (x),
9022 gen_rtx_HIGH (Pmode, x), x);
9023
9024 if (TARGET_DEBUG_ADDR)
9025 {
9026 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9027 debug_rtx (x);
9028 }
9029 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9030 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9031 opnum, (enum reload_type) type);
9032 *win = 1;
9033 return x;
9034 }
9035
9036 /* Reload an offset address wrapped by an AND that represents the
9037 masking of the lower bits. Strip the outer AND and let reload
9038 convert the offset address into an indirect address. For VSX,
9039 force reload to create the address with an AND in a separate
9040 register, because we can't guarantee an altivec register will
9041 be used. */
9042 if (VECTOR_MEM_ALTIVEC_P (mode)
9043 && GET_CODE (x) == AND
9044 && GET_CODE (XEXP (x, 0)) == PLUS
9045 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9046 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9047 && GET_CODE (XEXP (x, 1)) == CONST_INT
9048 && INTVAL (XEXP (x, 1)) == -16)
9049 {
9050 x = XEXP (x, 0);
9051 *win = 1;
9052 return x;
9053 }
9054
9055 if (TARGET_TOC
9056 && reg_offset_p
9057 && !quad_offset_p
9058 && GET_CODE (x) == SYMBOL_REF
9059 && use_toc_relative_ref (x, mode))
9060 {
9061 x = create_TOC_reference (x, NULL_RTX);
9062 if (TARGET_CMODEL != CMODEL_SMALL)
9063 {
9064 if (TARGET_DEBUG_ADDR)
9065 {
9066 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9067 debug_rtx (x);
9068 }
9069 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9070 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9071 opnum, (enum reload_type) type);
9072 }
9073 *win = 1;
9074 return x;
9075 }
9076 *win = 0;
9077 return x;
9078 }
9079
9080 /* Debug version of rs6000_legitimize_reload_address. */
9081 static rtx
9082 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9083 int opnum, int type,
9084 int ind_levels, int *win)
9085 {
9086 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9087 ind_levels, win);
9088 fprintf (stderr,
9089 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9090 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9091 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9092 debug_rtx (x);
9093
9094 if (x == ret)
9095 fprintf (stderr, "Same address returned\n");
9096 else if (!ret)
9097 fprintf (stderr, "NULL returned\n");
9098 else
9099 {
9100 fprintf (stderr, "New address:\n");
9101 debug_rtx (ret);
9102 }
9103
9104 return ret;
9105 }
9106
9107 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9108 that is a valid memory address for an instruction.
9109 The MODE argument is the machine mode for the MEM expression
9110 that wants to use this address.
9111
9112 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9113 refers to a constant pool entry of an address (or the sum of it
9114 plus a constant), a short (16-bit signed) constant plus a register,
9115 the sum of two registers, or a register indirect, possibly with an
9116 auto-increment. For DFmode, DDmode and DImode with a constant plus
9117 register, we must ensure that both words are addressable, or, on
9118 PowerPC64, that the offset is word aligned.
9119
9120 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9121 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9122 because adjacent memory cells are accessed by adding word-sized offsets
9123 during assembly output. */
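/* Roughly, the accepted forms correspond to RTL like the following
   (rB, rX and s are placeholders):

	(reg rB)                          register indirect
	(plus (reg rB) (const_int d))     d-form, -32768 <= d <= 32767
	(plus (reg rB) (reg rX))          x-form, indexed
	(lo_sum (reg rB) (symbol_ref s))  half of a HIGH/LO_SUM pair

   with PRE_INC, PRE_DEC and PRE_MODIFY variants accepted on top of
   these when TARGET_UPDATE allows them.  */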
9124 static bool
9125 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9126 {
9127 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9128 bool quad_offset_p = mode_supports_dq_form (mode);
9129
9130 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9131 if (VECTOR_MEM_ALTIVEC_P (mode)
9132 && GET_CODE (x) == AND
9133 && GET_CODE (XEXP (x, 1)) == CONST_INT
9134 && INTVAL (XEXP (x, 1)) == -16)
9135 x = XEXP (x, 0);
9136
9137 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9138 return 0;
9139 if (legitimate_indirect_address_p (x, reg_ok_strict))
9140 return 1;
9141 if (TARGET_UPDATE
9142 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9143 && mode_supports_pre_incdec_p (mode)
9144 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9145 return 1;
9146 /* Handle restricted vector d-form offsets in ISA 3.0. */
9147 if (quad_offset_p)
9148 {
9149 if (quad_address_p (x, mode, reg_ok_strict))
9150 return 1;
9151 }
9152 else if (virtual_stack_registers_memory_p (x))
9153 return 1;
9154
9155 else if (reg_offset_p)
9156 {
9157 if (legitimate_small_data_p (mode, x))
9158 return 1;
9159 if (legitimate_constant_pool_address_p (x, mode,
9160 reg_ok_strict || lra_in_progress))
9161 return 1;
9162 }
9163
9164 /* For TImode, if we have TImode in VSX registers, only allow register
9165 indirect addresses. This will allow the values to go in either GPRs
9166 or VSX registers without reloading. The vector types would tend to
9167 go into VSX registers, so we allow REG+REG, while TImode seems
9168 somewhat split, in that some uses are GPR based, and some VSX based. */
9169 /* FIXME: We could loosen this by changing the following to
9170 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9171 but currently we cannot allow REG+REG addressing for TImode. See
9172 PR72827 for complete details on how this ends up hoodwinking DSE. */
9173 if (mode == TImode && TARGET_VSX)
9174 return 0;
9175 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9176 if (! reg_ok_strict
9177 && reg_offset_p
9178 && GET_CODE (x) == PLUS
9179 && GET_CODE (XEXP (x, 0)) == REG
9180 && (XEXP (x, 0) == virtual_stack_vars_rtx
9181 || XEXP (x, 0) == arg_pointer_rtx)
9182 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9183 return 1;
9184 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9185 return 1;
9186 if (!FLOAT128_2REG_P (mode)
9187 && (TARGET_HARD_FLOAT
9188 || TARGET_POWERPC64
9189 || (mode != DFmode && mode != DDmode))
9190 && (TARGET_POWERPC64 || mode != DImode)
9191 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9192 && mode != PTImode
9193 && !avoiding_indexed_address_p (mode)
9194 && legitimate_indexed_address_p (x, reg_ok_strict))
9195 return 1;
9196 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9197 && mode_supports_pre_modify_p (mode)
9198 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9199 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9200 reg_ok_strict, false)
9201 || (!avoiding_indexed_address_p (mode)
9202 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9203 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9204 return 1;
9205 if (reg_offset_p && !quad_offset_p
9206 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9207 return 1;
9208 return 0;
9209 }
9210
9211 /* Debug version of rs6000_legitimate_address_p. */
9212 static bool
9213 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9214 bool reg_ok_strict)
9215 {
9216 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9217 fprintf (stderr,
9218 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9219 "strict = %d, reload = %s, code = %s\n",
9220 ret ? "true" : "false",
9221 GET_MODE_NAME (mode),
9222 reg_ok_strict,
9223 (reload_completed ? "after" : "before"),
9224 GET_RTX_NAME (GET_CODE (x)));
9225 debug_rtx (x);
9226
9227 return ret;
9228 }
9229
9230 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9231
9232 static bool
9233 rs6000_mode_dependent_address_p (const_rtx addr,
9234 addr_space_t as ATTRIBUTE_UNUSED)
9235 {
9236 return rs6000_mode_dependent_address_ptr (addr);
9237 }
9238
9239 /* Go to LABEL if ADDR (a legitimate address expression)
9240 has an effect that depends on the machine mode it is used for.
9241
9242 On the RS/6000 this is true of all integral offsets (since AltiVec
9243 and VSX modes don't allow them) and of any pre-increment or decrement.
9244
9245 ??? Except that due to conceptual problems in offsettable_address_p
9246 we can't really report the problems of integral offsets. So leave
9247 this assuming that the adjustable offset must be valid for the
9248 sub-words of a TFmode operand, which is what we had before. */
9249
9250 static bool
9251 rs6000_mode_dependent_address (const_rtx addr)
9252 {
9253 switch (GET_CODE (addr))
9254 {
9255 case PLUS:
9256 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9257 is considered a legitimate address before reload, so there
9258 are no offset restrictions in that case. Note that this
9259 condition is safe in strict mode because any address involving
9260 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9261 been rejected as illegitimate. */
9262 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9263 && XEXP (addr, 0) != arg_pointer_rtx
9264 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9265 {
9266 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
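	  /* The address is mode-dependent when the offset of the access's
	     last word, VAL + 8 on 64-bit targets or VAL + 12 on 32-bit,
	     would fall outside the 16-bit signed displacement range;
	     8 and 12 are the offsets of the last word of a 16-byte value,
	     matching the TFmode worst case assumed above.  */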
9267 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9268 }
9269 break;
9270
9271 case LO_SUM:
9272 /* Anything in the constant pool is sufficiently aligned that
9273 all bytes have the same high part address. */
9274 return !legitimate_constant_pool_address_p (addr, QImode, false);
9275
9276 /* Auto-increment cases are now treated generically in recog.c. */
9277 case PRE_MODIFY:
9278 return TARGET_UPDATE;
9279
9280 /* AND is only allowed in Altivec loads. */
9281 case AND:
9282 return true;
9283
9284 default:
9285 break;
9286 }
9287
9288 return false;
9289 }
9290
9291 /* Debug version of rs6000_mode_dependent_address. */
9292 static bool
9293 rs6000_debug_mode_dependent_address (const_rtx addr)
9294 {
9295 bool ret = rs6000_mode_dependent_address (addr);
9296
9297 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9298 ret ? "true" : "false");
9299 debug_rtx (addr);
9300
9301 return ret;
9302 }
9303
9304 /* Implement FIND_BASE_TERM. */
9305
9306 rtx
9307 rs6000_find_base_term (rtx op)
9308 {
9309 rtx base;
9310
9311 base = op;
9312 if (GET_CODE (base) == CONST)
9313 base = XEXP (base, 0);
9314 if (GET_CODE (base) == PLUS)
9315 base = XEXP (base, 0);
9316 if (GET_CODE (base) == UNSPEC)
9317 switch (XINT (base, 1))
9318 {
9319 case UNSPEC_TOCREL:
9320 case UNSPEC_MACHOPIC_OFFSET:
9321 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9322 for aliasing purposes. */
9323 return XVECEXP (base, 0, 0);
9324 }
9325
9326 return op;
9327 }
9328
9329 /* More elaborate version of recog's offsettable_memref_p predicate
9330 that works around the ??? note of rs6000_mode_dependent_address.
9331 In particular it accepts
9332
9333 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9334
9335 in 32-bit mode, which the recog predicate rejects.
9336
9337 static bool
9338 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9339 {
9340 bool worst_case;
9341
9342 if (!MEM_P (op))
9343 return false;
9344
9345 /* First mimic offsettable_memref_p. */
9346 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9347 return true;
9348
9349 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9350 the latter predicate knows nothing about the mode of the memory
9351 reference and, therefore, assumes that it is the largest supported
9352 mode (TFmode). As a consequence, legitimate offsettable memory
9353 references are rejected. rs6000_legitimate_offset_address_p contains
9354 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9355 at least with a little bit of help here given that we know the
9356 actual registers used. */
9357 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9358 || GET_MODE_SIZE (reg_mode) == 4);
9359 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9360 strict, worst_case);
9361 }
9362
9363 /* Determine the reassociation width to be used in reassociate_bb.
9364 This takes into account how many parallel operations we
9365 can actually do of a given type, and also the latency.
9366 P8:
9367 int add/sub 6/cycle
9368 mul 2/cycle
9369 vect add/sub/mul 2/cycle
9370 fp add/sub/mul 2/cycle
9371 dfp 1/cycle
9372 */
9373
9374 static int
9375 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9376 machine_mode mode)
9377 {
9378 switch (rs6000_tune)
9379 {
9380 case PROCESSOR_POWER8:
9381 case PROCESSOR_POWER9:
9382 if (DECIMAL_FLOAT_MODE_P (mode))
9383 return 1;
9384 if (VECTOR_MODE_P (mode))
9385 return 4;
9386 if (INTEGRAL_MODE_P (mode))
9387 return 1;
9388 if (FLOAT_MODE_P (mode))
9389 return 4;
9390 break;
9391 default:
9392 break;
9393 }
9394 return 1;
9395 }
9396
9397 /* Change register usage conditional on target flags. */
9398 static void
9399 rs6000_conditional_register_usage (void)
9400 {
9401 int i;
9402
9403 if (TARGET_DEBUG_TARGET)
9404 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9405
9406 /* Set MQ register fixed (already call_used) so that it will not be
9407 allocated. */
9408 fixed_regs[64] = 1;
9409
9410 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9411 if (TARGET_64BIT)
9412 fixed_regs[13] = call_used_regs[13]
9413 = call_really_used_regs[13] = 1;
9414
9415 /* Conditionally disable FPRs. */
9416 if (TARGET_SOFT_FLOAT)
9417 for (i = 32; i < 64; i++)
9418 fixed_regs[i] = call_used_regs[i]
9419 = call_really_used_regs[i] = 1;
9420
9421 /* The TOC register is not killed across calls in a way that is
9422 visible to the compiler. */
9423 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9424 call_really_used_regs[2] = 0;
9425
9426 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9427 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9428
9429 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9430 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9431 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9432 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9433
9434 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9435 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9436 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9437 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9438
9439 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9440 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9441 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9442
9443 if (!TARGET_ALTIVEC && !TARGET_VSX)
9444 {
9445 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9446 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9447 call_really_used_regs[VRSAVE_REGNO] = 1;
9448 }
9449
9450 if (TARGET_ALTIVEC || TARGET_VSX)
9451 global_regs[VSCR_REGNO] = 1;
9452
9453 if (TARGET_ALTIVEC_ABI)
9454 {
9455 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9456 call_used_regs[i] = call_really_used_regs[i] = 1;
9457
9458 /* AIX reserves VR20:31 in non-extended ABI mode. */
9459 if (TARGET_XCOFF)
9460 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9461 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9462 }
9463 }
9464
9465 \f
9466 /* Output insns to set DEST equal to the constant SOURCE as a series of
9467 lis, ori and shl instructions and return TRUE. */
9468
9469 bool
9470 rs6000_emit_set_const (rtx dest, rtx source)
9471 {
9472 machine_mode mode = GET_MODE (dest);
9473 rtx temp, set;
9474 rtx_insn *insn;
9475 HOST_WIDE_INT c;
9476
9477 gcc_checking_assert (CONST_INT_P (source));
9478 c = INTVAL (source);
9479 switch (mode)
9480 {
9481 case E_QImode:
9482 case E_HImode:
9483 emit_insn (gen_rtx_SET (dest, source));
9484 return true;
9485
9486 case E_SImode:
9487 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9488
9489 emit_insn (gen_rtx_SET (copy_rtx (temp),
9490 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9491 emit_insn (gen_rtx_SET (dest,
9492 gen_rtx_IOR (SImode, copy_rtx (temp),
9493 GEN_INT (c & 0xffff))));
9494 break;
9495
9496 case E_DImode:
9497 if (!TARGET_POWERPC64)
9498 {
9499 rtx hi, lo;
9500
9501 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9502 DImode);
9503 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9504 DImode);
9505 emit_move_insn (hi, GEN_INT (c >> 32));
9506 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9507 emit_move_insn (lo, GEN_INT (c));
9508 }
9509 else
9510 rs6000_emit_set_long_const (dest, c);
9511 break;
9512
9513 default:
9514 gcc_unreachable ();
9515 }
9516
9517 insn = get_last_insn ();
9518 set = single_set (insn);
9519 if (! CONSTANT_P (SET_SRC (set)))
9520 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9521
9522 return true;
9523 }
9524
9525 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9526 Output insns to set DEST equal to the constant C as a series of
9527 lis, ori and shl instructions. */
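/* As a rough illustration (rD is a placeholder), the worst case with
   all four 16-bit groups nonzero corresponds approximately to

	lis   rD,ud4
	ori   rD,rD,ud3
	sldi  rD,rD,32
	oris  rD,rD,ud2
	ori   rD,rD,ud1  */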
9528
9529 static void
9530 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9531 {
9532 rtx temp;
9533 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9534
9535 ud1 = c & 0xffff;
9536 c = c >> 16;
9537 ud2 = c & 0xffff;
9538 c = c >> 16;
9539 ud3 = c & 0xffff;
9540 c = c >> 16;
9541 ud4 = c & 0xffff;
9542
9543 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9544 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9545 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9546
9547 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9548 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9549 {
9550 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9551
9552 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9553 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9554 if (ud1 != 0)
9555 emit_move_insn (dest,
9556 gen_rtx_IOR (DImode, copy_rtx (temp),
9557 GEN_INT (ud1)));
9558 }
9559 else if (ud3 == 0 && ud4 == 0)
9560 {
9561 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9562
9563 gcc_assert (ud2 & 0x8000);
9564 emit_move_insn (copy_rtx (temp),
9565 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9566 if (ud1 != 0)
9567 emit_move_insn (copy_rtx (temp),
9568 gen_rtx_IOR (DImode, copy_rtx (temp),
9569 GEN_INT (ud1)));
9570 emit_move_insn (dest,
9571 gen_rtx_ZERO_EXTEND (DImode,
9572 gen_lowpart (SImode,
9573 copy_rtx (temp))));
9574 }
9575 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9576 || (ud4 == 0 && ! (ud3 & 0x8000)))
9577 {
9578 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9579
9580 emit_move_insn (copy_rtx (temp),
9581 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9582 if (ud2 != 0)
9583 emit_move_insn (copy_rtx (temp),
9584 gen_rtx_IOR (DImode, copy_rtx (temp),
9585 GEN_INT (ud2)));
9586 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9587 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9588 GEN_INT (16)));
9589 if (ud1 != 0)
9590 emit_move_insn (dest,
9591 gen_rtx_IOR (DImode, copy_rtx (temp),
9592 GEN_INT (ud1)));
9593 }
9594 else
9595 {
9596 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9597
9598 emit_move_insn (copy_rtx (temp),
9599 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9600 if (ud3 != 0)
9601 emit_move_insn (copy_rtx (temp),
9602 gen_rtx_IOR (DImode, copy_rtx (temp),
9603 GEN_INT (ud3)));
9604
9605 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9606 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9607 GEN_INT (32)));
9608 if (ud2 != 0)
9609 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9610 gen_rtx_IOR (DImode, copy_rtx (temp),
9611 GEN_INT (ud2 << 16)));
9612 if (ud1 != 0)
9613 emit_move_insn (dest,
9614 gen_rtx_IOR (DImode, copy_rtx (temp),
9615 GEN_INT (ud1)));
9616 }
9617 }
9618
9619 /* Helper for the following. Get rid of [r+r] memory refs
9620 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9621
9622 static void
9623 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9624 {
9625 if (GET_CODE (operands[0]) == MEM
9626 && GET_CODE (XEXP (operands[0], 0)) != REG
9627 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9628 GET_MODE (operands[0]), false))
9629 operands[0]
9630 = replace_equiv_address (operands[0],
9631 copy_addr_to_reg (XEXP (operands[0], 0)));
9632
9633 if (GET_CODE (operands[1]) == MEM
9634 && GET_CODE (XEXP (operands[1], 0)) != REG
9635 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9636 GET_MODE (operands[1]), false))
9637 operands[1]
9638 = replace_equiv_address (operands[1],
9639 copy_addr_to_reg (XEXP (operands[1], 0)));
9640 }
9641
9642 /* Generate a vector of constants to permute MODE for a little-endian
9643 storage operation by swapping the two halves of a vector. */
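/* For example, for V4SImode this returns the selector { 2, 3, 0, 1 },
   i.e. the two elements of the high doubleword followed by the two
   elements of the low doubleword.  */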
9644 static rtvec
9645 rs6000_const_vec (machine_mode mode)
9646 {
9647 int i, subparts;
9648 rtvec v;
9649
9650 switch (mode)
9651 {
9652 case E_V1TImode:
9653 subparts = 1;
9654 break;
9655 case E_V2DFmode:
9656 case E_V2DImode:
9657 subparts = 2;
9658 break;
9659 case E_V4SFmode:
9660 case E_V4SImode:
9661 subparts = 4;
9662 break;
9663 case E_V8HImode:
9664 subparts = 8;
9665 break;
9666 case E_V16QImode:
9667 subparts = 16;
9668 break;
9669 default:
9670 gcc_unreachable();
9671 }
9672
9673 v = rtvec_alloc (subparts);
9674
9675 for (i = 0; i < subparts / 2; ++i)
9676 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9677 for (i = subparts / 2; i < subparts; ++i)
9678 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9679
9680 return v;
9681 }
9682
9683 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9684 store operation. */
9685 void
9686 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9687 {
9688 /* Scalar permutations are easier to express in integer modes than in
9689 floating-point modes, so cast them here. We use V1TImode instead
9690 of TImode to ensure that the values don't go through GPRs. */
9691 if (FLOAT128_VECTOR_P (mode))
9692 {
9693 dest = gen_lowpart (V1TImode, dest);
9694 source = gen_lowpart (V1TImode, source);
9695 mode = V1TImode;
9696 }
9697
9698 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9699 scalar. */
9700 if (mode == TImode || mode == V1TImode)
9701 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9702 GEN_INT (64))));
9703 else
9704 {
9705 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9706 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9707 }
9708 }
9709
9710 /* Emit a little-endian load from vector memory location SOURCE to VSX
9711 register DEST in mode MODE. The load is done with two permuting
9712 insns that represent an lxvd2x and xxpermdi. */
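/* As a rough sketch (vsD and rA are placeholders), a V2DFmode load
   expands to something like

	lxvd2x   vsD,0,rA	 load with BE doubleword order
	xxpermdi vsD,vsD,vsD,2	 swap into LE element order

   where the first permuting set models the element swap that lxvd2x
   performs on little-endian and the second undoes it.  */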
9713 void
9714 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9715 {
9716 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9717 V1TImode). */
9718 if (mode == TImode || mode == V1TImode)
9719 {
9720 mode = V2DImode;
9721 dest = gen_lowpart (V2DImode, dest);
9722 source = adjust_address (source, V2DImode, 0);
9723 }
9724
9725 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9726 rs6000_emit_le_vsx_permute (tmp, source, mode);
9727 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9728 }
9729
9730 /* Emit a little-endian store to vector memory location DEST from VSX
9731 register SOURCE in mode MODE. The store is done with two permuting
9732 insns that represent an xxpermdi and an stxvd2x. */
9733 void
9734 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9735 {
9736 /* This should never be called during or after LRA, because it does
9737 not re-permute the source register. It is intended only for use
9738 during expand. */
9739 gcc_assert (!lra_in_progress && !reload_completed);
9740
9741 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9742 V1TImode). */
9743 if (mode == TImode || mode == V1TImode)
9744 {
9745 mode = V2DImode;
9746 dest = adjust_address (dest, V2DImode, 0);
9747 source = gen_lowpart (V2DImode, source);
9748 }
9749
9750 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9751 rs6000_emit_le_vsx_permute (tmp, source, mode);
9752 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9753 }
9754
9755 /* Emit a sequence representing a little-endian VSX load or store,
9756 moving data from SOURCE to DEST in mode MODE. This is done
9757 separately from rs6000_emit_move to ensure it is called only
9758 during expand. LE VSX loads and stores introduced later are
9759 handled with a split. The expand-time RTL generation allows
9760 us to optimize away redundant pairs of register-permutes. */
9761 void
9762 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9763 {
9764 gcc_assert (!BYTES_BIG_ENDIAN
9765 && VECTOR_MEM_VSX_P (mode)
9766 && !TARGET_P9_VECTOR
9767 && !gpr_or_gpr_p (dest, source)
9768 && (MEM_P (source) ^ MEM_P (dest)));
9769
9770 if (MEM_P (source))
9771 {
9772 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
9773 rs6000_emit_le_vsx_load (dest, source, mode);
9774 }
9775 else
9776 {
9777 if (!REG_P (source))
9778 source = force_reg (mode, source);
9779 rs6000_emit_le_vsx_store (dest, source, mode);
9780 }
9781 }
9782
9783 /* Return whether an SFmode or SImode move can be done without converting one
9784 mode to another. This arises when we have:
9785
9786 (SUBREG:SF (REG:SI ...))
9787 (SUBREG:SI (REG:SF ...))
9788
9789 and one of the values is in a floating point/vector register, where SFmode
9790 scalars are stored in DFmode format. */
9791
9792 bool
9793 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9794 {
9795 if (TARGET_ALLOW_SF_SUBREG)
9796 return true;
9797
9798 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9799 return true;
9800
9801 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9802 return true;
9803
9804 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9805 if (SUBREG_P (dest))
9806 {
9807 rtx dest_subreg = SUBREG_REG (dest);
9808 rtx src_subreg = SUBREG_REG (src);
9809 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9810 }
9811
9812 return false;
9813 }
9814
9815
9816 /* Helper function to change moves with:
9817
9818 (SUBREG:SF (REG:SI)) and
9819 (SUBREG:SI (REG:SF))
9820
9821 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9822 values are stored as DFmode values in the VSX registers. We need to convert
9823 the bits before we can use a direct move or operate on the bits in the
9824 vector register as an integer type.
9825
9826 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
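/* For example, a move like

	(set (reg:SI rI) (subreg:SI (reg:SF rF) 0))

   is rewritten to use movsi_from_sf, converting the DFmode storage
   format of rF back to SFmode bits before moving them to a GPR
   (rI and rF are placeholder pseudos).  */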
9827
9828 static bool
9829 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9830 {
9831 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
9832 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9833 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9834 {
9835 rtx inner_source = SUBREG_REG (source);
9836 machine_mode inner_mode = GET_MODE (inner_source);
9837
9838 if (mode == SImode && inner_mode == SFmode)
9839 {
9840 emit_insn (gen_movsi_from_sf (dest, inner_source));
9841 return true;
9842 }
9843
9844 if (mode == SFmode && inner_mode == SImode)
9845 {
9846 emit_insn (gen_movsf_from_si (dest, inner_source));
9847 return true;
9848 }
9849 }
9850
9851 return false;
9852 }
9853
9854 /* Emit a move from SOURCE to DEST in mode MODE. */
9855 void
9856 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9857 {
9858 rtx operands[2];
9859 operands[0] = dest;
9860 operands[1] = source;
9861
9862 if (TARGET_DEBUG_ADDR)
9863 {
9864 fprintf (stderr,
9865 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9866 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9867 GET_MODE_NAME (mode),
9868 lra_in_progress,
9869 reload_completed,
9870 can_create_pseudo_p ());
9871 debug_rtx (dest);
9872 fprintf (stderr, "source:\n");
9873 debug_rtx (source);
9874 }
9875
9876 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
9877 if (CONST_WIDE_INT_P (operands[1])
9878 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9879 {
9880 /* This should be fixed with the introduction of CONST_WIDE_INT. */
9881 gcc_unreachable ();
9882 }
9883
9884 #ifdef HAVE_AS_GNU_ATTRIBUTE
9885 /* If we use a long double type, set the flags in .gnu_attribute that say
9886 what the long double type is. This is to allow the linker's warning
9887 message for the wrong long double to be useful, even if the function does
9888 not do a call (for example, doing a 128-bit add on power9 if the long
9889 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
9890 are used when they aren't the default long double type. */
9891 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9892 {
9893 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9894 rs6000_passes_float = rs6000_passes_long_double = true;
9895
9896 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9897 rs6000_passes_float = rs6000_passes_long_double = true;
9898 }
9899 #endif
9900
9901 /* See if we need to special case SImode/SFmode SUBREG moves. */
9902 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9903 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9904 return;
9905
9906 /* Check if GCC is setting up a block move that will end up using FP
9907 registers as temporaries. We must make sure this is acceptable. */
9908 if (GET_CODE (operands[0]) == MEM
9909 && GET_CODE (operands[1]) == MEM
9910 && mode == DImode
9911 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9912 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9913 && ! (rs6000_slow_unaligned_access (SImode,
9914 (MEM_ALIGN (operands[0]) > 32
9915 ? 32 : MEM_ALIGN (operands[0])))
9916 || rs6000_slow_unaligned_access (SImode,
9917 (MEM_ALIGN (operands[1]) > 32
9918 ? 32 : MEM_ALIGN (operands[1]))))
9919 && ! MEM_VOLATILE_P (operands [0])
9920 && ! MEM_VOLATILE_P (operands [1]))
9921 {
9922 emit_move_insn (adjust_address (operands[0], SImode, 0),
9923 adjust_address (operands[1], SImode, 0));
9924 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9925 adjust_address (copy_rtx (operands[1]), SImode, 4));
9926 return;
9927 }
9928
9929 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
9930 && !gpc_reg_operand (operands[1], mode))
9931 operands[1] = force_reg (mode, operands[1]);
9932
9933 /* Recognize the case where operand[1] is a reference to thread-local
9934 data and load its address to a register. */
9935 if (tls_referenced_p (operands[1]))
9936 {
9937 enum tls_model model;
9938 rtx tmp = operands[1];
9939 rtx addend = NULL;
9940
9941 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9942 {
9943 addend = XEXP (XEXP (tmp, 0), 1);
9944 tmp = XEXP (XEXP (tmp, 0), 0);
9945 }
9946
9947 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
9948 model = SYMBOL_REF_TLS_MODEL (tmp);
9949 gcc_assert (model != 0);
9950
9951 tmp = rs6000_legitimize_tls_address (tmp, model);
9952 if (addend)
9953 {
9954 tmp = gen_rtx_PLUS (mode, tmp, addend);
9955 tmp = force_operand (tmp, operands[0]);
9956 }
9957 operands[1] = tmp;
9958 }
9959
9960 /* 128-bit constant floating-point values on Darwin should really be loaded
9961 as two parts. However, this premature splitting is a problem when DFmode
9962 values can go into Altivec registers. */
9963 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
9964 && GET_CODE (operands[1]) == CONST_DOUBLE)
9965 {
9966 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9967 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9968 DFmode);
9969 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9970 GET_MODE_SIZE (DFmode)),
9971 simplify_gen_subreg (DFmode, operands[1], mode,
9972 GET_MODE_SIZE (DFmode)),
9973 DFmode);
9974 return;
9975 }
9976
9977 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9978 p1:SD) if p1 is not of floating point class and p0 is spilled as
9979 we can have no analogous movsd_store for this. */
9980 if (lra_in_progress && mode == DDmode
9981 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
9982 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9983 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
9984 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9985 {
9986 enum reg_class cl;
9987 int regno = REGNO (SUBREG_REG (operands[1]));
9988
9989 if (regno >= FIRST_PSEUDO_REGISTER)
9990 {
9991 cl = reg_preferred_class (regno);
9992 regno = reg_renumber[regno];
9993 if (regno < 0)
9994 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9995 }
9996 if (regno >= 0 && ! FP_REGNO_P (regno))
9997 {
9998 mode = SDmode;
9999 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10000 operands[1] = SUBREG_REG (operands[1]);
10001 }
10002 }
10003 if (lra_in_progress
10004 && mode == SDmode
10005 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10006 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10007 && (REG_P (operands[1])
10008 || (GET_CODE (operands[1]) == SUBREG
10009 && REG_P (SUBREG_REG (operands[1])))))
10010 {
10011 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10012 ? SUBREG_REG (operands[1]) : operands[1]);
10013 enum reg_class cl;
10014
10015 if (regno >= FIRST_PSEUDO_REGISTER)
10016 {
10017 cl = reg_preferred_class (regno);
10018 gcc_assert (cl != NO_REGS);
10019 regno = reg_renumber[regno];
10020 if (regno < 0)
10021 regno = ira_class_hard_regs[cl][0];
10022 }
10023 if (FP_REGNO_P (regno))
10024 {
10025 if (GET_MODE (operands[0]) != DDmode)
10026 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10027 emit_insn (gen_movsd_store (operands[0], operands[1]));
10028 }
10029 else if (INT_REGNO_P (regno))
10030 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10031 else
10032 gcc_unreachable ();
10033 return;
10034 }
10035 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10036 p1:DD)) if p0 is not of floating point class and p1 is spilled as
10037 we can have no analogous movsd_load for this. */
10038 if (lra_in_progress && mode == DDmode
10039 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10040 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10041 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10042 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10043 {
10044 enum reg_class cl;
10045 int regno = REGNO (SUBREG_REG (operands[0]));
10046
10047 if (regno >= FIRST_PSEUDO_REGISTER)
10048 {
10049 cl = reg_preferred_class (regno);
10050 regno = reg_renumber[regno];
10051 if (regno < 0)
10052 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10053 }
10054 if (regno >= 0 && ! FP_REGNO_P (regno))
10055 {
10056 mode = SDmode;
10057 operands[0] = SUBREG_REG (operands[0]);
10058 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10059 }
10060 }
10061 if (lra_in_progress
10062 && mode == SDmode
10063 && (REG_P (operands[0])
10064 || (GET_CODE (operands[0]) == SUBREG
10065 && REG_P (SUBREG_REG (operands[0]))))
10066 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10067 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10068 {
10069 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10070 ? SUBREG_REG (operands[0]) : operands[0]);
10071 enum reg_class cl;
10072
10073 if (regno >= FIRST_PSEUDO_REGISTER)
10074 {
10075 cl = reg_preferred_class (regno);
10076 gcc_assert (cl != NO_REGS);
10077 regno = reg_renumber[regno];
10078 if (regno < 0)
10079 regno = ira_class_hard_regs[cl][0];
10080 }
10081 if (FP_REGNO_P (regno))
10082 {
10083 if (GET_MODE (operands[1]) != DDmode)
10084 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10085 emit_insn (gen_movsd_load (operands[0], operands[1]));
10086 }
10087 else if (INT_REGNO_P (regno))
10088 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10089 else
10090 gcc_unreachable ();
10091 return;
10092 }
10093
10094 /* FIXME: In the long term, this switch statement should go away
10095 and be replaced by a sequence of tests based on things like
10096 mode == Pmode. */
10097 switch (mode)
10098 {
10099 case E_HImode:
10100 case E_QImode:
10101 if (CONSTANT_P (operands[1])
10102 && GET_CODE (operands[1]) != CONST_INT)
10103 operands[1] = force_const_mem (mode, operands[1]);
10104 break;
10105
10106 case E_TFmode:
10107 case E_TDmode:
10108 case E_IFmode:
10109 case E_KFmode:
10110 if (FLOAT128_2REG_P (mode))
10111 rs6000_eliminate_indexed_memrefs (operands);
10112 /* fall through */
10113
10114 case E_DFmode:
10115 case E_DDmode:
10116 case E_SFmode:
10117 case E_SDmode:
10118 if (CONSTANT_P (operands[1])
10119 && ! easy_fp_constant (operands[1], mode))
10120 operands[1] = force_const_mem (mode, operands[1]);
10121 break;
10122
10123 case E_V16QImode:
10124 case E_V8HImode:
10125 case E_V4SFmode:
10126 case E_V4SImode:
10127 case E_V2DFmode:
10128 case E_V2DImode:
10129 case E_V1TImode:
10130 if (CONSTANT_P (operands[1])
10131 && !easy_vector_constant (operands[1], mode))
10132 operands[1] = force_const_mem (mode, operands[1]);
10133 break;
10134
10135 case E_SImode:
10136 case E_DImode:
10137 /* Use the default pattern for the address of ELF small data. */
10138 if (TARGET_ELF
10139 && mode == Pmode
10140 && DEFAULT_ABI == ABI_V4
10141 && (GET_CODE (operands[1]) == SYMBOL_REF
10142 || GET_CODE (operands[1]) == CONST)
10143 && small_data_operand (operands[1], mode))
10144 {
10145 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10146 return;
10147 }
10148
10149 if (DEFAULT_ABI == ABI_V4
10150 && mode == Pmode && mode == SImode
10151 && flag_pic == 1 && got_operand (operands[1], mode))
10152 {
10153 emit_insn (gen_movsi_got (operands[0], operands[1]));
10154 return;
10155 }
10156
10157 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10158 && TARGET_NO_TOC
10159 && ! flag_pic
10160 && mode == Pmode
10161 && CONSTANT_P (operands[1])
10162 && GET_CODE (operands[1]) != HIGH
10163 && GET_CODE (operands[1]) != CONST_INT)
10164 {
10165 rtx target = (!can_create_pseudo_p ()
10166 ? operands[0]
10167 : gen_reg_rtx (mode));
10168
10169 /* If this is a function address on -mcall-aixdesc,
10170 convert it to the address of the descriptor. */
10171 if (DEFAULT_ABI == ABI_AIX
10172 && GET_CODE (operands[1]) == SYMBOL_REF
10173 && XSTR (operands[1], 0)[0] == '.')
10174 {
10175 const char *name = XSTR (operands[1], 0);
10176 rtx new_ref;
10177 while (*name == '.')
10178 name++;
10179 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10180 CONSTANT_POOL_ADDRESS_P (new_ref)
10181 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10182 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10183 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10184 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10185 operands[1] = new_ref;
10186 }
10187
10188 if (DEFAULT_ABI == ABI_DARWIN)
10189 {
10190 #if TARGET_MACHO
10191 if (MACHO_DYNAMIC_NO_PIC_P)
10192 {
10193 /* Take care of any required data indirection. */
10194 operands[1] = rs6000_machopic_legitimize_pic_address (
10195 operands[1], mode, operands[0]);
10196 if (operands[0] != operands[1])
10197 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10198 return;
10199 }
10200 #endif
10201 emit_insn (gen_macho_high (target, operands[1]));
10202 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10203 return;
10204 }
10205
10206 emit_insn (gen_elf_high (target, operands[1]));
10207 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10208 return;
10209 }
10210
10211 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10212 and we have put it in the TOC, we just need to make a TOC-relative
10213 reference to it. */
10214 if (TARGET_TOC
10215 && GET_CODE (operands[1]) == SYMBOL_REF
10216 && use_toc_relative_ref (operands[1], mode))
10217 operands[1] = create_TOC_reference (operands[1], operands[0]);
10218 else if (mode == Pmode
10219 && CONSTANT_P (operands[1])
10220 && GET_CODE (operands[1]) != HIGH
10221 && ((GET_CODE (operands[1]) != CONST_INT
10222 && ! easy_fp_constant (operands[1], mode))
10223 || (GET_CODE (operands[1]) == CONST_INT
10224 && (num_insns_constant (operands[1], mode)
10225 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10226 || (GET_CODE (operands[0]) == REG
10227 && FP_REGNO_P (REGNO (operands[0]))))
10228 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10229 && (TARGET_CMODEL == CMODEL_SMALL
10230 || can_create_pseudo_p ()
10231 || (REG_P (operands[0])
10232 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10233 {
10234
10235 #if TARGET_MACHO
10236 /* Darwin uses a special PIC legitimizer. */
10237 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10238 {
10239 operands[1] =
10240 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10241 operands[0]);
10242 if (operands[0] != operands[1])
10243 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10244 return;
10245 }
10246 #endif
10247
10248 /* If we are to limit the number of things we put in the TOC and
10249 this is a symbol plus a constant we can add in one insn,
10250 just put the symbol in the TOC and add the constant. */
10251 if (GET_CODE (operands[1]) == CONST
10252 && TARGET_NO_SUM_IN_TOC
10253 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10254 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10255 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10256 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10257 && ! side_effects_p (operands[0]))
10258 {
10259 rtx sym =
10260 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10261 rtx other = XEXP (XEXP (operands[1], 0), 1);
10262
10263 sym = force_reg (mode, sym);
10264 emit_insn (gen_add3_insn (operands[0], sym, other));
10265 return;
10266 }
10267
10268 operands[1] = force_const_mem (mode, operands[1]);
10269
10270 if (TARGET_TOC
10271 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10272 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10273 {
10274 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10275 operands[0]);
10276 operands[1] = gen_const_mem (mode, tocref);
10277 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10278 }
10279 }
10280 break;
10281
10282 case E_TImode:
10283 if (!VECTOR_MEM_VSX_P (TImode))
10284 rs6000_eliminate_indexed_memrefs (operands);
10285 break;
10286
10287 case E_PTImode:
10288 rs6000_eliminate_indexed_memrefs (operands);
10289 break;
10290
10291 default:
10292 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10293 }
10294
10295 /* Above, we may have called force_const_mem which may have returned
10296 an invalid address. If we can, fix this up; otherwise, reload will
10297 have to deal with it. */
10298 if (GET_CODE (operands[1]) == MEM)
10299 operands[1] = validize_mem (operands[1]);
10300
10301 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10302 }
10303 \f
10304 /* Nonzero if we can use a floating-point register to pass this arg. */
10305 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10306 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10307 && (CUM)->fregno <= FP_ARG_MAX_REG \
10308 && TARGET_HARD_FLOAT)
10309
10310 /* Nonzero if we can use an AltiVec register to pass this arg. */
10311 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10312 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10313 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10314 && TARGET_ALTIVEC_ABI \
10315 && (NAMED))
10316
10317 /* Walk down the type tree of TYPE counting consecutive base elements.
10318 If *MODEP is VOIDmode, then set it to the first valid floating point
10319 or vector type. If a non-floating point or vector type is found, or
10320 if a floating point or vector type that doesn't match a non-VOIDmode
10321 *MODEP is found, then return -1, otherwise return the count in the
10322 sub-tree. */
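/* For example, struct { double a, b; } yields *MODEP == DFmode with a
   count of 2, while struct { double a; float b; } returns -1 because
   the element modes differ.  */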
10323
10324 static int
10325 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10326 {
10327 machine_mode mode;
10328 HOST_WIDE_INT size;
10329
10330 switch (TREE_CODE (type))
10331 {
10332 case REAL_TYPE:
10333 mode = TYPE_MODE (type);
10334 if (!SCALAR_FLOAT_MODE_P (mode))
10335 return -1;
10336
10337 if (*modep == VOIDmode)
10338 *modep = mode;
10339
10340 if (*modep == mode)
10341 return 1;
10342
10343 break;
10344
10345 case COMPLEX_TYPE:
10346 mode = TYPE_MODE (TREE_TYPE (type));
10347 if (!SCALAR_FLOAT_MODE_P (mode))
10348 return -1;
10349
10350 if (*modep == VOIDmode)
10351 *modep = mode;
10352
10353 if (*modep == mode)
10354 return 2;
10355
10356 break;
10357
10358 case VECTOR_TYPE:
10359 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10360 return -1;
10361
10362 /* Use V4SImode as representative of all 128-bit vector types. */
10363 size = int_size_in_bytes (type);
10364 switch (size)
10365 {
10366 case 16:
10367 mode = V4SImode;
10368 break;
10369 default:
10370 return -1;
10371 }
10372
10373 if (*modep == VOIDmode)
10374 *modep = mode;
10375
10376 /* Vector modes are considered to be opaque: two vectors are
10377 equivalent for the purposes of being homogeneous aggregates
10378 if they are the same size. */
10379 if (*modep == mode)
10380 return 1;
10381
10382 break;
10383
10384 case ARRAY_TYPE:
10385 {
10386 int count;
10387 tree index = TYPE_DOMAIN (type);
10388
10389 /* Can't handle incomplete types or sizes that are not
10390 fixed. */
10391 if (!COMPLETE_TYPE_P (type)
10392 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10393 return -1;
10394
10395 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10396 if (count == -1
10397 || !index
10398 || !TYPE_MAX_VALUE (index)
10399 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10400 || !TYPE_MIN_VALUE (index)
10401 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10402 || count < 0)
10403 return -1;
10404
10405 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10406 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10407
10408 /* There must be no padding. */
10409 if (wi::to_wide (TYPE_SIZE (type))
10410 != count * GET_MODE_BITSIZE (*modep))
10411 return -1;
10412
10413 return count;
10414 }
10415
10416 case RECORD_TYPE:
10417 {
10418 int count = 0;
10419 int sub_count;
10420 tree field;
10421
10422 /* Can't handle incomplete types or sizes that are not
10423 fixed. */
10424 if (!COMPLETE_TYPE_P (type)
10425 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10426 return -1;
10427
10428 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10429 {
10430 if (TREE_CODE (field) != FIELD_DECL)
10431 continue;
10432
10433 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10434 if (sub_count < 0)
10435 return -1;
10436 count += sub_count;
10437 }
10438
10439 /* There must be no padding. */
10440 if (wi::to_wide (TYPE_SIZE (type))
10441 != count * GET_MODE_BITSIZE (*modep))
10442 return -1;
10443
10444 return count;
10445 }
10446
10447 case UNION_TYPE:
10448 case QUAL_UNION_TYPE:
10449 {
10450 /* These aren't very interesting except in a degenerate case. */
10451 int count = 0;
10452 int sub_count;
10453 tree field;
10454
10455 /* Can't handle incomplete types or sizes that are not
10456 fixed. */
10457 if (!COMPLETE_TYPE_P (type)
10458 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10459 return -1;
10460
10461 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10462 {
10463 if (TREE_CODE (field) != FIELD_DECL)
10464 continue;
10465
10466 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10467 if (sub_count < 0)
10468 return -1;
10469 count = count > sub_count ? count : sub_count;
10470 }
10471
10472 /* There must be no padding. */
10473 if (wi::to_wide (TYPE_SIZE (type))
10474 != count * GET_MODE_BITSIZE (*modep))
10475 return -1;
10476
10477 return count;
10478 }
10479
10480 default:
10481 break;
10482 }
10483
10484 return -1;
10485 }
10486
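/* A worked example of the walk above (an illustrative sketch; the type
   is hypothetical, not taken from any ABI document):

	struct { _Complex double z[2]; };

   The COMPLEX_TYPE leaf sets *MODEP to DFmode and counts 2, the
   ARRAY_TYPE multiplies that by its two elements, and the enclosing
   RECORD_TYPE sums to 4.  The padding checks pass (4 * 64 bits matches
   the type size), so the result is 4.  Mixing in, say, an int field
   would make the walk return -1 at that field.  */
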
10487 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10488 float or vector aggregate that shall be passed in FP/vector registers
10489 according to the ELFv2 ABI, return the homogeneous element mode in
10490 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10491
10492 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10493
10494 static bool
10495 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10496 machine_mode *elt_mode,
10497 int *n_elts)
10498 {
10499 /* Note that we do not accept complex types at the top level as
10500 homogeneous aggregates; these types are handled via the
10501 targetm.calls.split_complex_arg mechanism. Complex types
10502 can be elements of homogeneous aggregates, however. */
10503 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10504 && AGGREGATE_TYPE_P (type))
10505 {
10506 machine_mode field_mode = VOIDmode;
10507 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10508
10509 if (field_count > 0)
10510 {
10511 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10512 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10513
10514 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10515 up to AGGR_ARG_NUM_REG registers. */
10516 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10517 {
10518 if (elt_mode)
10519 *elt_mode = field_mode;
10520 if (n_elts)
10521 *n_elts = field_count;
10522 return true;
10523 }
10524 }
10525 }
10526
10527 if (elt_mode)
10528 *elt_mode = mode;
10529 if (n_elts)
10530 *n_elts = 1;
10531 return false;
10532 }
10533
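/* For instance (a sketch of the rules above, using a hypothetical
   type): under ELFv2 with hard float,

	struct hfa { float f[4]; };

   gives *ELT_MODE = SFmode and *N_ELTS = 4.  Each SFmode element is
   rounded up to an 8-byte register slot, and four such slots fit
   within the AGGR_ARG_NUM_REG budget, so the function returns true.  */
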
10534 /* Return a nonzero value to say that the function value should be
10535 returned in memory, just as large structures always are. TYPE will be
10536 the data type of the value, and FNTYPE will be the type of the
10537 function doing the returning, or @code{NULL} for libcalls.
10538
10539 The AIX ABI for the RS/6000 specifies that all structures are
10540 returned in memory. The Darwin ABI does the same.
10541
10542 For the Darwin 64 Bit ABI, a function result can be returned in
10543 registers or in memory, depending on the size of the return data
10544 type. If it is returned in registers, the value occupies the same
10545 registers as it would if it were the first and only function
10546 argument. Otherwise, the function places its result in memory at
10547 the location pointed to by GPR3.
10548
10549 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10550 but a draft put them in memory, and GCC used to implement the draft
10551 instead of the final standard. Therefore, aix_struct_return
10552 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10553 compatibility can change DRAFT_V4_STRUCT_RET to override the
10554 default, and -m switches get the final word. See
10555 rs6000_option_override_internal for more details.
10556
10557 The PPC32 SVR4 ABI uses IEEE 128-bit for long double, if 128-bit
10558 long double support is enabled. These values are returned in memory.
10559
10560 int_size_in_bytes returns -1 for variable size objects, which go in
10561 memory always. The cast to unsigned makes -1 > 8. */
10562
10563 static bool
10564 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10565 {
10566 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10567 if (TARGET_MACHO
10568 && rs6000_darwin64_abi
10569 && TREE_CODE (type) == RECORD_TYPE
10570 && int_size_in_bytes (type) > 0)
10571 {
10572 CUMULATIVE_ARGS valcum;
10573 rtx valret;
10574
10575 valcum.words = 0;
10576 valcum.fregno = FP_ARG_MIN_REG;
10577 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10578 /* Do a trial code generation as if this were going to be passed
10579 as an argument; if any part goes in memory, we return NULL. */
10580 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10581 if (valret)
10582 return false;
10583 /* Otherwise fall through to more conventional ABI rules. */
10584 }
10585
10586 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
10587 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10588 NULL, NULL))
10589 return false;
10590
10591 /* The ELFv2 ABI returns aggregates up to 16 bytes in registers. */
10592 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10593 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10594 return false;
10595
10596 if (AGGREGATE_TYPE_P (type)
10597 && (aix_struct_return
10598 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10599 return true;
10600
10601 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10602 modes only exist for GCC vector types if -maltivec. */
10603 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10604 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10605 return false;
10606
10607 /* Return synthetic vectors in memory. */
10608 if (TREE_CODE (type) == VECTOR_TYPE
10609 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10610 {
10611 static bool warned_for_return_big_vectors = false;
10612 if (!warned_for_return_big_vectors)
10613 {
10614 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10615 "non-standard ABI extension with no compatibility "
10616 "guarantee");
10617 warned_for_return_big_vectors = true;
10618 }
10619 return true;
10620 }
10621
10622 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10623 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10624 return true;
10625
10626 return false;
10627 }
10628
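/* Two quick illustrations of the above (hypothetical types, assuming
   -mabi=elfv2): struct { double a, b; } is a homogeneous DFmode
   aggregate, so it comes back in FPRs and the hook returns false;
   a struct of 17 chars is neither homogeneous nor <= 16 bytes, so
   the hook returns true and it is returned in memory.  */
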
10629 /* Specify whether values returned in registers should be at the most
10630 significant end of a register. We want aggregates returned by
10631 value to match the way aggregates are passed to functions. */
10632
10633 static bool
10634 rs6000_return_in_msb (const_tree valtype)
10635 {
10636 return (DEFAULT_ABI == ABI_ELFv2
10637 && BYTES_BIG_ENDIAN
10638 && AGGREGATE_TYPE_P (valtype)
10639 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10640 == PAD_UPWARD));
10641 }
10642
10643 #ifdef HAVE_AS_GNU_ATTRIBUTE
10644 /* Return TRUE if a call to function FNDECL may be one that
10645 potentially affects the function calling ABI of the object file. */
10646
10647 static bool
10648 call_ABI_of_interest (tree fndecl)
10649 {
10650 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10651 {
10652 struct cgraph_node *c_node;
10653
10654 /* Libcalls are always interesting. */
10655 if (fndecl == NULL_TREE)
10656 return true;
10657
10658 /* Any call to an external function is interesting. */
10659 if (DECL_EXTERNAL (fndecl))
10660 return true;
10661
10662 /* Interesting functions that we are emitting in this object file. */
10663 c_node = cgraph_node::get (fndecl);
10664 c_node = c_node->ultimate_alias_target ();
10665 return !c_node->only_called_directly_p ();
10666 }
10667 return false;
10668 }
10669 #endif
10670
10671 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10672 for a call to a function whose data type is FNTYPE.
10673 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
10674
10675 For incoming args we set the number of arguments in the prototype
10676 large enough that we never return a PARALLEL. */
10677
10678 void
10679 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10680 rtx libname ATTRIBUTE_UNUSED, int incoming,
10681 int libcall, int n_named_args,
10682 tree fndecl ATTRIBUTE_UNUSED,
10683 machine_mode return_mode ATTRIBUTE_UNUSED)
10684 {
10685 static CUMULATIVE_ARGS zero_cumulative;
10686
10687 *cum = zero_cumulative;
10688 cum->words = 0;
10689 cum->fregno = FP_ARG_MIN_REG;
10690 cum->vregno = ALTIVEC_ARG_MIN_REG;
10691 cum->prototype = (fntype && prototype_p (fntype));
10692 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10693 ? CALL_LIBCALL : CALL_NORMAL);
10694 cum->sysv_gregno = GP_ARG_MIN_REG;
10695 cum->stdarg = stdarg_p (fntype);
10696 cum->libcall = libcall;
10697
10698 cum->nargs_prototype = 0;
10699 if (incoming || cum->prototype)
10700 cum->nargs_prototype = n_named_args;
10701
10702 /* Check for a longcall attribute. */
10703 if ((!fntype && rs6000_default_long_calls)
10704 || (fntype
10705 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10706 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10707 cum->call_cookie |= CALL_LONG;
10708
10709 if (TARGET_DEBUG_ARG)
10710 {
10711 fprintf (stderr, "\ninit_cumulative_args:");
10712 if (fntype)
10713 {
10714 tree ret_type = TREE_TYPE (fntype);
10715 fprintf (stderr, " ret code = %s,",
10716 get_tree_code_name (TREE_CODE (ret_type)));
10717 }
10718
10719 if (cum->call_cookie & CALL_LONG)
10720 fprintf (stderr, " longcall,");
10721
10722 fprintf (stderr, " proto = %d, nargs = %d\n",
10723 cum->prototype, cum->nargs_prototype);
10724 }
10725
10726 #ifdef HAVE_AS_GNU_ATTRIBUTE
10727 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10728 {
10729 cum->escapes = call_ABI_of_interest (fndecl);
10730 if (cum->escapes)
10731 {
10732 tree return_type;
10733
10734 if (fntype)
10735 {
10736 return_type = TREE_TYPE (fntype);
10737 return_mode = TYPE_MODE (return_type);
10738 }
10739 else
10740 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10741
10742 if (return_type != NULL)
10743 {
10744 if (TREE_CODE (return_type) == RECORD_TYPE
10745 && TYPE_TRANSPARENT_AGGR (return_type))
10746 {
10747 return_type = TREE_TYPE (first_field (return_type));
10748 return_mode = TYPE_MODE (return_type);
10749 }
10750 if (AGGREGATE_TYPE_P (return_type)
10751 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10752 <= 8))
10753 rs6000_returns_struct = true;
10754 }
10755 if (SCALAR_FLOAT_MODE_P (return_mode))
10756 {
10757 rs6000_passes_float = true;
10758 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10759 && (FLOAT128_IBM_P (return_mode)
10760 || FLOAT128_IEEE_P (return_mode)
10761 || (return_type != NULL
10762 && (TYPE_MAIN_VARIANT (return_type)
10763 == long_double_type_node))))
10764 rs6000_passes_long_double = true;
10765
10766 /* Note if we pass or return an IEEE 128-bit type. We changed
10767 the mangling for these types, and we may need to make an alias
10768 with the old mangling. */
10769 if (FLOAT128_IEEE_P (return_mode))
10770 rs6000_passes_ieee128 = true;
10771 }
10772 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10773 rs6000_passes_vector = true;
10774 }
10775 }
10776 #endif
10777
10778 if (fntype
10779 && !TARGET_ALTIVEC
10780 && TARGET_ALTIVEC_ABI
10781 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10782 {
10783 error ("cannot return value in vector register because"
10784 " altivec instructions are disabled, use %qs"
10785 " to enable them", "-maltivec");
10786 }
10787 }
10788 \f
10789 /* The mode the ABI uses for a word. This is not the same as word_mode
10790 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10791
10792 static scalar_int_mode
10793 rs6000_abi_word_mode (void)
10794 {
10795 return TARGET_32BIT ? SImode : DImode;
10796 }
10797
10798 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10799 static char *
10800 rs6000_offload_options (void)
10801 {
10802 if (TARGET_64BIT)
10803 return xstrdup ("-foffload-abi=lp64");
10804 else
10805 return xstrdup ("-foffload-abi=ilp32");
10806 }
10807
10808 /* On rs6000, function arguments are promoted, as are function return
10809 values. */
10810
10811 static machine_mode
10812 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10813 machine_mode mode,
10814 int *punsignedp ATTRIBUTE_UNUSED,
10815 const_tree, int)
10816 {
10817 PROMOTE_MODE (mode, *punsignedp, type);
10818
10819 return mode;
10820 }
10821
10822 /* Return true if TYPE must be passed on the stack and not in registers. */
10823
10824 static bool
10825 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10826 {
10827 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10828 return must_pass_in_stack_var_size (mode, type);
10829 else
10830 return must_pass_in_stack_var_size_or_pad (mode, type);
10831 }
10832
10833 static inline bool
10834 is_complex_IBM_long_double (machine_mode mode)
10835 {
10836 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10837 }
10838
10839 /* Whether ABI_V4 passes MODE args to a function in floating point
10840 registers. */
10841
10842 static bool
10843 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10844 {
10845 if (!TARGET_HARD_FLOAT)
10846 return false;
10847 if (mode == DFmode)
10848 return true;
10849 if (mode == SFmode && named)
10850 return true;
10851 /* ABI_V4 passes complex IBM long double in 8 gprs.
10852 Stupid, but we can't change the ABI now. */
10853 if (is_complex_IBM_long_double (mode))
10854 return false;
10855 if (FLOAT128_2REG_P (mode))
10856 return true;
10857 if (DECIMAL_FLOAT_MODE_P (mode))
10858 return true;
10859 return false;
10860 }
10861
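/* Examples for the predicate above (a sketch, assuming -mabi=sysv
   with hard float): a double argument uses an FPR whether named or
   not, a float only when named; complex IBM long double (ICmode)
   deliberately does not, and travels in eight GPRs instead; IBM
   extended long double and the decimal float modes do use FPRs.  */
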
10862 /* Implement TARGET_FUNCTION_ARG_PADDING.
10863
10864 For the AIX ABI structs are always stored left shifted in their
10865 argument slot. */
10866
10867 static pad_direction
10868 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10869 {
10870 #ifndef AGGREGATE_PADDING_FIXED
10871 #define AGGREGATE_PADDING_FIXED 0
10872 #endif
10873 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10874 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10875 #endif
10876
10877 if (!AGGREGATE_PADDING_FIXED)
10878 {
10879 /* GCC used to pass structures of the same size as integer types as
10880 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10881 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10882 passed padded downward, except that -mstrict-align further
10883 muddied the water in that multi-component structures of 2 and 4
10884 bytes in size were passed padded upward.
10885
10886 The following arranges for best compatibility with previous
10887 versions of gcc, but removes the -mstrict-align dependency. */
10888 if (BYTES_BIG_ENDIAN)
10889 {
10890 HOST_WIDE_INT size = 0;
10891
10892 if (mode == BLKmode)
10893 {
10894 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10895 size = int_size_in_bytes (type);
10896 }
10897 else
10898 size = GET_MODE_SIZE (mode);
10899
10900 if (size == 1 || size == 2 || size == 4)
10901 return PAD_DOWNWARD;
10902 }
10903 return PAD_UPWARD;
10904 }
10905
10906 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10907 {
10908 if (type != 0 && AGGREGATE_TYPE_P (type))
10909 return PAD_UPWARD;
10910 }
10911
10912 /* Fall back to the default. */
10913 return default_function_arg_padding (mode, type);
10914 }
10915
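/* For example (illustrative only): on a big-endian target without
   AGGREGATE_PADDING_FIXED, a 2-byte struct is padded downward in its
   slot, matching the old pass-as-integer behaviour, while a 3-byte
   struct falls through and is padded upward.  */
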
10916 /* If defined, a C expression that gives the alignment boundary, in bits,
10917 of an argument with the specified mode and type. If it is not defined,
10918 PARM_BOUNDARY is used for all arguments.
10919
10920 V.4 wants long longs and doubles to be double word aligned. Just
10921 testing the mode size is a boneheaded way to do this as it means
10922 that other types such as complex int are also double word aligned.
10923 However, we're stuck with this because changing the ABI might break
10924 existing library interfaces.
10925
10926 Quadword align Altivec/VSX vectors.
10927 Quadword align large synthetic vector types. */
10928
10929 static unsigned int
10930 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10931 {
10932 machine_mode elt_mode;
10933 int n_elts;
10934
10935 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10936
10937 if (DEFAULT_ABI == ABI_V4
10938 && (GET_MODE_SIZE (mode) == 8
10939 || (TARGET_HARD_FLOAT
10940 && !is_complex_IBM_long_double (mode)
10941 && FLOAT128_2REG_P (mode))))
10942 return 64;
10943 else if (FLOAT128_VECTOR_P (mode))
10944 return 128;
10945 else if (type && TREE_CODE (type) == VECTOR_TYPE
10946 && int_size_in_bytes (type) >= 8
10947 && int_size_in_bytes (type) < 16)
10948 return 64;
10949 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10950 || (type && TREE_CODE (type) == VECTOR_TYPE
10951 && int_size_in_bytes (type) >= 16))
10952 return 128;
10953
10954 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10955 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10956 -mcompat-align-parm is used. */
10957 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10958 || DEFAULT_ABI == ABI_ELFv2)
10959 && type && TYPE_ALIGN (type) > 64)
10960 {
10961 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10962 or homogeneous float/vector aggregates here. We already handled
10963 vector aggregates above, but still need to check for float here. */
10964 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10965 && !SCALAR_FLOAT_MODE_P (elt_mode));
10966
10967 /* We used to check for BLKmode instead of the above aggregate type
10968 check. Warn when this results in any difference to the ABI. */
10969 if (aggregate_p != (mode == BLKmode))
10970 {
10971 static bool warned;
10972 if (!warned && warn_psabi)
10973 {
10974 warned = true;
10975 inform (input_location,
10976 "the ABI of passing aggregates with %d-byte alignment"
10977 " has changed in GCC 5",
10978 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10979 }
10980 }
10981
10982 if (aggregate_p)
10983 return 128;
10984 }
10985
10986 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10987 implement the "aggregate type" check as a BLKmode check here; this
10988 means certain aggregate types are in fact not aligned. */
10989 if (TARGET_MACHO && rs6000_darwin64_abi
10990 && mode == BLKmode
10991 && type && TYPE_ALIGN (type) > 64)
10992 return 128;
10993
10994 return PARM_BOUNDARY;
10995 }
10996
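/* Sample boundaries produced above (a sketch, not text from any ABI
   document): under ABI_V4 a long long (GET_MODE_SIZE == 8) gets 64;
   a 16-byte AltiVec vector gets 128; and under ELFv2 a hypothetical

	struct s { int a[8]; } __attribute__ ((aligned (32)));

   counts as an aggregate with more than 8-byte alignment and also
   gets 128.  */
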
10997 /* The offset in words to the start of the parameter save area. */
10998
10999 static unsigned int
11000 rs6000_parm_offset (void)
11001 {
11002 return (DEFAULT_ABI == ABI_V4 ? 2
11003 : DEFAULT_ABI == ABI_ELFv2 ? 4
11004 : 6);
11005 }
11006
11007 /* For a function parm of MODE and TYPE, return the starting word in
11008 the parameter area. NWORDS of the parameter area are already used. */
11009
11010 static unsigned int
11011 rs6000_parm_start (machine_mode mode, const_tree type,
11012 unsigned int nwords)
11013 {
11014 unsigned int align;
11015
11016 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11017 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11018 }
11019
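/* A worked instance of the arithmetic above (illustrative, assuming
   a 64-bit target where PARM_BOUNDARY is 64): for an ELFv2 argument
   with a 128-bit boundary, ALIGN is 128 / 64 - 1 == 1.  With the
   ELFv2 offset of 4 words and NWORDS == 3 already used,
   -(4 + 3) & 1 == 1, so the argument starts at word 4, not 3.  */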
11020 /* Compute the size (in words) of a function argument. */
11021
11022 static unsigned long
11023 rs6000_arg_size (machine_mode mode, const_tree type)
11024 {
11025 unsigned long size;
11026
11027 if (mode != BLKmode)
11028 size = GET_MODE_SIZE (mode);
11029 else
11030 size = int_size_in_bytes (type);
11031
11032 if (TARGET_32BIT)
11033 return (size + 3) >> 2;
11034 else
11035 return (size + 7) >> 3;
11036 }
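
/* E.g. (illustrative): a BLKmode struct of 9 bytes occupies
   (9 + 7) >> 3 == 2 doubleword slots on a 64-bit target, but
   (9 + 3) >> 2 == 3 word slots with -m32.  */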
11037 \f
11038 /* Use this to flush pending int fields. */
11039
11040 static void
11041 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11042 HOST_WIDE_INT bitpos, int final)
11043 {
11044 unsigned int startbit, endbit;
11045 int intregs, intoffset;
11046
11047 /* Handle the situations where a float is taking up the first half
11048 of the GPR, and the other half is empty (typically due to
11049 alignment restrictions). We can detect this by an 8-byte-aligned
11050 int field, or by seeing that this is the final flush for this
11051 argument. Count the word and continue on. */
11052 if (cum->floats_in_gpr == 1
11053 && (cum->intoffset % 64 == 0
11054 || (cum->intoffset == -1 && final)))
11055 {
11056 cum->words++;
11057 cum->floats_in_gpr = 0;
11058 }
11059
11060 if (cum->intoffset == -1)
11061 return;
11062
11063 intoffset = cum->intoffset;
11064 cum->intoffset = -1;
11065 cum->floats_in_gpr = 0;
11066
11067 if (intoffset % BITS_PER_WORD != 0)
11068 {
11069 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11070 if (!int_mode_for_size (bits, 0).exists ())
11071 {
11072 /* We couldn't find an appropriate mode, which happens,
11073 e.g., in packed structs when there are 3 bytes to load.
11074 Round intoffset back to the beginning of the word in this
11075 case. */
11076 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11077 }
11078 }
11079
11080 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11081 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11082 intregs = (endbit - startbit) / BITS_PER_WORD;
11083 cum->words += intregs;
11084 /* words should be unsigned. */
11085 if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
11086 {
11087 int pad = (endbit / BITS_PER_WORD) - cum->words;
11088 cum->words += pad;
11089 }
11090 }
11091
11092 /* The darwin64 ABI calls for us to recurse down through structs,
11093 looking for elements passed in registers. Unfortunately, we have
11094 to track int register count here also because of misalignments
11095 in powerpc alignment mode. */
11096
11097 static void
11098 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11099 const_tree type,
11100 HOST_WIDE_INT startbitpos)
11101 {
11102 tree f;
11103
11104 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11105 if (TREE_CODE (f) == FIELD_DECL)
11106 {
11107 HOST_WIDE_INT bitpos = startbitpos;
11108 tree ftype = TREE_TYPE (f);
11109 machine_mode mode;
11110 if (ftype == error_mark_node)
11111 continue;
11112 mode = TYPE_MODE (ftype);
11113
11114 if (DECL_SIZE (f) != 0
11115 && tree_fits_uhwi_p (bit_position (f)))
11116 bitpos += int_bit_position (f);
11117
11118 /* ??? FIXME: else assume zero offset. */
11119
11120 if (TREE_CODE (ftype) == RECORD_TYPE)
11121 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11122 else if (USE_FP_FOR_ARG_P (cum, mode))
11123 {
11124 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11125 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11126 cum->fregno += n_fpregs;
11127 /* Single-precision floats present a special problem for
11128 us, because they are smaller than an 8-byte GPR, and so
11129 the structure-packing rules combined with the standard
11130 varargs behavior mean that we want to pack float/float
11131 and float/int combinations into a single register's
11132 space. This is complicated by the arg advance flushing,
11133 which works on arbitrarily large groups of int-type
11134 fields. */
11135 if (mode == SFmode)
11136 {
11137 if (cum->floats_in_gpr == 1)
11138 {
11139 /* Two floats in a word; count the word and reset
11140 the float count. */
11141 cum->words++;
11142 cum->floats_in_gpr = 0;
11143 }
11144 else if (bitpos % 64 == 0)
11145 {
11146 /* A float at the beginning of an 8-byte word;
11147 count it and put off adjusting cum->words until
11148 we see if an arg advance flush is going to do it
11149 for us. */
11150 cum->floats_in_gpr++;
11151 }
11152 else
11153 {
11154 /* The float is at the end of a word, preceded
11155 by integer fields, so the arg advance flush
11156 just above has already set cum->words and
11157 everything is taken care of. */
11158 }
11159 }
11160 else
11161 cum->words += n_fpregs;
11162 }
11163 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11164 {
11165 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11166 cum->vregno++;
11167 cum->words += 2;
11168 }
11169 else if (cum->intoffset == -1)
11170 cum->intoffset = bitpos;
11171 }
11172 }
11173
11174 /* Check for an item that needs to be considered specially under the
11175 Darwin 64-bit ABI. These are record types that either have BLKmode
11176 or are exactly 8 bytes in size. */
11177 static int
11178 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11179 {
11180 return rs6000_darwin64_abi
11181 && ((mode == BLKmode
11182 && TREE_CODE (type) == RECORD_TYPE
11183 && int_size_in_bytes (type) > 0)
11184 || (type && TREE_CODE (type) == RECORD_TYPE
11185 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11186 }
11187
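/* E.g. (hypothetical types, darwin64 only): struct { char c[24]; }
   has BLKmode and checks true, as does struct { double d; }, which
   is exactly 8 bytes; a 4-byte struct { int i; } checks false and
   takes the ordinary path.  */
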
11188 /* Update the data in CUM to advance over an argument
11189 of mode MODE and data type TYPE.
11190 (TYPE is null for libcalls where that information may not be available.)
11191
11192 Note that for args passed by reference, function_arg will be called
11193 with MODE and TYPE set to that of the pointer to the arg, not the arg
11194 itself. */
11195
11196 static void
11197 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11198 const_tree type, bool named, int depth)
11199 {
11200 machine_mode elt_mode;
11201 int n_elts;
11202
11203 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11204
11205 /* Only tick off an argument if we're not recursing. */
11206 if (depth == 0)
11207 cum->nargs_prototype--;
11208
11209 #ifdef HAVE_AS_GNU_ATTRIBUTE
11210 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11211 && cum->escapes)
11212 {
11213 if (SCALAR_FLOAT_MODE_P (mode))
11214 {
11215 rs6000_passes_float = true;
11216 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11217 && (FLOAT128_IBM_P (mode)
11218 || FLOAT128_IEEE_P (mode)
11219 || (type != NULL
11220 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11221 rs6000_passes_long_double = true;
11222
11223 /* Note if we pass or return an IEEE 128-bit type. We changed the
11224 mangling for these types, and we may need to make an alias with
11225 the old mangling. */
11226 if (FLOAT128_IEEE_P (mode))
11227 rs6000_passes_ieee128 = true;
11228 }
11229 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11230 rs6000_passes_vector = true;
11231 }
11232 #endif
11233
11234 if (TARGET_ALTIVEC_ABI
11235 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11236 || (type && TREE_CODE (type) == VECTOR_TYPE
11237 && int_size_in_bytes (type) == 16)))
11238 {
11239 bool stack = false;
11240
11241 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11242 {
11243 cum->vregno += n_elts;
11244
11245 if (!TARGET_ALTIVEC)
11246 error ("cannot pass argument in vector register because"
11247 " altivec instructions are disabled, use %qs"
11248 " to enable them", "-maltivec");
11249
11250 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11251 even if it is going to be passed in a vector register.
11252 Darwin does the same for variable-argument functions. */
11253 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11254 && TARGET_64BIT)
11255 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11256 stack = true;
11257 }
11258 else
11259 stack = true;
11260
11261 if (stack)
11262 {
11263 int align;
11264
11265 /* Vector parameters must be 16-byte aligned. In 32-bit
11266 mode this means we need to take into account the offset
11267 to the parameter save area. In 64-bit mode, they just
11268 have to start on an even word, since the parameter save
11269 area is 16-byte aligned. */
11270 if (TARGET_32BIT)
11271 align = -(rs6000_parm_offset () + cum->words) & 3;
11272 else
11273 align = cum->words & 1;
11274 cum->words += align + rs6000_arg_size (mode, type);
11275
11276 if (TARGET_DEBUG_ARG)
11277 {
11278 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11279 cum->words, align);
11280 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11281 cum->nargs_prototype, cum->prototype,
11282 GET_MODE_NAME (mode));
11283 }
11284 }
11285 }
11286 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11287 {
11288 int size = int_size_in_bytes (type);
11289 /* Variable sized types have size == -1 and are
11290 treated as if consisting entirely of ints.
11291 Pad to a 16-byte boundary if needed. */
11292 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11293 && (cum->words % 2) != 0)
11294 cum->words++;
11295 /* For varargs, we can just go up by the size of the struct. */
11296 if (!named)
11297 cum->words += (size + 7) / 8;
11298 else
11299 {
11300 /* It is tempting to say int register count just goes up by
11301 sizeof(type)/8, but this is wrong in a case such as
11302 { int; double; int; } [powerpc alignment]. We have to
11303 grovel through the fields for these too. */
11304 cum->intoffset = 0;
11305 cum->floats_in_gpr = 0;
11306 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11307 rs6000_darwin64_record_arg_advance_flush (cum,
11308 size * BITS_PER_UNIT, 1);
11309 }
11310 if (TARGET_DEBUG_ARG)
11311 {
11312 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11313 cum->words, TYPE_ALIGN (type), size);
11314 fprintf (stderr,
11315 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11316 cum->nargs_prototype, cum->prototype,
11317 GET_MODE_NAME (mode));
11318 }
11319 }
11320 else if (DEFAULT_ABI == ABI_V4)
11321 {
11322 if (abi_v4_pass_in_fpr (mode, named))
11323 {
11324 /* _Decimal128 must use an even/odd register pair. This assumes
11325 that the register number is odd when fregno is odd. */
11326 if (mode == TDmode && (cum->fregno % 2) == 1)
11327 cum->fregno++;
11328
11329 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11330 <= FP_ARG_V4_MAX_REG)
11331 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11332 else
11333 {
11334 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11335 if (mode == DFmode || FLOAT128_IBM_P (mode)
11336 || mode == DDmode || mode == TDmode)
11337 cum->words += cum->words & 1;
11338 cum->words += rs6000_arg_size (mode, type);
11339 }
11340 }
11341 else
11342 {
11343 int n_words = rs6000_arg_size (mode, type);
11344 int gregno = cum->sysv_gregno;
11345
11346 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11347 So is any other 2-word item such as complex int, due to a
11348 historical mistake. */
11349 if (n_words == 2)
11350 gregno += (1 - gregno) & 1;
11351
11352 /* Multi-reg args are not split between registers and stack. */
11353 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11354 {
11355 /* Long long is aligned on the stack. So are other 2-word
11356 items such as complex int, due to a historical mistake. */
11357 if (n_words == 2)
11358 cum->words += cum->words & 1;
11359 cum->words += n_words;
11360 }
11361
11362 /* Note: we continue to accumulate gregno even after we have started
11363 spilling to the stack, since that is how expand_builtin_saveregs
11364 can tell that spilling to the stack has begun. */
11365 cum->sysv_gregno = gregno + n_words;
11366 }
11367
11368 if (TARGET_DEBUG_ARG)
11369 {
11370 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11371 cum->words, cum->fregno);
11372 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11373 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11374 fprintf (stderr, "mode = %4s, named = %d\n",
11375 GET_MODE_NAME (mode), named);
11376 }
11377 }
11378 else
11379 {
11380 int n_words = rs6000_arg_size (mode, type);
11381 int start_words = cum->words;
11382 int align_words = rs6000_parm_start (mode, type, start_words);
11383
11384 cum->words = align_words + n_words;
11385
11386 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11387 {
11388 /* _Decimal128 must be passed in an even/odd float register pair.
11389 This assumes that the register number is odd when fregno is
11390 odd. */
11391 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11392 cum->fregno++;
11393 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11394 }
11395
11396 if (TARGET_DEBUG_ARG)
11397 {
11398 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11399 cum->words, cum->fregno);
11400 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11401 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11402 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11403 named, align_words - start_words, depth);
11404 }
11405 }
11406 }
11407
11408 static void
11409 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11410 const_tree type, bool named)
11411 {
11412 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11413 0);
11414 }
11415
11416 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11417 structure between cum->intoffset and bitpos to integer registers. */
11418
11419 static void
11420 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11421 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11422 {
11423 machine_mode mode;
11424 unsigned int regno;
11425 unsigned int startbit, endbit;
11426 int this_regno, intregs, intoffset;
11427 rtx reg;
11428
11429 if (cum->intoffset == -1)
11430 return;
11431
11432 intoffset = cum->intoffset;
11433 cum->intoffset = -1;
11434
11435 /* If this is the trailing part of a word, try to load only that
11436 much into the register. Otherwise load the whole register. Note
11437 that in the latter case we may pick up unwanted bits. It's not a
11438 problem at the moment but we may wish to revisit this. */
11439
11440 if (intoffset % BITS_PER_WORD != 0)
11441 {
11442 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11443 if (!int_mode_for_size (bits, 0).exists (&mode))
11444 {
11445 /* We couldn't find an appropriate mode, which happens,
11446 e.g., in packed structs when there are 3 bytes to load.
11447 Round intoffset back to the beginning of the word in this
11448 case. */
11449 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11450 mode = word_mode;
11451 }
11452 }
11453 else
11454 mode = word_mode;
11455
11456 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11457 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11458 intregs = (endbit - startbit) / BITS_PER_WORD;
11459 this_regno = cum->words + intoffset / BITS_PER_WORD;
11460
11461 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11462 cum->use_stack = 1;
11463
11464 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11465 if (intregs <= 0)
11466 return;
11467
11468 intoffset /= BITS_PER_UNIT;
11469 do
11470 {
11471 regno = GP_ARG_MIN_REG + this_regno;
11472 reg = gen_rtx_REG (mode, regno);
11473 rvec[(*k)++] =
11474 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11475
11476 this_regno += 1;
11477 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11478 mode = word_mode;
11479 intregs -= 1;
11480 }
11481 while (intregs > 0);
11482 }
11483
11484 /* Recursive workhorse for the following. */
11485
11486 static void
11487 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11488 HOST_WIDE_INT startbitpos, rtx rvec[],
11489 int *k)
11490 {
11491 tree f;
11492
11493 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11494 if (TREE_CODE (f) == FIELD_DECL)
11495 {
11496 HOST_WIDE_INT bitpos = startbitpos;
11497 tree ftype = TREE_TYPE (f);
11498 machine_mode mode;
11499 if (ftype == error_mark_node)
11500 continue;
11501 mode = TYPE_MODE (ftype);
11502
11503 if (DECL_SIZE (f) != 0
11504 && tree_fits_uhwi_p (bit_position (f)))
11505 bitpos += int_bit_position (f);
11506
11507 /* ??? FIXME: else assume zero offset. */
11508
11509 if (TREE_CODE (ftype) == RECORD_TYPE)
11510 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11511 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11512 {
11513 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11514 #if 0
11515 switch (mode)
11516 {
11517 case E_SCmode: mode = SFmode; break;
11518 case E_DCmode: mode = DFmode; break;
11519 case E_TCmode: mode = TFmode; break;
11520 default: break;
11521 }
11522 #endif
11523 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11524 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11525 {
11526 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11527 && (mode == TFmode || mode == TDmode));
11528 /* Long double or _Decimal128 split over regs and memory. */
11529 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11530 cum->use_stack = 1;
11531 }
11532 rvec[(*k)++]
11533 = gen_rtx_EXPR_LIST (VOIDmode,
11534 gen_rtx_REG (mode, cum->fregno++),
11535 GEN_INT (bitpos / BITS_PER_UNIT));
11536 if (FLOAT128_2REG_P (mode))
11537 cum->fregno++;
11538 }
11539 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11540 {
11541 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11542 rvec[(*k)++]
11543 = gen_rtx_EXPR_LIST (VOIDmode,
11544 gen_rtx_REG (mode, cum->vregno++),
11545 GEN_INT (bitpos / BITS_PER_UNIT));
11546 }
11547 else if (cum->intoffset == -1)
11548 cum->intoffset = bitpos;
11549 }
11550 }
11551
11552 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11553 the register(s) to be used for each field and subfield of a struct
11554 being passed by value, along with the offset of where the
11555 register's value may be found in the block. FP fields go in FP
11556 registers, vector fields go in vector registers, and everything
11557 else goes in int registers, packed as in memory.
11558
11559 This code is also used for function return values. RETVAL indicates
11560 whether this is the case.
11561
11562 Much of this is taken from the SPARC V9 port, which has a similar
11563 calling convention. */
11564
11565 static rtx
11566 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11567 bool named, bool retval)
11568 {
11569 rtx rvec[FIRST_PSEUDO_REGISTER];
11570 int k = 1, kbase = 1;
11571 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11572 /* This is a copy; modifications are not visible to our caller. */
11573 CUMULATIVE_ARGS copy_cum = *orig_cum;
11574 CUMULATIVE_ARGS *cum = &copy_cum;
11575
11576 /* Pad to a 16-byte boundary if needed. */
11577 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11578 && (cum->words % 2) != 0)
11579 cum->words++;
11580
11581 cum->intoffset = 0;
11582 cum->use_stack = 0;
11583 cum->named = named;
11584
11585 /* Put entries into rvec[] for individual FP and vector fields, and
11586 for the chunks of memory that go in int regs. Note we start at
11587 element 1; 0 is reserved for an indication of using memory, and
11588 may or may not be filled in below. */
11589 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11590 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11591
11592 /* If any part of the struct went on the stack put all of it there.
11593 This hack is because the generic code for
11594 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11595 parts of the struct are not at the beginning. */
11596 if (cum->use_stack)
11597 {
11598 if (retval)
11599 return NULL_RTX; /* doesn't go in registers at all */
11600 kbase = 0;
11601 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11602 }
11603 if (k > 1 || cum->use_stack)
11604 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11605 else
11606 return NULL_RTX;
11607 }
11608
11609 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11610
11611 static rtx
11612 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11613 int align_words)
11614 {
11615 int n_units;
11616 int i, k;
11617 rtx rvec[GP_ARG_NUM_REG + 1];
11618
11619 if (align_words >= GP_ARG_NUM_REG)
11620 return NULL_RTX;
11621
11622 n_units = rs6000_arg_size (mode, type);
11623
11624 /* Optimize the simple case where the arg fits in one gpr, except in
11625 the case of BLKmode due to assign_parms assuming that registers are
11626 BITS_PER_WORD wide. */
11627 if (n_units == 0
11628 || (n_units == 1 && mode != BLKmode))
11629 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11630
11631 k = 0;
11632 if (align_words + n_units > GP_ARG_NUM_REG)
11633 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11634 using a magic NULL_RTX component.
11635 This is not strictly correct. Only some of the arg belongs in
11636 memory, not all of it. However, the normal scheme using
11637 function_arg_partial_nregs can result in unusual subregs, e.g.
11638 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11639 store the whole arg to memory is often more efficient than code
11640 to store pieces, and we know that space is available in the right
11641 place for the whole arg. */
11642 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11643
11644 i = 0;
11645 do
11646 {
11647 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11648 rtx off = GEN_INT (i++ * 4);
11649 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11650 }
11651 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11652
11653 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11654 }
11655
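/* For instance (an illustrative sketch): with -m32 -mpowerpc64, a
   DFmode argument landing on the last GPR (ALIGN_WORDS == 7) needs
   two SImode units but only one register remains, so the loop above
   builds a PARALLEL whose first element is the magic NULL_RTX
   ("partly in memory") and whose second maps SImode r10 to offset 0;
   the other half of the value lives on the stack.  */
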
11656 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11657 but must also be copied into the parameter save area starting at
11658 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11659 to the GPRs and/or memory. Return the number of elements used. */
11660
11661 static int
11662 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11663 int align_words, rtx *rvec)
11664 {
11665 int k = 0;
11666
11667 if (align_words < GP_ARG_NUM_REG)
11668 {
11669 int n_words = rs6000_arg_size (mode, type);
11670
11671 if (align_words + n_words > GP_ARG_NUM_REG
11672 || mode == BLKmode
11673 || (TARGET_32BIT && TARGET_POWERPC64))
11674 {
11675 /* If this is partially on the stack, then we only
11676 include the portion actually in registers here. */
11677 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11678 int i = 0;
11679
11680 if (align_words + n_words > GP_ARG_NUM_REG)
11681 {
11682 /* Not all of the arg fits in gprs. Say that it goes in memory
11683 too, using a magic NULL_RTX component. Also see comment in
11684 rs6000_mixed_function_arg for why the normal
11685 function_arg_partial_nregs scheme doesn't work in this case. */
11686 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11687 }
11688
11689 do
11690 {
11691 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11692 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11693 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11694 }
11695 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11696 }
11697 else
11698 {
11699 /* The whole arg fits in gprs. */
11700 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11701 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11702 }
11703 }
11704 else
11705 {
11706 /* It's entirely in memory. */
11707 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11708 }
11709
11710 return k;
11711 }
11712
11713 /* RVEC is a vector of K components of an argument of mode MODE.
11714 Construct the final function_arg return value from it. */
11715
11716 static rtx
11717 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11718 {
11719 gcc_assert (k >= 1);
11720
11721 /* Avoid returning a PARALLEL in the trivial cases. */
11722 if (k == 1)
11723 {
11724 if (XEXP (rvec[0], 0) == NULL_RTX)
11725 return NULL_RTX;
11726
11727 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11728 return XEXP (rvec[0], 0);
11729 }
11730
11731 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11732 }
11733
11734 /* Determine where to put an argument to a function.
11735 Value is zero to push the argument on the stack,
11736 or a hard register in which to store the argument.
11737
11738 MODE is the argument's machine mode.
11739 TYPE is the data type of the argument (as a tree).
11740 This is null for libcalls where that information may
11741 not be available.
11742 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11743 the preceding args and about the function being called. It is
11744 not modified in this routine.
11745 NAMED is nonzero if this argument is a named parameter
11746 (otherwise it is an extra parameter matching an ellipsis).
11747
11748 On RS/6000 the first eight words of non-FP args are normally in registers
11749 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11750 Under V.4, the first 8 FP args are in registers.
11751
11752 If this is floating-point and no prototype is specified, we use
11753 both an FP and integer register (or possibly FP reg and stack). Library
11754 functions (when CALL_LIBCALL is set) always have the proper types for args,
11755 so we can pass the FP value just in one register. emit_library_call
11756 doesn't support PARALLEL anyway.
11757
11758 Note that for args passed by reference, function_arg will be called
11759 with MODE and TYPE set to that of the pointer to the arg, not the arg
11760 itself. */
11761
11762 static rtx
11763 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11764 const_tree type, bool named)
11765 {
11766 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11767 enum rs6000_abi abi = DEFAULT_ABI;
11768 machine_mode elt_mode;
11769 int n_elts;
11770
11771 /* Return a marker to indicate whether to set or clear the CR1 bit
11772 that V.4 uses to say fp args were passed in registers.
11773 Assume that we don't need the marker for software floating point
11774 or compiler-generated library calls. */
11775 if (mode == VOIDmode)
11776 {
11777 if (abi == ABI_V4
11778 && (cum->call_cookie & CALL_LIBCALL) == 0
11779 && (cum->stdarg
11780 || (cum->nargs_prototype < 0
11781 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11782 && TARGET_HARD_FLOAT)
11783 return GEN_INT (cum->call_cookie
11784 | ((cum->fregno == FP_ARG_MIN_REG)
11785 ? CALL_V4_SET_FP_ARGS
11786 : CALL_V4_CLEAR_FP_ARGS));
11787
11788 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11789 }
11790
11791 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11792
11793 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11794 {
11795 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11796 if (rslt != NULL_RTX)
11797 return rslt;
11798 /* Else fall through to usual handling. */
11799 }
11800
11801 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11802 {
11803 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11804 rtx r, off;
11805 int i, k = 0;
11806
11807 /* Do we also need to pass this argument in the parameter save area?
11808 Library support functions for IEEE 128-bit are assumed not to need the
11809 value passed both in GPRs and in vector registers. */
11810 if (TARGET_64BIT && !cum->prototype
11811 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11812 {
11813 int align_words = ROUND_UP (cum->words, 2);
11814 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11815 }
11816
11817 /* Describe where this argument goes in the vector registers. */
11818 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11819 {
11820 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11821 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11822 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11823 }
11824
11825 return rs6000_finish_function_arg (mode, rvec, k);
11826 }
11827 else if (TARGET_ALTIVEC_ABI
11828 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11829 || (type && TREE_CODE (type) == VECTOR_TYPE
11830 && int_size_in_bytes (type) == 16)))
11831 {
11832 if (named || abi == ABI_V4)
11833 return NULL_RTX;
11834 else
11835 {
11836 /* Vector parameters to varargs functions under AIX or Darwin
11837 get passed in memory and possibly also in GPRs. */
11838 int align, align_words, n_words;
11839 machine_mode part_mode;
11840
11841 /* Vector parameters must be 16-byte aligned. In 32-bit
11842 mode this means we need to take into account the offset
11843 to the parameter save area. In 64-bit mode, they just
11844 have to start on an even word, since the parameter save
11845 area is 16-byte aligned. */
11846 if (TARGET_32BIT)
11847 align = -(rs6000_parm_offset () + cum->words) & 3;
11848 else
11849 align = cum->words & 1;
11850 align_words = cum->words + align;
11851
11852 /* Out of registers? Memory, then. */
11853 if (align_words >= GP_ARG_NUM_REG)
11854 return NULL_RTX;
11855
11856 if (TARGET_32BIT && TARGET_POWERPC64)
11857 return rs6000_mixed_function_arg (mode, type, align_words);
11858
11859 /* The vector value goes in GPRs. Only the part of the
11860 value in GPRs is reported here. */
11861 part_mode = mode;
11862 n_words = rs6000_arg_size (mode, type);
11863 if (align_words + n_words > GP_ARG_NUM_REG)
11864 /* Fortunately, there are only two possibilities: the value
11865 is either wholly in GPRs or half in GPRs and half not. */
11866 part_mode = DImode;
11867
11868 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11869 }
11870 }
11871
11872 else if (abi == ABI_V4)
11873 {
11874 if (abi_v4_pass_in_fpr (mode, named))
11875 {
11876 /* _Decimal128 must use an even/odd register pair. This assumes
11877 that the register number is odd when fregno is odd. */
11878 if (mode == TDmode && (cum->fregno % 2) == 1)
11879 cum->fregno++;
11880
11881 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11882 <= FP_ARG_V4_MAX_REG)
11883 return gen_rtx_REG (mode, cum->fregno);
11884 else
11885 return NULL_RTX;
11886 }
11887 else
11888 {
11889 int n_words = rs6000_arg_size (mode, type);
11890 int gregno = cum->sysv_gregno;
11891
11892 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11893 So is any other 2-word item such as complex int, due to a
11894 historical mistake. */
11895 if (n_words == 2)
11896 gregno += (1 - gregno) & 1;
11897
11898 /* Multi-reg args are not split between registers and stack. */
11899 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11900 return NULL_RTX;
11901
11902 if (TARGET_32BIT && TARGET_POWERPC64)
11903 return rs6000_mixed_function_arg (mode, type,
11904 gregno - GP_ARG_MIN_REG);
11905 return gen_rtx_REG (mode, gregno);
11906 }
11907 }
11908 else
11909 {
11910 int align_words = rs6000_parm_start (mode, type, cum->words);
11911
11912 /* _Decimal128 must be passed in an even/odd float register pair.
11913 This assumes that the register number is odd when fregno is odd. */
11914 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11915 cum->fregno++;
11916
11917 if (USE_FP_FOR_ARG_P (cum, elt_mode))
11918 {
11919 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11920 rtx r, off;
11921 int i, k = 0;
11922 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11923 int fpr_words;
11924
11925 /* Do we also need to pass this argument in the parameter
11926 save area? */
11927 if (type && (cum->nargs_prototype <= 0
11928 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11929 && TARGET_XL_COMPAT
11930 && align_words >= GP_ARG_NUM_REG)))
11931 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11932
11933 /* Describe where this argument goes in the fprs. */
11934 for (i = 0; i < n_elts
11935 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11936 {
11937 /* Check if the argument is split over registers and memory.
11938 This can only ever happen for long double or _Decimal128;
11939 complex types are handled via split_complex_arg. */
11940 machine_mode fmode = elt_mode;
11941 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11942 {
11943 gcc_assert (FLOAT128_2REG_P (fmode));
11944 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11945 }
11946
11947 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11948 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11949 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11950 }
11951
11952 /* If there were not enough FPRs to hold the argument, the rest
11953 usually goes into memory. However, if the current position
11954 is still within the register parameter area, a portion may
11955 actually have to go into GPRs.
11956
11957 Note that it may happen that the portion of the argument
11958 passed in the first "half" of the first GPR was already
11959 passed in the last FPR as well.
11960
11961 For unnamed arguments, we already set up GPRs to cover the
11962 whole argument in rs6000_psave_function_arg, so there is
11963 nothing further to do at this point. */
11964 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11965 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11966 && cum->nargs_prototype > 0)
11967 {
11968 static bool warned;
11969
11970 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11971 int n_words = rs6000_arg_size (mode, type);
11972
11973 align_words += fpr_words;
11974 n_words -= fpr_words;
11975
11976 do
11977 {
11978 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11979 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11980 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11981 }
11982 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11983
11984 if (!warned && warn_psabi)
11985 {
11986 warned = true;
11987 inform (input_location,
11988 "the ABI of passing homogeneous float aggregates"
11989 " has changed in GCC 5");
11990 }
11991 }
11992
11993 return rs6000_finish_function_arg (mode, rvec, k);
11994 }
11995 else if (align_words < GP_ARG_NUM_REG)
11996 {
11997 if (TARGET_32BIT && TARGET_POWERPC64)
11998 return rs6000_mixed_function_arg (mode, type, align_words);
11999
12000 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12001 }
12002 else
12003 return NULL_RTX;
12004 }
12005 }
12006 \f
12007 /* For an arg passed partly in registers and partly in memory, this is
12008 the number of bytes passed in registers. For args passed entirely in
12009 registers or entirely in memory, zero. When an arg is described by a
12010 PARALLEL, perhaps using more than one register type, this function
12011 returns the number of bytes used by the first element of the PARALLEL. */
12012
12013 static int
12014 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12015 tree type, bool named)
12016 {
12017 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12018 bool passed_in_gprs = true;
12019 int ret = 0;
12020 int align_words;
12021 machine_mode elt_mode;
12022 int n_elts;
12023
12024 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12025
12026 if (DEFAULT_ABI == ABI_V4)
12027 return 0;
12028
12029 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12030 {
12031 /* If we are passing this arg in the fixed parameter save area (gprs or
12032 memory) as well as VRs, we do not use the partial bytes mechanism;
12033 instead, rs6000_function_arg will return a PARALLEL including a memory
12034 element as necessary. Library support functions for IEEE 128-bit are
12035 assumed not to need the value passed both in GPRs and in vector
12036 registers. */
12037 if (TARGET_64BIT && !cum->prototype
12038 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12039 return 0;
12040
12041 /* Otherwise, we pass in VRs only. Check for partial copies. */
12042 passed_in_gprs = false;
12043 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12044 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12045 }
12046
12047 /* In this complicated case we just disable the partial_nregs code. */
12048 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12049 return 0;
12050
12051 align_words = rs6000_parm_start (mode, type, cum->words);
12052
12053 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12054 {
12055 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12056
12057 /* If we are passing this arg in the fixed parameter save area
12058 (gprs or memory) as well as FPRs, we do not use the partial
12059 bytes mechanism; instead, rs6000_function_arg will return a
12060 PARALLEL including a memory element as necessary. */
12061 if (type
12062 && (cum->nargs_prototype <= 0
12063 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12064 && TARGET_XL_COMPAT
12065 && align_words >= GP_ARG_NUM_REG)))
12066 return 0;
12067
12068 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12069 passed_in_gprs = false;
12070 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12071 {
12072 /* Compute number of bytes / words passed in FPRs. If there
12073 is still space available in the register parameter area
12074 *after* that amount, a part of the argument will be passed
12075 in GPRs. In that case, the total amount passed in any
12076 registers is equal to the amount that would have been passed
12077 in GPRs if everything were passed there, so we fall back to
12078 the GPR code below to compute the appropriate value. */
12079 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12080 * MIN (8, GET_MODE_SIZE (elt_mode)));
12081 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12082
12083 if (align_words + fpr_words < GP_ARG_NUM_REG)
12084 passed_in_gprs = true;
12085 else
12086 ret = fpr;
12087 }
12088 }
12089
12090 if (passed_in_gprs
12091 && align_words < GP_ARG_NUM_REG
12092 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12093 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12094
12095 if (ret != 0 && TARGET_DEBUG_ARG)
12096 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12097
12098 return ret;
12099 }
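/* A worked example of the split above (illustrative values, assuming a
   64-bit ABI where GP_ARG_NUM_REG is 8): if only two FPRs remain free
   and the argument is a homogeneous aggregate of four doubles, then
   fpr = 2 * 8 = 16 bytes land in FPRs and fpr_words = 2.  If
   align_words + 2 is still below GP_ARG_NUM_REG, the remainder spills
   into GPRs and the GPR computation above supplies the result;
   otherwise the partial-bytes result is just those 16 bytes.  */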
12100 \f
12101 /* A C expression that indicates when an argument must be passed by
12102 reference. If nonzero for an argument, a copy of that argument is
12103 made in memory and a pointer to the argument is passed instead of
12104 the argument itself. The pointer is passed in whatever way is
12105 appropriate for passing a pointer to that type.
12106
12107 Under V.4, aggregates and long double are passed by reference.
12108
12109 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12110 reference unless the AltiVec vector extension ABI is in force.
12111
12112 As an extension to all ABIs, variable sized types are passed by
12113 reference. */
12114
12115 static bool
12116 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12117 machine_mode mode, const_tree type,
12118 bool named ATTRIBUTE_UNUSED)
12119 {
12120 if (!type)
12121 return 0;
12122
12123 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12124 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12125 {
12126 if (TARGET_DEBUG_ARG)
12127 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12128 return 1;
12129 }
12130
12131 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12132 {
12133 if (TARGET_DEBUG_ARG)
12134 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12135 return 1;
12136 }
12137
12138 if (int_size_in_bytes (type) < 0)
12139 {
12140 if (TARGET_DEBUG_ARG)
12141 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12142 return 1;
12143 }
12144
12145 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12146 modes only exist for GCC vector types if -maltivec. */
12147 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12148 {
12149 if (TARGET_DEBUG_ARG)
12150 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12151 return 1;
12152 }
12153
12154 /* Pass synthetic vectors in memory. */
12155 if (TREE_CODE (type) == VECTOR_TYPE
12156 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12157 {
12158 static bool warned_for_pass_big_vectors = false;
12159 if (TARGET_DEBUG_ARG)
12160 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12161 if (!warned_for_pass_big_vectors)
12162 {
12163 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12164 "non-standard ABI extension with no compatibility "
12165 "guarantee");
12166 warned_for_pass_big_vectors = true;
12167 }
12168 return 1;
12169 }
12170
12171 return 0;
12172 }
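/* Illustrative examples of the rules above (a sketch; the exact
   behavior depends on the ABI and options in effect):

     struct S { int a, b; };   V.4: aggregate, passed by reference
     char buf[n];              variable-sized type: by reference
     int w __attribute__ ((vector_size (32)));
                               synthetic vector wider than the vector
                               ABI supports: by reference, with a
                               one-time -Wpsabi warning
     int i;                    otherwise: passed by value  */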
12173
12174 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12175 already processed.  Return true if the parameter must be passed
12176 (fully or partially) on the stack. */
12177
12178 static bool
12179 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12180 {
12181 machine_mode mode;
12182 int unsignedp;
12183 rtx entry_parm;
12184
12185 /* Catch errors. */
12186 if (type == NULL || type == error_mark_node)
12187 return true;
12188
12189 /* Handle types with no storage requirement. */
12190 if (TYPE_MODE (type) == VOIDmode)
12191 return false;
12192
12193 /* Handle complex types. */
12194 if (TREE_CODE (type) == COMPLEX_TYPE)
12195 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12196 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12197
12198 /* Handle transparent aggregates. */
12199 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12200 && TYPE_TRANSPARENT_AGGR (type))
12201 type = TREE_TYPE (first_field (type));
12202
12203 /* See if this arg was passed by invisible reference. */
12204 if (pass_by_reference (get_cumulative_args (args_so_far),
12205 TYPE_MODE (type), type, true))
12206 type = build_pointer_type (type);
12207
12208 /* Find mode as it is passed by the ABI. */
12209 unsignedp = TYPE_UNSIGNED (type);
12210 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12211
12212 /* If we must pass in stack, we need a stack. */
12213 if (rs6000_must_pass_in_stack (mode, type))
12214 return true;
12215
12216 /* If there is no incoming register, we need a stack. */
12217 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12218 if (entry_parm == NULL)
12219 return true;
12220
12221 /* Likewise if we need to pass both in registers and on the stack. */
12222 if (GET_CODE (entry_parm) == PARALLEL
12223 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12224 return true;
12225
12226 /* Also true if we're partially in registers and partially not. */
12227 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12228 return true;
12229
12230 /* Update info on where next arg arrives in registers. */
12231 rs6000_function_arg_advance (args_so_far, mode, type, true);
12232 return false;
12233 }
12234
12235 /* Return true if FUN has no prototype, has a variable argument
12236 list, or passes any parameter in memory. */
12237
12238 static bool
12239 rs6000_function_parms_need_stack (tree fun, bool incoming)
12240 {
12241 tree fntype, result;
12242 CUMULATIVE_ARGS args_so_far_v;
12243 cumulative_args_t args_so_far;
12244
12245 if (!fun)
12246 /* Must be a libcall, all of which only use reg parms. */
12247 return false;
12248
12249 fntype = fun;
12250 if (!TYPE_P (fun))
12251 fntype = TREE_TYPE (fun);
12252
12253 /* Varargs functions need the parameter save area. */
12254 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12255 return true;
12256
12257 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12258 args_so_far = pack_cumulative_args (&args_so_far_v);
12259
12260 /* When incoming, we will have been passed the function decl.
12261 It is necessary to use the decl to handle K&R style functions,
12262 where TYPE_ARG_TYPES may not be available. */
12263 if (incoming)
12264 {
12265 gcc_assert (DECL_P (fun));
12266 result = DECL_RESULT (fun);
12267 }
12268 else
12269 result = TREE_TYPE (fntype);
12270
12271 if (result && aggregate_value_p (result, fntype))
12272 {
12273 if (!TYPE_P (result))
12274 result = TREE_TYPE (result);
12275 result = build_pointer_type (result);
12276 rs6000_parm_needs_stack (args_so_far, result);
12277 }
12278
12279 if (incoming)
12280 {
12281 tree parm;
12282
12283 for (parm = DECL_ARGUMENTS (fun);
12284 parm && parm != void_list_node;
12285 parm = TREE_CHAIN (parm))
12286 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12287 return true;
12288 }
12289 else
12290 {
12291 function_args_iterator args_iter;
12292 tree arg_type;
12293
12294 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12295 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12296 return true;
12297 }
12298
12299 return false;
12300 }
12301
12302 /* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
12303 usually a constant depending on the ABI.  However, in the ELFv2 ABI
12304 the register parameter area is optional when calling a function that
12305 has a prototype in scope, has no variable argument list, and passes
12306 all parameters in registers. */
12307
12308 int
12309 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12310 {
12311 int reg_parm_stack_space;
12312
12313 switch (DEFAULT_ABI)
12314 {
12315 default:
12316 reg_parm_stack_space = 0;
12317 break;
12318
12319 case ABI_AIX:
12320 case ABI_DARWIN:
12321 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12322 break;
12323
12324 case ABI_ELFv2:
12325 /* ??? Recomputing this every time is a bit expensive. Is there
12326 a place to cache this information? */
12327 if (rs6000_function_parms_need_stack (fun, incoming))
12328 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12329 else
12330 reg_parm_stack_space = 0;
12331 break;
12332 }
12333
12334 return reg_parm_stack_space;
12335 }
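/* For example (a sketch): under the ELFv2 ABI,

     extern int f (int, double);       prototyped, not varargs, all
                                       parameters in registers, so
                                       reg_parm_stack_space is 0
     extern int g (const char *, ...); varargs, so 64 bytes

   whereas AIX and Darwin always reserve the full 64 (or 32) bytes.  */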
12336
12337 static void
12338 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12339 {
12340 int i;
12341 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12342
12343 if (nregs == 0)
12344 return;
12345
12346 for (i = 0; i < nregs; i++)
12347 {
12348 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12349 if (reload_completed)
12350 {
12351 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12352 tem = NULL_RTX;
12353 else
12354 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12355 i * GET_MODE_SIZE (reg_mode));
12356 }
12357 else
12358 tem = replace_equiv_address (tem, XEXP (tem, 0));
12359
12360 gcc_assert (tem);
12361
12362 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12363 }
12364 }
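/* For instance (illustrative): rs6000_move_block_from_reg
   (GP_ARG_MIN_REG, mem, 8) on a 64-bit target stores r3..r10 into
   eight consecutive DImode slots of MEM.  */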
12365 \f
12366 /* Perform any actions needed for a function that is receiving a
12367 variable number of arguments.
12368
12369 CUM is as above.
12370
12371 MODE and TYPE are the mode and type of the current parameter.
12372
12373 PRETEND_SIZE is a variable that should be set to the amount of stack
12374 that must be pushed by the prolog to pretend that our caller pushed
12375 it.
12376
12377 Normally, this macro will push all remaining incoming registers on the
12378 stack and set PRETEND_SIZE to the length of the registers pushed. */
12379
12380 static void
12381 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12382 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12383 int no_rtl)
12384 {
12385 CUMULATIVE_ARGS next_cum;
12386 int reg_size = TARGET_32BIT ? 4 : 8;
12387 rtx save_area = NULL_RTX, mem;
12388 int first_reg_offset;
12389 alias_set_type set;
12390
12391 /* Skip the last named argument. */
12392 next_cum = *get_cumulative_args (cum);
12393 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12394
12395 if (DEFAULT_ABI == ABI_V4)
12396 {
12397 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12398
12399 if (! no_rtl)
12400 {
12401 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12402 HOST_WIDE_INT offset = 0;
12403
12404 /* Try to optimize the size of the varargs save area.
12405 The ABI requires that ap.reg_save_area is doubleword
12406 aligned, but we don't need to allocate space for all
12407 the bytes, only for those to which we will actually save
12408 anything. */
12409 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12410 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12411 if (TARGET_HARD_FLOAT
12412 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12413 && cfun->va_list_fpr_size)
12414 {
12415 if (gpr_reg_num)
12416 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12417 * UNITS_PER_FP_WORD;
12418 if (cfun->va_list_fpr_size
12419 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12420 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12421 else
12422 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12423 * UNITS_PER_FP_WORD;
12424 }
12425 if (gpr_reg_num)
12426 {
12427 offset = -((first_reg_offset * reg_size) & ~7);
12428 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12429 {
12430 gpr_reg_num = cfun->va_list_gpr_size;
12431 if (reg_size == 4 && (first_reg_offset & 1))
12432 gpr_reg_num++;
12433 }
12434 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12435 }
12436 else if (fpr_size)
12437 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12438 * UNITS_PER_FP_WORD
12439 - (int) (GP_ARG_NUM_REG * reg_size);
12440
12441 if (gpr_size + fpr_size)
12442 {
12443 rtx reg_save_area
12444 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12445 gcc_assert (GET_CODE (reg_save_area) == MEM);
12446 reg_save_area = XEXP (reg_save_area, 0);
12447 if (GET_CODE (reg_save_area) == PLUS)
12448 {
12449 gcc_assert (XEXP (reg_save_area, 0)
12450 == virtual_stack_vars_rtx);
12451 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12452 offset += INTVAL (XEXP (reg_save_area, 1));
12453 }
12454 else
12455 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12456 }
12457
12458 cfun->machine->varargs_save_offset = offset;
12459 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12460 }
12461 }
12462 else
12463 {
12464 first_reg_offset = next_cum.words;
12465 save_area = crtl->args.internal_arg_pointer;
12466
12467 if (targetm.calls.must_pass_in_stack (mode, type))
12468 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12469 }
12470
12471 set = get_varargs_alias_set ();
12472 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12473 && cfun->va_list_gpr_size)
12474 {
12475 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12476
12477 if (va_list_gpr_counter_field)
12478 /* V4 va_list_gpr_size counts number of registers needed. */
12479 n_gpr = cfun->va_list_gpr_size;
12480 else
12481 /* char * va_list instead counts number of bytes needed. */
12482 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12483
12484 if (nregs > n_gpr)
12485 nregs = n_gpr;
12486
12487 mem = gen_rtx_MEM (BLKmode,
12488 plus_constant (Pmode, save_area,
12489 first_reg_offset * reg_size));
12490 MEM_NOTRAP_P (mem) = 1;
12491 set_mem_alias_set (mem, set);
12492 set_mem_align (mem, BITS_PER_WORD);
12493
12494 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12495 nregs);
12496 }
12497
12498 /* Save FP registers if needed. */
12499 if (DEFAULT_ABI == ABI_V4
12500 && TARGET_HARD_FLOAT
12501 && ! no_rtl
12502 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12503 && cfun->va_list_fpr_size)
12504 {
12505 int fregno = next_cum.fregno, nregs;
12506 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12507 rtx lab = gen_label_rtx ();
12508 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12509 * UNITS_PER_FP_WORD);
12510
12511 emit_jump_insn
12512 (gen_rtx_SET (pc_rtx,
12513 gen_rtx_IF_THEN_ELSE (VOIDmode,
12514 gen_rtx_NE (VOIDmode, cr1,
12515 const0_rtx),
12516 gen_rtx_LABEL_REF (VOIDmode, lab),
12517 pc_rtx)));
12518
12519 for (nregs = 0;
12520 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12521 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12522 {
12523 machine_mode fmode = TARGET_HARD_FLOAT ? DFmode : SFmode;
12524 mem = gen_rtx_MEM (fmode,
12525 plus_constant (Pmode, save_area, off));
12526 MEM_NOTRAP_P (mem) = 1;
12527 set_mem_alias_set (mem, set);
12528 set_mem_align (mem, GET_MODE_ALIGNMENT (fmode));
12529 emit_move_insn (mem, gen_rtx_REG (fmode, fregno));
12531 }
12532
12533 emit_label (lab);
12534 }
12535 }
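/* A sketch of the V.4 register save area laid out above, assuming hard
   float and that every argument register may need saving:

     reg_save_area + 0    r3..r10   8 GPRs * 4 bytes = 32 bytes
     reg_save_area + 32   f1..f8    8 FPRs * 8 bytes = 64 bytes

   rs6000_gimplify_va_arg below depends on this layout: its sav_ofs for
   FP arguments is 8 * 4 = 32.  */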
12536
12537 /* Create the va_list data type. */
12538
12539 static tree
12540 rs6000_build_builtin_va_list (void)
12541 {
12542 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12543
12544 /* For AIX, prefer 'char *' because that's what the system
12545 header files like. */
12546 if (DEFAULT_ABI != ABI_V4)
12547 return build_pointer_type (char_type_node);
12548
12549 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12550 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12551 get_identifier ("__va_list_tag"), record);
12552
12553 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12554 unsigned_char_type_node);
12555 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12556 unsigned_char_type_node);
12557 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12558 every user file. */
12559 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12560 get_identifier ("reserved"), short_unsigned_type_node);
12561 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12562 get_identifier ("overflow_arg_area"),
12563 ptr_type_node);
12564 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12565 get_identifier ("reg_save_area"),
12566 ptr_type_node);
12567
12568 va_list_gpr_counter_field = f_gpr;
12569 va_list_fpr_counter_field = f_fpr;
12570
12571 DECL_FIELD_CONTEXT (f_gpr) = record;
12572 DECL_FIELD_CONTEXT (f_fpr) = record;
12573 DECL_FIELD_CONTEXT (f_res) = record;
12574 DECL_FIELD_CONTEXT (f_ovf) = record;
12575 DECL_FIELD_CONTEXT (f_sav) = record;
12576
12577 TYPE_STUB_DECL (record) = type_decl;
12578 TYPE_NAME (record) = type_decl;
12579 TYPE_FIELDS (record) = f_gpr;
12580 DECL_CHAIN (f_gpr) = f_fpr;
12581 DECL_CHAIN (f_fpr) = f_res;
12582 DECL_CHAIN (f_res) = f_ovf;
12583 DECL_CHAIN (f_ovf) = f_sav;
12584
12585 layout_type (record);
12586
12587 /* The correct type is an array type of one element. */
12588 return build_array_type (record, build_index_type (size_zero_node));
12589 }
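/* In C terms the record built above corresponds to (a sketch):

     typedef struct __va_list_tag {
       unsigned char gpr;          counter of GPRs consumed so far
       unsigned char fpr;          counter of FPRs consumed so far
       unsigned short reserved;    the named padding described above
       void *overflow_arg_area;    next stack-passed argument
       void *reg_save_area;        base of the register save block
     } __builtin_va_list[1];
*/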
12590
12591 /* Implement va_start. */
12592
12593 static void
12594 rs6000_va_start (tree valist, rtx nextarg)
12595 {
12596 HOST_WIDE_INT words, n_gpr, n_fpr;
12597 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12598 tree gpr, fpr, ovf, sav, t;
12599
12600 /* Only SVR4 needs something special. */
12601 if (DEFAULT_ABI != ABI_V4)
12602 {
12603 std_expand_builtin_va_start (valist, nextarg);
12604 return;
12605 }
12606
12607 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12608 f_fpr = DECL_CHAIN (f_gpr);
12609 f_res = DECL_CHAIN (f_fpr);
12610 f_ovf = DECL_CHAIN (f_res);
12611 f_sav = DECL_CHAIN (f_ovf);
12612
12613 valist = build_simple_mem_ref (valist);
12614 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12615 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12616 f_fpr, NULL_TREE);
12617 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12618 f_ovf, NULL_TREE);
12619 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12620 f_sav, NULL_TREE);
12621
12622 /* Count number of gp and fp argument registers used. */
12623 words = crtl->args.info.words;
12624 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12625 GP_ARG_NUM_REG);
12626 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12627 FP_ARG_NUM_REG);
12628
12629 if (TARGET_DEBUG_ARG)
12630 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12631 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12632 words, n_gpr, n_fpr);
12633
12634 if (cfun->va_list_gpr_size)
12635 {
12636 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12637 build_int_cst (NULL_TREE, n_gpr));
12638 TREE_SIDE_EFFECTS (t) = 1;
12639 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12640 }
12641
12642 if (cfun->va_list_fpr_size)
12643 {
12644 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12645 build_int_cst (NULL_TREE, n_fpr));
12646 TREE_SIDE_EFFECTS (t) = 1;
12647 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12648
12649 #ifdef HAVE_AS_GNU_ATTRIBUTE
12650 if (call_ABI_of_interest (cfun->decl))
12651 rs6000_passes_float = true;
12652 #endif
12653 }
12654
12655 /* Find the overflow area. */
12656 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12657 if (words != 0)
12658 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12659 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12660 TREE_SIDE_EFFECTS (t) = 1;
12661 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12662
12663 /* If there were no va_arg invocations, don't set up the register
12664 save area. */
12665 if (!cfun->va_list_gpr_size
12666 && !cfun->va_list_fpr_size
12667 && n_gpr < GP_ARG_NUM_REG
12668 && n_fpr < FP_ARG_V4_MAX_REG)
12669 return;
12670
12671 /* Find the register save area. */
12672 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12673 if (cfun->machine->varargs_save_offset)
12674 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12675 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12676 TREE_SIDE_EFFECTS (t) = 1;
12677 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12678 }
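/* For example (illustrative): after va_start in

     int f (int a, ...) { va_list ap; va_start (ap, a); ... }

   compiled for V.4, ap->gpr is 1 (r3 holds A), ap->fpr is 0,
   ap->overflow_arg_area points at the incoming stack parameter area,
   and ap->reg_save_area points at the block spilled by
   setup_incoming_varargs above.  */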
12679
12680 /* Implement va_arg. */
12681
12682 static tree
12683 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12684 gimple_seq *post_p)
12685 {
12686 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12687 tree gpr, fpr, ovf, sav, reg, t, u;
12688 int size, rsize, n_reg, sav_ofs, sav_scale;
12689 tree lab_false, lab_over, addr;
12690 int align;
12691 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12692 int regalign = 0;
12693 gimple *stmt;
12694
12695 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12696 {
12697 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12698 return build_va_arg_indirect_ref (t);
12699 }
12700
12701 /* We need to deal with the fact that the Darwin ppc64 ABI is defined by an
12702 earlier version of gcc, with the property that it always applied alignment
12703 adjustments to the va-args (even for zero-sized types).  The cheapest way
12704 to deal with this is to replicate the effect of the part of
12705 std_gimplify_va_arg_expr that carries out the alignment adjustment, for
12706 the relevant case.
12707 We don't need to check for pass-by-reference because of the test above.
12708 We can return a simplified answer, since we know there's no offset to add.  */
12709
12710 if (((TARGET_MACHO
12711 && rs6000_darwin64_abi)
12712 || DEFAULT_ABI == ABI_ELFv2
12713 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12714 && integer_zerop (TYPE_SIZE (type)))
12715 {
12716 unsigned HOST_WIDE_INT align, boundary;
12717 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12718 align = PARM_BOUNDARY / BITS_PER_UNIT;
12719 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12720 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12721 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12722 boundary /= BITS_PER_UNIT;
12723 if (boundary > align)
12724 {
12725 tree t;
12726 /* This updates arg ptr by the amount that would be necessary
12727 to align the zero-sized (but not zero-alignment) item. */
12728 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12729 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12730 gimplify_and_add (t, pre_p);
12731
12732 t = fold_convert (sizetype, valist_tmp);
12733 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12734 fold_convert (TREE_TYPE (valist),
12735 fold_build2 (BIT_AND_EXPR, sizetype, t,
12736 size_int (-boundary))));
12737 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12738 gimplify_and_add (t, pre_p);
12739 }
12740 /* Since it is zero-sized there's no increment for the item itself. */
12741 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12742 return build_va_arg_indirect_ref (valist_tmp);
12743 }
12744
12745 if (DEFAULT_ABI != ABI_V4)
12746 {
12747 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12748 {
12749 tree elem_type = TREE_TYPE (type);
12750 machine_mode elem_mode = TYPE_MODE (elem_type);
12751 int elem_size = GET_MODE_SIZE (elem_mode);
12752
12753 if (elem_size < UNITS_PER_WORD)
12754 {
12755 tree real_part, imag_part;
12756 gimple_seq post = NULL;
12757
12758 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12759 &post);
12760 /* Copy the value into a temporary, lest the formal temporary
12761 be reused out from under us. */
12762 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12763 gimple_seq_add_seq (pre_p, post);
12764
12765 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12766 post_p);
12767
12768 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12769 }
12770 }
12771
12772 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12773 }
12774
12775 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12776 f_fpr = DECL_CHAIN (f_gpr);
12777 f_res = DECL_CHAIN (f_fpr);
12778 f_ovf = DECL_CHAIN (f_res);
12779 f_sav = DECL_CHAIN (f_ovf);
12780
12781 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12782 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12783 f_fpr, NULL_TREE);
12784 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12785 f_ovf, NULL_TREE);
12786 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12787 f_sav, NULL_TREE);
12788
12789 size = int_size_in_bytes (type);
12790 rsize = (size + 3) / 4;
12791 int pad = 4 * rsize - size;
12792 align = 1;
12793
12794 machine_mode mode = TYPE_MODE (type);
12795 if (abi_v4_pass_in_fpr (mode, false))
12796 {
12797 /* FP args go in FP registers, if present. */
12798 reg = fpr;
12799 n_reg = (size + 7) / 8;
12800 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12801 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12802 if (mode != SFmode && mode != SDmode)
12803 align = 8;
12804 }
12805 else
12806 {
12807 /* Otherwise into GP registers. */
12808 reg = gpr;
12809 n_reg = rsize;
12810 sav_ofs = 0;
12811 sav_scale = 4;
12812 if (n_reg == 2)
12813 align = 8;
12814 }
12815
12816 /* Pull the value out of the saved registers.... */
12817
12818 lab_over = NULL;
12819 addr = create_tmp_var (ptr_type_node, "addr");
12820
12821 /* AltiVec vectors never go in registers when -mabi=altivec. */
12822 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12823 align = 16;
12824 else
12825 {
12826 lab_false = create_artificial_label (input_location);
12827 lab_over = create_artificial_label (input_location);
12828
12829 /* Long long is aligned in the registers, as is any other 2-GPR
12830 item such as complex int, due to a historical mistake.  */
12831 u = reg;
12832 if (n_reg == 2 && reg == gpr)
12833 {
12834 regalign = 1;
12835 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12836 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12837 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12838 unshare_expr (reg), u);
12839 }
12840 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12841 reg number is 0 for f1, so we want to make it odd. */
12842 else if (reg == fpr && mode == TDmode)
12843 {
12844 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12845 build_int_cst (TREE_TYPE (reg), 1));
12846 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12847 }
12848
12849 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12850 t = build2 (GE_EXPR, boolean_type_node, u, t);
12851 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12852 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12853 gimplify_and_add (t, pre_p);
12854
12855 t = sav;
12856 if (sav_ofs)
12857 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12858
12859 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12860 build_int_cst (TREE_TYPE (reg), n_reg));
12861 u = fold_convert (sizetype, u);
12862 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12863 t = fold_build_pointer_plus (t, u);
12864
12865 /* _Decimal32 varargs are located in the second word of the 64-bit
12866 FP register for 32-bit binaries. */
12867 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12868 t = fold_build_pointer_plus_hwi (t, size);
12869
12870 /* Args are passed right-aligned. */
12871 if (BYTES_BIG_ENDIAN)
12872 t = fold_build_pointer_plus_hwi (t, pad);
12873
12874 gimplify_assign (addr, t, pre_p);
12875
12876 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12877
12878 stmt = gimple_build_label (lab_false);
12879 gimple_seq_add_stmt (pre_p, stmt);
12880
12881 if ((n_reg == 2 && !regalign) || n_reg > 2)
12882 {
12883 /* Ensure that we don't find any more args in regs.
12884 Alignment has taken care of the special cases.  */
12885 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12886 }
12887 }
12888
12889 /* ... otherwise out of the overflow area. */
12890
12891 /* Care for on-stack alignment if needed. */
12892 t = ovf;
12893 if (align != 1)
12894 {
12895 t = fold_build_pointer_plus_hwi (t, align - 1);
12896 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12897 build_int_cst (TREE_TYPE (t), -align));
12898 }
12899
12900 /* Args are passed right-aligned. */
12901 if (BYTES_BIG_ENDIAN)
12902 t = fold_build_pointer_plus_hwi (t, pad);
12903
12904 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12905
12906 gimplify_assign (unshare_expr (addr), t, pre_p);
12907
12908 t = fold_build_pointer_plus_hwi (t, size);
12909 gimplify_assign (unshare_expr (ovf), t, pre_p);
12910
12911 if (lab_over)
12912 {
12913 stmt = gimple_build_label (lab_over);
12914 gimple_seq_add_stmt (pre_p, stmt);
12915 }
12916
12917 if (STRICT_ALIGNMENT
12918 && (TYPE_ALIGN (type)
12919 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12920 {
12921 /* The value (of type complex double, for example) may not be
12922 aligned in memory in the saved registers, so copy via a
12923 temporary. (This is the same code as used for SPARC.) */
12924 tree tmp = create_tmp_var (type, "va_arg_tmp");
12925 tree dest_addr = build_fold_addr_expr (tmp);
12926
12927 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12928 3, dest_addr, addr, size_int (rsize * 4));
12929 TREE_ADDRESSABLE (tmp) = 1;
12930
12931 gimplify_and_add (copy, pre_p);
12932 addr = dest_addr;
12933 }
12934
12935 addr = fold_convert (ptrtype, addr);
12936 return build_va_arg_indirect_ref (addr);
12937 }
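/* A sketch, in C-like pseudo-gimple, of what the above produces for
   va_arg (ap, double) under V.4 with hard float (n_reg is 1, sav_ofs
   is 32, sav_scale is 8, align is 8):

     if (ap->fpr >= 8) goto lab_false;
     addr = ap->reg_save_area + 32 + ap->fpr * 8;
     ap->fpr += 1;
     goto lab_over;
   lab_false:
     addr = (ap->overflow_arg_area + 7) & -8;
     ap->overflow_arg_area = addr + 8;
   lab_over:
     result = *(double *) addr;
*/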
12938
12939 /* Builtins. */
12940
12941 static void
12942 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12943 {
12944 tree t;
12945 unsigned classify = rs6000_builtin_info[(int)code].attr;
12946 const char *attr_string = "";
12947
12948 gcc_assert (name != NULL);
12949 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
12950
12951 if (rs6000_builtin_decls[(int)code])
12952 fatal_error (input_location,
12953 "internal error: builtin function %qs already processed",
12954 name);
12955
12956 rs6000_builtin_decls[(int)code] = t =
12957 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12958
12959 /* Set any special attributes. */
12960 if ((classify & RS6000_BTC_CONST) != 0)
12961 {
12962 /* const function, function only depends on the inputs. */
12963 TREE_READONLY (t) = 1;
12964 TREE_NOTHROW (t) = 1;
12965 attr_string = ", const";
12966 }
12967 else if ((classify & RS6000_BTC_PURE) != 0)
12968 {
12969 /* pure function, function can read global memory, but does not set any
12970 external state. */
12971 DECL_PURE_P (t) = 1;
12972 TREE_NOTHROW (t) = 1;
12973 attr_string = ", pure";
12974 }
12975 else if ((classify & RS6000_BTC_FP) != 0)
12976 {
12977 /* Function is a math function. If rounding mode is on, then treat the
12978 function as not reading global memory, but it can have arbitrary side
12979 effects. If it is off, then assume the function is a const function.
12980 This mimics the ATTR_MATHFN_FPROUNDING attribute in
12981 builtin-attribute.def that is used for the math functions. */
12982 TREE_NOTHROW (t) = 1;
12983 if (flag_rounding_math)
12984 {
12985 DECL_PURE_P (t) = 1;
12986 DECL_IS_NOVOPS (t) = 1;
12987 attr_string = ", fp, pure";
12988 }
12989 else
12990 {
12991 TREE_READONLY (t) = 1;
12992 attr_string = ", fp, const";
12993 }
12994 }
12995 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12996 gcc_unreachable ();
12997
12998 if (TARGET_DEBUG_BUILTIN)
12999 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13000 (int)code, name, attr_string);
13001 }
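/* Typical use (a sketch; the real registrations are made by the
   *_init_builtins routines later in this file, and the type-node name
   here is illustrative):

     def_builtin ("__builtin_altivec_vaddubm", v16qi_ftype_v16qi_v16qi,
                  ALTIVEC_BUILTIN_VADDUBM);
*/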
13002
13003 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13004
13005 #undef RS6000_BUILTIN_0
13006 #undef RS6000_BUILTIN_1
13007 #undef RS6000_BUILTIN_2
13008 #undef RS6000_BUILTIN_3
13009 #undef RS6000_BUILTIN_A
13010 #undef RS6000_BUILTIN_D
13011 #undef RS6000_BUILTIN_H
13012 #undef RS6000_BUILTIN_P
13013 #undef RS6000_BUILTIN_X
13014
13015 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13016 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13017 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13018 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13019 { MASK, ICODE, NAME, ENUM },
13020
13021 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13022 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13023 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13024 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13025 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13026
13027 static const struct builtin_description bdesc_3arg[] =
13028 {
13029 #include "rs6000-builtin.def"
13030 };
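/* For instance (an illustrative entry), a rs6000-builtin.def line

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
                       RS6000_BTM_ALTIVEC, RS6000_BTC_FP, CODE_FOR_fmav4sf4)

   expands in the table above to

     { RS6000_BTM_ALTIVEC, CODE_FOR_fmav4sf4,
       "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },

   while every other RS6000_BUILTIN_* line expands to nothing; each
   bdesc_* table thus selects one builtin class from the same file.  */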
13031
13032 /* DST operations: void foo (void *, const int, const char). */
13033
13034 #undef RS6000_BUILTIN_0
13035 #undef RS6000_BUILTIN_1
13036 #undef RS6000_BUILTIN_2
13037 #undef RS6000_BUILTIN_3
13038 #undef RS6000_BUILTIN_A
13039 #undef RS6000_BUILTIN_D
13040 #undef RS6000_BUILTIN_H
13041 #undef RS6000_BUILTIN_P
13042 #undef RS6000_BUILTIN_X
13043
13044 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13045 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13046 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13047 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13048 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13049 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13050 { MASK, ICODE, NAME, ENUM },
13051
13052 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13053 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13054 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13055
13056 static const struct builtin_description bdesc_dst[] =
13057 {
13058 #include "rs6000-builtin.def"
13059 };
13060
13061 /* Simple binary operations: VECc = foo (VECa, VECb). */
13062
13063 #undef RS6000_BUILTIN_0
13064 #undef RS6000_BUILTIN_1
13065 #undef RS6000_BUILTIN_2
13066 #undef RS6000_BUILTIN_3
13067 #undef RS6000_BUILTIN_A
13068 #undef RS6000_BUILTIN_D
13069 #undef RS6000_BUILTIN_H
13070 #undef RS6000_BUILTIN_P
13071 #undef RS6000_BUILTIN_X
13072
13073 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13074 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13075 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13076 { MASK, ICODE, NAME, ENUM },
13077
13078 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13079 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13080 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13081 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13082 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13083 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13084
13085 static const struct builtin_description bdesc_2arg[] =
13086 {
13087 #include "rs6000-builtin.def"
13088 };
13089
13090 #undef RS6000_BUILTIN_0
13091 #undef RS6000_BUILTIN_1
13092 #undef RS6000_BUILTIN_2
13093 #undef RS6000_BUILTIN_3
13094 #undef RS6000_BUILTIN_A
13095 #undef RS6000_BUILTIN_D
13096 #undef RS6000_BUILTIN_H
13097 #undef RS6000_BUILTIN_P
13098 #undef RS6000_BUILTIN_X
13099
13100 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13101 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13102 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13103 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13104 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13105 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13106 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13107 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13108 { MASK, ICODE, NAME, ENUM },
13109
13110 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13111
13112 /* AltiVec predicates. */
13113
13114 static const struct builtin_description bdesc_altivec_preds[] =
13115 {
13116 #include "rs6000-builtin.def"
13117 };
13118
13119 /* ABS* operations. */
13120
13121 #undef RS6000_BUILTIN_0
13122 #undef RS6000_BUILTIN_1
13123 #undef RS6000_BUILTIN_2
13124 #undef RS6000_BUILTIN_3
13125 #undef RS6000_BUILTIN_A
13126 #undef RS6000_BUILTIN_D
13127 #undef RS6000_BUILTIN_H
13128 #undef RS6000_BUILTIN_P
13129 #undef RS6000_BUILTIN_X
13130
13131 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13132 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13133 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13134 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13135 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13136 { MASK, ICODE, NAME, ENUM },
13137
13138 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13139 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13140 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13141 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13142
13143 static const struct builtin_description bdesc_abs[] =
13144 {
13145 #include "rs6000-builtin.def"
13146 };
13147
13148 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13149 foo (VECa). */
13150
13151 #undef RS6000_BUILTIN_0
13152 #undef RS6000_BUILTIN_1
13153 #undef RS6000_BUILTIN_2
13154 #undef RS6000_BUILTIN_3
13155 #undef RS6000_BUILTIN_A
13156 #undef RS6000_BUILTIN_D
13157 #undef RS6000_BUILTIN_H
13158 #undef RS6000_BUILTIN_P
13159 #undef RS6000_BUILTIN_X
13160
13161 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13162 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13163 { MASK, ICODE, NAME, ENUM },
13164
13165 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13166 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13167 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13168 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13169 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13170 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13171 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13172
13173 static const struct builtin_description bdesc_1arg[] =
13174 {
13175 #include "rs6000-builtin.def"
13176 };
13177
13178 /* Simple no-argument operations: result = __builtin_darn_32 ().  */
13179
13180 #undef RS6000_BUILTIN_0
13181 #undef RS6000_BUILTIN_1
13182 #undef RS6000_BUILTIN_2
13183 #undef RS6000_BUILTIN_3
13184 #undef RS6000_BUILTIN_A
13185 #undef RS6000_BUILTIN_D
13186 #undef RS6000_BUILTIN_H
13187 #undef RS6000_BUILTIN_P
13188 #undef RS6000_BUILTIN_X
13189
13190 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13191 { MASK, ICODE, NAME, ENUM },
13192
13193 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13194 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13195 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13196 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13197 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13198 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13199 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13200 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13201
13202 static const struct builtin_description bdesc_0arg[] =
13203 {
13204 #include "rs6000-builtin.def"
13205 };
13206
13207 /* HTM builtins. */
13208 #undef RS6000_BUILTIN_0
13209 #undef RS6000_BUILTIN_1
13210 #undef RS6000_BUILTIN_2
13211 #undef RS6000_BUILTIN_3
13212 #undef RS6000_BUILTIN_A
13213 #undef RS6000_BUILTIN_D
13214 #undef RS6000_BUILTIN_H
13215 #undef RS6000_BUILTIN_P
13216 #undef RS6000_BUILTIN_X
13217
13218 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13219 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13220 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13221 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13222 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13223 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13224 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13225 { MASK, ICODE, NAME, ENUM },
13226
13227 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13228 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13229
13230 static const struct builtin_description bdesc_htm[] =
13231 {
13232 #include "rs6000-builtin.def"
13233 };
13234
13235 #undef RS6000_BUILTIN_0
13236 #undef RS6000_BUILTIN_1
13237 #undef RS6000_BUILTIN_2
13238 #undef RS6000_BUILTIN_3
13239 #undef RS6000_BUILTIN_A
13240 #undef RS6000_BUILTIN_D
13241 #undef RS6000_BUILTIN_H
13242 #undef RS6000_BUILTIN_P
13243
13244 /* Return true if a builtin function is overloaded. */
13245 bool
13246 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13247 {
13248 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13249 }
13250
13251 const char *
13252 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13253 {
13254 return rs6000_builtin_info[(int)fncode].name;
13255 }
13256
13257 /* Expand an expression EXP that calls a builtin without arguments. */
13258 static rtx
13259 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13260 {
13261 rtx pat;
13262 machine_mode tmode = insn_data[icode].operand[0].mode;
13263
13264 if (icode == CODE_FOR_nothing)
13265 /* Builtin not supported on this processor. */
13266 return 0;
13267
13268 if (icode == CODE_FOR_rs6000_mffsl
13269 && rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
13270 {
13271 error ("__builtin_mffsl not supported with -msoft-float");
13272 return const0_rtx;
13273 }
13274
13275 if (target == 0
13276 || GET_MODE (target) != tmode
13277 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13278 target = gen_reg_rtx (tmode);
13279
13280 pat = GEN_FCN (icode) (target);
13281 if (! pat)
13282 return 0;
13283 emit_insn (pat);
13284
13285 return target;
13286 }
13287
13288
13289 static rtx
13290 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13291 {
13292 rtx pat;
13293 tree arg0 = CALL_EXPR_ARG (exp, 0);
13294 tree arg1 = CALL_EXPR_ARG (exp, 1);
13295 rtx op0 = expand_normal (arg0);
13296 rtx op1 = expand_normal (arg1);
13297 machine_mode mode0 = insn_data[icode].operand[0].mode;
13298 machine_mode mode1 = insn_data[icode].operand[1].mode;
13299
13300 if (icode == CODE_FOR_nothing)
13301 /* Builtin not supported on this processor. */
13302 return 0;
13303
13304 /* If we got invalid arguments bail out before generating bad rtl. */
13305 if (arg0 == error_mark_node || arg1 == error_mark_node)
13306 return const0_rtx;
13307
13308 if (GET_CODE (op0) != CONST_INT
13309 || INTVAL (op0) > 255
13310 || INTVAL (op0) < 0)
13311 {
13312 error ("argument 1 must be an 8-bit field value");
13313 return const0_rtx;
13314 }
13315
13316 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13317 op0 = copy_to_mode_reg (mode0, op0);
13318
13319 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13320 op1 = copy_to_mode_reg (mode1, op1);
13321
13322 pat = GEN_FCN (icode) (op0, op1);
13323 if (!pat)
13324 return const0_rtx;
13325 emit_insn (pat);
13326
13327 return NULL_RTX;
13328 }
13329
13330 static rtx
13331 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13332 {
13333 rtx pat;
13334 tree arg0 = CALL_EXPR_ARG (exp, 0);
13335 rtx op0 = expand_normal (arg0);
13336
13337 if (icode == CODE_FOR_nothing)
13338 /* Builtin not supported on this processor. */
13339 return 0;
13340
13341 if (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
13342 {
13343 error ("__builtin_mtfsb0 and __builtin_mtfsb1 not supported with -msoft-float");
13344 return const0_rtx;
13345 }
13346
13347 /* If we got invalid arguments bail out before generating bad rtl. */
13348 if (arg0 == error_mark_node)
13349 return const0_rtx;
13350
13351 /* Only allow bit numbers 0 to 31. */
13352 if (!u5bit_cint_operand (op0, VOIDmode))
13353 {
13354 error ("argument must be a constant between 0 and 31");
13355 return const0_rtx;
13356 }
13357
13358 pat = GEN_FCN (icode) (op0);
13359 if (!pat)
13360 return const0_rtx;
13361 emit_insn (pat);
13362
13363 return NULL_RTX;
13364 }
13365
13366 static rtx
13367 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13368 {
13369 rtx pat;
13370 tree arg0 = CALL_EXPR_ARG (exp, 0);
13371 rtx op0 = expand_normal (arg0);
13372 machine_mode mode0 = insn_data[icode].operand[0].mode;
13373
13374 if (icode == CODE_FOR_nothing)
13375 /* Builtin not supported on this processor. */
13376 return 0;
13377
13378 if (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
13379 {
13380 error ("__builtin_set_fpscr_rn not supported with -msoft-float");
13381 return const0_rtx;
13382 }
13383
13384 /* If we got invalid arguments bail out before generating bad rtl. */
13385 if (arg0 == error_mark_node)
13386 return const0_rtx;
13387
13388 /* If the argument is a constant, check the range.  The argument can only
13389 be a 2-bit value.  Unfortunately, we can't check the range of the value
13390 at compile time if the argument is a variable.  The least significant
13391 two bits of the argument, regardless of type, are used to set the
13392 rounding mode.  All other bits are ignored.  */
13393 if (GET_CODE (op0) == CONST_INT && !const_0_to_3_operand(op0, VOIDmode))
13394 {
13395 error ("argument must be a value between 0 and 3");
13396 return const0_rtx;
13397 }
13398
13399 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13400 op0 = copy_to_mode_reg (mode0, op0);
13401
13402 pat = GEN_FCN (icode) (op0);
13403 if (!pat)
13404 return const0_rtx;
13405 emit_insn (pat);
13406
13407 return NULL_RTX;
13408 }
13409 static rtx
13410 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13411 {
13412 rtx pat;
13413 tree arg0 = CALL_EXPR_ARG (exp, 0);
13414 rtx op0 = expand_normal (arg0);
13415 machine_mode mode0 = insn_data[icode].operand[0].mode;
13416
13417 if (TARGET_32BIT)
13418 /* Builtin not supported in 32-bit mode. */
13419 fatal_error (input_location,
13420 "__builtin_set_fpscr_drn is not supported in 32-bit mode.");
13421
13422 if (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
13423 {
13424 error ("__builtin_set_fpscr_drn not supported with -msoft-float");
13425 return const0_rtx;
13426 }
13427
13428 if (icode == CODE_FOR_nothing)
13429 /* Builtin not supported on this processor. */
13430 return 0;
13431
13432 /* If we got invalid arguments bail out before generating bad rtl. */
13433 if (arg0 == error_mark_node)
13434 return const0_rtx;
13435
13436 /* If the argument is a constant, check the range.  The argument can only
13437 be a 3-bit value.  Unfortunately, we can't check the range of the value
13438 at compile time if the argument is a variable.  The least significant
13439 three bits of the argument, regardless of type, are used to set the
13440 decimal rounding mode.  All other bits are ignored.  */
13441 if (GET_CODE (op0) == CONST_INT && !const_0_to_7_operand(op0, VOIDmode))
13442 {
13443 error ("argument must be a value between 0 and 7");
13444 return const0_rtx;
13445 }
13446
13447 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13448 op0 = copy_to_mode_reg (mode0, op0);
13449
13450 pat = GEN_FCN (icode) (op0);
13451 if (! pat)
13452 return const0_rtx;
13453 emit_insn (pat);
13454
13455 return NULL_RTX;
13456 }
13457
13458 static rtx
13459 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13460 {
13461 rtx pat;
13462 tree arg0 = CALL_EXPR_ARG (exp, 0);
13463 rtx op0 = expand_normal (arg0);
13464 machine_mode tmode = insn_data[icode].operand[0].mode;
13465 machine_mode mode0 = insn_data[icode].operand[1].mode;
13466
13467 if (icode == CODE_FOR_nothing)
13468 /* Builtin not supported on this processor. */
13469 return 0;
13470
13471 /* If we got invalid arguments bail out before generating bad rtl. */
13472 if (arg0 == error_mark_node)
13473 return const0_rtx;
13474
13475 if (icode == CODE_FOR_altivec_vspltisb
13476 || icode == CODE_FOR_altivec_vspltish
13477 || icode == CODE_FOR_altivec_vspltisw)
13478 {
13479 /* Only allow 5-bit *signed* literals. */
13480 if (GET_CODE (op0) != CONST_INT
13481 || INTVAL (op0) > 15
13482 || INTVAL (op0) < -16)
13483 {
13484 error ("argument 1 must be a 5-bit signed literal");
13485 return CONST0_RTX (tmode);
13486 }
13487 }
13488
13489 if (target == 0
13490 || GET_MODE (target) != tmode
13491 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13492 target = gen_reg_rtx (tmode);
13493
13494 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13495 op0 = copy_to_mode_reg (mode0, op0);
13496
13497 pat = GEN_FCN (icode) (target, op0);
13498 if (! pat)
13499 return 0;
13500 emit_insn (pat);
13501
13502 return target;
13503 }
13504
13505 static rtx
13506 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13507 {
13508 rtx pat, scratch1, scratch2;
13509 tree arg0 = CALL_EXPR_ARG (exp, 0);
13510 rtx op0 = expand_normal (arg0);
13511 machine_mode tmode = insn_data[icode].operand[0].mode;
13512 machine_mode mode0 = insn_data[icode].operand[1].mode;
13513
13514 /* If we have invalid arguments, bail out before generating bad rtl. */
13515 if (arg0 == error_mark_node)
13516 return const0_rtx;
13517
13518 if (target == 0
13519 || GET_MODE (target) != tmode
13520 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13521 target = gen_reg_rtx (tmode);
13522
13523 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13524 op0 = copy_to_mode_reg (mode0, op0);
13525
13526 scratch1 = gen_reg_rtx (mode0);
13527 scratch2 = gen_reg_rtx (mode0);
13528
13529 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13530 if (! pat)
13531 return 0;
13532 emit_insn (pat);
13533
13534 return target;
13535 }
13536
13537 static rtx
13538 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13539 {
13540 rtx pat;
13541 tree arg0 = CALL_EXPR_ARG (exp, 0);
13542 tree arg1 = CALL_EXPR_ARG (exp, 1);
13543 rtx op0 = expand_normal (arg0);
13544 rtx op1 = expand_normal (arg1);
13545 machine_mode tmode = insn_data[icode].operand[0].mode;
13546 machine_mode mode0 = insn_data[icode].operand[1].mode;
13547 machine_mode mode1 = insn_data[icode].operand[2].mode;
13548
13549 if (icode == CODE_FOR_nothing)
13550 /* Builtin not supported on this processor. */
13551 return 0;
13552
13553 /* If we got invalid arguments bail out before generating bad rtl. */
13554 if (arg0 == error_mark_node || arg1 == error_mark_node)
13555 return const0_rtx;
13556
13557 if (icode == CODE_FOR_unpackv1ti
13558 || icode == CODE_FOR_unpackkf
13559 || icode == CODE_FOR_unpacktf
13560 || icode == CODE_FOR_unpackif
13561 || icode == CODE_FOR_unpacktd)
13562 {
13563 /* Only allow 1-bit unsigned literals. */
13564 STRIP_NOPS (arg1);
13565 if (TREE_CODE (arg1) != INTEGER_CST
13566 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13567 {
13568 error ("argument 2 must be a 1-bit unsigned literal");
13569 return CONST0_RTX (tmode);
13570 }
13571 }
13572 else if (icode == CODE_FOR_altivec_vspltw)
13573 {
13574 /* Only allow 2-bit unsigned literals. */
13575 STRIP_NOPS (arg1);
13576 if (TREE_CODE (arg1) != INTEGER_CST
13577 || TREE_INT_CST_LOW (arg1) & ~3)
13578 {
13579 error ("argument 2 must be a 2-bit unsigned literal");
13580 return CONST0_RTX (tmode);
13581 }
13582 }
13583 else if (icode == CODE_FOR_altivec_vsplth)
13584 {
13585 /* Only allow 3-bit unsigned literals. */
13586 STRIP_NOPS (arg1);
13587 if (TREE_CODE (arg1) != INTEGER_CST
13588 || TREE_INT_CST_LOW (arg1) & ~7)
13589 {
13590 error ("argument 2 must be a 3-bit unsigned literal");
13591 return CONST0_RTX (tmode);
13592 }
13593 }
13594 else if (icode == CODE_FOR_altivec_vspltb)
13595 {
13596 /* Only allow 4-bit unsigned literals. */
13597 STRIP_NOPS (arg1);
13598 if (TREE_CODE (arg1) != INTEGER_CST
13599 || TREE_INT_CST_LOW (arg1) & ~15)
13600 {
13601 error ("argument 2 must be a 4-bit unsigned literal");
13602 return CONST0_RTX (tmode);
13603 }
13604 }
13605 else if (icode == CODE_FOR_altivec_vcfux
13606 || icode == CODE_FOR_altivec_vcfsx
13607 || icode == CODE_FOR_altivec_vctsxs
13608 || icode == CODE_FOR_altivec_vctuxs)
13609 {
13610 /* Only allow 5-bit unsigned literals. */
13611 STRIP_NOPS (arg1);
13612 if (TREE_CODE (arg1) != INTEGER_CST
13613 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13614 {
13615 error ("argument 2 must be a 5-bit unsigned literal");
13616 return CONST0_RTX (tmode);
13617 }
13618 }
13619 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13620 || icode == CODE_FOR_dfptstsfi_lt_dd
13621 || icode == CODE_FOR_dfptstsfi_gt_dd
13622 || icode == CODE_FOR_dfptstsfi_unordered_dd
13623 || icode == CODE_FOR_dfptstsfi_eq_td
13624 || icode == CODE_FOR_dfptstsfi_lt_td
13625 || icode == CODE_FOR_dfptstsfi_gt_td
13626 || icode == CODE_FOR_dfptstsfi_unordered_td)
13627 {
13628 /* Only allow 6-bit unsigned literals. */
13629 STRIP_NOPS (arg0);
13630 if (TREE_CODE (arg0) != INTEGER_CST
13631 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13632 {
13633 error ("argument 1 must be a 6-bit unsigned literal");
13634 return CONST0_RTX (tmode);
13635 }
13636 }
13637 else if (icode == CODE_FOR_xststdcqp_kf
13638 || icode == CODE_FOR_xststdcqp_tf
13639 || icode == CODE_FOR_xststdcdp
13640 || icode == CODE_FOR_xststdcsp
13641 || icode == CODE_FOR_xvtstdcdp
13642 || icode == CODE_FOR_xvtstdcsp)
13643 {
13644 /* Only allow 7-bit unsigned literals. */
13645 STRIP_NOPS (arg1);
13646 if (TREE_CODE (arg1) != INTEGER_CST
13647 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13648 {
13649 error ("argument 2 must be a 7-bit unsigned literal");
13650 return CONST0_RTX (tmode);
13651 }
13652 }
13653
13654 if (target == 0
13655 || GET_MODE (target) != tmode
13656 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13657 target = gen_reg_rtx (tmode);
13658
13659 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13660 op0 = copy_to_mode_reg (mode0, op0);
13661 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13662 op1 = copy_to_mode_reg (mode1, op1);
13663
13664 pat = GEN_FCN (icode) (target, op0, op1);
13665 if (! pat)
13666 return 0;
13667 emit_insn (pat);
13668
13669 return target;
13670 }
13671
13672 static rtx
13673 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13674 {
13675 rtx pat, scratch;
13676 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13677 tree arg0 = CALL_EXPR_ARG (exp, 1);
13678 tree arg1 = CALL_EXPR_ARG (exp, 2);
13679 rtx op0 = expand_normal (arg0);
13680 rtx op1 = expand_normal (arg1);
13681 machine_mode tmode = SImode;
13682 machine_mode mode0 = insn_data[icode].operand[1].mode;
13683 machine_mode mode1 = insn_data[icode].operand[2].mode;
13684 int cr6_form_int;
13685
13686 if (TREE_CODE (cr6_form) != INTEGER_CST)
13687 {
13688 error ("argument 1 of %qs must be a constant",
13689 "__builtin_altivec_predicate");
13690 return const0_rtx;
13691 }
13692 else
13693 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13694
13695 gcc_assert (mode0 == mode1);
13696
13697 /* If we have invalid arguments, bail out before generating bad rtl. */
13698 if (arg0 == error_mark_node || arg1 == error_mark_node)
13699 return const0_rtx;
13700
13701 if (target == 0
13702 || GET_MODE (target) != tmode
13703 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13704 target = gen_reg_rtx (tmode);
13705
13706 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13707 op0 = copy_to_mode_reg (mode0, op0);
13708 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13709 op1 = copy_to_mode_reg (mode1, op1);
13710
13711 /* Note that for many of the relevant operations (e.g. cmpne or
13712 cmpeq) with float or double operands, it would make more sense for
13713 the mode of the allocated scratch register to be a vector of
13714 integers.  But the choice to copy the mode of operand 0 was made
13715 long ago and there are no plans to change it.  */
13716 scratch = gen_reg_rtx (mode0);
13717
13718 pat = GEN_FCN (icode) (scratch, op0, op1);
13719 if (! pat)
13720 return 0;
13721 emit_insn (pat);
13722
13723 /* The vec_any* and vec_all* predicates use the same opcodes for two
13724 different operations, but the bits in CR6 will be different
13725 depending on what information we want. So we have to play tricks
13726 with CR6 to get the right bits out.
13727
13728 If you think this is disgusting, look at the specs for the
13729 AltiVec predicates. */
13730
13731 switch (cr6_form_int)
13732 {
13733 case 0:
13734 emit_insn (gen_cr6_test_for_zero (target));
13735 break;
13736 case 1:
13737 emit_insn (gen_cr6_test_for_zero_reverse (target));
13738 break;
13739 case 2:
13740 emit_insn (gen_cr6_test_for_lt (target));
13741 break;
13742 case 3:
13743 emit_insn (gen_cr6_test_for_lt_reverse (target));
13744 break;
13745 default:
13746 error ("argument 1 of %qs is out of range",
13747 "__builtin_altivec_predicate");
13748 break;
13749 }
13750
13751 return target;
13752 }
13753
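/* Return a V16QImode constant permutation vector, in a register, that
reverses the bytes within each element of a vector of mode MODE; e.g.
for V4SImode the selector is {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}. */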
13754 rtx
13755 swap_endian_selector_for_mode (machine_mode mode)
13756 {
13757 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13758 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13759 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13760 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13761
13762 unsigned int *swaparray, i;
13763 rtx perm[16];
13764
13765 switch (mode)
13766 {
13767 case E_V1TImode:
13768 swaparray = swap1;
13769 break;
13770 case E_V2DFmode:
13771 case E_V2DImode:
13772 swaparray = swap2;
13773 break;
13774 case E_V4SFmode:
13775 case E_V4SImode:
13776 swaparray = swap4;
13777 break;
13778 case E_V8HImode:
13779 swaparray = swap8;
13780 break;
13781 default:
13782 gcc_unreachable ();
13783 }
13784
13785 for (i = 0; i < 16; ++i)
13786 perm[i] = GEN_INT (swaparray[i]);
13787
13788 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13789 gen_rtvec_v (16, perm)));
13790 }
13791
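/* Expand a vector load builtin. Argument 0 of EXP is an offset and
argument 1 a base pointer; BLK true makes the memory reference use
BLKmode rather than the mode of the load pattern. */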
13792 static rtx
13793 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13794 {
13795 rtx pat, addr;
13796 tree arg0 = CALL_EXPR_ARG (exp, 0);
13797 tree arg1 = CALL_EXPR_ARG (exp, 1);
13798 machine_mode tmode = insn_data[icode].operand[0].mode;
13799 machine_mode mode0 = Pmode;
13800 machine_mode mode1 = Pmode;
13801 rtx op0 = expand_normal (arg0);
13802 rtx op1 = expand_normal (arg1);
13803
13804 if (icode == CODE_FOR_nothing)
13805 /* Builtin not supported on this processor. */
13806 return 0;
13807
13808 /* If we got invalid arguments, bail out before generating bad rtl. */
13809 if (arg0 == error_mark_node || arg1 == error_mark_node)
13810 return const0_rtx;
13811
13812 if (target == 0
13813 || GET_MODE (target) != tmode
13814 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13815 target = gen_reg_rtx (tmode);
13816
13817 op1 = copy_to_mode_reg (mode1, op1);
13818
13819 /* For LVX, express the RTL accurately by ANDing the address with -16.
13820 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13821 so the raw address is fine. */
13822 if (icode == CODE_FOR_altivec_lvx_v1ti
13823 || icode == CODE_FOR_altivec_lvx_v2df
13824 || icode == CODE_FOR_altivec_lvx_v2di
13825 || icode == CODE_FOR_altivec_lvx_v4sf
13826 || icode == CODE_FOR_altivec_lvx_v4si
13827 || icode == CODE_FOR_altivec_lvx_v8hi
13828 || icode == CODE_FOR_altivec_lvx_v16qi)
13829 {
13830 rtx rawaddr;
13831 if (op0 == const0_rtx)
13832 rawaddr = op1;
13833 else
13834 {
13835 op0 = copy_to_mode_reg (mode0, op0);
13836 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13837 }
13838 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13839 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13840
13841 emit_insn (gen_rtx_SET (target, addr));
13842 }
13843 else
13844 {
13845 if (op0 == const0_rtx)
13846 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13847 else
13848 {
13849 op0 = copy_to_mode_reg (mode0, op0);
13850 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13851 gen_rtx_PLUS (Pmode, op1, op0));
13852 }
13853
13854 pat = GEN_FCN (icode) (target, addr);
13855 if (! pat)
13856 return 0;
13857 emit_insn (pat);
13858 }
13859
13860 return target;
13861 }
13862
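/* Expand a store-with-length builtin such as stxvl. The three call
arguments map directly onto the three insn operands, and there is no
result value: NULL_RTX is always returned. */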
13863 static rtx
13864 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13865 {
13866 rtx pat;
13867 tree arg0 = CALL_EXPR_ARG (exp, 0);
13868 tree arg1 = CALL_EXPR_ARG (exp, 1);
13869 tree arg2 = CALL_EXPR_ARG (exp, 2);
13870 rtx op0 = expand_normal (arg0);
13871 rtx op1 = expand_normal (arg1);
13872 rtx op2 = expand_normal (arg2);
13873 machine_mode mode0 = insn_data[icode].operand[0].mode;
13874 machine_mode mode1 = insn_data[icode].operand[1].mode;
13875 machine_mode mode2 = insn_data[icode].operand[2].mode;
13876
13877 if (icode == CODE_FOR_nothing)
13878 /* Builtin not supported on this processor. */
13879 return NULL_RTX;
13880
13881 /* If we got invalid arguments, bail out before generating bad rtl. */
13882 if (arg0 == error_mark_node
13883 || arg1 == error_mark_node
13884 || arg2 == error_mark_node)
13885 return NULL_RTX;
13886
13887 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13888 op0 = copy_to_mode_reg (mode0, op0);
13889 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13890 op1 = copy_to_mode_reg (mode1, op1);
13891 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13892 op2 = copy_to_mode_reg (mode2, op2);
13893
13894 pat = GEN_FCN (icode) (op0, op1, op2);
13895 if (pat)
13896 emit_insn (pat);
13897
13898 return NULL_RTX;
13899 }
13900
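/* Expand a vector store builtin. Argument 0 of EXP is the value to
store, argument 1 an offset and argument 2 a base pointer, as for
vec_st (val, offset, ptr). */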
13901 static rtx
13902 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13903 {
13904 tree arg0 = CALL_EXPR_ARG (exp, 0);
13905 tree arg1 = CALL_EXPR_ARG (exp, 1);
13906 tree arg2 = CALL_EXPR_ARG (exp, 2);
13907 rtx op0 = expand_normal (arg0);
13908 rtx op1 = expand_normal (arg1);
13909 rtx op2 = expand_normal (arg2);
13910 rtx pat, addr, rawaddr;
13911 machine_mode tmode = insn_data[icode].operand[0].mode;
13912 machine_mode smode = insn_data[icode].operand[1].mode;
13913 machine_mode mode1 = Pmode;
13914 machine_mode mode2 = Pmode;
13915
13916 /* Invalid arguments. Bail out before generating bad rtl. */
13917 if (arg0 == error_mark_node
13918 || arg1 == error_mark_node
13919 || arg2 == error_mark_node)
13920 return const0_rtx;
13921
13922 op2 = copy_to_mode_reg (mode2, op2);
13923
13924 /* For STVX, express the RTL accurately by ANDing the address with -16.
13925 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13926 so the raw address is fine. */
13927 if (icode == CODE_FOR_altivec_stvx_v2df
13928 || icode == CODE_FOR_altivec_stvx_v2di
13929 || icode == CODE_FOR_altivec_stvx_v4sf
13930 || icode == CODE_FOR_altivec_stvx_v4si
13931 || icode == CODE_FOR_altivec_stvx_v8hi
13932 || icode == CODE_FOR_altivec_stvx_v16qi)
13933 {
13934 if (op1 == const0_rtx)
13935 rawaddr = op2;
13936 else
13937 {
13938 op1 = copy_to_mode_reg (mode1, op1);
13939 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13940 }
13941
13942 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13943 addr = gen_rtx_MEM (tmode, addr);
13944
13945 op0 = copy_to_mode_reg (tmode, op0);
13946
13947 emit_insn (gen_rtx_SET (addr, op0));
13948 }
13949 else
13950 {
13951 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13952 op0 = copy_to_mode_reg (smode, op0);
13953
13954 if (op1 == const0_rtx)
13955 addr = gen_rtx_MEM (tmode, op2);
13956 else
13957 {
13958 op1 = copy_to_mode_reg (mode1, op1);
13959 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13960 }
13961
13962 pat = GEN_FCN (icode) (addr, op0);
13963 if (pat)
13964 emit_insn (pat);
13965 }
13966
13967 return NULL_RTX;
13968 }
13969
13970 /* Return the appropriate SPR number associated with the given builtin. */
13971 static inline HOST_WIDE_INT
13972 htm_spr_num (enum rs6000_builtins code)
13973 {
13974 if (code == HTM_BUILTIN_GET_TFHAR
13975 || code == HTM_BUILTIN_SET_TFHAR)
13976 return TFHAR_SPR;
13977 else if (code == HTM_BUILTIN_GET_TFIAR
13978 || code == HTM_BUILTIN_SET_TFIAR)
13979 return TFIAR_SPR;
13980 else if (code == HTM_BUILTIN_GET_TEXASR
13981 || code == HTM_BUILTIN_SET_TEXASR)
13982 return TEXASR_SPR;
13983 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13984 || code == HTM_BUILTIN_SET_TEXASRU);
13985 return TEXASRU_SPR;
13986 }
13987
13988 /* Return the appropriate SPR regno associated with the given builtin. */
13989 static inline HOST_WIDE_INT
13990 htm_spr_regno (enum rs6000_builtins code)
13991 {
13992 if (code == HTM_BUILTIN_GET_TFHAR
13993 || code == HTM_BUILTIN_SET_TFHAR)
13994 return TFHAR_REGNO;
13995 else if (code == HTM_BUILTIN_GET_TFIAR
13996 || code == HTM_BUILTIN_SET_TFIAR)
13997 return TFIAR_REGNO;
13998 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
13999 || code == HTM_BUILTIN_SET_TEXASR
14000 || code == HTM_BUILTIN_GET_TEXASRU
14001 || code == HTM_BUILTIN_SET_TEXASRU);
14002 return TEXASR_REGNO;
14003 }
14004
14005 /* Return the correct ICODE value depending on whether we are
14006 setting or reading the HTM SPRs. */
14007 static inline enum insn_code
14008 rs6000_htm_spr_icode (bool nonvoid)
14009 {
14010 if (nonvoid)
14011 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14012 else
14013 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14014 }
14015
14016 /* Expand the HTM builtin in EXP and store the result in TARGET.
14017 Store true in *EXPANDEDP if we found a builtin to expand. */
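/* For reference, a transaction is typically written roughly as

if (__builtin_tbegin (0))
{
... transactional code ...
__builtin_tend (0);
}

and the RS6000_BTC_CR handling below is what turns the condition
register result of the tbegin. instruction into that boolean value. */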
14018 static rtx
14019 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14020 {
14021 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14022 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14023 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14024 const struct builtin_description *d;
14025 size_t i;
14026
14027 *expandedp = true;
14028
14029 if (!TARGET_POWERPC64
14030 && (fcode == HTM_BUILTIN_TABORTDC
14031 || fcode == HTM_BUILTIN_TABORTDCI))
14032 {
14033 size_t uns_fcode = (size_t) fcode;
14034 const char *name = rs6000_builtin_info[uns_fcode].name;
14035 error ("builtin %qs is only valid in 64-bit mode", name);
14036 return const0_rtx;
14037 }
14038
14039 /* Expand the HTM builtins. */
14040 d = bdesc_htm;
14041 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14042 if (d->code == fcode)
14043 {
14044 rtx op[MAX_HTM_OPERANDS], pat;
14045 int nopnds = 0;
14046 tree arg;
14047 call_expr_arg_iterator iter;
14048 unsigned attr = rs6000_builtin_info[fcode].attr;
14049 enum insn_code icode = d->icode;
14050 const struct insn_operand_data *insn_op;
14051 bool uses_spr = (attr & RS6000_BTC_SPR);
14052 rtx cr = NULL_RTX;
14053
14054 if (uses_spr)
14055 icode = rs6000_htm_spr_icode (nonvoid);
14056 insn_op = &insn_data[icode].operand[0];
14057
14058 if (nonvoid)
14059 {
14060 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14061 if (!target
14062 || GET_MODE (target) != tmode
14063 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14064 target = gen_reg_rtx (tmode);
14065 if (uses_spr)
14066 op[nopnds++] = target;
14067 }
14068
14069 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14070 {
14071 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14072 return const0_rtx;
14073
14074 insn_op = &insn_data[icode].operand[nopnds];
14075
14076 op[nopnds] = expand_normal (arg);
14077
14078 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14079 {
14080 if (!strcmp (insn_op->constraint, "n"))
14081 {
14082 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14083 if (!CONST_INT_P (op[nopnds]))
14084 error ("argument %d must be an unsigned literal", arg_num);
14085 else
14086 error ("argument %d is an unsigned literal that is "
14087 "out of range", arg_num);
14088 return const0_rtx;
14089 }
14090 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14091 }
14092
14093 nopnds++;
14094 }
14095
14096 /* Handle the builtins for extended mnemonics. These accept
14097 no arguments, but map to builtins that take arguments. */
14098 switch (fcode)
14099 {
14100 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14101 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14102 op[nopnds++] = GEN_INT (1);
14103 if (flag_checking)
14104 attr |= RS6000_BTC_UNARY;
14105 break;
14106 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14107 op[nopnds++] = GEN_INT (0);
14108 if (flag_checking)
14109 attr |= RS6000_BTC_UNARY;
14110 break;
14111 default:
14112 break;
14113 }
14114
14115 /* If this builtin accesses SPRs, then pass in the appropriate
14116 SPR number and SPR regno as the last two operands. */
14117 if (uses_spr)
14118 {
14119 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14120 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14121 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14122 }
14123 /* If this builtin accesses a CR, then pass in a scratch
14124 CR as the last operand. */
14125 else if (attr & RS6000_BTC_CR)
14126 {
cr = gen_reg_rtx (CCmode);
14127 op[nopnds++] = cr;
14128 }
14129
14130 if (flag_checking)
14131 {
14132 int expected_nopnds = 0;
14133 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14134 expected_nopnds = 1;
14135 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14136 expected_nopnds = 2;
14137 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14138 expected_nopnds = 3;
14139 if (!(attr & RS6000_BTC_VOID))
14140 expected_nopnds += 1;
14141 if (uses_spr)
14142 expected_nopnds += 2;
14143
14144 gcc_assert (nopnds == expected_nopnds
14145 && nopnds <= MAX_HTM_OPERANDS);
14146 }
14147
14148 switch (nopnds)
14149 {
14150 case 1:
14151 pat = GEN_FCN (icode) (op[0]);
14152 break;
14153 case 2:
14154 pat = GEN_FCN (icode) (op[0], op[1]);
14155 break;
14156 case 3:
14157 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14158 break;
14159 case 4:
14160 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14161 break;
14162 default:
14163 gcc_unreachable ();
14164 }
14165 if (!pat)
14166 return NULL_RTX;
14167 emit_insn (pat);
14168
14169 if (attr & RS6000_BTC_CR)
14170 {
14171 if (fcode == HTM_BUILTIN_TBEGIN)
14172 {
14173 /* Emit code to set TARGET to true or false depending on
14174 whether the tbegin. instruction succeeded or failed
14175 to start a transaction. We do this by placing the one's
14176 complement of CR's EQ bit into TARGET. */
14177 rtx scratch = gen_reg_rtx (SImode);
14178 emit_insn (gen_rtx_SET (scratch,
14179 gen_rtx_EQ (SImode, cr,
14180 const0_rtx)));
14181 emit_insn (gen_rtx_SET (target,
14182 gen_rtx_XOR (SImode, scratch,
14183 GEN_INT (1))));
14184 }
14185 else
14186 {
14187 /* Emit code to copy the 4-bit condition register field
14188 CR into the least significant end of register TARGET. */
14189 rtx scratch1 = gen_reg_rtx (SImode);
14190 rtx scratch2 = gen_reg_rtx (SImode);
14191 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14192 emit_insn (gen_movcc (subreg, cr));
14193 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14194 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14195 }
14196 }
14197
14198 if (nonvoid)
14199 return target;
14200 return const0_rtx;
14201 }
14202
14203 *expandedp = false;
14204 return NULL_RTX;
14205 }
14206
14207 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
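/* For example, code such as

if (__builtin_cpu_is ("power9")) ...
if (__builtin_cpu_supports ("vsx")) ...

is expanded below into a compare against the CPUID value, or a mask
test against the HWCAP bits, that GLIBC keeps in the TCB. */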
14208
14209 static rtx
14210 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14211 rtx target)
14212 {
14213 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14214 if (fcode == RS6000_BUILTIN_CPU_INIT)
14215 return const0_rtx;
14216
14217 if (target == 0 || GET_MODE (target) != SImode)
14218 target = gen_reg_rtx (SImode);
14219
14220 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14221 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14222 /* The target clones mechanism creates an ARRAY_REF instead of a
14223 STRING_CST; convert it back to a STRING_CST. */
14224 if (TREE_CODE (arg) == ARRAY_REF
14225 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14226 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14227 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14228 arg = TREE_OPERAND (arg, 0);
14229
14230 if (TREE_CODE (arg) != STRING_CST)
14231 {
14232 error ("builtin %qs only accepts a string argument",
14233 rs6000_builtin_info[(size_t) fcode].name);
14234 return const0_rtx;
14235 }
14236
14237 if (fcode == RS6000_BUILTIN_CPU_IS)
14238 {
14239 const char *cpu = TREE_STRING_POINTER (arg);
14240 rtx cpuid = NULL_RTX;
14241 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14242 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14243 {
14244 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14245 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14246 break;
14247 }
14248 if (cpuid == NULL_RTX)
14249 {
14250 /* Invalid CPU argument. */
14251 error ("cpu %qs is an invalid argument to builtin %qs",
14252 cpu, rs6000_builtin_info[(size_t) fcode].name);
14253 return const0_rtx;
14254 }
14255
14256 rtx platform = gen_reg_rtx (SImode);
14257 rtx tcbmem = gen_const_mem (SImode,
14258 gen_rtx_PLUS (Pmode,
14259 gen_rtx_REG (Pmode, TLS_REGNUM),
14260 GEN_INT (TCB_PLATFORM_OFFSET)));
14261 emit_move_insn (platform, tcbmem);
14262 emit_insn (gen_eqsi3 (target, platform, cpuid));
14263 }
14264 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14265 {
14266 const char *hwcap = TREE_STRING_POINTER (arg);
14267 rtx mask = NULL_RTX;
14268 int hwcap_offset;
14269 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14270 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14271 {
14272 mask = GEN_INT (cpu_supports_info[i].mask);
14273 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14274 break;
14275 }
14276 if (mask == NULL_RTX)
14277 {
14278 /* Invalid HWCAP argument. */
14279 error ("%s %qs is an invalid argument to builtin %qs",
14280 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14281 return const0_rtx;
14282 }
14283
14284 rtx tcb_hwcap = gen_reg_rtx (SImode);
14285 rtx tcbmem = gen_const_mem (SImode,
14286 gen_rtx_PLUS (Pmode,
14287 gen_rtx_REG (Pmode, TLS_REGNUM),
14288 GEN_INT (hwcap_offset)));
14289 emit_move_insn (tcb_hwcap, tcbmem);
14290 rtx scratch1 = gen_reg_rtx (SImode);
14291 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14292 rtx scratch2 = gen_reg_rtx (SImode);
14293 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14294 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14295 }
14296 else
14297 gcc_unreachable ();
14298
14299 /* Record that we have expanded a CPU builtin, so that we can later
14300 emit a reference to the special symbol exported by LIBC to ensure we
14301 do not link against an old LIBC that doesn't support this feature. */
14302 cpu_builtin_p = true;
14303
14304 #else
14305 warning (0, "builtin %qs needs GLIBC (2.23 or newer) that exports hardware "
14306 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14307
14308 /* For old LIBCs, always return FALSE. */
14309 emit_move_insn (target, GEN_INT (0));
14310 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14311
14312 return target;
14313 }
14314
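/* Expand a ternary operation builtin: TARGET = ICODE (arg0, arg1, arg2),
after validating any operands that the instruction requires to be
literal constants. */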
14315 static rtx
14316 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14317 {
14318 rtx pat;
14319 tree arg0 = CALL_EXPR_ARG (exp, 0);
14320 tree arg1 = CALL_EXPR_ARG (exp, 1);
14321 tree arg2 = CALL_EXPR_ARG (exp, 2);
14322 rtx op0 = expand_normal (arg0);
14323 rtx op1 = expand_normal (arg1);
14324 rtx op2 = expand_normal (arg2);
14325 machine_mode tmode = insn_data[icode].operand[0].mode;
14326 machine_mode mode0 = insn_data[icode].operand[1].mode;
14327 machine_mode mode1 = insn_data[icode].operand[2].mode;
14328 machine_mode mode2 = insn_data[icode].operand[3].mode;
14329
14330 if (icode == CODE_FOR_nothing)
14331 /* Builtin not supported on this processor. */
14332 return 0;
14333
14334 /* If we got invalid arguments, bail out before generating bad rtl. */
14335 if (arg0 == error_mark_node
14336 || arg1 == error_mark_node
14337 || arg2 == error_mark_node)
14338 return const0_rtx;
14339
14340 /* Check and prepare the arguments, depending on the instruction code.
14341
14342 Note that a switch statement instead of this sequence of tests
14343 would be incorrect, as many of the CODE_FOR values could be
14344 CODE_FOR_nothing; that would yield multiple case alternatives
14345 with identical values, so the switch would fail to compile even
14346 though such a case could never be reached at runtime. */
14347 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14348 || icode == CODE_FOR_altivec_vsldoi_v2df
14349 || icode == CODE_FOR_altivec_vsldoi_v4si
14350 || icode == CODE_FOR_altivec_vsldoi_v8hi
14351 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14352 {
14353 /* Only allow 4-bit unsigned literals. */
14354 STRIP_NOPS (arg2);
14355 if (TREE_CODE (arg2) != INTEGER_CST
14356 || TREE_INT_CST_LOW (arg2) & ~0xf)
14357 {
14358 error ("argument 3 must be a 4-bit unsigned literal");
14359 return CONST0_RTX (tmode);
14360 }
14361 }
14362 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14363 || icode == CODE_FOR_vsx_xxpermdi_v2di
14364 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14365 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14366 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14367 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14368 || icode == CODE_FOR_vsx_xxpermdi_v4si
14369 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14370 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14371 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14372 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14373 || icode == CODE_FOR_vsx_xxsldwi_v4si
14374 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14375 || icode == CODE_FOR_vsx_xxsldwi_v2di
14376 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14377 {
14378 /* Only allow 2-bit unsigned literals. */
14379 STRIP_NOPS (arg2);
14380 if (TREE_CODE (arg2) != INTEGER_CST
14381 || TREE_INT_CST_LOW (arg2) & ~0x3)
14382 {
14383 error ("argument 3 must be a 2-bit unsigned literal");
14384 return CONST0_RTX (tmode);
14385 }
14386 }
14387 else if (icode == CODE_FOR_vsx_set_v2df
14388 || icode == CODE_FOR_vsx_set_v2di
14389 || icode == CODE_FOR_bcdadd
14390 || icode == CODE_FOR_bcdadd_lt
14391 || icode == CODE_FOR_bcdadd_eq
14392 || icode == CODE_FOR_bcdadd_gt
14393 || icode == CODE_FOR_bcdsub
14394 || icode == CODE_FOR_bcdsub_lt
14395 || icode == CODE_FOR_bcdsub_eq
14396 || icode == CODE_FOR_bcdsub_gt)
14397 {
14398 /* Only allow 1-bit unsigned literals. */
14399 STRIP_NOPS (arg2);
14400 if (TREE_CODE (arg2) != INTEGER_CST
14401 || TREE_INT_CST_LOW (arg2) & ~0x1)
14402 {
14403 error ("argument 3 must be a 1-bit unsigned literal");
14404 return CONST0_RTX (tmode);
14405 }
14406 }
14407 else if (icode == CODE_FOR_dfp_ddedpd_dd
14408 || icode == CODE_FOR_dfp_ddedpd_td)
14409 {
14410 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14411 STRIP_NOPS (arg0);
14412 if (TREE_CODE (arg0) != INTEGER_CST
14413 || TREE_INT_CST_LOW (arg0) & ~0x3)
14414 {
14415 error ("argument 1 must be 0 or 2");
14416 return CONST0_RTX (tmode);
14417 }
14418 }
14419 else if (icode == CODE_FOR_dfp_denbcd_dd
14420 || icode == CODE_FOR_dfp_denbcd_td)
14421 {
14422 /* Only allow 1-bit unsigned literals. */
14423 STRIP_NOPS (arg0);
14424 if (TREE_CODE (arg0) != INTEGER_CST
14425 || TREE_INT_CST_LOW (arg0) & ~0x1)
14426 {
14427 error ("argument 1 must be a 1-bit unsigned literal");
14428 return CONST0_RTX (tmode);
14429 }
14430 }
14431 else if (icode == CODE_FOR_dfp_dscli_dd
14432 || icode == CODE_FOR_dfp_dscli_td
14433 || icode == CODE_FOR_dfp_dscri_dd
14434 || icode == CODE_FOR_dfp_dscri_td)
14435 {
14436 /* Only allow 6-bit unsigned literals. */
14437 STRIP_NOPS (arg1);
14438 if (TREE_CODE (arg1) != INTEGER_CST
14439 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14440 {
14441 error ("argument 2 must be a 6-bit unsigned literal");
14442 return CONST0_RTX (tmode);
14443 }
14444 }
14445 else if (icode == CODE_FOR_crypto_vshasigmaw
14446 || icode == CODE_FOR_crypto_vshasigmad)
14447 {
14448 /* Check that the 2nd and 3rd arguments are integer constants in
14449 range, and prepare the arguments. */
14450 STRIP_NOPS (arg1);
14451 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14452 {
14453 error ("argument 2 must be 0 or 1");
14454 return CONST0_RTX (tmode);
14455 }
14456
14457 STRIP_NOPS (arg2);
14458 if (TREE_CODE (arg2) != INTEGER_CST
14459 || wi::geu_p (wi::to_wide (arg2), 16))
14460 {
14461 error ("argument 3 must be in the range 0..15");
14462 return CONST0_RTX (tmode);
14463 }
14464 }
14465
14466 if (target == 0
14467 || GET_MODE (target) != tmode
14468 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14469 target = gen_reg_rtx (tmode);
14470
14471 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14472 op0 = copy_to_mode_reg (mode0, op0);
14473 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14474 op1 = copy_to_mode_reg (mode1, op1);
14475 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14476 op2 = copy_to_mode_reg (mode2, op2);
14477
14478 pat = GEN_FCN (icode) (target, op0, op1, op2);
14479 if (! pat)
14480 return 0;
14481 emit_insn (pat);
14482
14483 return target;
14484 }
14485
14486
14487 /* Expand the dst builtins. */
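/* These implement, e.g., vec_dst (addr, ctl, 0), where the last
argument is a stream tag that must be a 2-bit literal. */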
14488 static rtx
14489 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14490 bool *expandedp)
14491 {
14492 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14493 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14494 tree arg0, arg1, arg2;
14495 machine_mode mode0, mode1;
14496 rtx pat, op0, op1, op2;
14497 const struct builtin_description *d;
14498 size_t i;
14499
14500 *expandedp = false;
14501
14502 /* Handle DST variants. */
14503 d = bdesc_dst;
14504 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14505 if (d->code == fcode)
14506 {
14507 arg0 = CALL_EXPR_ARG (exp, 0);
14508 arg1 = CALL_EXPR_ARG (exp, 1);
14509 arg2 = CALL_EXPR_ARG (exp, 2);
14510 op0 = expand_normal (arg0);
14511 op1 = expand_normal (arg1);
14512 op2 = expand_normal (arg2);
14513 mode0 = insn_data[d->icode].operand[0].mode;
14514 mode1 = insn_data[d->icode].operand[1].mode;
14515
14516 /* Invalid arguments; bail out before generating bad rtl. */
14517 if (arg0 == error_mark_node
14518 || arg1 == error_mark_node
14519 || arg2 == error_mark_node)
14520 return const0_rtx;
14521
14522 *expandedp = true;
14523 STRIP_NOPS (arg2);
14524 if (TREE_CODE (arg2) != INTEGER_CST
14525 || TREE_INT_CST_LOW (arg2) & ~0x3)
14526 {
14527 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14528 return const0_rtx;
14529 }
14530
14531 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14532 op0 = copy_to_mode_reg (Pmode, op0);
14533 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14534 op1 = copy_to_mode_reg (mode1, op1);
14535
14536 pat = GEN_FCN (d->icode) (op0, op1, op2);
14537 if (pat != 0)
14538 emit_insn (pat);
14539
14540 return NULL_RTX;
14541 }
14542
14543 return NULL_RTX;
14544 }
14545
14546 /* Expand vec_init builtin. */
14547 static rtx
14548 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14549 {
14550 machine_mode tmode = TYPE_MODE (type);
14551 machine_mode inner_mode = GET_MODE_INNER (tmode);
14552 int i, n_elt = GET_MODE_NUNITS (tmode);
14553
14554 gcc_assert (VECTOR_MODE_P (tmode));
14555 gcc_assert (n_elt == call_expr_nargs (exp));
14556
14557 if (!target || !register_operand (target, tmode))
14558 target = gen_reg_rtx (tmode);
14559
14560 /* If we have a vector consisting of a single element, such as V1TImode,
14561 do the initialization directly. */
14562 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14563 {
14564 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14565 emit_move_insn (target, gen_lowpart (tmode, x));
14566 }
14567 else
14568 {
14569 rtvec v = rtvec_alloc (n_elt);
14570
14571 for (i = 0; i < n_elt; ++i)
14572 {
14573 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14574 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14575 }
14576
14577 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14578 }
14579
14580 return target;
14581 }
14582
14583 /* Return the integer constant in ARG. Constrain it to be in the range
14584 of the subparts of VEC_TYPE; issue an error if not. */
14585
14586 static int
14587 get_element_number (tree vec_type, tree arg)
14588 {
14589 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14590
14591 if (!tree_fits_uhwi_p (arg)
14592 || (elt = tree_to_uhwi (arg), elt > max))
14593 {
14594 error ("selector must be an integer constant in the range 0..%wi", max);
14595 return 0;
14596 }
14597
14598 return elt;
14599 }
14600
14601 /* Expand vec_set builtin. */
14602 static rtx
14603 altivec_expand_vec_set_builtin (tree exp)
14604 {
14605 machine_mode tmode, mode1;
14606 tree arg0, arg1, arg2;
14607 int elt;
14608 rtx op0, op1;
14609
14610 arg0 = CALL_EXPR_ARG (exp, 0);
14611 arg1 = CALL_EXPR_ARG (exp, 1);
14612 arg2 = CALL_EXPR_ARG (exp, 2);
14613
14614 tmode = TYPE_MODE (TREE_TYPE (arg0));
14615 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14616 gcc_assert (VECTOR_MODE_P (tmode));
14617
14618 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14619 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14620 elt = get_element_number (TREE_TYPE (arg0), arg2);
14621
14622 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14623 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14624
14625 op0 = force_reg (tmode, op0);
14626 op1 = force_reg (mode1, op1);
14627
14628 rs6000_expand_vector_set (op0, op1, elt);
14629
14630 return op0;
14631 }
14632
14633 /* Expand vec_ext builtin. */
14634 static rtx
14635 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14636 {
14637 machine_mode tmode, mode0;
14638 tree arg0, arg1;
14639 rtx op0;
14640 rtx op1;
14641
14642 arg0 = CALL_EXPR_ARG (exp, 0);
14643 arg1 = CALL_EXPR_ARG (exp, 1);
14644
14645 op0 = expand_normal (arg0);
14646 op1 = expand_normal (arg1);
14647
14648 /* Call get_element_number to validate arg1 if it is a constant. */
14649 if (TREE_CODE (arg1) == INTEGER_CST)
14650 (void) get_element_number (TREE_TYPE (arg0), arg1);
14651
14652 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14653 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14654 gcc_assert (VECTOR_MODE_P (mode0));
14655
14656 op0 = force_reg (mode0, op0);
14657
14658 if (optimize || !target || !register_operand (target, tmode))
14659 target = gen_reg_rtx (tmode);
14660
14661 rs6000_expand_vector_extract (target, op0, op1);
14662
14663 return target;
14664 }
14665
14666 /* Expand the builtin in EXP and store the result in TARGET. Store
14667 true in *EXPANDEDP if we found a builtin to expand. */
14668 static rtx
14669 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14670 {
14671 const struct builtin_description *d;
14672 size_t i;
14673 enum insn_code icode;
14674 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14675 tree arg0, arg1, arg2;
14676 rtx op0, pat;
14677 machine_mode tmode, mode0;
14678 enum rs6000_builtins fcode
14679 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14680
14681 if (rs6000_overloaded_builtin_p (fcode))
14682 {
14683 *expandedp = true;
14684 error ("unresolved overload for Altivec builtin %qF", fndecl);
14685
14686 /* Given it is invalid, just generate a normal call. */
14687 return expand_call (exp, target, false);
14688 }
14689
14690 target = altivec_expand_dst_builtin (exp, target, expandedp);
14691 if (*expandedp)
14692 return target;
14693
14694 *expandedp = true;
14695
14696 switch (fcode)
14697 {
14698 case ALTIVEC_BUILTIN_STVX_V2DF:
14699 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14700 case ALTIVEC_BUILTIN_STVX_V2DI:
14701 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14702 case ALTIVEC_BUILTIN_STVX_V4SF:
14703 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14704 case ALTIVEC_BUILTIN_STVX:
14705 case ALTIVEC_BUILTIN_STVX_V4SI:
14706 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14707 case ALTIVEC_BUILTIN_STVX_V8HI:
14708 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14709 case ALTIVEC_BUILTIN_STVX_V16QI:
14710 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14711 case ALTIVEC_BUILTIN_STVEBX:
14712 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14713 case ALTIVEC_BUILTIN_STVEHX:
14714 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14715 case ALTIVEC_BUILTIN_STVEWX:
14716 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14717 case ALTIVEC_BUILTIN_STVXL_V2DF:
14718 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14719 case ALTIVEC_BUILTIN_STVXL_V2DI:
14720 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14721 case ALTIVEC_BUILTIN_STVXL_V4SF:
14722 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14723 case ALTIVEC_BUILTIN_STVXL:
14724 case ALTIVEC_BUILTIN_STVXL_V4SI:
14725 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14726 case ALTIVEC_BUILTIN_STVXL_V8HI:
14727 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14728 case ALTIVEC_BUILTIN_STVXL_V16QI:
14729 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14730
14731 case ALTIVEC_BUILTIN_STVLX:
14732 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14733 case ALTIVEC_BUILTIN_STVLXL:
14734 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14735 case ALTIVEC_BUILTIN_STVRX:
14736 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14737 case ALTIVEC_BUILTIN_STVRXL:
14738 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14739
14740 case P9V_BUILTIN_STXVL:
14741 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14742
14743 case P9V_BUILTIN_XST_LEN_R:
14744 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14745
14746 case VSX_BUILTIN_STXVD2X_V1TI:
14747 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14748 case VSX_BUILTIN_STXVD2X_V2DF:
14749 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14750 case VSX_BUILTIN_STXVD2X_V2DI:
14751 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14752 case VSX_BUILTIN_STXVW4X_V4SF:
14753 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14754 case VSX_BUILTIN_STXVW4X_V4SI:
14755 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14756 case VSX_BUILTIN_STXVW4X_V8HI:
14757 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14758 case VSX_BUILTIN_STXVW4X_V16QI:
14759 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14760
14761 /* For the following on big endian, it's ok to use any appropriate
14762 unaligned-supporting store, so use a generic expander. For
14763 little-endian, the exact element-reversing instruction must
14764 be used. */
14765 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14766 {
14767 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14768 : CODE_FOR_vsx_st_elemrev_v1ti);
14769 return altivec_expand_stv_builtin (code, exp);
14770 }
14771 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14772 {
14773 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14774 : CODE_FOR_vsx_st_elemrev_v2df);
14775 return altivec_expand_stv_builtin (code, exp);
14776 }
14777 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14778 {
14779 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14780 : CODE_FOR_vsx_st_elemrev_v2di);
14781 return altivec_expand_stv_builtin (code, exp);
14782 }
14783 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14784 {
14785 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14786 : CODE_FOR_vsx_st_elemrev_v4sf);
14787 return altivec_expand_stv_builtin (code, exp);
14788 }
14789 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14790 {
14791 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14792 : CODE_FOR_vsx_st_elemrev_v4si);
14793 return altivec_expand_stv_builtin (code, exp);
14794 }
14795 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14796 {
14797 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14798 : CODE_FOR_vsx_st_elemrev_v8hi);
14799 return altivec_expand_stv_builtin (code, exp);
14800 }
14801 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14802 {
14803 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14804 : CODE_FOR_vsx_st_elemrev_v16qi);
14805 return altivec_expand_stv_builtin (code, exp);
14806 }
14807
14808 case ALTIVEC_BUILTIN_MFVSCR:
14809 icode = CODE_FOR_altivec_mfvscr;
14810 tmode = insn_data[icode].operand[0].mode;
14811
14812 if (target == 0
14813 || GET_MODE (target) != tmode
14814 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14815 target = gen_reg_rtx (tmode);
14816
14817 pat = GEN_FCN (icode) (target);
14818 if (! pat)
14819 return 0;
14820 emit_insn (pat);
14821 return target;
14822
14823 case ALTIVEC_BUILTIN_MTVSCR:
14824 icode = CODE_FOR_altivec_mtvscr;
14825 arg0 = CALL_EXPR_ARG (exp, 0);
14826 op0 = expand_normal (arg0);
14827 mode0 = insn_data[icode].operand[0].mode;
14828
14829 /* If we got invalid arguments, bail out before generating bad rtl. */
14830 if (arg0 == error_mark_node)
14831 return const0_rtx;
14832
14833 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14834 op0 = copy_to_mode_reg (mode0, op0);
14835
14836 pat = GEN_FCN (icode) (op0);
14837 if (pat)
14838 emit_insn (pat);
14839 return NULL_RTX;
14840
14841 case ALTIVEC_BUILTIN_DSSALL:
14842 emit_insn (gen_altivec_dssall ());
14843 return NULL_RTX;
14844
14845 case ALTIVEC_BUILTIN_DSS:
14846 icode = CODE_FOR_altivec_dss;
14847 arg0 = CALL_EXPR_ARG (exp, 0);
14848 STRIP_NOPS (arg0);
14849 op0 = expand_normal (arg0);
14850 mode0 = insn_data[icode].operand[0].mode;
14851
14852 /* If we got invalid arguments, bail out before generating bad rtl. */
14853 if (arg0 == error_mark_node)
14854 return const0_rtx;
14855
14856 if (TREE_CODE (arg0) != INTEGER_CST
14857 || TREE_INT_CST_LOW (arg0) & ~0x3)
14858 {
14859 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14860 return const0_rtx;
14861 }
14862
14863 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14864 op0 = copy_to_mode_reg (mode0, op0);
14865
14866 emit_insn (gen_altivec_dss (op0));
14867 return NULL_RTX;
14868
14869 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14870 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14871 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14872 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14873 case VSX_BUILTIN_VEC_INIT_V2DF:
14874 case VSX_BUILTIN_VEC_INIT_V2DI:
14875 case VSX_BUILTIN_VEC_INIT_V1TI:
14876 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14877
14878 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14879 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14880 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14881 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14882 case VSX_BUILTIN_VEC_SET_V2DF:
14883 case VSX_BUILTIN_VEC_SET_V2DI:
14884 case VSX_BUILTIN_VEC_SET_V1TI:
14885 return altivec_expand_vec_set_builtin (exp);
14886
14887 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14888 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14889 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14890 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14891 case VSX_BUILTIN_VEC_EXT_V2DF:
14892 case VSX_BUILTIN_VEC_EXT_V2DI:
14893 case VSX_BUILTIN_VEC_EXT_V1TI:
14894 return altivec_expand_vec_ext_builtin (exp, target);
14895
14896 case P9V_BUILTIN_VEC_EXTRACT4B:
14897 arg1 = CALL_EXPR_ARG (exp, 1);
14898 STRIP_NOPS (arg1);
14899
14900 /* If the argument is invalid, generate a normal call. */
14901 if (arg1 == error_mark_node)
14902 return expand_call (exp, target, false);
14903
14904 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14905 {
14906 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14907 return expand_call (exp, target, false);
14908 }
14909 break;
14910
14911 case P9V_BUILTIN_VEC_INSERT4B:
14912 arg2 = CALL_EXPR_ARG (exp, 2);
14913 STRIP_NOPS (arg2);
14914
14915 /* If the argument is invalid, generate a normal call. */
14916 if (arg2 == error_mark_node)
14917 return expand_call (exp, target, false);
14918
14919 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
14920 {
14921 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
14922 return expand_call (exp, target, false);
14923 }
14924 break;
14925
14926 default:
14927 /* Fall through to the expanders below the switch. */
14928 break;
14929 }
14930
14931 /* Expand abs* operations. */
14932 d = bdesc_abs;
14933 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14934 if (d->code == fcode)
14935 return altivec_expand_abs_builtin (d->icode, exp, target);
14936
14937 /* Expand the AltiVec predicates. */
14938 d = bdesc_altivec_preds;
14939 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14940 if (d->code == fcode)
14941 return altivec_expand_predicate_builtin (d->icode, exp, target);
14942
14943 /* LV* are funky. We initialize them differently. */
14944 switch (fcode)
14945 {
14946 case ALTIVEC_BUILTIN_LVSL:
14947 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14948 exp, target, false);
14949 case ALTIVEC_BUILTIN_LVSR:
14950 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14951 exp, target, false);
14952 case ALTIVEC_BUILTIN_LVEBX:
14953 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14954 exp, target, false);
14955 case ALTIVEC_BUILTIN_LVEHX:
14956 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14957 exp, target, false);
14958 case ALTIVEC_BUILTIN_LVEWX:
14959 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14960 exp, target, false);
14961 case ALTIVEC_BUILTIN_LVXL_V2DF:
14962 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14963 exp, target, false);
14964 case ALTIVEC_BUILTIN_LVXL_V2DI:
14965 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14966 exp, target, false);
14967 case ALTIVEC_BUILTIN_LVXL_V4SF:
14968 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14969 exp, target, false);
14970 case ALTIVEC_BUILTIN_LVXL:
14971 case ALTIVEC_BUILTIN_LVXL_V4SI:
14972 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14973 exp, target, false);
14974 case ALTIVEC_BUILTIN_LVXL_V8HI:
14975 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14976 exp, target, false);
14977 case ALTIVEC_BUILTIN_LVXL_V16QI:
14978 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14979 exp, target, false);
14980 case ALTIVEC_BUILTIN_LVX_V1TI:
14981 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
14982 exp, target, false);
14983 case ALTIVEC_BUILTIN_LVX_V2DF:
14984 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
14985 exp, target, false);
14986 case ALTIVEC_BUILTIN_LVX_V2DI:
14987 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
14988 exp, target, false);
14989 case ALTIVEC_BUILTIN_LVX_V4SF:
14990 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
14991 exp, target, false);
14992 case ALTIVEC_BUILTIN_LVX:
14993 case ALTIVEC_BUILTIN_LVX_V4SI:
14994 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
14995 exp, target, false);
14996 case ALTIVEC_BUILTIN_LVX_V8HI:
14997 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
14998 exp, target, false);
14999 case ALTIVEC_BUILTIN_LVX_V16QI:
15000 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15001 exp, target, false);
15002 case ALTIVEC_BUILTIN_LVLX:
15003 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15004 exp, target, true);
15005 case ALTIVEC_BUILTIN_LVLXL:
15006 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15007 exp, target, true);
15008 case ALTIVEC_BUILTIN_LVRX:
15009 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15010 exp, target, true);
15011 case ALTIVEC_BUILTIN_LVRXL:
15012 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15013 exp, target, true);
15014 case VSX_BUILTIN_LXVD2X_V1TI:
15015 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15016 exp, target, false);
15017 case VSX_BUILTIN_LXVD2X_V2DF:
15018 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15019 exp, target, false);
15020 case VSX_BUILTIN_LXVD2X_V2DI:
15021 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15022 exp, target, false);
15023 case VSX_BUILTIN_LXVW4X_V4SF:
15024 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15025 exp, target, false);
15026 case VSX_BUILTIN_LXVW4X_V4SI:
15027 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15028 exp, target, false);
15029 case VSX_BUILTIN_LXVW4X_V8HI:
15030 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15031 exp, target, false);
15032 case VSX_BUILTIN_LXVW4X_V16QI:
15033 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15034 exp, target, false);
15035 /* For the following on big endian, it's ok to use any appropriate
15036 unaligned-supporting load, so use a generic expander. For
15037 little-endian, the exact element-reversing instruction must
15038 be used. */
15039 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15040 {
15041 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15042 : CODE_FOR_vsx_ld_elemrev_v2df);
15043 return altivec_expand_lv_builtin (code, exp, target, false);
15044 }
15045 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15046 {
15047 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15048 : CODE_FOR_vsx_ld_elemrev_v1ti);
15049 return altivec_expand_lv_builtin (code, exp, target, false);
15050 }
15051 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15052 {
15053 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15054 : CODE_FOR_vsx_ld_elemrev_v2di);
15055 return altivec_expand_lv_builtin (code, exp, target, false);
15056 }
15057 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15058 {
15059 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15060 : CODE_FOR_vsx_ld_elemrev_v4sf);
15061 return altivec_expand_lv_builtin (code, exp, target, false);
15062 }
15063 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15064 {
15065 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15066 : CODE_FOR_vsx_ld_elemrev_v4si);
15067 return altivec_expand_lv_builtin (code, exp, target, false);
15068 }
15069 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15070 {
15071 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15072 : CODE_FOR_vsx_ld_elemrev_v8hi);
15073 return altivec_expand_lv_builtin (code, exp, target, false);
15074 }
15075 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15076 {
15077 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15078 : CODE_FOR_vsx_ld_elemrev_v16qi);
15079 return altivec_expand_lv_builtin (code, exp, target, false);
15080 }
15081 break;
15082 default:
15083 /* Fall through to the code below the switch. */
15084 break;
15085 }
15086
15087 *expandedp = false;
15088 return NULL_RTX;
15089 }
15090
15091 /* Check whether a builtin function is supported in this target
15092 configuration. */
15093 bool
15094 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15095 {
15096 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
15097 return (fnmask & rs6000_builtin_mask) == fnmask;
15101 }
15102
15103 /* Raise an error message for a builtin function that is called without the
15104 appropriate target options being set. */
15105
15106 static void
15107 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15108 {
15109 size_t uns_fncode = (size_t) fncode;
15110 const char *name = rs6000_builtin_info[uns_fncode].name;
15111 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15112
15113 gcc_assert (name != NULL);
15114 if ((fnmask & RS6000_BTM_CELL) != 0)
15115 error ("builtin function %qs is only valid for the cell processor", name);
15116 else if ((fnmask & RS6000_BTM_VSX) != 0)
15117 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15118 else if ((fnmask & RS6000_BTM_HTM) != 0)
15119 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15120 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15121 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15122 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15123 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15124 error ("builtin function %qs requires the %qs and %qs options",
15125 name, "-mhard-dfp", "-mpower8-vector");
15126 else if ((fnmask & RS6000_BTM_DFP) != 0)
15127 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15128 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15129 error ("builtin function %qs requires the %qs option", name,
15130 "-mpower8-vector");
15131 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15132 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15133 error ("builtin function %qs requires the %qs and %qs options",
15134 name, "-mcpu=power9", "-m64");
15135 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15136 error ("builtin function %qs requires the %qs option", name,
15137 "-mcpu=power9");
15138 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15139 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15140 error ("builtin function %qs requires the %qs and %qs options",
15141 name, "-mcpu=power9", "-m64");
15142 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15143 error ("builtin function %qs requires the %qs option", name,
15144 "-mcpu=power9");
15145 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15146 {
15147 if (!TARGET_HARD_FLOAT)
15148 error ("builtin function %qs requires the %qs option", name,
15149 "-mhard-float");
15150 else
15151 error ("builtin function %qs requires the %qs option", name,
15152 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15153 }
15154 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15155 error ("builtin function %qs requires the %qs option", name,
15156 "-mhard-float");
15157 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15158 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15159 name);
15160 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15161 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
15162 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15163 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15164 error ("builtin function %qs requires the %qs (or newer), and "
15165 "%qs or %qs options",
15166 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15167 else
15168 error ("builtin function %qs is not supported with the current options",
15169 name);
15170 }
15171
15172 /* Target hook for early folding of built-ins, shamelessly stolen
15173 from ia64.c. */
15174
15175 static tree
15176 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15177 int n_args ATTRIBUTE_UNUSED,
15178 tree *args ATTRIBUTE_UNUSED,
15179 bool ignore ATTRIBUTE_UNUSED)
15180 {
15181 #ifdef SUBTARGET_FOLD_BUILTIN
15182 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15183 #else
15184 return NULL_TREE;
15185 #endif
15186 }
15187
15188 /* Helper function to sort out which built-ins may be valid without having
15189 an LHS. */
15190 static bool
15191 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15192 {
15193 switch (fn_code)
15194 {
15195 case ALTIVEC_BUILTIN_STVX_V16QI:
15196 case ALTIVEC_BUILTIN_STVX_V8HI:
15197 case ALTIVEC_BUILTIN_STVX_V4SI:
15198 case ALTIVEC_BUILTIN_STVX_V4SF:
15199 case ALTIVEC_BUILTIN_STVX_V2DI:
15200 case ALTIVEC_BUILTIN_STVX_V2DF:
15201 case VSX_BUILTIN_STXVW4X_V16QI:
15202 case VSX_BUILTIN_STXVW4X_V8HI:
15203 case VSX_BUILTIN_STXVW4X_V4SF:
15204 case VSX_BUILTIN_STXVW4X_V4SI:
15205 case VSX_BUILTIN_STXVD2X_V2DF:
15206 case VSX_BUILTIN_STXVD2X_V2DI:
15207 return true;
15208 default:
15209 return false;
15210 }
15211 }
15212
15213 /* Helper function to handle the gimple folding of a vector compare
15214 operation. This sets up true/false vectors, and uses the
15215 VEC_COND_EXPR operation.
15216 CODE indicates which comparison is to be made (EQ, GT, ...).
15217 TYPE indicates the type of the result. */
15218 static tree
15219 fold_build_vec_cmp (tree_code code, tree type,
15220 tree arg0, tree arg1)
15221 {
15222 tree cmp_type = build_same_sized_truth_vector_type (type);
15223 tree zero_vec = build_zero_cst (type);
15224 tree minus_one_vec = build_minus_one_cst (type);
15225 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15226 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15227 }
15228
15229 /* Helper function to handle the in-between steps for the
15230 vector compare built-ins. */
15231 static void
15232 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15233 {
15234 tree arg0 = gimple_call_arg (stmt, 0);
15235 tree arg1 = gimple_call_arg (stmt, 1);
15236 tree lhs = gimple_call_lhs (stmt);
15237 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15238 gimple *g = gimple_build_assign (lhs, cmp);
15239 gimple_set_location (g, gimple_location (stmt));
15240 gsi_replace (gsi, g, true);
15241 }
15242
15243 /* Helper function to map V2DF and V4SF types to their
15244 integral equivalents (V2DI and V4SI). */
tree
15245 map_to_integral_tree_type (tree input_tree_type)
15246 {
15247 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15248 return input_tree_type;
15249 else
15250 {
15251 if (types_compatible_p (TREE_TYPE (input_tree_type),
15252 TREE_TYPE (V2DF_type_node)))
15253 return V2DI_type_node;
15254 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15255 TREE_TYPE (V4SF_type_node)))
15256 return V4SI_type_node;
15257 else
15258 gcc_unreachable ();
15259 }
15260 }
15261
15262 /* Helper function to handle the vector merge[hl] built-ins. The
15263 difference between the h and l versions lies in the values used
15264 when building the permute vector for a high word versus a low word
15265 merge. The variance is keyed off the use_high parameter. */
15266 static void
15267 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15268 {
15269 tree arg0 = gimple_call_arg (stmt, 0);
15270 tree arg1 = gimple_call_arg (stmt, 1);
15271 tree lhs = gimple_call_lhs (stmt);
15272 tree lhs_type = TREE_TYPE (lhs);
15273 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15274 int midpoint = n_elts / 2;
15275 int offset = 0;
15276
15277 if (use_high == 1)
15278 offset = midpoint;
15279
15280 /* The permute_type will match the lhs for integral types. For double and
15281 float types, the permute type needs to map to the V2 or V4 integer
15282 type of matching size. */
15283 tree permute_type;
15284 permute_type = map_to_integral_tree_type (lhs_type);
15285 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15286
15287 for (int i = 0; i < midpoint; i++)
15288 {
15289 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15290 offset + i));
15291 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15292 offset + n_elts + i));
15293 }
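/* E.g., for V4SI with use_high == 1, the loop above builds the
permute selector {2, 6, 3, 7}, interleaving the second halves of the
two input vectors. */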
15294
15295 tree permute = elts.build ();
15296
15297 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15298 gimple_set_location (g, gimple_location (stmt));
15299 gsi_replace (gsi, g, true);
15300 }
15301
15302 /* Helper function to handle the vector merge[eo] built-ins. */
15303 static void
15304 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15305 {
15306 tree arg0 = gimple_call_arg (stmt, 0);
15307 tree arg1 = gimple_call_arg (stmt, 1);
15308 tree lhs = gimple_call_lhs (stmt);
15309 tree lhs_type = TREE_TYPE (lhs);
15310 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15311
15312 /* The permute_type will match the lhs for integral types. For double and
15313 float types, the permute type needs to map to the V2 or V4 integer
15314 type of matching size. */
15315 tree permute_type;
15316 permute_type = map_to_integral_tree_type (lhs_type);
15317
15318 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15319
15320 /* Build the permute vector. */
15321 for (int i = 0; i < n_elts / 2; i++)
15322 {
15323 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15324 2*i + use_odd));
15325 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15326 2*i + use_odd + n_elts));
15327 }
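/* E.g., for V4SI with use_odd == 1, this builds the permute selector
{1, 5, 3, 7}, picking the odd-numbered elements of the two input
vectors. */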
15328
15329 tree permute = elts.build ();
15330
15331 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15332 gimple_set_location (g, gimple_location (stmt));
15333 gsi_replace (gsi, g, true);
15334 }
15335
15336 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15337 a constant, use rs6000_fold_builtin.) */
15338
15339 bool
15340 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15341 {
15342 gimple *stmt = gsi_stmt (*gsi);
15343 tree fndecl = gimple_call_fndecl (stmt);
15344 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15345 enum rs6000_builtins fn_code
15346 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15347 tree arg0, arg1, lhs, temp;
15348 gimple *g;
15349
15350 size_t uns_fncode = (size_t) fn_code;
15351 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15352 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15353 const char *fn_name2 = (icode != CODE_FOR_nothing)
15354 ? get_insn_name ((int) icode)
15355 : "nothing";
15356
15357 if (TARGET_DEBUG_BUILTIN)
15358 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15359 fn_code, fn_name1, fn_name2);
15360
15361 if (!rs6000_fold_gimple)
15362 return false;
15363
15364 /* Prevent gimple folding for code that does not have a LHS, unless it is
15365 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15366 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15367 return false;
15368
15369   /* Don't fold invalid builtins; let rs6000_expand_builtin diagnose them.  */
15370 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15371 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15372 if (!func_valid_p)
15373 return false;
15374
15375 switch (fn_code)
15376 {
15377 /* Flavors of vec_add. We deliberately don't expand
15378 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15379 TImode, resulting in much poorer code generation. */
15380 case ALTIVEC_BUILTIN_VADDUBM:
15381 case ALTIVEC_BUILTIN_VADDUHM:
15382 case ALTIVEC_BUILTIN_VADDUWM:
15383 case P8V_BUILTIN_VADDUDM:
15384 case ALTIVEC_BUILTIN_VADDFP:
15385 case VSX_BUILTIN_XVADDDP:
15386 arg0 = gimple_call_arg (stmt, 0);
15387 arg1 = gimple_call_arg (stmt, 1);
15388 lhs = gimple_call_lhs (stmt);
15389 g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
15390 gimple_set_location (g, gimple_location (stmt));
15391 gsi_replace (gsi, g, true);
15392 return true;
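    /* The assignment built above replaces e.g. x = vec_add (a, b) with the
       plain GIMPLE statement x = a + b, which later passes understand
       directly.  */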
15393 /* Flavors of vec_sub. We deliberately don't expand
15394 P8V_BUILTIN_VSUBUQM. */
15395 case ALTIVEC_BUILTIN_VSUBUBM:
15396 case ALTIVEC_BUILTIN_VSUBUHM:
15397 case ALTIVEC_BUILTIN_VSUBUWM:
15398 case P8V_BUILTIN_VSUBUDM:
15399 case ALTIVEC_BUILTIN_VSUBFP:
15400 case VSX_BUILTIN_XVSUBDP:
15401 arg0 = gimple_call_arg (stmt, 0);
15402 arg1 = gimple_call_arg (stmt, 1);
15403 lhs = gimple_call_lhs (stmt);
15404 g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
15405 gimple_set_location (g, gimple_location (stmt));
15406 gsi_replace (gsi, g, true);
15407 return true;
15408 case VSX_BUILTIN_XVMULSP:
15409 case VSX_BUILTIN_XVMULDP:
15410 arg0 = gimple_call_arg (stmt, 0);
15411 arg1 = gimple_call_arg (stmt, 1);
15412 lhs = gimple_call_lhs (stmt);
15413 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15414 gimple_set_location (g, gimple_location (stmt));
15415 gsi_replace (gsi, g, true);
15416 return true;
15417 /* Even element flavors of vec_mul (signed). */
15418 case ALTIVEC_BUILTIN_VMULESB:
15419 case ALTIVEC_BUILTIN_VMULESH:
15420 case P8V_BUILTIN_VMULESW:
15421 /* Even element flavors of vec_mul (unsigned). */
15422 case ALTIVEC_BUILTIN_VMULEUB:
15423 case ALTIVEC_BUILTIN_VMULEUH:
15424 case P8V_BUILTIN_VMULEUW:
15425 arg0 = gimple_call_arg (stmt, 0);
15426 arg1 = gimple_call_arg (stmt, 1);
15427 lhs = gimple_call_lhs (stmt);
15428 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15429 gimple_set_location (g, gimple_location (stmt));
15430 gsi_replace (gsi, g, true);
15431 return true;
15432 /* Odd element flavors of vec_mul (signed). */
15433 case ALTIVEC_BUILTIN_VMULOSB:
15434 case ALTIVEC_BUILTIN_VMULOSH:
15435 case P8V_BUILTIN_VMULOSW:
15436 /* Odd element flavors of vec_mul (unsigned). */
15437 case ALTIVEC_BUILTIN_VMULOUB:
15438 case ALTIVEC_BUILTIN_VMULOUH:
15439 case P8V_BUILTIN_VMULOUW:
15440 arg0 = gimple_call_arg (stmt, 0);
15441 arg1 = gimple_call_arg (stmt, 1);
15442 lhs = gimple_call_lhs (stmt);
15443 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15444 gimple_set_location (g, gimple_location (stmt));
15445 gsi_replace (gsi, g, true);
15446 return true;
15447 /* Flavors of vec_div (Integer). */
15448 case VSX_BUILTIN_DIV_V2DI:
15449 case VSX_BUILTIN_UDIV_V2DI:
15450 arg0 = gimple_call_arg (stmt, 0);
15451 arg1 = gimple_call_arg (stmt, 1);
15452 lhs = gimple_call_lhs (stmt);
15453 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15454 gimple_set_location (g, gimple_location (stmt));
15455 gsi_replace (gsi, g, true);
15456 return true;
15457 /* Flavors of vec_div (Float). */
15458 case VSX_BUILTIN_XVDIVSP:
15459 case VSX_BUILTIN_XVDIVDP:
15460 arg0 = gimple_call_arg (stmt, 0);
15461 arg1 = gimple_call_arg (stmt, 1);
15462 lhs = gimple_call_lhs (stmt);
15463 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15464 gimple_set_location (g, gimple_location (stmt));
15465 gsi_replace (gsi, g, true);
15466 return true;
15467 /* Flavors of vec_and. */
15468 case ALTIVEC_BUILTIN_VAND:
15469 arg0 = gimple_call_arg (stmt, 0);
15470 arg1 = gimple_call_arg (stmt, 1);
15471 lhs = gimple_call_lhs (stmt);
15472 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15473 gimple_set_location (g, gimple_location (stmt));
15474 gsi_replace (gsi, g, true);
15475 return true;
15476 /* Flavors of vec_andc. */
15477 case ALTIVEC_BUILTIN_VANDC:
15478 arg0 = gimple_call_arg (stmt, 0);
15479 arg1 = gimple_call_arg (stmt, 1);
15480 lhs = gimple_call_lhs (stmt);
15481 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15482 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15483 gimple_set_location (g, gimple_location (stmt));
15484 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15485 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15486 gimple_set_location (g, gimple_location (stmt));
15487 gsi_replace (gsi, g, true);
15488 return true;
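    /* Together the two statements built above compute lhs = arg0 & ~arg1,
       the vec_andc semantics, without a machine-specific operation.  */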
15489 /* Flavors of vec_nand. */
15490 case P8V_BUILTIN_VEC_NAND:
15491 case P8V_BUILTIN_NAND_V16QI:
15492 case P8V_BUILTIN_NAND_V8HI:
15493 case P8V_BUILTIN_NAND_V4SI:
15494 case P8V_BUILTIN_NAND_V4SF:
15495 case P8V_BUILTIN_NAND_V2DF:
15496 case P8V_BUILTIN_NAND_V2DI:
15497 arg0 = gimple_call_arg (stmt, 0);
15498 arg1 = gimple_call_arg (stmt, 1);
15499 lhs = gimple_call_lhs (stmt);
15500 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15501 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15502 gimple_set_location (g, gimple_location (stmt));
15503 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15504 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15505 gimple_set_location (g, gimple_location (stmt));
15506 gsi_replace (gsi, g, true);
15507 return true;
15508 /* Flavors of vec_or. */
15509 case ALTIVEC_BUILTIN_VOR:
15510 arg0 = gimple_call_arg (stmt, 0);
15511 arg1 = gimple_call_arg (stmt, 1);
15512 lhs = gimple_call_lhs (stmt);
15513 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15514 gimple_set_location (g, gimple_location (stmt));
15515 gsi_replace (gsi, g, true);
15516 return true;
15517     /* Flavors of vec_orc.  */
15518 case P8V_BUILTIN_ORC_V16QI:
15519 case P8V_BUILTIN_ORC_V8HI:
15520 case P8V_BUILTIN_ORC_V4SI:
15521 case P8V_BUILTIN_ORC_V4SF:
15522 case P8V_BUILTIN_ORC_V2DF:
15523 case P8V_BUILTIN_ORC_V2DI:
15524 arg0 = gimple_call_arg (stmt, 0);
15525 arg1 = gimple_call_arg (stmt, 1);
15526 lhs = gimple_call_lhs (stmt);
15527 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15528 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15529 gimple_set_location (g, gimple_location (stmt));
15530 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15531 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15532 gimple_set_location (g, gimple_location (stmt));
15533 gsi_replace (gsi, g, true);
15534 return true;
15535 /* Flavors of vec_xor. */
15536 case ALTIVEC_BUILTIN_VXOR:
15537 arg0 = gimple_call_arg (stmt, 0);
15538 arg1 = gimple_call_arg (stmt, 1);
15539 lhs = gimple_call_lhs (stmt);
15540 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15541 gimple_set_location (g, gimple_location (stmt));
15542 gsi_replace (gsi, g, true);
15543 return true;
15544 /* Flavors of vec_nor. */
15545 case ALTIVEC_BUILTIN_VNOR:
15546 arg0 = gimple_call_arg (stmt, 0);
15547 arg1 = gimple_call_arg (stmt, 1);
15548 lhs = gimple_call_lhs (stmt);
15549 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15550 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15551 gimple_set_location (g, gimple_location (stmt));
15552 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15553 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15554 gimple_set_location (g, gimple_location (stmt));
15555 gsi_replace (gsi, g, true);
15556 return true;
15557     /* Flavors of vec_abs.  */
15558 case ALTIVEC_BUILTIN_ABS_V16QI:
15559 case ALTIVEC_BUILTIN_ABS_V8HI:
15560 case ALTIVEC_BUILTIN_ABS_V4SI:
15561 case ALTIVEC_BUILTIN_ABS_V4SF:
15562 case P8V_BUILTIN_ABS_V2DI:
15563 case VSX_BUILTIN_XVABSDP:
15564 arg0 = gimple_call_arg (stmt, 0);
15565 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15566 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15567 return false;
15568 lhs = gimple_call_lhs (stmt);
15569 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15570 gimple_set_location (g, gimple_location (stmt));
15571 gsi_replace (gsi, g, true);
15572 return true;
15573     /* Flavors of vec_min.  */
15574 case VSX_BUILTIN_XVMINDP:
15575 case P8V_BUILTIN_VMINSD:
15576 case P8V_BUILTIN_VMINUD:
15577 case ALTIVEC_BUILTIN_VMINSB:
15578 case ALTIVEC_BUILTIN_VMINSH:
15579 case ALTIVEC_BUILTIN_VMINSW:
15580 case ALTIVEC_BUILTIN_VMINUB:
15581 case ALTIVEC_BUILTIN_VMINUH:
15582 case ALTIVEC_BUILTIN_VMINUW:
15583 case ALTIVEC_BUILTIN_VMINFP:
15584 arg0 = gimple_call_arg (stmt, 0);
15585 arg1 = gimple_call_arg (stmt, 1);
15586 lhs = gimple_call_lhs (stmt);
15587 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15588 gimple_set_location (g, gimple_location (stmt));
15589 gsi_replace (gsi, g, true);
15590 return true;
15591     /* Flavors of vec_max.  */
15592 case VSX_BUILTIN_XVMAXDP:
15593 case P8V_BUILTIN_VMAXSD:
15594 case P8V_BUILTIN_VMAXUD:
15595 case ALTIVEC_BUILTIN_VMAXSB:
15596 case ALTIVEC_BUILTIN_VMAXSH:
15597 case ALTIVEC_BUILTIN_VMAXSW:
15598 case ALTIVEC_BUILTIN_VMAXUB:
15599 case ALTIVEC_BUILTIN_VMAXUH:
15600 case ALTIVEC_BUILTIN_VMAXUW:
15601 case ALTIVEC_BUILTIN_VMAXFP:
15602 arg0 = gimple_call_arg (stmt, 0);
15603 arg1 = gimple_call_arg (stmt, 1);
15604 lhs = gimple_call_lhs (stmt);
15605 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15606 gimple_set_location (g, gimple_location (stmt));
15607 gsi_replace (gsi, g, true);
15608 return true;
15609 /* Flavors of vec_eqv. */
15610 case P8V_BUILTIN_EQV_V16QI:
15611 case P8V_BUILTIN_EQV_V8HI:
15612 case P8V_BUILTIN_EQV_V4SI:
15613 case P8V_BUILTIN_EQV_V4SF:
15614 case P8V_BUILTIN_EQV_V2DF:
15615 case P8V_BUILTIN_EQV_V2DI:
15616 arg0 = gimple_call_arg (stmt, 0);
15617 arg1 = gimple_call_arg (stmt, 1);
15618 lhs = gimple_call_lhs (stmt);
15619 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15620 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15621 gimple_set_location (g, gimple_location (stmt));
15622 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15623 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15624 gimple_set_location (g, gimple_location (stmt));
15625 gsi_replace (gsi, g, true);
15626 return true;
15627 /* Flavors of vec_rotate_left. */
15628 case ALTIVEC_BUILTIN_VRLB:
15629 case ALTIVEC_BUILTIN_VRLH:
15630 case ALTIVEC_BUILTIN_VRLW:
15631 case P8V_BUILTIN_VRLD:
15632 arg0 = gimple_call_arg (stmt, 0);
15633 arg1 = gimple_call_arg (stmt, 1);
15634 lhs = gimple_call_lhs (stmt);
15635 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15636 gimple_set_location (g, gimple_location (stmt));
15637 gsi_replace (gsi, g, true);
15638 return true;
15639 /* Flavors of vector shift right algebraic.
15640 vec_sra{b,h,w} -> vsra{b,h,w}. */
15641 case ALTIVEC_BUILTIN_VSRAB:
15642 case ALTIVEC_BUILTIN_VSRAH:
15643 case ALTIVEC_BUILTIN_VSRAW:
15644 case P8V_BUILTIN_VSRAD:
15645 arg0 = gimple_call_arg (stmt, 0);
15646 arg1 = gimple_call_arg (stmt, 1);
15647 lhs = gimple_call_lhs (stmt);
15648 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
15649 gimple_set_location (g, gimple_location (stmt));
15650 gsi_replace (gsi, g, true);
15651 return true;
15652 /* Flavors of vector shift left.
15653 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15654 case ALTIVEC_BUILTIN_VSLB:
15655 case ALTIVEC_BUILTIN_VSLH:
15656 case ALTIVEC_BUILTIN_VSLW:
15657 case P8V_BUILTIN_VSLD:
15658 {
15659 location_t loc;
15660 gimple_seq stmts = NULL;
15661 arg0 = gimple_call_arg (stmt, 0);
15662 tree arg0_type = TREE_TYPE (arg0);
15663 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15664 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15665 return false;
15666 arg1 = gimple_call_arg (stmt, 1);
15667 tree arg1_type = TREE_TYPE (arg1);
15668 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15669 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15670 loc = gimple_location (stmt);
15671 lhs = gimple_call_lhs (stmt);
15672 	/* Force arg1 into the range valid for the arg0 type.  */
15673 /* Build a vector consisting of the max valid bit-size values. */
15674 int n_elts = VECTOR_CST_NELTS (arg1);
15675 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15676 * BITS_PER_UNIT;
15677 tree element_size = build_int_cst (unsigned_element_type,
15678 tree_size_in_bits / n_elts);
15679 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15680 for (int i = 0; i < n_elts; i++)
15681 elts.safe_push (element_size);
15682 tree modulo_tree = elts.build ();
15683 /* Modulo the provided shift value against that vector. */
15684 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15685 unsigned_arg1_type, arg1);
15686 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15687 unsigned_arg1_type, unsigned_arg1,
15688 modulo_tree);
15689 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15690 /* And finally, do the shift. */
15691 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15692 gimple_set_location (g, gimple_location (stmt));
15693 gsi_replace (gsi, g, true);
15694 return true;
15695 }
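    /* For instance, with V4SI operands each element is 32 bits wide, so the
       shift counts are reduced modulo 32 above; a count of 33 shifts by 1.
       This keeps the LSHIFT_EXPR from seeing an out-of-range (undefined)
       shift count.  */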
15696 /* Flavors of vector shift right. */
15697 case ALTIVEC_BUILTIN_VSRB:
15698 case ALTIVEC_BUILTIN_VSRH:
15699 case ALTIVEC_BUILTIN_VSRW:
15700 case P8V_BUILTIN_VSRD:
15701 {
15702 arg0 = gimple_call_arg (stmt, 0);
15703 arg1 = gimple_call_arg (stmt, 1);
15704 lhs = gimple_call_lhs (stmt);
15705 gimple_seq stmts = NULL;
15706 /* Convert arg0 to unsigned. */
15707 tree arg0_unsigned
15708 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15709 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15710 tree res
15711 = gimple_build (&stmts, RSHIFT_EXPR,
15712 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
15713 /* Convert result back to the lhs type. */
15714 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15715 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15716 update_call_from_tree (gsi, res);
15717 return true;
15718 }
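    /* The VIEW_CONVERT_EXPRs above make the shift logical regardless of the
       signedness of the operands: arg0 is reinterpreted as unsigned, shifted,
       and the result is reinterpreted back to the lhs type.  */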
15719 /* Vector loads. */
15720 case ALTIVEC_BUILTIN_LVX_V16QI:
15721 case ALTIVEC_BUILTIN_LVX_V8HI:
15722 case ALTIVEC_BUILTIN_LVX_V4SI:
15723 case ALTIVEC_BUILTIN_LVX_V4SF:
15724 case ALTIVEC_BUILTIN_LVX_V2DI:
15725 case ALTIVEC_BUILTIN_LVX_V2DF:
15726 case ALTIVEC_BUILTIN_LVX_V1TI:
15727 {
15728 	arg0 = gimple_call_arg (stmt, 0);  /* Offset.  */
15729 	arg1 = gimple_call_arg (stmt, 1);  /* Address.  */
15730 lhs = gimple_call_lhs (stmt);
15731 location_t loc = gimple_location (stmt);
15732 /* Since arg1 may be cast to a different type, just use ptr_type_node
15733 here instead of trying to enforce TBAA on pointer types. */
15734 tree arg1_type = ptr_type_node;
15735 tree lhs_type = TREE_TYPE (lhs);
15736 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15737 the tree using the value from arg0. The resulting type will match
15738 the type of arg1. */
15739 gimple_seq stmts = NULL;
15740 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15741 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15742 arg1_type, arg1, temp_offset);
15743 /* Mask off any lower bits from the address. */
15744 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15745 arg1_type, temp_addr,
15746 build_int_cst (arg1_type, -16));
15747 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15748 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15749 take an offset, but since we've already incorporated the offset
15750 above, here we just pass in a zero. */
15751 gimple *g
15752 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15753 build_int_cst (arg1_type, 0)));
15754 gimple_set_location (g, loc);
15755 gsi_replace (gsi, g, true);
15756 return true;
15757 }
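    /* The GIMPLE built above is roughly:
	 addr = (void *) arg1 + (sizetype) arg0;
	 addr = addr & -16;
	 lhs = MEM[addr];
       mirroring the lvx behavior of ignoring the low four bits of the
       effective address.  */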
15758 /* Vector stores. */
15759 case ALTIVEC_BUILTIN_STVX_V16QI:
15760 case ALTIVEC_BUILTIN_STVX_V8HI:
15761 case ALTIVEC_BUILTIN_STVX_V4SI:
15762 case ALTIVEC_BUILTIN_STVX_V4SF:
15763 case ALTIVEC_BUILTIN_STVX_V2DI:
15764 case ALTIVEC_BUILTIN_STVX_V2DF:
15765 {
15766 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15767 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15768 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15769 location_t loc = gimple_location (stmt);
15770 tree arg0_type = TREE_TYPE (arg0);
15771 /* Use ptr_type_node (no TBAA) for the arg2_type.
15772 FIXME: (Richard) "A proper fix would be to transition this type as
15773 seen from the frontend to GIMPLE, for example in a similar way we
15774 do for MEM_REFs by piggy-backing that on an extra argument, a
15775 constant zero pointer of the alias pointer type to use (which would
15776 also serve as a type indicator of the store itself). I'd use a
15777 target specific internal function for this (not sure if we can have
15778 those target specific, but I guess if it's folded away then that's
15779 fine) and get away with the overload set." */
15780 tree arg2_type = ptr_type_node;
15781 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15782 	   the tree using the value from arg1.  The resulting type will match
15783 the type of arg2. */
15784 gimple_seq stmts = NULL;
15785 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15786 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15787 arg2_type, arg2, temp_offset);
15788 /* Mask off any lower bits from the address. */
15789 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15790 arg2_type, temp_addr,
15791 build_int_cst (arg2_type, -16));
15792 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15793 /* The desired gimple result should be similar to:
15794 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15795 gimple *g
15796 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15797 build_int_cst (arg2_type, 0)), arg0);
15798 gimple_set_location (g, loc);
15799 gsi_replace (gsi, g, true);
15800 return true;
15801 }
15802
15803     /* Unaligned vector loads.  */
15804 case VSX_BUILTIN_LXVW4X_V16QI:
15805 case VSX_BUILTIN_LXVW4X_V8HI:
15806 case VSX_BUILTIN_LXVW4X_V4SF:
15807 case VSX_BUILTIN_LXVW4X_V4SI:
15808 case VSX_BUILTIN_LXVD2X_V2DF:
15809 case VSX_BUILTIN_LXVD2X_V2DI:
15810 {
15811 	arg0 = gimple_call_arg (stmt, 0);  /* Offset.  */
15812 	arg1 = gimple_call_arg (stmt, 1);  /* Address.  */
15813 lhs = gimple_call_lhs (stmt);
15814 location_t loc = gimple_location (stmt);
15815 /* Since arg1 may be cast to a different type, just use ptr_type_node
15816 here instead of trying to enforce TBAA on pointer types. */
15817 tree arg1_type = ptr_type_node;
15818 tree lhs_type = TREE_TYPE (lhs);
15819 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15820 required alignment (power) is 4 bytes regardless of data type. */
15821 tree align_ltype = build_aligned_type (lhs_type, 4);
15822 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15823 the tree using the value from arg0. The resulting type will match
15824 the type of arg1. */
15825 gimple_seq stmts = NULL;
15826 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15827 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15828 arg1_type, arg1, temp_offset);
15829 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15830 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15831 take an offset, but since we've already incorporated the offset
15832 above, here we just pass in a zero. */
15833 gimple *g;
15834 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15835 build_int_cst (arg1_type, 0)));
15836 gimple_set_location (g, loc);
15837 gsi_replace (gsi, g, true);
15838 return true;
15839 }
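    /* Unlike the lvx case above, the address is not masked here; the 4-byte
       aligned MEM_REF type communicates the weaker alignment guarantee to
       later passes instead.  */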
15840
15841     /* Unaligned vector stores.  */
15842 case VSX_BUILTIN_STXVW4X_V16QI:
15843 case VSX_BUILTIN_STXVW4X_V8HI:
15844 case VSX_BUILTIN_STXVW4X_V4SF:
15845 case VSX_BUILTIN_STXVW4X_V4SI:
15846 case VSX_BUILTIN_STXVD2X_V2DF:
15847 case VSX_BUILTIN_STXVD2X_V2DI:
15848 {
15849 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15850 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15851 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15852 location_t loc = gimple_location (stmt);
15853 tree arg0_type = TREE_TYPE (arg0);
15854 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15855 tree arg2_type = ptr_type_node;
15856 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15857 required alignment (power) is 4 bytes regardless of data type. */
15858 tree align_stype = build_aligned_type (arg0_type, 4);
15859 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15860 the tree using the value from arg1. */
15861 gimple_seq stmts = NULL;
15862 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15863 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15864 arg2_type, arg2, temp_offset);
15865 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15866 gimple *g;
15867 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15868 build_int_cst (arg2_type, 0)), arg0);
15869 gimple_set_location (g, loc);
15870 gsi_replace (gsi, g, true);
15871 return true;
15872 }
15873
15874 /* Vector Fused multiply-add (fma). */
15875 case ALTIVEC_BUILTIN_VMADDFP:
15876 case VSX_BUILTIN_XVMADDDP:
15877 case ALTIVEC_BUILTIN_VMLADDUHM:
15878 {
15879 arg0 = gimple_call_arg (stmt, 0);
15880 arg1 = gimple_call_arg (stmt, 1);
15881 tree arg2 = gimple_call_arg (stmt, 2);
15882 lhs = gimple_call_lhs (stmt);
15883 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15884 gimple_call_set_lhs (g, lhs);
15885 gimple_call_set_nothrow (g, true);
15886 gimple_set_location (g, gimple_location (stmt));
15887 gsi_replace (gsi, g, true);
15888 return true;
15889 }
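    /* The call built above replaces e.g. vec_madd (a, b, c) with the
       internal function call lhs = .FMA (a, b, c), exposing the fused
       multiply-add to the rest of the middle end.  */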
15890
15891 /* Vector compares; EQ, NE, GE, GT, LE. */
15892 case ALTIVEC_BUILTIN_VCMPEQUB:
15893 case ALTIVEC_BUILTIN_VCMPEQUH:
15894 case ALTIVEC_BUILTIN_VCMPEQUW:
15895 case P8V_BUILTIN_VCMPEQUD:
15896 fold_compare_helper (gsi, EQ_EXPR, stmt);
15897 return true;
15898
15899 case P9V_BUILTIN_CMPNEB:
15900 case P9V_BUILTIN_CMPNEH:
15901 case P9V_BUILTIN_CMPNEW:
15902 fold_compare_helper (gsi, NE_EXPR, stmt);
15903 return true;
15904
15905 case VSX_BUILTIN_CMPGE_16QI:
15906 case VSX_BUILTIN_CMPGE_U16QI:
15907 case VSX_BUILTIN_CMPGE_8HI:
15908 case VSX_BUILTIN_CMPGE_U8HI:
15909 case VSX_BUILTIN_CMPGE_4SI:
15910 case VSX_BUILTIN_CMPGE_U4SI:
15911 case VSX_BUILTIN_CMPGE_2DI:
15912 case VSX_BUILTIN_CMPGE_U2DI:
15913 fold_compare_helper (gsi, GE_EXPR, stmt);
15914 return true;
15915
15916 case ALTIVEC_BUILTIN_VCMPGTSB:
15917 case ALTIVEC_BUILTIN_VCMPGTUB:
15918 case ALTIVEC_BUILTIN_VCMPGTSH:
15919 case ALTIVEC_BUILTIN_VCMPGTUH:
15920 case ALTIVEC_BUILTIN_VCMPGTSW:
15921 case ALTIVEC_BUILTIN_VCMPGTUW:
15922 case P8V_BUILTIN_VCMPGTUD:
15923 case P8V_BUILTIN_VCMPGTSD:
15924 fold_compare_helper (gsi, GT_EXPR, stmt);
15925 return true;
15926
15927 case VSX_BUILTIN_CMPLE_16QI:
15928 case VSX_BUILTIN_CMPLE_U16QI:
15929 case VSX_BUILTIN_CMPLE_8HI:
15930 case VSX_BUILTIN_CMPLE_U8HI:
15931 case VSX_BUILTIN_CMPLE_4SI:
15932 case VSX_BUILTIN_CMPLE_U4SI:
15933 case VSX_BUILTIN_CMPLE_2DI:
15934 case VSX_BUILTIN_CMPLE_U2DI:
15935 fold_compare_helper (gsi, LE_EXPR, stmt);
15936 return true;
15937
15938     /* Flavors of vec_splat_[us]{8,16,32}.  */
15939 case ALTIVEC_BUILTIN_VSPLTISB:
15940 case ALTIVEC_BUILTIN_VSPLTISH:
15941 case ALTIVEC_BUILTIN_VSPLTISW:
15942 {
15943 int size;
15944 if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
15945 size = 8;
15946 else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
15947 size = 16;
15948 else
15949 size = 32;
15950
15951 arg0 = gimple_call_arg (stmt, 0);
15952 lhs = gimple_call_lhs (stmt);
15953
15954 	/* Only fold the vec_splat_*() if the lower bits of arg 0 are a
15955 	   5-bit signed constant in the range -16 to +15.  */
15956 if (TREE_CODE (arg0) != INTEGER_CST
15957 || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
15958 -16, 15))
15959 return false;
15960 gimple_seq stmts = NULL;
15961 location_t loc = gimple_location (stmt);
15962 tree splat_value = gimple_convert (&stmts, loc,
15963 TREE_TYPE (TREE_TYPE (lhs)), arg0);
15964 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15965 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
15966 g = gimple_build_assign (lhs, splat_tree);
15967 gimple_set_location (g, gimple_location (stmt));
15968 gsi_replace (gsi, g, true);
15969 return true;
15970 }
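    /* For example, vec_splat_s32 (-3) satisfies the range check above and
       folds to the constant vector { -3, -3, -3, -3 }.  */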
15971
15972 /* Flavors of vec_splat. */
15973 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
15974 case ALTIVEC_BUILTIN_VSPLTB:
15975 case ALTIVEC_BUILTIN_VSPLTH:
15976 case ALTIVEC_BUILTIN_VSPLTW:
15977 case VSX_BUILTIN_XXSPLTD_V2DI:
15978 case VSX_BUILTIN_XXSPLTD_V2DF:
15979 {
15980 	arg0 = gimple_call_arg (stmt, 0);  /* Input vector.  */
15981 	arg1 = gimple_call_arg (stmt, 1);  /* Index into arg0.  */
15982 	/* Only fold vec_splat () if arg1 is a constant value and a valid
15983 	   index into the arg0 vector.  */
15984 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
15985 if (TREE_CODE (arg1) != INTEGER_CST
15986 	    || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
15987 return false;
15988 lhs = gimple_call_lhs (stmt);
15989 tree lhs_type = TREE_TYPE (lhs);
15990 tree arg0_type = TREE_TYPE (arg0);
15991 tree splat;
15992 if (TREE_CODE (arg0) == VECTOR_CST)
15993 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
15994 else
15995 {
15996 /* Determine (in bits) the length and start location of the
15997 splat value for a call to the tree_vec_extract helper. */
15998 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
15999 * BITS_PER_UNIT / n_elts;
16000 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
16001 tree len = build_int_cst (bitsizetype, splat_elem_size);
16002 tree start = build_int_cst (bitsizetype, splat_start_bit);
16003 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
16004 len, start);
16005 }
16006 /* And finally, build the new vector. */
16007 tree splat_tree = build_vector_from_val (lhs_type, splat);
16008 g = gimple_build_assign (lhs, splat_tree);
16009 gimple_set_location (g, gimple_location (stmt));
16010 gsi_replace (gsi, g, true);
16011 return true;
16012 }
16013
16014     /* vec_mergel (integral and float types).  */
16015 case ALTIVEC_BUILTIN_VMRGLH:
16016 case ALTIVEC_BUILTIN_VMRGLW:
16017 case VSX_BUILTIN_XXMRGLW_4SI:
16018 case ALTIVEC_BUILTIN_VMRGLB:
16019 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16020 case VSX_BUILTIN_XXMRGLW_4SF:
16021 case VSX_BUILTIN_VEC_MERGEL_V2DF:
16022 fold_mergehl_helper (gsi, stmt, 1);
16023 return true;
16024     /* vec_mergeh (integral and float types).  */
16025 case ALTIVEC_BUILTIN_VMRGHH:
16026 case ALTIVEC_BUILTIN_VMRGHW:
16027 case VSX_BUILTIN_XXMRGHW_4SI:
16028 case ALTIVEC_BUILTIN_VMRGHB:
16029 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16030 case VSX_BUILTIN_XXMRGHW_4SF:
16031 case VSX_BUILTIN_VEC_MERGEH_V2DF:
16032 fold_mergehl_helper (gsi, stmt, 0);
16033 return true;
16034
16035 /* Flavors of vec_mergee. */
16036 case P8V_BUILTIN_VMRGEW_V4SI:
16037 case P8V_BUILTIN_VMRGEW_V2DI:
16038 case P8V_BUILTIN_VMRGEW_V4SF:
16039 case P8V_BUILTIN_VMRGEW_V2DF:
16040 fold_mergeeo_helper (gsi, stmt, 0);
16041 return true;
16042 /* Flavors of vec_mergeo. */
16043 case P8V_BUILTIN_VMRGOW_V4SI:
16044 case P8V_BUILTIN_VMRGOW_V2DI:
16045 case P8V_BUILTIN_VMRGOW_V4SF:
16046 case P8V_BUILTIN_VMRGOW_V2DF:
16047 fold_mergeeo_helper (gsi, stmt, 1);
16048 return true;
16049
16050     /* d = vec_pack (a, b).  */
16051 case P8V_BUILTIN_VPKUDUM:
16052 case ALTIVEC_BUILTIN_VPKUHUM:
16053 case ALTIVEC_BUILTIN_VPKUWUM:
16054 {
16055 arg0 = gimple_call_arg (stmt, 0);
16056 arg1 = gimple_call_arg (stmt, 1);
16057 lhs = gimple_call_lhs (stmt);
16058 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
16059 gimple_set_location (g, gimple_location (stmt));
16060 gsi_replace (gsi, g, true);
16061 return true;
16062 }
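    /* VEC_PACK_TRUNC_EXPR truncates each element of the two operands to half
       width and concatenates the results, so e.g. two V2DI operands produce
       a single V4SI value.  */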
16063
16064     /* d = vec_unpackh (a).  */
16065     /* Note that the VEC_UNPACK_{HI,LO}_EXPR used in the gimple_build_assign
16066        call in this code is sensitive to endianness, and needs to be inverted
16067        to handle both LE and BE targets.  */
16068 case ALTIVEC_BUILTIN_VUPKHSB:
16069 case ALTIVEC_BUILTIN_VUPKHSH:
16070 case P8V_BUILTIN_VUPKHSW:
16071 {
16072 arg0 = gimple_call_arg (stmt, 0);
16073 lhs = gimple_call_lhs (stmt);
16074 if (BYTES_BIG_ENDIAN)
16075 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16076 else
16077 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16078 gimple_set_location (g, gimple_location (stmt));
16079 gsi_replace (gsi, g, true);
16080 return true;
16081 }
16082     /* d = vec_unpackl (a).  */
16083 case ALTIVEC_BUILTIN_VUPKLSB:
16084 case ALTIVEC_BUILTIN_VUPKLSH:
16085 case P8V_BUILTIN_VUPKLSW:
16086 {
16087 arg0 = gimple_call_arg (stmt, 0);
16088 lhs = gimple_call_lhs (stmt);
16089 if (BYTES_BIG_ENDIAN)
16090 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16091 else
16092 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16093 gimple_set_location (g, gimple_location (stmt));
16094 gsi_replace (gsi, g, true);
16095 return true;
16096 }
16097     /* There is no gimple type corresponding to pixel, so just return.  */
16098 case ALTIVEC_BUILTIN_VUPKHPX:
16099 case ALTIVEC_BUILTIN_VUPKLPX:
16100 return false;
16101
16102 /* vec_perm. */
16103 case ALTIVEC_BUILTIN_VPERM_16QI:
16104 case ALTIVEC_BUILTIN_VPERM_8HI:
16105 case ALTIVEC_BUILTIN_VPERM_4SI:
16106 case ALTIVEC_BUILTIN_VPERM_2DI:
16107 case ALTIVEC_BUILTIN_VPERM_4SF:
16108 case ALTIVEC_BUILTIN_VPERM_2DF:
16109 {
16110 arg0 = gimple_call_arg (stmt, 0);
16111 arg1 = gimple_call_arg (stmt, 1);
16112 tree permute = gimple_call_arg (stmt, 2);
16113 lhs = gimple_call_lhs (stmt);
16114 location_t loc = gimple_location (stmt);
16115 gimple_seq stmts = NULL;
16116 	/* Convert arg0 and arg1 to match the type of the permute
16117 	   for the VEC_PERM_EXPR operation.  */
16118 tree permute_type = (TREE_TYPE (permute));
16119 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
16120 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
16121 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
16122 permute_type, arg0_ptype, arg1_ptype,
16123 permute);
16124 	/* Convert the result back to the desired lhs type upon completion.  */
16125 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
16126 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16127 g = gimple_build_assign (lhs, temp);
16128 gimple_set_location (g, loc);
16129 gsi_replace (gsi, g, true);
16130 return true;
16131 }
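    /* The conversions above let one byte-wise VEC_PERM_EXPR serve every
       element width: both inputs are brought to the type of the V16QI
       permute control vector and the result is converted back to the lhs
       type afterwards.  */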
16132
16133 default:
16134 if (TARGET_DEBUG_BUILTIN)
16135 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16136 fn_code, fn_name1, fn_name2);
16137 break;
16138 }
16139
16140 return false;
16141 }
16142
16143 /* Expand an expression EXP that calls a built-in function,
16144 with result going to TARGET if that's convenient
16145 (and in mode MODE if that's convenient).
16146 SUBTARGET may be used as the target for computing one of EXP's operands.
16147 IGNORE is nonzero if the value is to be ignored. */
16148
16149 static rtx
16150 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16151 machine_mode mode ATTRIBUTE_UNUSED,
16152 int ignore ATTRIBUTE_UNUSED)
16153 {
16154 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16155 enum rs6000_builtins fcode
16156 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16157 size_t uns_fcode = (size_t)fcode;
16158 const struct builtin_description *d;
16159 size_t i;
16160 rtx ret;
16161 bool success;
16162 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16163 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16164 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16165
16166 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16167 floating point type, depending on whether long double is the IBM extended
16168 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16169 we only define one variant of the built-in function, and switch the code
16170 when defining it, rather than defining two built-ins and using the
16171 overload table in rs6000-c.c to switch between the two. If we don't have
16172 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16173 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16174 if (FLOAT128_IEEE_P (TFmode))
16175 switch (icode)
16176 {
16177 default:
16178 break;
16179
16180 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16181 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16182 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16183 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16184 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16185 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16186 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16187 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16188 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16189 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16190 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16191 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16192 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16193 }
16194
16195 if (TARGET_DEBUG_BUILTIN)
16196 {
16197 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16198 const char *name2 = (icode != CODE_FOR_nothing)
16199 ? get_insn_name ((int) icode)
16200 : "nothing";
16201 const char *name3;
16202
16203 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16204 {
16205 default: name3 = "unknown"; break;
16206 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16207 case RS6000_BTC_UNARY: name3 = "unary"; break;
16208 case RS6000_BTC_BINARY: name3 = "binary"; break;
16209 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16210 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16211 case RS6000_BTC_ABS: name3 = "abs"; break;
16212 case RS6000_BTC_DST: name3 = "dst"; break;
16213 }
16214
16215
16216 fprintf (stderr,
16217 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16218 (name1) ? name1 : "---", fcode,
16219 (name2) ? name2 : "---", (int) icode,
16220 name3,
16221 func_valid_p ? "" : ", not valid");
16222 }
16223
16224 if (!func_valid_p)
16225 {
16226 rs6000_invalid_builtin (fcode);
16227
16228 /* Given it is invalid, just generate a normal call. */
16229 return expand_call (exp, target, ignore);
16230 }
16231
16232 switch (fcode)
16233 {
16234 case RS6000_BUILTIN_RECIP:
16235 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16236
16237 case RS6000_BUILTIN_RECIPF:
16238 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16239
16240 case RS6000_BUILTIN_RSQRTF:
16241 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16242
16243 case RS6000_BUILTIN_RSQRT:
16244 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16245
16246 case POWER7_BUILTIN_BPERMD:
16247 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16248 ? CODE_FOR_bpermd_di
16249 : CODE_FOR_bpermd_si), exp, target);
16250
16251 case RS6000_BUILTIN_GET_TB:
16252 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16253 target);
16254
16255 case RS6000_BUILTIN_MFTB:
16256 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16257 ? CODE_FOR_rs6000_mftb_di
16258 : CODE_FOR_rs6000_mftb_si),
16259 target);
16260
16261 case RS6000_BUILTIN_MFFS:
16262 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16263
16264 case RS6000_BUILTIN_MTFSB0:
16265 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16266
16267 case RS6000_BUILTIN_MTFSB1:
16268 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16269
16270 case RS6000_BUILTIN_SET_FPSCR_RN:
16271 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16272 exp);
16273
16274 case RS6000_BUILTIN_SET_FPSCR_DRN:
16275 return
16276 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16277 exp);
16278
16279 case RS6000_BUILTIN_MFFSL:
16280 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16281
16282 case RS6000_BUILTIN_MTFSF:
16283 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16284
16285 case RS6000_BUILTIN_CPU_INIT:
16286 case RS6000_BUILTIN_CPU_IS:
16287 case RS6000_BUILTIN_CPU_SUPPORTS:
16288 return cpu_expand_builtin (fcode, exp, target);
16289
16290 case MISC_BUILTIN_SPEC_BARRIER:
16291 {
16292 emit_insn (gen_speculation_barrier ());
16293 return NULL_RTX;
16294 }
16295
16296 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16297 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16298 {
16299 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16300 : (int) CODE_FOR_altivec_lvsl_direct);
16301 machine_mode tmode = insn_data[icode2].operand[0].mode;
16302 machine_mode mode = insn_data[icode2].operand[1].mode;
16303 tree arg;
16304 rtx op, addr, pat;
16305
16306 gcc_assert (TARGET_ALTIVEC);
16307
16308 arg = CALL_EXPR_ARG (exp, 0);
16309 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16310 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16311 addr = memory_address (mode, op);
16312 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16313 op = addr;
16314 else
16315 {
16316 	    /* For the load case we need to negate the address.  */
16317 op = gen_reg_rtx (GET_MODE (addr));
16318 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16319 }
16320 op = gen_rtx_MEM (mode, op);
16321
16322 if (target == 0
16323 || GET_MODE (target) != tmode
16324 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16325 target = gen_reg_rtx (tmode);
16326
16327 pat = GEN_FCN (icode2) (target, op);
16328 if (!pat)
16329 return 0;
16330 emit_insn (pat);
16331
16332 return target;
16333 }
16334
16335 case ALTIVEC_BUILTIN_VCFUX:
16336 case ALTIVEC_BUILTIN_VCFSX:
16337 case ALTIVEC_BUILTIN_VCTUXS:
16338 case ALTIVEC_BUILTIN_VCTSXS:
16339 /* FIXME: There's got to be a nicer way to handle this case than
16340 constructing a new CALL_EXPR. */
16341 if (call_expr_nargs (exp) == 1)
16342 {
16343 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16344 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16345 }
16346 break;
16347
16348 /* For the pack and unpack int128 routines, fix up the builtin so it
16349 uses the correct IBM128 type. */
16350 case MISC_BUILTIN_PACK_IF:
16351 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16352 {
16353 icode = CODE_FOR_packtf;
16354 fcode = MISC_BUILTIN_PACK_TF;
16355 uns_fcode = (size_t)fcode;
16356 }
16357 break;
16358
16359 case MISC_BUILTIN_UNPACK_IF:
16360 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16361 {
16362 icode = CODE_FOR_unpacktf;
16363 fcode = MISC_BUILTIN_UNPACK_TF;
16364 uns_fcode = (size_t)fcode;
16365 }
16366 break;
16367
16368 default:
16369 break;
16370 }
16371
16372 if (TARGET_ALTIVEC)
16373 {
16374 ret = altivec_expand_builtin (exp, target, &success);
16375
16376 if (success)
16377 return ret;
16378 }
16379 if (TARGET_HTM)
16380 {
16381 ret = htm_expand_builtin (exp, target, &success);
16382
16383 if (success)
16384 return ret;
16385 }
16386
16387 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16388 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16389 gcc_assert (attr == RS6000_BTC_UNARY
16390 || attr == RS6000_BTC_BINARY
16391 || attr == RS6000_BTC_TERNARY
16392 || attr == RS6000_BTC_SPECIAL);
16393
16394 /* Handle simple unary operations. */
16395 d = bdesc_1arg;
16396 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16397 if (d->code == fcode)
16398 return rs6000_expand_unop_builtin (icode, exp, target);
16399
16400 /* Handle simple binary operations. */
16401 d = bdesc_2arg;
16402 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16403 if (d->code == fcode)
16404 return rs6000_expand_binop_builtin (icode, exp, target);
16405
16406 /* Handle simple ternary operations. */
16407 d = bdesc_3arg;
16408 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16409 if (d->code == fcode)
16410 return rs6000_expand_ternop_builtin (icode, exp, target);
16411
16412 /* Handle simple no-argument operations. */
16413 d = bdesc_0arg;
16414 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16415 if (d->code == fcode)
16416 return rs6000_expand_zeroop_builtin (icode, target);
16417
16418 gcc_unreachable ();
16419 }
16420
16421 /* Create a builtin vector type with a name, taking care not to give
16422    the canonical type a name.  */
16423
16424 static tree
16425 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16426 {
16427 tree result = build_vector_type (elt_type, num_elts);
16428
16429 /* Copy so we don't give the canonical type a name. */
16430 result = build_variant_type_copy (result);
16431
16432 add_builtin_type (name, result);
16433
16434 return result;
16435 }
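/* For example, rs6000_vector_type ("__vector float", float_type_node, 4)
   makes a named variant of V4SF for the AltiVec keyword syntax while the
   canonical V4SF type itself stays unnamed.  */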
16436
16437 static void
16438 rs6000_init_builtins (void)
16439 {
16440 tree tdecl;
16441 tree ftype;
16442 machine_mode mode;
16443
16444 if (TARGET_DEBUG_BUILTIN)
16445 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16446 (TARGET_ALTIVEC) ? ", altivec" : "",
16447 (TARGET_VSX) ? ", vsx" : "");
16448
16449 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16450 : "__vector long long",
16451 intDI_type_node, 2);
16452 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16453 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16454 intSI_type_node, 4);
16455 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16456 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16457 intHI_type_node, 8);
16458 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16459 intQI_type_node, 16);
16460
16461 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16462 unsigned_intQI_type_node, 16);
16463 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16464 unsigned_intHI_type_node, 8);
16465 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16466 unsigned_intSI_type_node, 4);
16467 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16468 ? "__vector unsigned long"
16469 : "__vector unsigned long long",
16470 unsigned_intDI_type_node, 2);
16471
16472 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16473
16474 const_str_type_node
16475 = build_pointer_type (build_qualified_type (char_type_node,
16476 TYPE_QUAL_CONST));
16477
16478 /* We use V1TI mode as a special container to hold __int128_t items that
16479 must live in VSX registers. */
16480 if (intTI_type_node)
16481 {
16482 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16483 intTI_type_node, 1);
16484 unsigned_V1TI_type_node
16485 = rs6000_vector_type ("__vector unsigned __int128",
16486 unsigned_intTI_type_node, 1);
16487 }
16488
16489 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16490 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16491 'vector unsigned short'. */
16492
16493 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16494 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16495 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16496 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16497 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16498
16499 long_integer_type_internal_node = long_integer_type_node;
16500 long_unsigned_type_internal_node = long_unsigned_type_node;
16501 long_long_integer_type_internal_node = long_long_integer_type_node;
16502 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16503 intQI_type_internal_node = intQI_type_node;
16504 uintQI_type_internal_node = unsigned_intQI_type_node;
16505 intHI_type_internal_node = intHI_type_node;
16506 uintHI_type_internal_node = unsigned_intHI_type_node;
16507 intSI_type_internal_node = intSI_type_node;
16508 uintSI_type_internal_node = unsigned_intSI_type_node;
16509 intDI_type_internal_node = intDI_type_node;
16510 uintDI_type_internal_node = unsigned_intDI_type_node;
16511 intTI_type_internal_node = intTI_type_node;
16512 uintTI_type_internal_node = unsigned_intTI_type_node;
16513 float_type_internal_node = float_type_node;
16514 double_type_internal_node = double_type_node;
16515 long_double_type_internal_node = long_double_type_node;
16516 dfloat64_type_internal_node = dfloat64_type_node;
16517 dfloat128_type_internal_node = dfloat128_type_node;
16518 void_type_internal_node = void_type_node;
16519
16520 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16521 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16522 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16523 format that uses a pair of doubles, depending on the switches and
16524 defaults.
16525
16526    If we don't support either 128-bit IBM double double or IEEE 128-bit
16527    floating point, we need to make sure the type is non-zero or else the
16528    self-tests fail during bootstrap.
16529
16530 Always create __ibm128 as a separate type, even if the current long double
16531 format is IBM extended double.
16532
16533 For IEEE 128-bit floating point, always create the type __ieee128. If the
16534 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16535 __ieee128. */
16536 if (TARGET_FLOAT128_TYPE)
16537 {
16538 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16539 ibm128_float_type_node = long_double_type_node;
16540 else
16541 {
16542 ibm128_float_type_node = make_node (REAL_TYPE);
16543 TYPE_PRECISION (ibm128_float_type_node) = 128;
16544 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16545 layout_type (ibm128_float_type_node);
16546 }
16547
16548 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16549 "__ibm128");
16550
16551 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16552 ieee128_float_type_node = long_double_type_node;
16553 else
16554 ieee128_float_type_node = float128_type_node;
16555
16556 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16557 "__ieee128");
16558 }
16559
16560 else
16561 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16562
16563 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16564 tree type node. */
16565 builtin_mode_to_type[QImode][0] = integer_type_node;
16566 builtin_mode_to_type[HImode][0] = integer_type_node;
16567 builtin_mode_to_type[SImode][0] = intSI_type_node;
16568 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16569 builtin_mode_to_type[DImode][0] = intDI_type_node;
16570 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16571 builtin_mode_to_type[TImode][0] = intTI_type_node;
16572 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16573 builtin_mode_to_type[SFmode][0] = float_type_node;
16574 builtin_mode_to_type[DFmode][0] = double_type_node;
16575 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16576 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16577 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16578 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16579 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16580 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16581 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16582 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16583 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16584 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16585 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16586 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16587 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16588 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16589 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16590 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16591 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16592
16593 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16594 TYPE_NAME (bool_char_type_node) = tdecl;
16595
16596 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16597 TYPE_NAME (bool_short_type_node) = tdecl;
16598
16599 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16600 TYPE_NAME (bool_int_type_node) = tdecl;
16601
16602 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16603 TYPE_NAME (pixel_type_node) = tdecl;
16604
16605 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16606 bool_char_type_node, 16);
16607 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16608 bool_short_type_node, 8);
16609 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16610 bool_int_type_node, 4);
16611 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16612 ? "__vector __bool long"
16613 : "__vector __bool long long",
16614 bool_long_long_type_node, 2);
16615 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16616 pixel_type_node, 8);
16617
16618 /* Create Altivec and VSX builtins on machines with at least the
16619 general purpose extensions (970 and newer) to allow the use of
16620 the target attribute. */
16621 if (TARGET_EXTRA_BUILTINS)
16622 altivec_init_builtins ();
16623 if (TARGET_HTM)
16624 htm_init_builtins ();
16625
16626 if (TARGET_EXTRA_BUILTINS)
16627 rs6000_common_init_builtins ();
16628
16629 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16630 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16631 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16632
16633 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16634 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16635 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16636
16637 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16638 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16639 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16640
16641 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16642 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16643 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16644
16645 mode = (TARGET_64BIT) ? DImode : SImode;
16646 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16647 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16648 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16649
16650 ftype = build_function_type_list (unsigned_intDI_type_node,
16651 NULL_TREE);
16652 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16653
16654 if (TARGET_64BIT)
16655 ftype = build_function_type_list (unsigned_intDI_type_node,
16656 NULL_TREE);
16657 else
16658 ftype = build_function_type_list (unsigned_intSI_type_node,
16659 NULL_TREE);
16660 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16661
16662 ftype = build_function_type_list (double_type_node, NULL_TREE);
16663 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16664
16665 ftype = build_function_type_list (double_type_node, NULL_TREE);
16666 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16667
16668 ftype = build_function_type_list (void_type_node,
16669 intSI_type_node,
16670 NULL_TREE);
16671 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16672
16673 ftype = build_function_type_list (void_type_node,
16674 intSI_type_node,
16675 NULL_TREE);
16676 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16677
16678 ftype = build_function_type_list (void_type_node,
16679 intDI_type_node,
16680 NULL_TREE);
16681 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16682
16683 ftype = build_function_type_list (void_type_node,
16684 intDI_type_node,
16685 NULL_TREE);
16686 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16687
16688 ftype = build_function_type_list (void_type_node,
16689 intSI_type_node, double_type_node,
16690 NULL_TREE);
16691 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16692
16693 ftype = build_function_type_list (void_type_node, NULL_TREE);
16694 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16695 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16696 MISC_BUILTIN_SPEC_BARRIER);
16697
16698 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16699 NULL_TREE);
16700 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16701 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16702
16703 /* AIX libm provides clog as __clog. */
16704   if (TARGET_XCOFF
16705       && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16706 set_user_assembler_name (tdecl, "__clog");
16707
16708 #ifdef SUBTARGET_INIT_BUILTINS
16709 SUBTARGET_INIT_BUILTINS;
16710 #endif
16711 }
16712
16713 /* Returns the rs6000 builtin decl for CODE. */
16714
16715 static tree
16716 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16717 {
16718 HOST_WIDE_INT fnmask;
16719
16720 if (code >= RS6000_BUILTIN_COUNT)
16721 return error_mark_node;
16722
16723 fnmask = rs6000_builtin_info[code].mask;
16724 if ((fnmask & rs6000_builtin_mask) != fnmask)
16725 {
16726 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16727 return error_mark_node;
16728 }
16729
16730 return rs6000_builtin_decls[code];
16731 }
16732
16733 static void
16734 altivec_init_builtins (void)
16735 {
16736 const struct builtin_description *d;
16737 size_t i;
16738 tree ftype;
16739 tree decl;
16740 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16741
16742 tree pvoid_type_node = build_pointer_type (void_type_node);
16743
16744 tree pcvoid_type_node
16745 = build_pointer_type (build_qualified_type (void_type_node,
16746 TYPE_QUAL_CONST));
16747
16748 tree int_ftype_opaque
16749 = build_function_type_list (integer_type_node,
16750 opaque_V4SI_type_node, NULL_TREE);
16751 tree opaque_ftype_opaque
16752 = build_function_type_list (integer_type_node, NULL_TREE);
16753 tree opaque_ftype_opaque_int
16754 = build_function_type_list (opaque_V4SI_type_node,
16755 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16756 tree opaque_ftype_opaque_opaque_int
16757 = build_function_type_list (opaque_V4SI_type_node,
16758 opaque_V4SI_type_node, opaque_V4SI_type_node,
16759 integer_type_node, NULL_TREE);
16760 tree opaque_ftype_opaque_opaque_opaque
16761 = build_function_type_list (opaque_V4SI_type_node,
16762 opaque_V4SI_type_node, opaque_V4SI_type_node,
16763 opaque_V4SI_type_node, NULL_TREE);
16764 tree opaque_ftype_opaque_opaque
16765 = build_function_type_list (opaque_V4SI_type_node,
16766 opaque_V4SI_type_node, opaque_V4SI_type_node,
16767 NULL_TREE);
16768 tree int_ftype_int_opaque_opaque
16769 = build_function_type_list (integer_type_node,
16770 integer_type_node, opaque_V4SI_type_node,
16771 opaque_V4SI_type_node, NULL_TREE);
16772 tree int_ftype_int_v4si_v4si
16773 = build_function_type_list (integer_type_node,
16774 integer_type_node, V4SI_type_node,
16775 V4SI_type_node, NULL_TREE);
16776 tree int_ftype_int_v2di_v2di
16777 = build_function_type_list (integer_type_node,
16778 integer_type_node, V2DI_type_node,
16779 V2DI_type_node, NULL_TREE);
16780 tree void_ftype_v4si
16781 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16782 tree v8hi_ftype_void
16783 = build_function_type_list (V8HI_type_node, NULL_TREE);
16784 tree void_ftype_void
16785 = build_function_type_list (void_type_node, NULL_TREE);
16786 tree void_ftype_int
16787 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16788
16789 tree opaque_ftype_long_pcvoid
16790 = build_function_type_list (opaque_V4SI_type_node,
16791 long_integer_type_node, pcvoid_type_node,
16792 NULL_TREE);
16793 tree v16qi_ftype_long_pcvoid
16794 = build_function_type_list (V16QI_type_node,
16795 long_integer_type_node, pcvoid_type_node,
16796 NULL_TREE);
16797 tree v8hi_ftype_long_pcvoid
16798 = build_function_type_list (V8HI_type_node,
16799 long_integer_type_node, pcvoid_type_node,
16800 NULL_TREE);
16801 tree v4si_ftype_long_pcvoid
16802 = build_function_type_list (V4SI_type_node,
16803 long_integer_type_node, pcvoid_type_node,
16804 NULL_TREE);
16805 tree v4sf_ftype_long_pcvoid
16806 = build_function_type_list (V4SF_type_node,
16807 long_integer_type_node, pcvoid_type_node,
16808 NULL_TREE);
16809 tree v2df_ftype_long_pcvoid
16810 = build_function_type_list (V2DF_type_node,
16811 long_integer_type_node, pcvoid_type_node,
16812 NULL_TREE);
16813 tree v2di_ftype_long_pcvoid
16814 = build_function_type_list (V2DI_type_node,
16815 long_integer_type_node, pcvoid_type_node,
16816 NULL_TREE);
16817 tree v1ti_ftype_long_pcvoid
16818 = build_function_type_list (V1TI_type_node,
16819 long_integer_type_node, pcvoid_type_node,
16820 NULL_TREE);
16821
16822 tree void_ftype_opaque_long_pvoid
16823 = build_function_type_list (void_type_node,
16824 opaque_V4SI_type_node, long_integer_type_node,
16825 pvoid_type_node, NULL_TREE);
16826 tree void_ftype_v4si_long_pvoid
16827 = build_function_type_list (void_type_node,
16828 V4SI_type_node, long_integer_type_node,
16829 pvoid_type_node, NULL_TREE);
16830 tree void_ftype_v16qi_long_pvoid
16831 = build_function_type_list (void_type_node,
16832 V16QI_type_node, long_integer_type_node,
16833 pvoid_type_node, NULL_TREE);
16834
16835 tree void_ftype_v16qi_pvoid_long
16836 = build_function_type_list (void_type_node,
16837 V16QI_type_node, pvoid_type_node,
16838 long_integer_type_node, NULL_TREE);
16839
16840 tree void_ftype_v8hi_long_pvoid
16841 = build_function_type_list (void_type_node,
16842 V8HI_type_node, long_integer_type_node,
16843 pvoid_type_node, NULL_TREE);
16844 tree void_ftype_v4sf_long_pvoid
16845 = build_function_type_list (void_type_node,
16846 V4SF_type_node, long_integer_type_node,
16847 pvoid_type_node, NULL_TREE);
16848 tree void_ftype_v2df_long_pvoid
16849 = build_function_type_list (void_type_node,
16850 V2DF_type_node, long_integer_type_node,
16851 pvoid_type_node, NULL_TREE);
16852 tree void_ftype_v1ti_long_pvoid
16853 = build_function_type_list (void_type_node,
16854 V1TI_type_node, long_integer_type_node,
16855 pvoid_type_node, NULL_TREE);
16856 tree void_ftype_v2di_long_pvoid
16857 = build_function_type_list (void_type_node,
16858 V2DI_type_node, long_integer_type_node,
16859 pvoid_type_node, NULL_TREE);
16860 tree int_ftype_int_v8hi_v8hi
16861 = build_function_type_list (integer_type_node,
16862 integer_type_node, V8HI_type_node,
16863 V8HI_type_node, NULL_TREE);
16864 tree int_ftype_int_v16qi_v16qi
16865 = build_function_type_list (integer_type_node,
16866 integer_type_node, V16QI_type_node,
16867 V16QI_type_node, NULL_TREE);
16868 tree int_ftype_int_v4sf_v4sf
16869 = build_function_type_list (integer_type_node,
16870 integer_type_node, V4SF_type_node,
16871 V4SF_type_node, NULL_TREE);
16872 tree int_ftype_int_v2df_v2df
16873 = build_function_type_list (integer_type_node,
16874 integer_type_node, V2DF_type_node,
16875 V2DF_type_node, NULL_TREE);
16876 tree v2di_ftype_v2di
16877 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16878 tree v4si_ftype_v4si
16879 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16880 tree v8hi_ftype_v8hi
16881 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16882 tree v16qi_ftype_v16qi
16883 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16884 tree v4sf_ftype_v4sf
16885 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16886 tree v2df_ftype_v2df
16887 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16888 tree void_ftype_pcvoid_int_int
16889 = build_function_type_list (void_type_node,
16890 pcvoid_type_node, integer_type_node,
16891 integer_type_node, NULL_TREE);
16892
16893 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16894 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16895 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16896 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16897 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16898 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16899 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16900 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16901 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16902 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16903 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16904 ALTIVEC_BUILTIN_LVXL_V2DF);
16905 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16906 ALTIVEC_BUILTIN_LVXL_V2DI);
16907 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16908 ALTIVEC_BUILTIN_LVXL_V4SF);
16909 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16910 ALTIVEC_BUILTIN_LVXL_V4SI);
16911 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16912 ALTIVEC_BUILTIN_LVXL_V8HI);
16913 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16914 ALTIVEC_BUILTIN_LVXL_V16QI);
16915 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16916 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16917 ALTIVEC_BUILTIN_LVX_V1TI);
16918 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16919 ALTIVEC_BUILTIN_LVX_V2DF);
16920 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16921 ALTIVEC_BUILTIN_LVX_V2DI);
16922 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16923 ALTIVEC_BUILTIN_LVX_V4SF);
16924 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16925 ALTIVEC_BUILTIN_LVX_V4SI);
16926 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16927 ALTIVEC_BUILTIN_LVX_V8HI);
16928 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16929 ALTIVEC_BUILTIN_LVX_V16QI);
16930 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16931 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16932 ALTIVEC_BUILTIN_STVX_V2DF);
16933 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16934 ALTIVEC_BUILTIN_STVX_V2DI);
16935 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16936 ALTIVEC_BUILTIN_STVX_V4SF);
16937 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16938 ALTIVEC_BUILTIN_STVX_V4SI);
16939 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16940 ALTIVEC_BUILTIN_STVX_V8HI);
16941 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16942 ALTIVEC_BUILTIN_STVX_V16QI);
16943 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16944 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16945 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16946 ALTIVEC_BUILTIN_STVXL_V2DF);
16947 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16948 ALTIVEC_BUILTIN_STVXL_V2DI);
16949 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16950 ALTIVEC_BUILTIN_STVXL_V4SF);
16951 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16952 ALTIVEC_BUILTIN_STVXL_V4SI);
16953 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16954 ALTIVEC_BUILTIN_STVXL_V8HI);
16955 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16956 ALTIVEC_BUILTIN_STVXL_V16QI);
16957 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16958 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16959 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16960 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16961 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16962 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16963 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16964 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16965 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16966 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16967 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16968 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16969 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16970 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16971 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16972 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16973
16974 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16975 VSX_BUILTIN_LXVD2X_V2DF);
16976 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16977 VSX_BUILTIN_LXVD2X_V2DI);
16978 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16979 VSX_BUILTIN_LXVW4X_V4SF);
16980 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16981 VSX_BUILTIN_LXVW4X_V4SI);
16982 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16983 VSX_BUILTIN_LXVW4X_V8HI);
16984 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16985 VSX_BUILTIN_LXVW4X_V16QI);
16986 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16987 VSX_BUILTIN_STXVD2X_V2DF);
16988 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16989 VSX_BUILTIN_STXVD2X_V2DI);
16990 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16991 VSX_BUILTIN_STXVW4X_V4SF);
16992 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16993 VSX_BUILTIN_STXVW4X_V4SI);
16994 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16995 VSX_BUILTIN_STXVW4X_V8HI);
16996 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16997 VSX_BUILTIN_STXVW4X_V16QI);
16998
16999 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17000 VSX_BUILTIN_LD_ELEMREV_V2DF);
17001 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17002 VSX_BUILTIN_LD_ELEMREV_V2DI);
17003 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17004 VSX_BUILTIN_LD_ELEMREV_V4SF);
17005 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17006 VSX_BUILTIN_LD_ELEMREV_V4SI);
17007 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17008 VSX_BUILTIN_LD_ELEMREV_V8HI);
17009 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17010 VSX_BUILTIN_LD_ELEMREV_V16QI);
17011 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17012 VSX_BUILTIN_ST_ELEMREV_V2DF);
17013 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17014 VSX_BUILTIN_ST_ELEMREV_V1TI);
17015 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17016 VSX_BUILTIN_ST_ELEMREV_V2DI);
17017 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17018 VSX_BUILTIN_ST_ELEMREV_V4SF);
17019 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17020 VSX_BUILTIN_ST_ELEMREV_V4SI);
17021 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17022 VSX_BUILTIN_ST_ELEMREV_V8HI);
17023 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17024 VSX_BUILTIN_ST_ELEMREV_V16QI);
17025
17026 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17027 VSX_BUILTIN_VEC_LD);
17028 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17029 VSX_BUILTIN_VEC_ST);
17030 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17031 VSX_BUILTIN_VEC_XL);
17032 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17033 VSX_BUILTIN_VEC_XL_BE);
17034 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17035 VSX_BUILTIN_VEC_XST);
17036 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17037 VSX_BUILTIN_VEC_XST_BE);
17038
17039 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17040 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17041 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17042
17043 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17044 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17045 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17046 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17047 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17048 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17049 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17050 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17051 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17052 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17053 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17054 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17055
17056 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17057 ALTIVEC_BUILTIN_VEC_ADDE);
17058 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17059 ALTIVEC_BUILTIN_VEC_ADDEC);
17060 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17061 ALTIVEC_BUILTIN_VEC_CMPNE);
17062 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17063 ALTIVEC_BUILTIN_VEC_MUL);
17064 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17065 ALTIVEC_BUILTIN_VEC_SUBE);
17066 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17067 ALTIVEC_BUILTIN_VEC_SUBEC);
17068
17069 /* Cell builtins. */
17070 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17071 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17072 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17073 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17074
17075 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17076 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17077 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17078 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17079
17080 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17081 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17082 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17083 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17084
17085 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17086 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17087 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17088 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17089
17090 if (TARGET_P9_VECTOR)
17091 {
17092 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17093 P9V_BUILTIN_STXVL);
17094 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17095 P9V_BUILTIN_XST_LEN_R);
17096 }
17097
17098 /* Add the DST variants. */
17099 d = bdesc_dst;
17100 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17101 {
17102 HOST_WIDE_INT mask = d->mask;
17103
17104 /* It is expected that these dst built-in functions may have
17105 d->icode equal to CODE_FOR_nothing. */
17106 if ((mask & builtin_mask) != mask)
17107 {
17108 if (TARGET_DEBUG_BUILTIN)
17109 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17110 d->name);
17111 continue;
17112 }
17113 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17114 }
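  /* A worked illustration of the mask test used throughout these loops: if
     d->mask is, say, RS6000_BTM_ALTIVEC | RS6000_BTM_CELL but builtin_mask
     lacks RS6000_BTM_CELL, then (mask & builtin_mask) != mask and the
     built-in is skipped instead of being defined.  */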
17115
17116 /* Initialize the predicates. */
17117 d = bdesc_altivec_preds;
17118 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17119 {
17120 machine_mode mode1;
17121 tree type;
17122 HOST_WIDE_INT mask = d->mask;
17123
17124 if ((mask & builtin_mask) != mask)
17125 {
17126 if (TARGET_DEBUG_BUILTIN)
17127 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17128 d->name);
17129 continue;
17130 }
17131
17132 if (rs6000_overloaded_builtin_p (d->code))
17133 mode1 = VOIDmode;
17134 else
17135 {
17136 /* Cannot define builtin if the instruction is disabled. */
17137 gcc_assert (d->icode != CODE_FOR_nothing);
17138 mode1 = insn_data[d->icode].operand[1].mode;
17139 }
17140
17141 switch (mode1)
17142 {
17143 case E_VOIDmode:
17144 type = int_ftype_int_opaque_opaque;
17145 break;
17146 case E_V2DImode:
17147 type = int_ftype_int_v2di_v2di;
17148 break;
17149 case E_V4SImode:
17150 type = int_ftype_int_v4si_v4si;
17151 break;
17152 case E_V8HImode:
17153 type = int_ftype_int_v8hi_v8hi;
17154 break;
17155 case E_V16QImode:
17156 type = int_ftype_int_v16qi_v16qi;
17157 break;
17158 case E_V4SFmode:
17159 type = int_ftype_int_v4sf_v4sf;
17160 break;
17161 case E_V2DFmode:
17162 type = int_ftype_int_v2df_v2df;
17163 break;
17164 default:
17165 gcc_unreachable ();
17166 }
17167
17168 def_builtin (d->name, type, d->code);
17169 }
17170
17171 /* Initialize the abs* operators. */
17172 d = bdesc_abs;
17173 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17174 {
17175 machine_mode mode0;
17176 tree type;
17177 HOST_WIDE_INT mask = d->mask;
17178
17179 if ((mask & builtin_mask) != mask)
17180 {
17181 if (TARGET_DEBUG_BUILTIN)
17182 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17183 d->name);
17184 continue;
17185 }
17186
17187 /* Cannot define builtin if the instruction is disabled. */
17188 gcc_assert (d->icode != CODE_FOR_nothing);
17189 mode0 = insn_data[d->icode].operand[0].mode;
17190
17191 switch (mode0)
17192 {
17193 case E_V2DImode:
17194 type = v2di_ftype_v2di;
17195 break;
17196 case E_V4SImode:
17197 type = v4si_ftype_v4si;
17198 break;
17199 case E_V8HImode:
17200 type = v8hi_ftype_v8hi;
17201 break;
17202 case E_V16QImode:
17203 type = v16qi_ftype_v16qi;
17204 break;
17205 case E_V4SFmode:
17206 type = v4sf_ftype_v4sf;
17207 break;
17208 case E_V2DFmode:
17209 type = v2df_ftype_v2df;
17210 break;
17211 default:
17212 gcc_unreachable ();
17213 }
17214
17215 def_builtin (d->name, type, d->code);
17216 }
17217
17218   /* Initialize the target builtin that implements
17219      targetm.vectorize.builtin_mask_for_load.  */
17220
17221 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17222 v16qi_ftype_long_pcvoid,
17223 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17224 BUILT_IN_MD, NULL, NULL_TREE);
17225 TREE_READONLY (decl) = 1;
17226 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17227 altivec_builtin_mask_for_load = decl;
17228
17229 /* Access to the vec_init patterns. */
17230 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17231 integer_type_node, integer_type_node,
17232 integer_type_node, NULL_TREE);
17233 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17234
17235 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17236 short_integer_type_node,
17237 short_integer_type_node,
17238 short_integer_type_node,
17239 short_integer_type_node,
17240 short_integer_type_node,
17241 short_integer_type_node,
17242 short_integer_type_node, NULL_TREE);
17243 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17244
17245 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17246 char_type_node, char_type_node,
17247 char_type_node, char_type_node,
17248 char_type_node, char_type_node,
17249 char_type_node, char_type_node,
17250 char_type_node, char_type_node,
17251 char_type_node, char_type_node,
17252 char_type_node, char_type_node,
17253 char_type_node, NULL_TREE);
17254 def_builtin ("__builtin_vec_init_v16qi", ftype,
17255 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17256
17257 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17258 float_type_node, float_type_node,
17259 float_type_node, NULL_TREE);
17260 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17261
17262 /* VSX builtins. */
17263 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17264 double_type_node, NULL_TREE);
17265 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17266
17267 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17268 intDI_type_node, NULL_TREE);
17269 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17270
17271 /* Access to the vec_set patterns. */
17272 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17273 intSI_type_node,
17274 integer_type_node, NULL_TREE);
17275 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17276
17277 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17278 intHI_type_node,
17279 integer_type_node, NULL_TREE);
17280 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17281
17282 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17283 intQI_type_node,
17284 integer_type_node, NULL_TREE);
17285 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17286
17287 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17288 float_type_node,
17289 integer_type_node, NULL_TREE);
17290 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17291
17292 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17293 double_type_node,
17294 integer_type_node, NULL_TREE);
17295 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17296
17297 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17298 intDI_type_node,
17299 integer_type_node, NULL_TREE);
17300 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17301
17302 /* Access to the vec_extract patterns. */
17303 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17304 integer_type_node, NULL_TREE);
17305 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17306
17307 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17308 integer_type_node, NULL_TREE);
17309 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17310
17311 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17312 integer_type_node, NULL_TREE);
17313 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17314
17315 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17316 integer_type_node, NULL_TREE);
17317 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17318
17319 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17320 integer_type_node, NULL_TREE);
17321 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17322
17323 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17324 integer_type_node, NULL_TREE);
17325 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17326
17327
17328 if (V1TI_type_node)
17329 {
17330 tree v1ti_ftype_long_pcvoid
17331 = build_function_type_list (V1TI_type_node,
17332 long_integer_type_node, pcvoid_type_node,
17333 NULL_TREE);
17334 tree void_ftype_v1ti_long_pvoid
17335 = build_function_type_list (void_type_node,
17336 V1TI_type_node, long_integer_type_node,
17337 pvoid_type_node, NULL_TREE);
17338 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17339 VSX_BUILTIN_LD_ELEMREV_V1TI);
17340 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17341 VSX_BUILTIN_LXVD2X_V1TI);
17342 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17343 VSX_BUILTIN_STXVD2X_V1TI);
17344       ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17345					NULL_TREE);
17346 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17347 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17348 intTI_type_node,
17349 integer_type_node, NULL_TREE);
17350 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17351 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17352 integer_type_node, NULL_TREE);
17353 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17354 }
17355
17356 }
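/* A minimal user-level sketch (illustrative, not part of GCC) of two of the
   built-ins registered above; the vec_init and vec_ext patterns let the
   front end expand element-wise construction and extraction directly:

     vector int v = __builtin_vec_init_v4si (1, 2, 3, 4);
     int x = __builtin_vec_ext_v4si (v, 2);
*/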
17357
17358 static void
17359 htm_init_builtins (void)
17360 {
17361 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17362 const struct builtin_description *d;
17363 size_t i;
17364
17365 d = bdesc_htm;
17366 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17367 {
17368 tree op[MAX_HTM_OPERANDS], type;
17369 HOST_WIDE_INT mask = d->mask;
17370 unsigned attr = rs6000_builtin_info[d->code].attr;
17371 bool void_func = (attr & RS6000_BTC_VOID);
17372 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17373 int nopnds = 0;
17374 tree gpr_type_node;
17375 tree rettype;
17376 tree argtype;
17377
17378 /* It is expected that these htm built-in functions may have
17379 d->icode equal to CODE_FOR_nothing. */
17380
17381 if (TARGET_32BIT && TARGET_POWERPC64)
17382 gpr_type_node = long_long_unsigned_type_node;
17383 else
17384 gpr_type_node = long_unsigned_type_node;
17385
17386 if (attr & RS6000_BTC_SPR)
17387 {
17388 rettype = gpr_type_node;
17389 argtype = gpr_type_node;
17390 }
17391 else if (d->code == HTM_BUILTIN_TABORTDC
17392 || d->code == HTM_BUILTIN_TABORTDCI)
17393 {
17394 rettype = unsigned_type_node;
17395 argtype = gpr_type_node;
17396 }
17397 else
17398 {
17399 rettype = unsigned_type_node;
17400 argtype = unsigned_type_node;
17401 }
17402
17403 if ((mask & builtin_mask) != mask)
17404 {
17405 if (TARGET_DEBUG_BUILTIN)
17406	    fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
17407 continue;
17408 }
17409
17410 if (d->name == 0)
17411 {
17412 if (TARGET_DEBUG_BUILTIN)
17413	    fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17414 (long unsigned) i);
17415 continue;
17416 }
17417
17418 op[nopnds++] = (void_func) ? void_type_node : rettype;
17419
17420 if (attr_args == RS6000_BTC_UNARY)
17421 op[nopnds++] = argtype;
17422 else if (attr_args == RS6000_BTC_BINARY)
17423 {
17424 op[nopnds++] = argtype;
17425 op[nopnds++] = argtype;
17426 }
17427 else if (attr_args == RS6000_BTC_TERNARY)
17428 {
17429 op[nopnds++] = argtype;
17430 op[nopnds++] = argtype;
17431 op[nopnds++] = argtype;
17432 }
17433
17434 switch (nopnds)
17435 {
17436 case 1:
17437 type = build_function_type_list (op[0], NULL_TREE);
17438 break;
17439 case 2:
17440 type = build_function_type_list (op[0], op[1], NULL_TREE);
17441 break;
17442 case 3:
17443 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17444 break;
17445 case 4:
17446 type = build_function_type_list (op[0], op[1], op[2], op[3],
17447 NULL_TREE);
17448 break;
17449 default:
17450 gcc_unreachable ();
17451 }
17452
17453 def_builtin (d->name, type, d->code);
17454 }
17455 }
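/* For example (illustrative): a unary HTM built-in such as __builtin_tbegin
   is neither RS6000_BTC_SPR nor one of the TABORTDC variants, so op[]
   collects { unsigned_type_node, unsigned_type_node } and the nopnds == 2
   case above builds the type "unsigned int (unsigned int)".  */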
17456
17457 /* Hash function for builtin functions with up to 3 arguments and a return
17458 type. */
17459 hashval_t
17460 builtin_hasher::hash (builtin_hash_struct *bh)
17461 {
17462 unsigned ret = 0;
17463 int i;
17464
17465 for (i = 0; i < 4; i++)
17466 {
17467 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17468 ret = (ret * 2) + bh->uns_p[i];
17469 }
17470
17471 return ret;
17472 }
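/* The loop above folds the four (mode, unsigned-flag) pairs into one
   mixed-radix value (radix MAX_MACHINE_MODE for the mode, radix 2 for the
   flag), so signatures differing in any mode or signedness hash to
   different keys, modulo wraparound of the unsigned accumulator.  */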
17473
17474 /* Compare builtin hash entries H1 and H2 for equivalence. */
17475 bool
17476 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17477 {
17478 return ((p1->mode[0] == p2->mode[0])
17479 && (p1->mode[1] == p2->mode[1])
17480 && (p1->mode[2] == p2->mode[2])
17481 && (p1->mode[3] == p2->mode[3])
17482 && (p1->uns_p[0] == p2->uns_p[0])
17483 && (p1->uns_p[1] == p2->uns_p[1])
17484 && (p1->uns_p[2] == p2->uns_p[2])
17485 && (p1->uns_p[3] == p2->uns_p[3]));
17486 }
17487
17488 /* Map types for builtin functions with an explicit return type and up to 3
17489    arguments.  Functions with fewer than 3 arguments pass VOIDmode as the
17490    mode of each unused argument.  */
17491 static tree
17492 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17493 machine_mode mode_arg1, machine_mode mode_arg2,
17494 enum rs6000_builtins builtin, const char *name)
17495 {
17496 struct builtin_hash_struct h;
17497 struct builtin_hash_struct *h2;
17498 int num_args = 3;
17499 int i;
17500 tree ret_type = NULL_TREE;
17501 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17502
17503 /* Create builtin_hash_table. */
17504 if (builtin_hash_table == NULL)
17505 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17506
17507 h.type = NULL_TREE;
17508 h.mode[0] = mode_ret;
17509 h.mode[1] = mode_arg0;
17510 h.mode[2] = mode_arg1;
17511 h.mode[3] = mode_arg2;
17512 h.uns_p[0] = 0;
17513 h.uns_p[1] = 0;
17514 h.uns_p[2] = 0;
17515 h.uns_p[3] = 0;
17516
17517   /* If the builtin produces unsigned results or takes unsigned arguments,
17518      and it is returned as a decl for the vectorizer (such as widening
17519      multiplies or permute), make sure the arguments and return value
17520      are typed correctly.  */
17521 switch (builtin)
17522 {
17523 /* unsigned 1 argument functions. */
17524 case CRYPTO_BUILTIN_VSBOX:
17525 case P8V_BUILTIN_VGBBD:
17526 case MISC_BUILTIN_CDTBCD:
17527 case MISC_BUILTIN_CBCDTD:
17528 h.uns_p[0] = 1;
17529 h.uns_p[1] = 1;
17530 break;
17531
17532 /* unsigned 2 argument functions. */
17533 case ALTIVEC_BUILTIN_VMULEUB:
17534 case ALTIVEC_BUILTIN_VMULEUH:
17535 case P8V_BUILTIN_VMULEUW:
17536 case ALTIVEC_BUILTIN_VMULOUB:
17537 case ALTIVEC_BUILTIN_VMULOUH:
17538 case P8V_BUILTIN_VMULOUW:
17539 case CRYPTO_BUILTIN_VCIPHER:
17540 case CRYPTO_BUILTIN_VCIPHERLAST:
17541 case CRYPTO_BUILTIN_VNCIPHER:
17542 case CRYPTO_BUILTIN_VNCIPHERLAST:
17543 case CRYPTO_BUILTIN_VPMSUMB:
17544 case CRYPTO_BUILTIN_VPMSUMH:
17545 case CRYPTO_BUILTIN_VPMSUMW:
17546 case CRYPTO_BUILTIN_VPMSUMD:
17547 case CRYPTO_BUILTIN_VPMSUM:
17548 case MISC_BUILTIN_ADDG6S:
17549 case MISC_BUILTIN_DIVWEU:
17550 case MISC_BUILTIN_DIVDEU:
17551 case VSX_BUILTIN_UDIV_V2DI:
17552 case ALTIVEC_BUILTIN_VMAXUB:
17553 case ALTIVEC_BUILTIN_VMINUB:
17554 case ALTIVEC_BUILTIN_VMAXUH:
17555 case ALTIVEC_BUILTIN_VMINUH:
17556 case ALTIVEC_BUILTIN_VMAXUW:
17557 case ALTIVEC_BUILTIN_VMINUW:
17558 case P8V_BUILTIN_VMAXUD:
17559 case P8V_BUILTIN_VMINUD:
17560 h.uns_p[0] = 1;
17561 h.uns_p[1] = 1;
17562 h.uns_p[2] = 1;
17563 break;
17564
17565 /* unsigned 3 argument functions. */
17566 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17567 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17568 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17569 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17570 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17571 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17572 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17573 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17574 case VSX_BUILTIN_VPERM_16QI_UNS:
17575 case VSX_BUILTIN_VPERM_8HI_UNS:
17576 case VSX_BUILTIN_VPERM_4SI_UNS:
17577 case VSX_BUILTIN_VPERM_2DI_UNS:
17578 case VSX_BUILTIN_XXSEL_16QI_UNS:
17579 case VSX_BUILTIN_XXSEL_8HI_UNS:
17580 case VSX_BUILTIN_XXSEL_4SI_UNS:
17581 case VSX_BUILTIN_XXSEL_2DI_UNS:
17582 case CRYPTO_BUILTIN_VPERMXOR:
17583 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17584 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17585 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17586 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17587 case CRYPTO_BUILTIN_VSHASIGMAW:
17588 case CRYPTO_BUILTIN_VSHASIGMAD:
17589 case CRYPTO_BUILTIN_VSHASIGMA:
17590 h.uns_p[0] = 1;
17591 h.uns_p[1] = 1;
17592 h.uns_p[2] = 1;
17593 h.uns_p[3] = 1;
17594 break;
17595
17596 /* signed permute functions with unsigned char mask. */
17597 case ALTIVEC_BUILTIN_VPERM_16QI:
17598 case ALTIVEC_BUILTIN_VPERM_8HI:
17599 case ALTIVEC_BUILTIN_VPERM_4SI:
17600 case ALTIVEC_BUILTIN_VPERM_4SF:
17601 case ALTIVEC_BUILTIN_VPERM_2DI:
17602 case ALTIVEC_BUILTIN_VPERM_2DF:
17603 case VSX_BUILTIN_VPERM_16QI:
17604 case VSX_BUILTIN_VPERM_8HI:
17605 case VSX_BUILTIN_VPERM_4SI:
17606 case VSX_BUILTIN_VPERM_4SF:
17607 case VSX_BUILTIN_VPERM_2DI:
17608 case VSX_BUILTIN_VPERM_2DF:
17609 h.uns_p[3] = 1;
17610 break;
17611
17612 /* unsigned args, signed return. */
17613 case VSX_BUILTIN_XVCVUXDSP:
17614 case VSX_BUILTIN_XVCVUXDDP_UNS:
17615 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17616 h.uns_p[1] = 1;
17617 break;
17618
17619 /* signed args, unsigned return. */
17620 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17621 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17622 case MISC_BUILTIN_UNPACK_TD:
17623 case MISC_BUILTIN_UNPACK_V1TI:
17624 h.uns_p[0] = 1;
17625 break;
17626
17627 /* unsigned arguments, bool return (compares). */
17628 case ALTIVEC_BUILTIN_VCMPEQUB:
17629 case ALTIVEC_BUILTIN_VCMPEQUH:
17630 case ALTIVEC_BUILTIN_VCMPEQUW:
17631 case P8V_BUILTIN_VCMPEQUD:
17632 case VSX_BUILTIN_CMPGE_U16QI:
17633 case VSX_BUILTIN_CMPGE_U8HI:
17634 case VSX_BUILTIN_CMPGE_U4SI:
17635 case VSX_BUILTIN_CMPGE_U2DI:
17636 case ALTIVEC_BUILTIN_VCMPGTUB:
17637 case ALTIVEC_BUILTIN_VCMPGTUH:
17638 case ALTIVEC_BUILTIN_VCMPGTUW:
17639 case P8V_BUILTIN_VCMPGTUD:
17640 h.uns_p[1] = 1;
17641 h.uns_p[2] = 1;
17642 break;
17643
17644 /* unsigned arguments for 128-bit pack instructions. */
17645 case MISC_BUILTIN_PACK_TD:
17646 case MISC_BUILTIN_PACK_V1TI:
17647 h.uns_p[1] = 1;
17648 h.uns_p[2] = 1;
17649 break;
17650
17651 /* unsigned second arguments (vector shift right). */
17652 case ALTIVEC_BUILTIN_VSRB:
17653 case ALTIVEC_BUILTIN_VSRH:
17654 case ALTIVEC_BUILTIN_VSRW:
17655 case P8V_BUILTIN_VSRD:
17656 h.uns_p[2] = 1;
17657 break;
17658
17659 default:
17660 break;
17661 }
17662
17663 /* Figure out how many args are present. */
17664 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17665 num_args--;
17666
17667 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17668 if (!ret_type && h.uns_p[0])
17669 ret_type = builtin_mode_to_type[h.mode[0]][0];
17670
17671 if (!ret_type)
17672 fatal_error (input_location,
17673 "internal error: builtin function %qs had an unexpected "
17674 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17675
17676 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17677 arg_type[i] = NULL_TREE;
17678
17679 for (i = 0; i < num_args; i++)
17680 {
17681 int m = (int) h.mode[i+1];
17682 int uns_p = h.uns_p[i+1];
17683
17684 arg_type[i] = builtin_mode_to_type[m][uns_p];
17685 if (!arg_type[i] && uns_p)
17686 arg_type[i] = builtin_mode_to_type[m][0];
17687
17688 if (!arg_type[i])
17689 fatal_error (input_location,
17690 "internal error: builtin function %qs, argument %d "
17691 "had unexpected argument type %qs", name, i,
17692 GET_MODE_NAME (m));
17693 }
17694
17695 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17696 if (*found == NULL)
17697 {
17698 h2 = ggc_alloc<builtin_hash_struct> ();
17699 *h2 = h;
17700 *found = h2;
17701
17702 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17703 arg_type[2], NULL_TREE);
17704 }
17705
17706 return (*found)->type;
17707 }
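/* Worked example (illustrative): ALTIVEC_BUILTIN_VMULEUB reaches this
   function with mode_ret == V8HImode and mode_arg0 == mode_arg1 ==
   V16QImode; the switch marks all three positions unsigned, so the cached
   type is "vector unsigned short (vector unsigned char,
   vector unsigned char)".  */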
17708
17709 static void
17710 rs6000_common_init_builtins (void)
17711 {
17712 const struct builtin_description *d;
17713 size_t i;
17714
17715 tree opaque_ftype_opaque = NULL_TREE;
17716 tree opaque_ftype_opaque_opaque = NULL_TREE;
17717 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17718 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17719
17720 /* Create Altivec and VSX builtins on machines with at least the
17721 general purpose extensions (970 and newer) to allow the use of
17722 the target attribute. */
17723
17724 if (TARGET_EXTRA_BUILTINS)
17725 builtin_mask |= RS6000_BTM_COMMON;
17726
17727 /* Add the ternary operators. */
17728 d = bdesc_3arg;
17729 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17730 {
17731 tree type;
17732 HOST_WIDE_INT mask = d->mask;
17733
17734 if ((mask & builtin_mask) != mask)
17735 {
17736 if (TARGET_DEBUG_BUILTIN)
17737 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17738 continue;
17739 }
17740
17741 if (rs6000_overloaded_builtin_p (d->code))
17742 {
17743 if (! (type = opaque_ftype_opaque_opaque_opaque))
17744 type = opaque_ftype_opaque_opaque_opaque
17745 = build_function_type_list (opaque_V4SI_type_node,
17746 opaque_V4SI_type_node,
17747 opaque_V4SI_type_node,
17748 opaque_V4SI_type_node,
17749 NULL_TREE);
17750 }
17751 else
17752 {
17753 enum insn_code icode = d->icode;
17754 if (d->name == 0)
17755 {
17756 if (TARGET_DEBUG_BUILTIN)
17757		fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17758 (long unsigned)i);
17759
17760 continue;
17761 }
17762
17763 if (icode == CODE_FOR_nothing)
17764 {
17765 if (TARGET_DEBUG_BUILTIN)
17766 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17767 d->name);
17768
17769 continue;
17770 }
17771
17772 type = builtin_function_type (insn_data[icode].operand[0].mode,
17773 insn_data[icode].operand[1].mode,
17774 insn_data[icode].operand[2].mode,
17775 insn_data[icode].operand[3].mode,
17776 d->code, d->name);
17777 }
17778
17779 def_builtin (d->name, type, d->code);
17780 }
17781
17782 /* Add the binary operators. */
17783 d = bdesc_2arg;
17784 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17785 {
17786 machine_mode mode0, mode1, mode2;
17787 tree type;
17788 HOST_WIDE_INT mask = d->mask;
17789
17790 if ((mask & builtin_mask) != mask)
17791 {
17792 if (TARGET_DEBUG_BUILTIN)
17793 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17794 continue;
17795 }
17796
17797 if (rs6000_overloaded_builtin_p (d->code))
17798 {
17799 if (! (type = opaque_ftype_opaque_opaque))
17800 type = opaque_ftype_opaque_opaque
17801 = build_function_type_list (opaque_V4SI_type_node,
17802 opaque_V4SI_type_node,
17803 opaque_V4SI_type_node,
17804 NULL_TREE);
17805 }
17806 else
17807 {
17808 enum insn_code icode = d->icode;
17809 if (d->name == 0)
17810 {
17811 if (TARGET_DEBUG_BUILTIN)
17812		fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
17813 (long unsigned)i);
17814
17815 continue;
17816 }
17817
17818 if (icode == CODE_FOR_nothing)
17819 {
17820 if (TARGET_DEBUG_BUILTIN)
17821 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17822 d->name);
17823
17824 continue;
17825 }
17826
17827 mode0 = insn_data[icode].operand[0].mode;
17828 mode1 = insn_data[icode].operand[1].mode;
17829 mode2 = insn_data[icode].operand[2].mode;
17830
17831 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17832 d->code, d->name);
17833 }
17834
17835 def_builtin (d->name, type, d->code);
17836 }
17837
17838 /* Add the simple unary operators. */
17839 d = bdesc_1arg;
17840 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17841 {
17842 machine_mode mode0, mode1;
17843 tree type;
17844 HOST_WIDE_INT mask = d->mask;
17845
17846 if ((mask & builtin_mask) != mask)
17847 {
17848 if (TARGET_DEBUG_BUILTIN)
17849 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17850 continue;
17851 }
17852
17853 if (rs6000_overloaded_builtin_p (d->code))
17854 {
17855 if (! (type = opaque_ftype_opaque))
17856 type = opaque_ftype_opaque
17857 = build_function_type_list (opaque_V4SI_type_node,
17858 opaque_V4SI_type_node,
17859 NULL_TREE);
17860 }
17861 else
17862 {
17863 enum insn_code icode = d->icode;
17864 if (d->name == 0)
17865 {
17866 if (TARGET_DEBUG_BUILTIN)
17867		fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
17868 (long unsigned)i);
17869
17870 continue;
17871 }
17872
17873 if (icode == CODE_FOR_nothing)
17874 {
17875 if (TARGET_DEBUG_BUILTIN)
17876 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17877 d->name);
17878
17879 continue;
17880 }
17881
17882 mode0 = insn_data[icode].operand[0].mode;
17883 mode1 = insn_data[icode].operand[1].mode;
17884
17885 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17886 d->code, d->name);
17887 }
17888
17889 def_builtin (d->name, type, d->code);
17890 }
17891
17892 /* Add the simple no-argument operators. */
17893 d = bdesc_0arg;
17894 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17895 {
17896 machine_mode mode0;
17897 tree type;
17898 HOST_WIDE_INT mask = d->mask;
17899
17900 if ((mask & builtin_mask) != mask)
17901 {
17902 if (TARGET_DEBUG_BUILTIN)
17903 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17904 continue;
17905 }
17906 if (rs6000_overloaded_builtin_p (d->code))
17907 {
17908 if (!opaque_ftype_opaque)
17909 opaque_ftype_opaque
17910 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17911 type = opaque_ftype_opaque;
17912 }
17913 else
17914 {
17915 enum insn_code icode = d->icode;
17916 if (d->name == 0)
17917 {
17918 if (TARGET_DEBUG_BUILTIN)
17919 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17920 (long unsigned) i);
17921 continue;
17922 }
17923 if (icode == CODE_FOR_nothing)
17924 {
17925 if (TARGET_DEBUG_BUILTIN)
17926 fprintf (stderr,
17927 "rs6000_builtin, skip no-argument %s (no code)\n",
17928 d->name);
17929 continue;
17930 }
17931 mode0 = insn_data[icode].operand[0].mode;
17932 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17933 d->code, d->name);
17934 }
17935 def_builtin (d->name, type, d->code);
17936 }
17937 }
17938
17939 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17940 static void
17941 init_float128_ibm (machine_mode mode)
17942 {
17943 if (!TARGET_XL_COMPAT)
17944 {
17945 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17946 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17947 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17948 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17949
17950 if (!TARGET_HARD_FLOAT)
17951 {
17952 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17953 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17954 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17955 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17956 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17957 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17958 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17959 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17960
17961 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17962 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17963 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17964 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17965 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17966 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17967 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17968 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17969 }
17970 }
17971 else
17972 {
17973 set_optab_libfunc (add_optab, mode, "_xlqadd");
17974 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17975 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17976 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17977 }
17978
17979 /* Add various conversions for IFmode to use the traditional TFmode
17980 names. */
17981 if (mode == IFmode)
17982 {
17983 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
17984 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
17985 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
17986 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
17987 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
17988 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
17989
17990 if (TARGET_POWERPC64)
17991 {
17992 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17993 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17994 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17995 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17996 }
17997 }
17998 }
17999
18000 /* Create a decl for complex long double multiply or divide when long double
18001    is IEEE 128-bit floating point.  We can't use __multc3 and __divtc3
18002    because those names are already taken by the original long double format
18003    (IBM extended double).  The complex multiply/divide functions are
18004    encoded as builtin functions with a complex result and 4 scalar inputs.  */
18005
18006 static void
18007 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
18008 {
18009 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
18010 name, NULL_TREE);
18011
18012 set_builtin_decl (fncode, fndecl, true);
18013
18014 if (TARGET_DEBUG_BUILTIN)
18015 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
18016
18017 return;
18018 }
18019
18020 /* Set up IEEE 128-bit floating point routines. Use different names if the
18021 arguments can be passed in a vector register. The historical PowerPC
18022 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18023 continue to use that if we aren't using vector registers to pass IEEE
18024 128-bit floating point. */
18025
18026 static void
18027 init_float128_ieee (machine_mode mode)
18028 {
18029 if (FLOAT128_VECTOR_P (mode))
18030 {
18031 static bool complex_muldiv_init_p = false;
18032
18033 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
18034 we have clone or target attributes, this will be called a second
18035 time. We want to create the built-in function only once. */
18036 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
18037 {
18038 complex_muldiv_init_p = true;
18039 built_in_function fncode_mul =
18040 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
18041 - MIN_MODE_COMPLEX_FLOAT);
18042 built_in_function fncode_div =
18043 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
18044 - MIN_MODE_COMPLEX_FLOAT);
18045
18046 tree fntype = build_function_type_list (complex_long_double_type_node,
18047 long_double_type_node,
18048 long_double_type_node,
18049 long_double_type_node,
18050 long_double_type_node,
18051 NULL_TREE);
18052
18053 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
18054 create_complex_muldiv ("__divkc3", fncode_div, fntype);
18055 }
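      /* The fncode arithmetic above assumes the BUILT_IN_COMPLEX_MUL_MIN and
	 BUILT_IN_COMPLEX_DIV_MIN built-in codes are laid out in the same
	 order as the complex float modes, so adding
	 (TCmode - MIN_MODE_COMPLEX_FLOAT) selects the slot the middle end
	 consults when it expands a TCmode multiply or divide.  */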
18056
18057 set_optab_libfunc (add_optab, mode, "__addkf3");
18058 set_optab_libfunc (sub_optab, mode, "__subkf3");
18059 set_optab_libfunc (neg_optab, mode, "__negkf2");
18060 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18061 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18062 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18063 set_optab_libfunc (abs_optab, mode, "__abskf2");
18064 set_optab_libfunc (powi_optab, mode, "__powikf2");
18065
18066 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18067 set_optab_libfunc (ne_optab, mode, "__nekf2");
18068 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18069 set_optab_libfunc (ge_optab, mode, "__gekf2");
18070 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18071 set_optab_libfunc (le_optab, mode, "__lekf2");
18072 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18073
18074 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18075 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18076 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18077 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18078
18079 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
18080 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18081 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
18082
18083 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
18084 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18085 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
18086
18087 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
18088 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
18089 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
18090 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
18091 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
18092 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
18093
18094 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18095 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18096 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18097 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18098
18099 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18100 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18101 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18102 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18103
18104 if (TARGET_POWERPC64)
18105 {
18106 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18107 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18108 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18109 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18110 }
18111 }
18112
18113 else
18114 {
18115 set_optab_libfunc (add_optab, mode, "_q_add");
18116 set_optab_libfunc (sub_optab, mode, "_q_sub");
18117 set_optab_libfunc (neg_optab, mode, "_q_neg");
18118 set_optab_libfunc (smul_optab, mode, "_q_mul");
18119 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18120 if (TARGET_PPC_GPOPT)
18121 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18122
18123 set_optab_libfunc (eq_optab, mode, "_q_feq");
18124 set_optab_libfunc (ne_optab, mode, "_q_fne");
18125 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18126 set_optab_libfunc (ge_optab, mode, "_q_fge");
18127 set_optab_libfunc (lt_optab, mode, "_q_flt");
18128 set_optab_libfunc (le_optab, mode, "_q_fle");
18129
18130 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18131 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18132 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18133 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18134 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18135 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18136 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18137 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18138 }
18139 }
18140
18141 static void
18142 rs6000_init_libfuncs (void)
18143 {
18144 /* __float128 support. */
18145 if (TARGET_FLOAT128_TYPE)
18146 {
18147 init_float128_ibm (IFmode);
18148 init_float128_ieee (KFmode);
18149 }
18150
18151 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18152 if (TARGET_LONG_DOUBLE_128)
18153 {
18154 if (!TARGET_IEEEQUAD)
18155 init_float128_ibm (TFmode);
18156
18157       /* IEEE 128-bit, including the 32-bit SVR4 quad floating point routines.  */
18158 else
18159 init_float128_ieee (TFmode);
18160 }
18161 }
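/* Illustrative summary: with -mabi=ieeelongdouble (TARGET_IEEEQUAD), a
   long double (TFmode) addition lowers to a call to __addkf3, while the
   default IBM extended double goes through __gcc_qadd (or _xlqadd when
   -mxl-compat is in effect).  */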
18162
18163 /* Emit a potentially record-form instruction, setting DST from SRC.
18164 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18165 signed comparison of DST with zero. If DOT is 1, the generated RTL
18166 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18167 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18168 a separate COMPARE. */
18169
18170 void
18171 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18172 {
18173 if (dot == 0)
18174 {
18175 emit_move_insn (dst, src);
18176 return;
18177 }
18178
18179 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18180 {
18181 emit_move_insn (dst, src);
18182 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18183 return;
18184 }
18185
18186 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18187 if (dot == 1)
18188 {
18189 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18190 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18191 }
18192 else
18193 {
18194 rtx set = gen_rtx_SET (dst, src);
18195 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18196 }
18197 }
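/* Sketch of the RTL shapes emitted above (illustrative):

     dot == 1:  (parallel [(set CCREG (compare:CC SRC (const_int 0)))
			   (clobber DST)])
     dot == 2:  (parallel [(set CCREG (compare:CC SRC (const_int 0)))
			   (set DST SRC)])

   and, for a condition register other than CR0, a plain move followed by a
   separate compare.  */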
18198
18199 \f
18200 /* A validation routine: say whether CODE, a condition code, and MODE
18201 match. The other alternatives either don't make sense or should
18202 never be generated. */
18203
18204 void
18205 validate_condition_mode (enum rtx_code code, machine_mode mode)
18206 {
18207 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18208 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18209 && GET_MODE_CLASS (mode) == MODE_CC);
18210
18211 /* These don't make sense. */
18212 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18213 || mode != CCUNSmode);
18214
18215 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18216 || mode == CCUNSmode);
18217
18218 gcc_assert (mode == CCFPmode
18219 || (code != ORDERED && code != UNORDERED
18220 && code != UNEQ && code != LTGT
18221 && code != UNGT && code != UNLT
18222 && code != UNGE && code != UNLE));
18223
18224   /* These should never be generated except when
18225      flag_finite_math_only is in effect.  */
18226 gcc_assert (mode != CCFPmode
18227 || flag_finite_math_only
18228 || (code != LE && code != GE
18229 && code != UNEQ && code != LTGT
18230 && code != UNGT && code != UNLT));
18231
18232 /* These are invalid; the information is not there. */
18233 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18234 }
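/* For instance (illustrative): an unsigned comparison such as GTU must
   carry CCUNSmode, so calling validate_condition_mode (GT, CCUNSmode)
   trips the CCUNSmode assertion above.  */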
18235
18236 \f
18237 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18238    rldicl, rldicr, or rldic instruction in mode MODE.  If so, and if E is
18239    non-null, store there the bit offset (counted from the right) where the
18240    single stretch of 1 bits begins; and similarly for B (if non-null), the
18241    bit offset where it ends.  */
18242
18243 bool
18244 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18245 {
18246 unsigned HOST_WIDE_INT val = INTVAL (mask);
18247 unsigned HOST_WIDE_INT bit;
18248 int nb, ne;
18249 int n = GET_MODE_PRECISION (mode);
18250
18251 if (mode != DImode && mode != SImode)
18252 return false;
18253
18254 if (INTVAL (mask) >= 0)
18255 {
18256 bit = val & -val;
18257 ne = exact_log2 (bit);
18258 nb = exact_log2 (val + bit);
18259 }
18260 else if (val + 1 == 0)
18261 {
18262 nb = n;
18263 ne = 0;
18264 }
18265 else if (val & 1)
18266 {
18267 val = ~val;
18268 bit = val & -val;
18269 nb = exact_log2 (bit);
18270 ne = exact_log2 (val + bit);
18271 }
18272 else
18273 {
18274 bit = val & -val;
18275 ne = exact_log2 (bit);
18276 if (val + bit == 0)
18277 nb = n;
18278 else
18279 nb = 0;
18280 }
18281
18282 nb--;
18283
18284 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18285 return false;
18286
18287 if (b)
18288 *b = nb;
18289 if (e)
18290 *e = ne;
18291
18292 return true;
18293 }
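/* Worked example (illustrative): the SImode mask 0x00ffff00 has its single
   stretch of 1 bits in positions 8..23, so the function returns true with
   *e == 8 (where the stretch begins) and *b == 23 (where it ends):

     int b, e;
     if (rs6000_is_valid_mask (GEN_INT (0x00ffff00), &b, &e, SImode))
       gcc_checking_assert (b == 23 && e == 8);
*/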
18294
18295 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18296 or rldicr instruction, to implement an AND with it in mode MODE. */
18297
18298 bool
18299 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18300 {
18301 int nb, ne;
18302
18303 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18304 return false;
18305
18306   /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18307      does not wrap.  */
18308 if (mode == DImode)
18309 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18310
18311 /* For SImode, rlwinm can do everything. */
18312 if (mode == SImode)
18313 return (nb < 32 && ne < 32);
18314
18315 return false;
18316 }
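/* Illustrative DImode examples: 0xff (ne == 0) can be done with rldicl and
   0xffff000000000000 (nb == 63) with rldicr, while a wrap-around mask such
   as 0xff000000000000ff is a valid rotate mask but needs more than one
   AND-style insn, so the function returns false for it.  */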
18317
18318 /* Return the instruction template for an AND with mask in mode MODE, with
18319 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18320
18321 const char *
18322 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18323 {
18324 int nb, ne;
18325
18326 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18327 gcc_unreachable ();
18328
18329 if (mode == DImode && ne == 0)
18330 {
18331 operands[3] = GEN_INT (63 - nb);
18332 if (dot)
18333 return "rldicl. %0,%1,0,%3";
18334 return "rldicl %0,%1,0,%3";
18335 }
18336
18337 if (mode == DImode && nb == 63)
18338 {
18339 operands[3] = GEN_INT (63 - ne);
18340 if (dot)
18341 return "rldicr. %0,%1,0,%3";
18342 return "rldicr %0,%1,0,%3";
18343 }
18344
18345 if (nb < 32 && ne < 32)
18346 {
18347 operands[3] = GEN_INT (31 - nb);
18348 operands[4] = GEN_INT (31 - ne);
18349 if (dot)
18350 return "rlwinm. %0,%1,0,%3,%4";
18351 return "rlwinm %0,%1,0,%3,%4";
18352 }
18353
18354 gcc_unreachable ();
18355 }
18356
18357 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18358 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18359 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
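/* Illustrative example (assumed values; not from the original comments):
   in SImode, (ashift X 4) under mask 0x0000fff0 (nb == 15, ne == 4 == sh)
   is valid and becomes a single rlwinm, while (lshiftrt X 4) under mask
   0xf0000000 is rejected, since that mask selects bits the shift has
   already cleared.  */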
18360
18361 bool
18362 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18363 {
18364 int nb, ne;
18365
18366 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18367 return false;
18368
18369 int n = GET_MODE_PRECISION (mode);
18370 int sh = -1;
18371
18372 if (CONST_INT_P (XEXP (shift, 1)))
18373 {
18374 sh = INTVAL (XEXP (shift, 1));
18375 if (sh < 0 || sh >= n)
18376 return false;
18377 }
18378
18379 rtx_code code = GET_CODE (shift);
18380
  /* Convert any shift by 0 to a rotate, to simplify the code below.  */
18382 if (sh == 0)
18383 code = ROTATE;
18384
18385 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18386 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18387 code = ASHIFT;
18388 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18389 {
18390 code = LSHIFTRT;
18391 sh = n - sh;
18392 }
18393
18394 /* DImode rotates need rld*. */
18395 if (mode == DImode && code == ROTATE)
18396 return (nb == 63 || ne == 0 || ne == sh);
18397
18398 /* SImode rotates need rlw*. */
18399 if (mode == SImode && code == ROTATE)
18400 return (nb < 32 && ne < 32 && sh < 32);
18401
18402 /* Wrap-around masks are only okay for rotates. */
18403 if (ne > nb)
18404 return false;
18405
18406 /* Variable shifts are only okay for rotates. */
18407 if (sh < 0)
18408 return false;
18409
18410 /* Don't allow ASHIFT if the mask is wrong for that. */
18411 if (code == ASHIFT && ne < sh)
18412 return false;
18413
18414 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18415 if the mask is wrong for that. */
18416 if (nb < 32 && ne < 32 && sh < 32
18417 && !(code == LSHIFTRT && nb >= 32 - sh))
18418 return true;
18419
18420 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18421 if the mask is wrong for that. */
18422 if (code == LSHIFTRT)
18423 sh = 64 - sh;
18424 if (nb == 63 || ne == 0 || ne == sh)
18425 return !(code == LSHIFTRT && nb >= sh);
18426
18427 return false;
18428 }
18429
18430 /* Return the instruction template for a shift with mask in mode MODE, with
18431 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18432
18433 const char *
18434 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18435 {
18436 int nb, ne;
18437
18438 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18439 gcc_unreachable ();
18440
18441 if (mode == DImode && ne == 0)
18442 {
18443 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18444 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18445 operands[3] = GEN_INT (63 - nb);
18446 if (dot)
18447 return "rld%I2cl. %0,%1,%2,%3";
18448 return "rld%I2cl %0,%1,%2,%3";
18449 }
18450
18451 if (mode == DImode && nb == 63)
18452 {
18453 operands[3] = GEN_INT (63 - ne);
18454 if (dot)
18455 return "rld%I2cr. %0,%1,%2,%3";
18456 return "rld%I2cr %0,%1,%2,%3";
18457 }
18458
18459 if (mode == DImode
18460 && GET_CODE (operands[4]) != LSHIFTRT
18461 && CONST_INT_P (operands[2])
18462 && ne == INTVAL (operands[2]))
18463 {
18464 operands[3] = GEN_INT (63 - nb);
18465 if (dot)
18466 return "rld%I2c. %0,%1,%2,%3";
18467 return "rld%I2c %0,%1,%2,%3";
18468 }
18469
18470 if (nb < 32 && ne < 32)
18471 {
18472 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18473 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18474 operands[3] = GEN_INT (31 - nb);
18475 operands[4] = GEN_INT (31 - ne);
18476 /* This insn can also be a 64-bit rotate with mask that really makes
18477 it just a shift right (with mask); the %h below are to adjust for
18478 that situation (shift count is >= 32 in that case). */
18479 if (dot)
18480 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18481 return "rlw%I2nm %0,%1,%h2,%3,%4";
18482 }
18483
18484 gcc_unreachable ();
18485 }
18486
18487 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18488 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18489 ASHIFT, or LSHIFTRT) in mode MODE. */
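/* Illustrative example (assumed RTL; not from the original comments): the
   typical bit-field insert (ior (and Y 0xffff00ff) (and (ashift X 8)
   0xff00)) presents this function with mask 0xff00 and shift (ashift X 8);
   since ne == sh == 8 and nb == 15, it is accepted and a single rlwimi
   can do the insert.  */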
18490
18491 bool
18492 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18493 {
18494 int nb, ne;
18495
18496 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18497 return false;
18498
18499 int n = GET_MODE_PRECISION (mode);
18500
18501 int sh = INTVAL (XEXP (shift, 1));
18502 if (sh < 0 || sh >= n)
18503 return false;
18504
18505 rtx_code code = GET_CODE (shift);
18506
  /* Convert any shift by 0 to a rotate, to simplify the code below.  */
18508 if (sh == 0)
18509 code = ROTATE;
18510
18511 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18512 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18513 code = ASHIFT;
18514 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18515 {
18516 code = LSHIFTRT;
18517 sh = n - sh;
18518 }
18519
18520 /* DImode rotates need rldimi. */
18521 if (mode == DImode && code == ROTATE)
18522 return (ne == sh);
18523
18524 /* SImode rotates need rlwimi. */
18525 if (mode == SImode && code == ROTATE)
18526 return (nb < 32 && ne < 32 && sh < 32);
18527
18528 /* Wrap-around masks are only okay for rotates. */
18529 if (ne > nb)
18530 return false;
18531
18532 /* Don't allow ASHIFT if the mask is wrong for that. */
18533 if (code == ASHIFT && ne < sh)
18534 return false;
18535
18536 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18537 if the mask is wrong for that. */
18538 if (nb < 32 && ne < 32 && sh < 32
18539 && !(code == LSHIFTRT && nb >= 32 - sh))
18540 return true;
18541
18542 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18543 if the mask is wrong for that. */
18544 if (code == LSHIFTRT)
18545 sh = 64 - sh;
18546 if (ne == sh)
18547 return !(code == LSHIFTRT && nb >= sh);
18548
18549 return false;
18550 }
18551
18552 /* Return the instruction template for an insert with mask in mode MODE, with
18553 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18554
18555 const char *
18556 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18557 {
18558 int nb, ne;
18559
18560 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18561 gcc_unreachable ();
18562
18563 /* Prefer rldimi because rlwimi is cracked. */
18564 if (TARGET_POWERPC64
18565 && (!dot || mode == DImode)
18566 && GET_CODE (operands[4]) != LSHIFTRT
18567 && ne == INTVAL (operands[2]))
18568 {
18569 operands[3] = GEN_INT (63 - nb);
18570 if (dot)
18571 return "rldimi. %0,%1,%2,%3";
18572 return "rldimi %0,%1,%2,%3";
18573 }
18574
18575 if (nb < 32 && ne < 32)
18576 {
18577 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18578 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18579 operands[3] = GEN_INT (31 - nb);
18580 operands[4] = GEN_INT (31 - ne);
18581 if (dot)
18582 return "rlwimi. %0,%1,%2,%3,%4";
18583 return "rlwimi %0,%1,%2,%3,%4";
18584 }
18585
18586 gcc_unreachable ();
18587 }
18588
18589 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18590 using two machine instructions. */
18591
18592 bool
18593 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18594 {
  /* There are two kinds of AND we can handle with two insns:
     1) those we can do with two rl* insns;
     2) ori[s]; xori[s].

     We do not handle that last case yet.  */
18600
18601 /* If there is just one stretch of ones, we can do it. */
18602 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18603 return true;
18604
18605 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18606 one insn, we can do the whole thing with two. */
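  /* Worked example (illustrative; not from the original comments): for
     C = 0xff0ff0 we get bit1 = 0x10 (the lowest set bit), bit2 = 0x1000
     (the lowest clear bit above it), and bit3 = 0x10000 (the next set bit
     above that), so val + bit3 - bit2 = 0xfffff0 fills the hole at bits
     12..15 and leaves a single stretch of ones, which a single rl* insn
     can handle.  */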
18607 unsigned HOST_WIDE_INT val = INTVAL (c);
18608 unsigned HOST_WIDE_INT bit1 = val & -val;
18609 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18610 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18611 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18612 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18613 }
18614
18615 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18616 If EXPAND is true, split rotate-and-mask instructions we generate to
18617 their constituent parts as well (this is used during expand); if DOT
18618 is 1, make the last insn a record-form instruction clobbering the
18619 destination GPR and setting the CC reg (from operands[3]); if 2, set
18620 that GPR as well as the CC reg. */
18621
18622 void
18623 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18624 {
18625 gcc_assert (!(expand && dot));
18626
18627 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18628
  /* If it is one (non-wrapping) stretch of ones, the mode must be DImode,
     since a single-stretch SImode mask is always handled by one rlwinm;
     shift left, mask, then shift right.  This generates better code than
     doing the masks without shifts, or shifting first right and then
     left.  */
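  /* Worked example (illustrative; not from the original comments): for the
     DImode mask 0x0000000ffff00000 (ne == 20, nb == 35), shift is
     63 - 35 == 28; the value is shifted left 28 bits, ANDed with the
     shifted mask 0xffff000000000000 (now a valid nb == 63 rldicr mask),
     and shifted back right 28 bits.  */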
18632 int nb, ne;
18633 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18634 {
18635 gcc_assert (mode == DImode);
18636
18637 int shift = 63 - nb;
18638 if (expand)
18639 {
18640 rtx tmp1 = gen_reg_rtx (DImode);
18641 rtx tmp2 = gen_reg_rtx (DImode);
18642 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18643 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18644 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18645 }
18646 else
18647 {
18648 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18649 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18650 emit_move_insn (operands[0], tmp);
18651 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18652 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18653 }
18654 return;
18655 }
18656
18657 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18658 that does the rest. */
18659 unsigned HOST_WIDE_INT bit1 = val & -val;
18660 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18661 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18662 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18663
18664 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18665 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
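  /* Worked example (illustrative; not from the original comments): for
     val = 0xff0ff0, mask1 = 0xffffffffffff0fff (everything except the
     hole at bits 12..15) and mask2 = 0xfffff0 (the hole filled in);
     val == (mask1 & mask2), so ANDing with both masks in turn gives the
     same result as ANDing with val.  */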
18666
18667 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18668
18669 /* Two "no-rotate"-and-mask instructions, for SImode. */
18670 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18671 {
18672 gcc_assert (mode == SImode);
18673
18674 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18675 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18676 emit_move_insn (reg, tmp);
18677 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18678 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18679 return;
18680 }
18681
18682 gcc_assert (mode == DImode);
18683
18684 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18685 insns; we have to do the first in SImode, because it wraps. */
18686 if (mask2 <= 0xffffffff
18687 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18688 {
18689 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18690 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18691 GEN_INT (mask1));
18692 rtx reg_low = gen_lowpart (SImode, reg);
18693 emit_move_insn (reg_low, tmp);
18694 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18695 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18696 return;
18697 }
18698
18699 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18700 at the top end), rotate back and clear the other hole. */
18701 int right = exact_log2 (bit3);
18702 int left = 64 - right;
18703
18704 /* Rotate the mask too. */
18705 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18706
18707 if (expand)
18708 {
18709 rtx tmp1 = gen_reg_rtx (DImode);
18710 rtx tmp2 = gen_reg_rtx (DImode);
18711 rtx tmp3 = gen_reg_rtx (DImode);
18712 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18713 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18714 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18715 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18716 }
18717 else
18718 {
18719 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18720 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18721 emit_move_insn (operands[0], tmp);
18722 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18723 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18724 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18725 }
18726 }
18727 \f
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */
18730
18731 int
18732 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18733 {
18734 /* We might have been passed a SUBREG. */
18735 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
18736 return 0;
18737
  /* We might have been passed non-floating-point registers.  */
18739 if (!FP_REGNO_P (REGNO (reg1))
18740 || !FP_REGNO_P (REGNO (reg2)))
18741 return 0;
18742
18743 return (REGNO (reg1) == REGNO (reg2) - 1);
18744 }
18745
18746 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18747 addr1 and addr2 must be in consecutive memory locations
18748 (addr2 == addr1 + 8). */
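/* Illustrative example (assumed RTL; not from the original comments): the
   pair (mem:DF (plus:DI (reg:DI 3) (const_int 8))) and
   (mem:DF (plus:DI (reg:DI 3) (const_int 16))) qualifies: both are
   non-volatile, use the same base register, and the second offset is
   exactly 8 larger.  */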
18749
18750 int
18751 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18752 {
18753 rtx addr1, addr2;
18754 unsigned int reg1, reg2;
18755 int offset1, offset2;
18756
18757 /* The mems cannot be volatile. */
18758 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18759 return 0;
18760
18761 addr1 = XEXP (mem1, 0);
18762 addr2 = XEXP (mem2, 0);
18763
18764 /* Extract an offset (if used) from the first addr. */
18765 if (GET_CODE (addr1) == PLUS)
18766 {
18767 /* If not a REG, return zero. */
18768 if (GET_CODE (XEXP (addr1, 0)) != REG)
18769 return 0;
18770 else
18771 {
18772 reg1 = REGNO (XEXP (addr1, 0));
18773 /* The offset must be constant! */
18774 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
18775 return 0;
18776 offset1 = INTVAL (XEXP (addr1, 1));
18777 }
18778 }
18779 else if (GET_CODE (addr1) != REG)
18780 return 0;
18781 else
18782 {
18783 reg1 = REGNO (addr1);
18784 /* This was a simple (mem (reg)) expression. Offset is 0. */
18785 offset1 = 0;
18786 }
18787
18788 /* And now for the second addr. */
18789 if (GET_CODE (addr2) == PLUS)
18790 {
18791 /* If not a REG, return zero. */
18792 if (GET_CODE (XEXP (addr2, 0)) != REG)
18793 return 0;
18794 else
18795 {
18796 reg2 = REGNO (XEXP (addr2, 0));
18797 /* The offset must be constant. */
18798 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
18799 return 0;
18800 offset2 = INTVAL (XEXP (addr2, 1));
18801 }
18802 }
18803 else if (GET_CODE (addr2) != REG)
18804 return 0;
18805 else
18806 {
18807 reg2 = REGNO (addr2);
18808 /* This was a simple (mem (reg)) expression. Offset is 0. */
18809 offset2 = 0;
18810 }
18811
18812 /* Both of these must have the same base register. */
18813 if (reg1 != reg2)
18814 return 0;
18815
18816 /* The offset for the second addr must be 8 more than the first addr. */
18817 if (offset2 != offset1 + 8)
18818 return 0;
18819
18820 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18821 instructions. */
18822 return 1;
18823 }
18824 \f
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.  For SDmode values we
   need to use DDmode, in all other cases we can use the same mode.  */
18827 static machine_mode
18828 rs6000_secondary_memory_needed_mode (machine_mode mode)
18829 {
18830 if (lra_in_progress && mode == SDmode)
18831 return DDmode;
18832 return mode;
18833 }
18834
18835 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18836 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18837 only work on the traditional altivec registers, note if an altivec register
18838 was chosen. */
18839
18840 static enum rs6000_reg_type
18841 register_to_reg_type (rtx reg, bool *is_altivec)
18842 {
18843 HOST_WIDE_INT regno;
18844 enum reg_class rclass;
18845
18846 if (GET_CODE (reg) == SUBREG)
18847 reg = SUBREG_REG (reg);
18848
18849 if (!REG_P (reg))
18850 return NO_REG_TYPE;
18851
18852 regno = REGNO (reg);
18853 if (regno >= FIRST_PSEUDO_REGISTER)
18854 {
18855 if (!lra_in_progress && !reload_completed)
18856 return PSEUDO_REG_TYPE;
18857
18858 regno = true_regnum (reg);
18859 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
18860 return PSEUDO_REG_TYPE;
18861 }
18862
18863 gcc_assert (regno >= 0);
18864
18865 if (is_altivec && ALTIVEC_REGNO_P (regno))
18866 *is_altivec = true;
18867
18868 rclass = rs6000_regno_regclass[regno];
18869 return reg_class_to_reg_type[(int)rclass];
18870 }
18871
18872 /* Helper function to return the cost of adding a TOC entry address. */
18873
18874 static inline int
18875 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18876 {
18877 int ret;
18878
18879 if (TARGET_CMODEL != CMODEL_SMALL)
18880 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18881
18882 else
18883 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18884
18885 return ret;
18886 }
18887
/* Helper function for rs6000_secondary_reload to determine whether the memory
   address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
   needs reloading.  Return negative if the memory is not handled by the
   memory helper functions (so a different reload method should be tried),
   0 if no additional instructions are needed, and positive to give the
   extra cost of the memory.  */
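/* Illustrative example (assumed situation; not an original comment): an
   Altivec-class V4SImode access through (pre_inc (reg)) is not directly
   encodable, since the VMX loads and stores have no update forms, so a
   cost of 1 is returned and the reload helpers do the pointer update as a
   separate add.  */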
18894
18895 static int
18896 rs6000_secondary_reload_memory (rtx addr,
18897 enum reg_class rclass,
18898 machine_mode mode)
18899 {
18900 int extra_cost = 0;
18901 rtx reg, and_arg, plus_arg0, plus_arg1;
18902 addr_mask_type addr_mask;
18903 const char *type = NULL;
18904 const char *fail_msg = NULL;
18905
18906 if (GPR_REG_CLASS_P (rclass))
18907 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18908
18909 else if (rclass == FLOAT_REGS)
18910 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18911
18912 else if (rclass == ALTIVEC_REGS)
18913 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18914
18915 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18916 else if (rclass == VSX_REGS)
18917 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18918 & ~RELOAD_REG_AND_M16);
18919
  /* If the register allocator hasn't made up its mind yet on the register
     class to use, settle on defaults.  */
18922 else if (rclass == NO_REGS)
18923 {
18924 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18925 & ~RELOAD_REG_AND_M16);
18926
18927 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18928 addr_mask &= ~(RELOAD_REG_INDEXED
18929 | RELOAD_REG_PRE_INCDEC
18930 | RELOAD_REG_PRE_MODIFY);
18931 }
18932
18933 else
18934 addr_mask = 0;
18935
  /* If the mode isn't valid in this register class, just return now.  */
18937 if ((addr_mask & RELOAD_REG_VALID) == 0)
18938 {
18939 if (TARGET_DEBUG_ADDR)
18940 {
18941 fprintf (stderr,
18942 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18943 "not valid in class\n",
18944 GET_MODE_NAME (mode), reg_class_names[rclass]);
18945 debug_rtx (addr);
18946 }
18947
18948 return -1;
18949 }
18950
18951 switch (GET_CODE (addr))
18952 {
    /* Does the register class support auto update forms for this mode?  We
       don't need a scratch register, since the powerpc only supports
       PRE_INC, PRE_DEC, and PRE_MODIFY.  */
18956 case PRE_INC:
18957 case PRE_DEC:
18958 reg = XEXP (addr, 0);
      if (!base_reg_operand (reg, GET_MODE (reg)))
18960 {
18961 fail_msg = "no base register #1";
18962 extra_cost = -1;
18963 }
18964
18965 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18966 {
18967 extra_cost = 1;
18968 type = "update";
18969 }
18970 break;
18971
18972 case PRE_MODIFY:
18973 reg = XEXP (addr, 0);
18974 plus_arg1 = XEXP (addr, 1);
18975 if (!base_reg_operand (reg, GET_MODE (reg))
18976 || GET_CODE (plus_arg1) != PLUS
18977 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18978 {
18979 fail_msg = "bad PRE_MODIFY";
18980 extra_cost = -1;
18981 }
18982
18983 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18984 {
18985 extra_cost = 1;
18986 type = "update";
18987 }
18988 break;
18989
18990 /* Do we need to simulate AND -16 to clear the bottom address bits used
18991 in VMX load/stores? Only allow the AND for vector sizes. */
18992 case AND:
18993 and_arg = XEXP (addr, 0);
18994 if (GET_MODE_SIZE (mode) != 16
18995 || GET_CODE (XEXP (addr, 1)) != CONST_INT
18996 || INTVAL (XEXP (addr, 1)) != -16)
18997 {
18998 fail_msg = "bad Altivec AND #1";
18999 extra_cost = -1;
19000 }
19001
19002 if (rclass != ALTIVEC_REGS)
19003 {
19004 if (legitimate_indirect_address_p (and_arg, false))
19005 extra_cost = 1;
19006
19007 else if (legitimate_indexed_address_p (and_arg, false))
19008 extra_cost = 2;
19009
19010 else
19011 {
19012 fail_msg = "bad Altivec AND #2";
19013 extra_cost = -1;
19014 }
19015
19016 type = "and";
19017 }
19018 break;
19019
19020 /* If this is an indirect address, make sure it is a base register. */
19021 case REG:
19022 case SUBREG:
19023 if (!legitimate_indirect_address_p (addr, false))
19024 {
19025 extra_cost = 1;
19026 type = "move";
19027 }
19028 break;
19029
19030 /* If this is an indexed address, make sure the register class can handle
19031 indexed addresses for this mode. */
19032 case PLUS:
19033 plus_arg0 = XEXP (addr, 0);
19034 plus_arg1 = XEXP (addr, 1);
19035
19036 /* (plus (plus (reg) (constant)) (constant)) is generated during
19037 push_reload processing, so handle it now. */
19038 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19039 {
19040 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19041 {
19042 extra_cost = 1;
19043 type = "offset";
19044 }
19045 }
19046
19047 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19048 push_reload processing, so handle it now. */
19049 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19050 {
19051 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19052 {
19053 extra_cost = 1;
19054 type = "indexed #2";
19055 }
19056 }
19057
19058 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19059 {
19060 fail_msg = "no base register #2";
19061 extra_cost = -1;
19062 }
19063
19064 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19065 {
19066 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19067 || !legitimate_indexed_address_p (addr, false))
19068 {
19069 extra_cost = 1;
19070 type = "indexed";
19071 }
19072 }
19073
19074 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19075 && CONST_INT_P (plus_arg1))
19076 {
19077 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19078 {
19079 extra_cost = 1;
19080 type = "vector d-form offset";
19081 }
19082 }
19083
19084 /* Make sure the register class can handle offset addresses. */
19085 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19086 {
19087 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19088 {
19089 extra_cost = 1;
19090 type = "offset #2";
19091 }
19092 }
19093
19094 else
19095 {
19096 fail_msg = "bad PLUS";
19097 extra_cost = -1;
19098 }
19099
19100 break;
19101
19102 case LO_SUM:
19103 /* Quad offsets are restricted and can't handle normal addresses. */
19104 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19105 {
19106 extra_cost = -1;
19107 type = "vector d-form lo_sum";
19108 }
19109
19110 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19111 {
19112 fail_msg = "bad LO_SUM";
19113 extra_cost = -1;
19114 }
19115
19116 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19117 {
19118 extra_cost = 1;
19119 type = "lo_sum";
19120 }
19121 break;
19122
19123 /* Static addresses need to create a TOC entry. */
19124 case CONST:
19125 case SYMBOL_REF:
19126 case LABEL_REF:
19127 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19128 {
19129 extra_cost = -1;
19130 type = "vector d-form lo_sum #2";
19131 }
19132
19133 else
19134 {
19135 type = "address";
19136 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19137 }
19138 break;
19139
19140 /* TOC references look like offsetable memory. */
19141 case UNSPEC:
19142 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19143 {
19144 fail_msg = "bad UNSPEC";
19145 extra_cost = -1;
19146 }
19147
19148 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19149 {
19150 extra_cost = -1;
19151 type = "vector d-form lo_sum #3";
19152 }
19153
19154 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19155 {
19156 extra_cost = 1;
19157 type = "toc reference";
19158 }
19159 break;
19160
19161 default:
19162 {
19163 fail_msg = "bad address";
19164 extra_cost = -1;
19165 }
19166 }
19167
19168 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19169 {
19170 if (extra_cost < 0)
19171 fprintf (stderr,
19172 "rs6000_secondary_reload_memory error: mode = %s, "
19173 "class = %s, addr_mask = '%s', %s\n",
19174 GET_MODE_NAME (mode),
19175 reg_class_names[rclass],
19176 rs6000_debug_addr_mask (addr_mask, false),
19177 (fail_msg != NULL) ? fail_msg : "<bad address>");
19178
19179 else
19180 fprintf (stderr,
19181 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19182 "addr_mask = '%s', extra cost = %d, %s\n",
19183 GET_MODE_NAME (mode),
19184 reg_class_names[rclass],
19185 rs6000_debug_addr_mask (addr_mask, false),
19186 extra_cost,
19187 (type) ? type : "<none>");
19188
19189 debug_rtx (addr);
19190 }
19191
19192 return extra_cost;
19193 }
19194
/* Helper function for rs6000_secondary_reload to return true if a move to a
   different register class is really a simple move.  */
19197
19198 static bool
19199 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19200 enum rs6000_reg_type from_type,
19201 machine_mode mode)
19202 {
19203 int size = GET_MODE_SIZE (mode);
19204
  /* Add support for various direct moves available.  In this function, we
     only look at cases where we don't need any extra registers, and one or
     more simple move insns are issued.  Historically, small integers were
     not allowed in FPR/VSX registers.  Single precision binary floating
     point is not a simple move because we need to convert to the single
     precision memory layout.  The 4-byte SDmode can be moved.  TDmode
     values are disallowed since they need special direct move handling,
     which we do not support yet.  */
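  /* Illustrative example (assumed configuration; not an original comment):
     on a 64-bit power8 target (TARGET_DIRECT_MOVE && TARGET_POWERPC64), a
     DImode copy between a GPR and a VSX register is a single mtvsrd or
     mfvsrd, so we return true; on a 32-bit power8 target the same DImode
     copy is not a simple move and we return false.  */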
19212 if (TARGET_DIRECT_MOVE
19213 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19214 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19215 {
19216 if (TARGET_POWERPC64)
19217 {
	  /* ISA 2.07: MTVSRD or MFVSRD.  */
19219 if (size == 8)
19220 return true;
19221
19222 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19223 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19224 return true;
19225 }
19226
19227 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19228 if (TARGET_P8_VECTOR)
19229 {
19230 if (mode == SImode)
19231 return true;
19232
19233 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19234 return true;
19235 }
19236
19237 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19238 if (mode == SDmode)
19239 return true;
19240 }
19241
19242 /* Power6+: MFTGPR or MFFGPR. */
19243 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19244 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19245 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19246 return true;
19247
19248 /* Move to/from SPR. */
19249 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19250 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19251 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19252 return true;
19253
19254 return false;
19255 }
19256
/* Direct move helper function for rs6000_secondary_reload; handle all of the
   special direct moves that involve allocating an extra register.  Return
   true if there is such a helper, after setting SRI's insn code and extra
   cost, and false if not.  */
19261
19262 static bool
19263 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19264 enum rs6000_reg_type from_type,
19265 machine_mode mode,
19266 secondary_reload_info *sri,
19267 bool altivec_p)
19268 {
19269 bool ret = false;
19270 enum insn_code icode = CODE_FOR_nothing;
19271 int cost = 0;
19272 int size = GET_MODE_SIZE (mode);
19273
19274 if (TARGET_POWERPC64 && size == 16)
19275 {
      /* Handle moving 128-bit values from GPRs to VSX registers on
	 ISA 2.07 (power8, power9) when running in 64-bit mode using
	 XXPERMDI to glue the two 64-bit values back together.  */
19279 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19280 {
19281 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19282 icode = reg_addr[mode].reload_vsx_gpr;
19283 }
19284
      /* Handle moving 128-bit values from VSX registers to GPRs on
	 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access
	 to the bottom 64-bit value.  */
19288 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19289 {
19290 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19291 icode = reg_addr[mode].reload_gpr_vsx;
19292 }
19293 }
19294
19295 else if (TARGET_POWERPC64 && mode == SFmode)
19296 {
19297 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19298 {
	  cost = 3;	/* xscvdpspn, mfvsrd, plus an AND.  */
19300 icode = reg_addr[mode].reload_gpr_vsx;
19301 }
19302
19303 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19304 {
	  cost = 2;	/* mtvsrwz, xscvspdpn.  */
19306 icode = reg_addr[mode].reload_vsx_gpr;
19307 }
19308 }
19309
19310 else if (!TARGET_POWERPC64 && size == 8)
19311 {
19312 /* Handle moving 64-bit values from GPRs to floating point registers on
19313 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19314 32-bit values back together. Altivec register classes must be handled
19315 specially since a different instruction is used, and the secondary
19316 reload support requires a single instruction class in the scratch
19317 register constraint. However, right now TFmode is not allowed in
19318 Altivec registers, so the pattern will never match. */
19319 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19320 {
19321 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19322 icode = reg_addr[mode].reload_fpr_gpr;
19323 }
19324 }
19325
19326 if (icode != CODE_FOR_nothing)
19327 {
19328 ret = true;
19329 if (sri)
19330 {
19331 sri->icode = icode;
19332 sri->extra_cost = cost;
19333 }
19334 }
19335
19336 return ret;
19337 }
19338
/* Return whether a move between two register classes can be done either
   directly (simple move) or via a pattern that uses a single extra temporary
   (using ISA 2.07's direct move in this case).  */
19342
19343 static bool
19344 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19345 enum rs6000_reg_type from_type,
19346 machine_mode mode,
19347 secondary_reload_info *sri,
19348 bool altivec_p)
19349 {
19350 /* Fall back to load/store reloads if either type is not a register. */
19351 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19352 return false;
19353
19354 /* If we haven't allocated registers yet, assume the move can be done for the
19355 standard register types. */
19356 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19357 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19358 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19359 return true;
19360
  /* A move within the same set of registers is a simple move for
     non-specialized registers.  */
19363 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19364 return true;
19365
19366 /* Check whether a simple move can be done directly. */
19367 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19368 {
19369 if (sri)
19370 {
19371 sri->icode = CODE_FOR_nothing;
19372 sri->extra_cost = 0;
19373 }
19374 return true;
19375 }
19376
19377 /* Now check if we can do it in a few steps. */
19378 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19379 altivec_p);
19380 }
19381
19382 /* Inform reload about cases where moving X with a mode MODE to a register in
19383 RCLASS requires an extra scratch or immediate register. Return the class
19384 needed for the immediate register.
19385
19386 For VSX and Altivec, we may need a register to convert sp+offset into
19387 reg+sp.
19388
19389 For misaligned 64-bit gpr loads and stores we need a register to
19390 convert an offset address to indirect. */
19391
19392 static reg_class_t
19393 rs6000_secondary_reload (bool in_p,
19394 rtx x,
19395 reg_class_t rclass_i,
19396 machine_mode mode,
19397 secondary_reload_info *sri)
19398 {
19399 enum reg_class rclass = (enum reg_class) rclass_i;
19400 reg_class_t ret = ALL_REGS;
19401 enum insn_code icode;
19402 bool default_p = false;
19403 bool done_p = false;
19404
19405 /* Allow subreg of memory before/during reload. */
19406 bool memory_p = (MEM_P (x)
19407 || (!reload_completed && GET_CODE (x) == SUBREG
19408 && MEM_P (SUBREG_REG (x))));
19409
19410 sri->icode = CODE_FOR_nothing;
19411 sri->t_icode = CODE_FOR_nothing;
19412 sri->extra_cost = 0;
19413 icode = ((in_p)
19414 ? reg_addr[mode].reload_load
19415 : reg_addr[mode].reload_store);
19416
19417 if (REG_P (x) || register_operand (x, mode))
19418 {
19419 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19420 bool altivec_p = (rclass == ALTIVEC_REGS);
19421 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19422
19423 if (!in_p)
19424 std::swap (to_type, from_type);
19425
19426 /* Can we do a direct move of some sort? */
19427 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19428 altivec_p))
19429 {
19430 icode = (enum insn_code)sri->icode;
19431 default_p = false;
19432 done_p = true;
19433 ret = NO_REGS;
19434 }
19435 }
19436
19437 /* Make sure 0.0 is not reloaded or forced into memory. */
19438 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19439 {
19440 ret = NO_REGS;
19441 default_p = false;
19442 done_p = true;
19443 }
19444
  /* If this is a scalar floating point value and we want to load it into the
     traditional Altivec registers, move it via a traditional floating point
     register, unless we have D-form addressing.  Also make sure that
     non-zero constants use an FPR.  */
19449 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19450 && !mode_supports_vmx_dform (mode)
19451 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19452 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19453 {
19454 ret = FLOAT_REGS;
19455 default_p = false;
19456 done_p = true;
19457 }
19458
19459 /* Handle reload of load/stores if we have reload helper functions. */
19460 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19461 {
19462 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19463 mode);
19464
19465 if (extra_cost >= 0)
19466 {
19467 done_p = true;
19468 ret = NO_REGS;
19469 if (extra_cost > 0)
19470 {
19471 sri->extra_cost = extra_cost;
19472 sri->icode = icode;
19473 }
19474 }
19475 }
19476
19477 /* Handle unaligned loads and stores of integer registers. */
19478 if (!done_p && TARGET_POWERPC64
19479 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19480 && memory_p
19481 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19482 {
19483 rtx addr = XEXP (x, 0);
19484 rtx off = address_offset (addr);
19485
19486 if (off != NULL_RTX)
19487 {
19488 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19489 unsigned HOST_WIDE_INT offset = INTVAL (off);
19490
19491 /* We need a secondary reload when our legitimate_address_p
19492 says the address is good (as otherwise the entire address
19493 will be reloaded), and the offset is not a multiple of
19494 four or we have an address wrap. Address wrap will only
19495 occur for LO_SUMs since legitimate_offset_address_p
19496 rejects addresses for 16-byte mems that will wrap. */
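	  /* Illustrative example (assumed values; not an original comment):
	     a DImode access at (plus (reg) (const_int 0x7ffd)) passes the
	     range check but has a misaligned offset, so reload_di_load or
	     reload_di_store (or the _si_ variants for -m32 -mpowerpc64) is
	     used to rewrite it as an indirect access.  */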
19497 if (GET_CODE (addr) == LO_SUM
19498 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19499 && ((offset & 3) != 0
19500 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19501 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19502 && (offset & 3) != 0))
19503 {
19504 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19505 if (in_p)
19506 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19507 : CODE_FOR_reload_di_load);
19508 else
19509 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19510 : CODE_FOR_reload_di_store);
19511 sri->extra_cost = 2;
19512 ret = NO_REGS;
19513 done_p = true;
19514 }
19515 else
19516 default_p = true;
19517 }
19518 else
19519 default_p = true;
19520 }
19521
19522 if (!done_p && !TARGET_POWERPC64
19523 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19524 && memory_p
19525 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19526 {
19527 rtx addr = XEXP (x, 0);
19528 rtx off = address_offset (addr);
19529
19530 if (off != NULL_RTX)
19531 {
19532 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19533 unsigned HOST_WIDE_INT offset = INTVAL (off);
19534
19535 /* We need a secondary reload when our legitimate_address_p
19536 says the address is good (as otherwise the entire address
19537 will be reloaded), and we have a wrap.
19538
19539 legitimate_lo_sum_address_p allows LO_SUM addresses to
19540 have any offset so test for wrap in the low 16 bits.
19541
19542 legitimate_offset_address_p checks for the range
19543 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19544 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19545 [0x7ff4,0x7fff] respectively, so test for the
19546 intersection of these ranges, [0x7ffc,0x7fff] and
19547 [0x7ff4,0x7ff7] respectively.
19548
19549 Note that the address we see here may have been
19550 manipulated by legitimize_reload_address. */
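	  /* Illustrative example (assumed values; not an original comment):
	     a DFmode access (mode size 8, so EXTRA == 4) at offset 0x7ffc is
	     fine for its first word, but its second word at offset 0x8000
	     would wrap, so the address is forced into a base register
	     instead.  */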
19551 if (GET_CODE (addr) == LO_SUM
19552 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19553 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19554 {
19555 if (in_p)
19556 sri->icode = CODE_FOR_reload_si_load;
19557 else
19558 sri->icode = CODE_FOR_reload_si_store;
19559 sri->extra_cost = 2;
19560 ret = NO_REGS;
19561 done_p = true;
19562 }
19563 else
19564 default_p = true;
19565 }
19566 else
19567 default_p = true;
19568 }
19569
19570 if (!done_p)
19571 default_p = true;
19572
19573 if (default_p)
19574 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19575
19576 gcc_assert (ret != ALL_REGS);
19577
19578 if (TARGET_DEBUG_ADDR)
19579 {
19580 fprintf (stderr,
19581 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19582 "mode = %s",
19583 reg_class_names[ret],
19584 in_p ? "true" : "false",
19585 reg_class_names[rclass],
19586 GET_MODE_NAME (mode));
19587
19588 if (reload_completed)
19589 fputs (", after reload", stderr);
19590
19591 if (!done_p)
19592 fputs (", done_p not set", stderr);
19593
19594 if (default_p)
19595 fputs (", default secondary reload", stderr);
19596
19597 if (sri->icode != CODE_FOR_nothing)
19598 fprintf (stderr, ", reload func = %s, extra cost = %d",
19599 insn_data[sri->icode].name, sri->extra_cost);
19600
19601 else if (sri->extra_cost > 0)
19602 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19603
19604 fputs ("\n", stderr);
19605 debug_rtx (x);
19606 }
19607
19608 return ret;
19609 }
19610
19611 /* Better tracing for rs6000_secondary_reload_inner. */
19612
19613 static void
19614 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19615 bool store_p)
19616 {
19617 rtx set, clobber;
19618
19619 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19620
19621 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19622 store_p ? "store" : "load");
19623
19624 if (store_p)
19625 set = gen_rtx_SET (mem, reg);
19626 else
19627 set = gen_rtx_SET (reg, mem);
19628
19629 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19630 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19631 }
19632
19633 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19634 ATTRIBUTE_NORETURN;
19635
19636 static void
19637 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19638 bool store_p)
19639 {
19640 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19641 gcc_unreachable ();
19642 }
19643
/* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
   reload helper functions.  These were identified in
   rs6000_secondary_reload_memory, and if reload decided to use the secondary
   reload, it calls the insns:
	reload_<RELOAD:mode>_<P:mptrsize>_store
	reload_<RELOAD:mode>_<P:mptrsize>_load

   which in turn call this function, to do whatever is necessary to create
   valid addresses.  */
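/* Illustrative example (assumed situation; not an original comment): when
   reload hands us an Altivec V4SImode access at (plus (reg) (const_int 32))
   on a pre-ISA 3.0 VSX target, the Altivec class has no offset addressing,
   so the PLUS is computed into the scratch register and the access is
   rewritten as (mem (scratch)).  */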
19653
19654 void
19655 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19656 {
19657 int regno = true_regnum (reg);
19658 machine_mode mode = GET_MODE (reg);
19659 addr_mask_type addr_mask;
19660 rtx addr;
19661 rtx new_addr;
19662 rtx op_reg, op0, op1;
19663 rtx and_op;
19664 rtx cc_clobber;
19665 rtvec rv;
19666
19667 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19668 || !base_reg_operand (scratch, GET_MODE (scratch)))
19669 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19670
19671 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19672 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19673
19674 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19675 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19676
19677 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19678 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19679
19680 else
19681 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19682
19683 /* Make sure the mode is valid in this register class. */
19684 if ((addr_mask & RELOAD_REG_VALID) == 0)
19685 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19686
19687 if (TARGET_DEBUG_ADDR)
19688 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19689
19690 new_addr = addr = XEXP (mem, 0);
19691 switch (GET_CODE (addr))
19692 {
19693 /* Does the register class support auto update forms for this mode? If
19694 not, do the update now. We don't need a scratch register, since the
19695 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19696 case PRE_INC:
19697 case PRE_DEC:
19698 op_reg = XEXP (addr, 0);
19699 if (!base_reg_operand (op_reg, Pmode))
19700 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19701
19702 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19703 {
19704 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
19705 new_addr = op_reg;
19706 }
19707 break;
19708
19709 case PRE_MODIFY:
19710 op0 = XEXP (addr, 0);
19711 op1 = XEXP (addr, 1);
19712 if (!base_reg_operand (op0, Pmode)
19713 || GET_CODE (op1) != PLUS
19714 || !rtx_equal_p (op0, XEXP (op1, 0)))
19715 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19716
19717 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19718 {
19719 emit_insn (gen_rtx_SET (op0, op1));
	  new_addr = op0;
19721 }
19722 break;
19723
19724 /* Do we need to simulate AND -16 to clear the bottom address bits used
19725 in VMX load/stores? */
19726 case AND:
19727 op0 = XEXP (addr, 0);
19728 op1 = XEXP (addr, 1);
19729 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19730 {
19731 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
19732 op_reg = op0;
19733
	  else if (GET_CODE (op0) == PLUS)
	    {
	      emit_insn (gen_rtx_SET (scratch, op0));
	      op_reg = scratch;
	    }
19739
19740 else
19741 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19742
19743 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19744 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19745 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19746 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19747 new_addr = scratch;
19748 }
19749 break;
19750
19751 /* If this is an indirect address, make sure it is a base register. */
19752 case REG:
19753 case SUBREG:
19754 if (!base_reg_operand (addr, GET_MODE (addr)))
19755 {
19756 emit_insn (gen_rtx_SET (scratch, addr));
19757 new_addr = scratch;
19758 }
19759 break;
19760
19761 /* If this is an indexed address, make sure the register class can handle
19762 indexed addresses for this mode. */
19763 case PLUS:
19764 op0 = XEXP (addr, 0);
19765 op1 = XEXP (addr, 1);
19766 if (!base_reg_operand (op0, Pmode))
19767 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19768
19769 else if (int_reg_operand (op1, Pmode))
19770 {
19771 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19772 {
19773 emit_insn (gen_rtx_SET (scratch, addr));
19774 new_addr = scratch;
19775 }
19776 }
19777
19778 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19779 {
19780 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19781 || !quad_address_p (addr, mode, false))
19782 {
19783 emit_insn (gen_rtx_SET (scratch, addr));
19784 new_addr = scratch;
19785 }
19786 }
19787
19788 /* Make sure the register class can handle offset addresses. */
19789 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19790 {
19791 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19792 {
19793 emit_insn (gen_rtx_SET (scratch, addr));
19794 new_addr = scratch;
19795 }
19796 }
19797
19798 else
19799 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19800
19801 break;
19802
19803 case LO_SUM:
19804 op0 = XEXP (addr, 0);
19805 op1 = XEXP (addr, 1);
19806 if (!base_reg_operand (op0, Pmode))
19807 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19808
19809 else if (int_reg_operand (op1, Pmode))
19810 {
19811 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19812 {
19813 emit_insn (gen_rtx_SET (scratch, addr));
19814 new_addr = scratch;
19815 }
19816 }
19817
19818 /* Quad offsets are restricted and can't handle normal addresses. */
19819 else if (mode_supports_dq_form (mode))
19820 {
19821 emit_insn (gen_rtx_SET (scratch, addr));
19822 new_addr = scratch;
19823 }
19824
19825 /* Make sure the register class can handle offset addresses. */
19826 else if (legitimate_lo_sum_address_p (mode, addr, false))
19827 {
19828 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19829 {
19830 emit_insn (gen_rtx_SET (scratch, addr));
19831 new_addr = scratch;
19832 }
19833 }
19834
19835 else
19836 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19837
19838 break;
19839
19840 case SYMBOL_REF:
19841 case CONST:
19842 case LABEL_REF:
19843 rs6000_emit_move (scratch, addr, Pmode);
19844 new_addr = scratch;
19845 break;
19846
19847 default:
19848 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19849 }
19850
19851 /* Adjust the address if it changed. */
19852 if (addr != new_addr)
19853 {
19854 mem = replace_equiv_address_nv (mem, new_addr);
19855 if (TARGET_DEBUG_ADDR)
19856 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19857 }
19858
19859 /* Now create the move. */
19860 if (store_p)
19861 emit_insn (gen_rtx_SET (mem, reg));
19862 else
19863 emit_insn (gen_rtx_SET (reg, mem));
19864
19865 return;
19866 }
19867
19868 /* Convert reloads involving 64-bit gprs and misaligned offset
19869 addressing, or multiple 32-bit gprs and offsets that are too large,
19870 to use indirect addressing. */
19871
19872 void
19873 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19874 {
19875 int regno = true_regnum (reg);
19876 enum reg_class rclass;
19877 rtx addr;
19878 rtx scratch_or_premodify = scratch;
19879
19880 if (TARGET_DEBUG_ADDR)
19881 {
19882 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19883 store_p ? "store" : "load");
19884 fprintf (stderr, "reg:\n");
19885 debug_rtx (reg);
19886 fprintf (stderr, "mem:\n");
19887 debug_rtx (mem);
19888 fprintf (stderr, "scratch:\n");
19889 debug_rtx (scratch);
19890 }
19891
19892 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
19893 gcc_assert (GET_CODE (mem) == MEM);
19894 rclass = REGNO_REG_CLASS (regno);
19895 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19896 addr = XEXP (mem, 0);
19897
19898 if (GET_CODE (addr) == PRE_MODIFY)
19899 {
19900 gcc_assert (REG_P (XEXP (addr, 0))
19901 && GET_CODE (XEXP (addr, 1)) == PLUS
19902 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19903 scratch_or_premodify = XEXP (addr, 0);
19904 if (!HARD_REGISTER_P (scratch_or_premodify))
19905 /* If we have a pseudo here then reload will have arranged
19906 to have it replaced, but only in the original insn.
19907 Use the replacement here too. */
19908 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
19909
19910 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
19911 expressions from the original insn, without unsharing them.
19912 Any RTL that points into the original insn will of course
19913 have register replacements applied. That is why we don't
19914 need to look for replacements under the PLUS. */
19915 addr = XEXP (addr, 1);
19916 }
19917 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19918
19919 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19920
19921 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19922
19923 /* Now create the move. */
19924 if (store_p)
19925 emit_insn (gen_rtx_SET (mem, reg));
19926 else
19927 emit_insn (gen_rtx_SET (reg, mem));
19928
19929 return;
19930 }
19931
19932 /* Given an rtx X being reloaded into a reg required to be
19933 in class CLASS, return the class of reg to actually use.
19934 In general this is just CLASS; but on some machines
19935 in some cases it is preferable to use a more restrictive class.
19936
19937 On the RS/6000, we have to return NO_REGS when we want to reload a
19938 floating-point CONST_DOUBLE to force it to be copied to memory.
19939
19940 We also don't want to reload integer values into floating-point
19941 registers if we can at all help it. In fact, this can
19942 cause reload to die, if it tries to generate a reload of CTR
19943 into a FP register and discovers it doesn't have the memory location
19944 required.
19945
19946 ??? Would it be a good idea to have reload do the converse, that is
19947 try to reload floating modes into FP registers if possible?
19948 */
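/* Illustrative example (assumed RTL; not an original comment): asked for a
   class to reload (const_double:DF 1.5) into FLOAT_REGS, we return NO_REGS,
   which forces the constant into the constant pool so it is loaded from
   memory instead.  */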
19949
19950 static enum reg_class
19951 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19952 {
19953 machine_mode mode = GET_MODE (x);
19954 bool is_constant = CONSTANT_P (x);
19955
19956 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19957 reload class for it. */
19958 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19959 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19960 return NO_REGS;
19961
19962 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19963 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19964 return NO_REGS;
19965
19966 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19967 the reloading of address expressions using PLUS into floating point
19968 registers. */
19969 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19970 {
19971 if (is_constant)
19972 {
19973 /* Zero is always allowed in all VSX registers. */
19974 if (x == CONST0_RTX (mode))
19975 return rclass;
19976
19977 /* If this is a vector constant that can be formed with a few Altivec
19978 instructions, we want altivec registers. */
19979 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19980 return ALTIVEC_REGS;
19981
19982 /* If this is an integer constant that can easily be loaded into
19983 vector registers, allow it. */
19984 if (CONST_INT_P (x))
19985 {
19986 HOST_WIDE_INT value = INTVAL (x);
19987
19988 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
19989 2.06 can generate it in the Altivec registers with
19990 VSPLTI<x>. */
19991 if (value == -1)
19992 {
19993 if (TARGET_P8_VECTOR)
19994 return rclass;
19995 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19996 return ALTIVEC_REGS;
19997 else
19998 return NO_REGS;
19999 }
20000
20001 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20002 a sign extend in the Altivec registers. */
20003 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20004 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20005 return ALTIVEC_REGS;
20006 }
20007
20008 /* Force constant to memory. */
20009 return NO_REGS;
20010 }
20011
20012 /* D-form addressing can easily reload the value. */
20013 if (mode_supports_vmx_dform (mode)
20014 || mode_supports_dq_form (mode))
20015 return rclass;
20016
20017 /* If this is a scalar floating point value and we don't have D-form
20018 addressing, prefer the traditional floating point registers so that we
20019 can use D-form (register+offset) addressing. */
20020 if (rclass == VSX_REGS
20021 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20022 return FLOAT_REGS;
20023
20024 /* Prefer the Altivec registers if Altivec is handling the vector
20025 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20026 loads. */
20027 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20028 || mode == V1TImode)
20029 return ALTIVEC_REGS;
20030
20031 return rclass;
20032 }
20033
20034 if (is_constant || GET_CODE (x) == PLUS)
20035 {
20036 if (reg_class_subset_p (GENERAL_REGS, rclass))
20037 return GENERAL_REGS;
20038 if (reg_class_subset_p (BASE_REGS, rclass))
20039 return BASE_REGS;
20040 return NO_REGS;
20041 }
20042
20043 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20044 return GENERAL_REGS;
20045
20046 return rclass;
20047 }
20048
20049 /* Debug version of rs6000_preferred_reload_class. */
20050 static enum reg_class
20051 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20052 {
20053 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20054
20055 fprintf (stderr,
20056 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20057 "mode = %s, x:\n",
20058 reg_class_names[ret], reg_class_names[rclass],
20059 GET_MODE_NAME (GET_MODE (x)));
20060 debug_rtx (x);
20061
20062 return ret;
20063 }
20064
/* If we are copying between FP or AltiVec registers and anything else, we need
   a memory location.  The exception is when we are targeting ppc64 and the
   move instructions between the FPRs and GPRs are available.  Also, under
   VSX, you can copy vector registers from the FP register set to the Altivec
   register set and vice versa.  */
20070
20071 static bool
20072 rs6000_secondary_memory_needed (machine_mode mode,
20073 reg_class_t from_class,
20074 reg_class_t to_class)
20075 {
20076 enum rs6000_reg_type from_type, to_type;
20077 bool altivec_p = ((from_class == ALTIVEC_REGS)
20078 || (to_class == ALTIVEC_REGS));
20079
  /* If a simple/direct move is available, we don't need secondary memory.  */
20081 from_type = reg_class_to_reg_type[(int)from_class];
20082 to_type = reg_class_to_reg_type[(int)to_class];
20083
20084 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20085 (secondary_reload_info *)0, altivec_p))
20086 return false;
20087
20088 /* If we have a floating point or vector register class, we need to use
20089 memory to transfer the data. */
20090 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20091 return true;
20092
20093 return false;
20094 }
20095
20096 /* Debug version of rs6000_secondary_memory_needed. */
20097 static bool
20098 rs6000_debug_secondary_memory_needed (machine_mode mode,
20099 reg_class_t from_class,
20100 reg_class_t to_class)
20101 {
20102 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20103
20104 fprintf (stderr,
20105 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20106 "to_class = %s, mode = %s\n",
20107 ret ? "true" : "false",
20108 reg_class_names[from_class],
20109 reg_class_names[to_class],
20110 GET_MODE_NAME (mode));
20111
20112 return ret;
20113 }
20114
20115 /* Return the register class of a scratch register needed to copy IN into
20116 or out of a register in RCLASS in MODE. If it can be done directly,
20117 NO_REGS is returned. */
20118
20119 static enum reg_class
20120 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20121 rtx in)
20122 {
20123 int regno;
20124
20125 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20126 #if TARGET_MACHO
20127 && MACHOPIC_INDIRECT
20128 #endif
20129 ))
20130 {
20131 /* We cannot copy a symbolic operand directly into anything
20132 other than BASE_REGS for TARGET_ELF. So indicate that a
20133 register from BASE_REGS is needed as an intermediate
20134 register.
20135
20136 On Darwin, pic addresses require a load from memory, which
20137 needs a base register. */
20138 if (rclass != BASE_REGS
20139 && (GET_CODE (in) == SYMBOL_REF
20140 || GET_CODE (in) == HIGH
20141 || GET_CODE (in) == LABEL_REF
20142 || GET_CODE (in) == CONST))
20143 return BASE_REGS;
20144 }
20145
20146 if (GET_CODE (in) == REG)
20147 {
20148 regno = REGNO (in);
20149 if (regno >= FIRST_PSEUDO_REGISTER)
20150 {
20151 regno = true_regnum (in);
20152 if (regno >= FIRST_PSEUDO_REGISTER)
20153 regno = -1;
20154 }
20155 }
20156 else if (GET_CODE (in) == SUBREG)
20157 {
20158 regno = true_regnum (in);
20159 if (regno >= FIRST_PSEUDO_REGISTER)
20160 regno = -1;
20161 }
20162 else
20163 regno = -1;
20164
20165 /* If we have VSX register moves, prefer moving scalar values between
20166 Altivec registers and GPRs by going via an FPR (and then via memory)
20167 instead of reloading the secondary memory address for Altivec moves. */
20168 if (TARGET_VSX
20169 && GET_MODE_SIZE (mode) < 16
20170 && !mode_supports_vmx_dform (mode)
20171 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20172 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20173 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20174 && (regno >= 0 && INT_REGNO_P (regno)))))
20175 return FLOAT_REGS;
20176
20177 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20178 into anything. */
20179 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20180 || (regno >= 0 && INT_REGNO_P (regno)))
20181 return NO_REGS;
20182
20183 /* Constants, memory, and VSX registers can go into VSX registers (both the
20184 traditional floating point and the altivec registers). */
20185 if (rclass == VSX_REGS
20186 && (regno == -1 || VSX_REGNO_P (regno)))
20187 return NO_REGS;
20188
20189 /* Constants, memory, and FP registers can go into FP registers. */
20190 if ((regno == -1 || FP_REGNO_P (regno))
20191 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20192 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20193
20194 /* Memory and AltiVec registers can go into AltiVec registers. */
20195 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20196 && rclass == ALTIVEC_REGS)
20197 return NO_REGS;
20198
20199 /* We can copy among the CR registers. */
20200 if ((rclass == CR_REGS || rclass == CR0_REGS)
20201 && regno >= 0 && CR_REGNO_P (regno))
20202 return NO_REGS;
20203
20204 /* Otherwise, we need GENERAL_REGS. */
20205 return GENERAL_REGS;
20206 }
20207
20208 /* Debug version of rs6000_secondary_reload_class. */
20209 static enum reg_class
20210 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20211 machine_mode mode, rtx in)
20212 {
20213 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20214 fprintf (stderr,
20215 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20216 "mode = %s, input rtx:\n",
20217 reg_class_names[ret], reg_class_names[rclass],
20218 GET_MODE_NAME (mode));
20219 debug_rtx (in);
20220
20221 return ret;
20222 }
20223
20224 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20225
20226 static bool
20227 rs6000_can_change_mode_class (machine_mode from,
20228 machine_mode to,
20229 reg_class_t rclass)
20230 {
20231 unsigned from_size = GET_MODE_SIZE (from);
20232 unsigned to_size = GET_MODE_SIZE (to);
20233
20234 if (from_size != to_size)
20235 {
20236 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20237
20238 if (reg_classes_intersect_p (xclass, rclass))
20239 {
20240 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20241 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20242 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20243 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20244
20245 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20246 single register under VSX because the scalar part of the register
20247 is in the upper 64-bits, and not the lower 64-bits. Types like
20248 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20249 IEEE floating point can't overlap, and neither can small
20250 values. */
20251
20252 if (to_float128_vector_p && from_float128_vector_p)
20253 return true;
20254
20255 else if (to_float128_vector_p || from_float128_vector_p)
20256 return false;
20257
20258 /* TDmode in floating-mode registers must always go into a register
20259 pair with the most significant word in the even-numbered register
20260 to match ISA requirements. In little-endian mode, this does not
20261 match subreg numbering, so we cannot allow subregs. */
20262 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20263 return false;
20264
20265 if (from_size < 8 || to_size < 8)
20266 return false;
20267
20268 if (from_size == 8 && (8 * to_nregs) != to_size)
20269 return false;
20270
20271 if (to_size == 8 && (8 * from_nregs) != from_size)
20272 return false;
20273
20274 return true;
20275 }
20276 else
20277 return true;
20278 }
20279
20280 /* Since the VSX register set includes traditional floating point registers
20281 and altivec registers, just check for the size being different instead of
20282 trying to check whether the modes are vector modes. Otherwise it won't
20283 allow say DF and DI to change classes. For types like TFmode and TDmode
20284 that take 2 64-bit registers, rather than a single 128-bit register, don't
20285 allow subregs of those types to other 128-bit types. */
20286 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20287 {
20288 unsigned num_regs = (from_size + 15) / 16;
20289 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20290 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20291 return false;
20292
20293 return (from_size == 8 || from_size == 16);
20294 }
20295
20296 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20297 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20298 return false;
20299
20300 return true;
20301 }
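/* Worked examples (illustrative): DFmode and DImode are both 8 bytes, so
   they may change classes freely.  A (subreg:DI (reg:KF ...) 0) is
   rejected because the 64-bit scalar part lives in the upper half of the
   128-bit VSX register.  TDmode subregs are rejected on little-endian
   because the even/odd register-pair layout does not match subreg
   numbering.  */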
20302
20303 /* Debug version of rs6000_can_change_mode_class. */
20304 static bool
20305 rs6000_debug_can_change_mode_class (machine_mode from,
20306 machine_mode to,
20307 reg_class_t rclass)
20308 {
20309 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20310
20311 fprintf (stderr,
20312 "rs6000_can_change_mode_class, return %s, from = %s, "
20313 "to = %s, rclass = %s\n",
20314 ret ? "true" : "false",
20315 GET_MODE_NAME (from), GET_MODE_NAME (to),
20316 reg_class_names[rclass]);
20317
20318 return ret;
20319 }
20320 \f
20321 /* Return a string to do a move operation of 128 bits of data. */
20322
20323 const char *
20324 rs6000_output_move_128bit (rtx operands[])
20325 {
20326 rtx dest = operands[0];
20327 rtx src = operands[1];
20328 machine_mode mode = GET_MODE (dest);
20329 int dest_regno;
20330 int src_regno;
20331 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20332 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20333
20334 if (REG_P (dest))
20335 {
20336 dest_regno = REGNO (dest);
20337 dest_gpr_p = INT_REGNO_P (dest_regno);
20338 dest_fp_p = FP_REGNO_P (dest_regno);
20339 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20340 dest_vsx_p = dest_fp_p | dest_vmx_p;
20341 }
20342 else
20343 {
20344 dest_regno = -1;
20345 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20346 }
20347
20348 if (REG_P (src))
20349 {
20350 src_regno = REGNO (src);
20351 src_gpr_p = INT_REGNO_P (src_regno);
20352 src_fp_p = FP_REGNO_P (src_regno);
20353 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20354 src_vsx_p = src_fp_p | src_vmx_p;
20355 }
20356 else
20357 {
20358 src_regno = -1;
20359 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20360 }
20361
20362 /* Register moves. */
20363 if (dest_regno >= 0 && src_regno >= 0)
20364 {
20365 if (dest_gpr_p)
20366 {
20367 if (src_gpr_p)
20368 return "#";
20369
20370 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20371 return (WORDS_BIG_ENDIAN
20372 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20373 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20374
20375 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20376 return "#";
20377 }
20378
20379 else if (TARGET_VSX && dest_vsx_p)
20380 {
20381 if (src_vsx_p)
20382 return "xxlor %x0,%x1,%x1";
20383
20384 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20385 return (WORDS_BIG_ENDIAN
20386 ? "mtvsrdd %x0,%1,%L1"
20387 : "mtvsrdd %x0,%L1,%1");
20388
20389 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20390 return "#";
20391 }
20392
20393 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20394 return "vor %0,%1,%1";
20395
20396 else if (dest_fp_p && src_fp_p)
20397 return "#";
20398 }
20399
20400 /* Loads. */
20401 else if (dest_regno >= 0 && MEM_P (src))
20402 {
20403 if (dest_gpr_p)
20404 {
20405 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20406 return "lq %0,%1";
20407 else
20408 return "#";
20409 }
20410
20411 else if (TARGET_ALTIVEC && dest_vmx_p
20412 && altivec_indexed_or_indirect_operand (src, mode))
20413 return "lvx %0,%y1";
20414
20415 else if (TARGET_VSX && dest_vsx_p)
20416 {
20417 if (mode_supports_dq_form (mode)
20418 && quad_address_p (XEXP (src, 0), mode, true))
20419 return "lxv %x0,%1";
20420
20421 else if (TARGET_P9_VECTOR)
20422 return "lxvx %x0,%y1";
20423
20424 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20425 return "lxvw4x %x0,%y1";
20426
20427 else
20428 return "lxvd2x %x0,%y1";
20429 }
20430
20431 else if (TARGET_ALTIVEC && dest_vmx_p)
20432 return "lvx %0,%y1";
20433
20434 else if (dest_fp_p)
20435 return "#";
20436 }
20437
20438 /* Stores. */
20439 else if (src_regno >= 0 && MEM_P (dest))
20440 {
20441 if (src_gpr_p)
20442 {
20443 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20444 return "stq %1,%0";
20445 else
20446 return "#";
20447 }
20448
20449 else if (TARGET_ALTIVEC && src_vmx_p
20450 && altivec_indexed_or_indirect_operand (dest, mode))
20451 return "stvx %1,%y0";
20452
20453 else if (TARGET_VSX && src_vsx_p)
20454 {
20455 if (mode_supports_dq_form (mode)
20456 && quad_address_p (XEXP (dest, 0), mode, true))
20457 return "stxv %x1,%0";
20458
20459 else if (TARGET_P9_VECTOR)
20460 return "stxvx %x1,%y0";
20461
20462 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20463 return "stxvw4x %x1,%y0";
20464
20465 else
20466 return "stxvd2x %x1,%y0";
20467 }
20468
20469 else if (TARGET_ALTIVEC && src_vmx_p)
20470 return "stvx %1,%y0";
20471
20472 else if (src_fp_p)
20473 return "#";
20474 }
20475
20476 /* Constants. */
20477 else if (dest_regno >= 0
20478 && (GET_CODE (src) == CONST_INT
20479 || GET_CODE (src) == CONST_WIDE_INT
20480 || GET_CODE (src) == CONST_DOUBLE
20481 || GET_CODE (src) == CONST_VECTOR))
20482 {
20483 if (dest_gpr_p)
20484 return "#";
20485
20486 else if ((dest_vmx_p && TARGET_ALTIVEC)
20487 || (dest_vsx_p && TARGET_VSX))
20488 return output_vec_const_move (operands);
20489 }
20490
20491 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20492 }
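/* Usage sketch (illustrative): a VSX-to-VSX V2DImode copy returns
   "xxlor %x0,%x1,%x1"; a 128-bit GPR-to-GPR copy returns "#" so that the
   post-reload splitter (see rs6000_split_128bit_ok_p below) can break it
   into two doubleword moves; and a GPR load uses "lq" only when
   quad_load_store_p allows it.  */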
20493
20494 /* Validate a 128-bit move. */
20495 bool
20496 rs6000_move_128bit_ok_p (rtx operands[])
20497 {
20498 machine_mode mode = GET_MODE (operands[0]);
20499 return (gpc_reg_operand (operands[0], mode)
20500 || gpc_reg_operand (operands[1], mode));
20501 }
20502
20503 /* Return true if a 128-bit move needs to be split. */
20504 bool
20505 rs6000_split_128bit_ok_p (rtx operands[])
20506 {
20507 if (!reload_completed)
20508 return false;
20509
20510 if (!gpr_or_gpr_p (operands[0], operands[1]))
20511 return false;
20512
20513 if (quad_load_store_p (operands[0], operands[1]))
20514 return false;
20515
20516 return true;
20517 }
20518
20519 \f
20520 /* Given a comparison operation, return the bit number in CCR to test.
20522
20523 SCC_P is 1 if this is for an scc. That means that %D will have been
20524 used instead of %C, so the bits will be in different places.
20525
20526 Return -1 if OP isn't a valid comparison for some reason. */
20527
20528 int
20529 ccr_bit (rtx op, int scc_p)
20530 {
20531 enum rtx_code code = GET_CODE (op);
20532 machine_mode cc_mode;
20533 int cc_regnum;
20534 int base_bit;
20535 rtx reg;
20536
20537 if (!COMPARISON_P (op))
20538 return -1;
20539
20540 reg = XEXP (op, 0);
20541
20542 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20543
20544 cc_mode = GET_MODE (reg);
20545 cc_regnum = REGNO (reg);
20546 base_bit = 4 * (cc_regnum - CR0_REGNO);
20547
20548 validate_condition_mode (code, cc_mode);
20549
20550 /* When generating a sCOND operation, only positive conditions are
20551 allowed. */
20552 gcc_assert (!scc_p
20553 || code == EQ || code == GT || code == LT || code == UNORDERED
20554 || code == GTU || code == LTU);
20555
20556 switch (code)
20557 {
20558 case NE:
20559 return scc_p ? base_bit + 3 : base_bit + 2;
20560 case EQ:
20561 return base_bit + 2;
20562 case GT: case GTU: case UNLE:
20563 return base_bit + 1;
20564 case LT: case LTU: case UNGE:
20565 return base_bit;
20566 case ORDERED: case UNORDERED:
20567 return base_bit + 3;
20568
20569 case GE: case GEU:
20570 /* If scc, we will have done a cror to put the bit in the
20571 unordered position. So test that bit. For integer, this is ! LT
20572 unless this is an scc insn. */
20573 return scc_p ? base_bit + 3 : base_bit;
20574
20575 case LE: case LEU:
20576 return scc_p ? base_bit + 3 : base_bit + 1;
20577
20578 default:
20579 gcc_unreachable ();
20580 }
20581 }
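/* Worked example (illustrative): for (gt (reg:CC cr1) (const_int 0)),
   base_bit is 4 * (cr1 - CR0_REGNO) = 4, so the GT bit is
   base_bit + 1 = 5.  With SCC_P set, GE instead tests the bit that a
   cror left in the "unordered" slot, base_bit + 3 = 7.  */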
20582 \f
20583 /* Return the GOT register. */
20584
20585 rtx
20586 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20587 {
20588 /* The second flow pass currently (June 1999) can't update
20589 regs_ever_live without disturbing other parts of the compiler, so
20590 update it here to make the prolog/epilogue code happy. */
20591 if (!can_create_pseudo_p ()
20592 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20593 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20594
20595 crtl->uses_pic_offset_table = 1;
20596
20597 return pic_offset_table_rtx;
20598 }
20599 \f
20600 static rs6000_stack_t stack_info;
20601
20602 /* Function to init struct machine_function.
20603 This will be called, via a pointer variable,
20604 from push_function_context. */
20605
20606 static struct machine_function *
20607 rs6000_init_machine_status (void)
20608 {
20609 stack_info.reload_completed = 0;
20610 return ggc_cleared_alloc<machine_function> ();
20611 }
20612 \f
20613 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20614
20615 /* Write out a function code label. */
20616
20617 void
20618 rs6000_output_function_entry (FILE *file, const char *fname)
20619 {
20620 if (fname[0] != '.')
20621 {
20622 switch (DEFAULT_ABI)
20623 {
20624 default:
20625 gcc_unreachable ();
20626
20627 case ABI_AIX:
20628 if (DOT_SYMBOLS)
20629 putc ('.', file);
20630 else
20631 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20632 break;
20633
20634 case ABI_ELFv2:
20635 case ABI_V4:
20636 case ABI_DARWIN:
20637 break;
20638 }
20639 }
20640
20641 RS6000_OUTPUT_BASENAME (file, fname);
20642 }
20643
20644 /* Print an operand. Recognize special options, documented below. */
20645
20646 #if TARGET_ELF
20647 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20648 only introduced by the linker, when applying the sda21
20649 relocation. */
20650 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20651 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20652 #else
20653 #define SMALL_DATA_RELOC "sda21"
20654 #define SMALL_DATA_REG 0
20655 #endif
20656
20657 void
20658 print_operand (FILE *file, rtx x, int code)
20659 {
20660 int i;
20661 unsigned HOST_WIDE_INT uval;
20662
20663 switch (code)
20664 {
20665 /* %a is output_address. */
20666
20667 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20668 output_operand. */
20669
20670 case 'D':
20671 /* Like 'J' but get to the GT bit only. */
20672 gcc_assert (REG_P (x));
20673
20674 /* Bit 1 is GT bit. */
20675 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20676
20677 /* Add one for shift count in rlinm for scc. */
20678 fprintf (file, "%d", i + 1);
20679 return;
20680
20681 case 'e':
20682 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20683 if (! INT_P (x))
20684 {
20685 output_operand_lossage ("invalid %%e value");
20686 return;
20687 }
20688
20689 uval = INTVAL (x);
20690 if ((uval & 0xffff) == 0 && uval != 0)
20691 putc ('s', file);
20692 return;
20693
20694 case 'E':
20695 /* X is a CR register. Print the number of the EQ bit of the CR. */
20696 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20697 output_operand_lossage ("invalid %%E value");
20698 else
20699 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20700 return;
20701
20702 case 'f':
20703 /* X is a CR register. Print the shift count needed to move it
20704 to the high-order four bits. */
20705 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20706 output_operand_lossage ("invalid %%f value");
20707 else
20708 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20709 return;
20710
20711 case 'F':
20712 /* Similar, but print the count for the rotate in the opposite
20713 direction. */
20714 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20715 output_operand_lossage ("invalid %%F value");
20716 else
20717 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20718 return;
20719
20720 case 'G':
20721 /* X is a constant integer. If it is negative, print "m",
20722 otherwise print "z". This is to make an aze or ame insn. */
20723 if (GET_CODE (x) != CONST_INT)
20724 output_operand_lossage ("invalid %%G value");
20725 else if (INTVAL (x) >= 0)
20726 putc ('z', file);
20727 else
20728 putc ('m', file);
20729 return;
20730
20731 case 'h':
20732 /* If constant, output low-order five bits. Otherwise, write
20733 normally. */
20734 if (INT_P (x))
20735 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20736 else
20737 print_operand (file, x, 0);
20738 return;
20739
20740 case 'H':
20741 /* If constant, output low-order six bits. Otherwise, write
20742 normally. */
20743 if (INT_P (x))
20744 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20745 else
20746 print_operand (file, x, 0);
20747 return;
20748
20749 case 'I':
20750 /* Print `i' if this is a constant, else nothing. */
20751 if (INT_P (x))
20752 putc ('i', file);
20753 return;
20754
20755 case 'j':
20756 /* Write the bit number in CCR for jump. */
20757 i = ccr_bit (x, 0);
20758 if (i == -1)
20759 output_operand_lossage ("invalid %%j code");
20760 else
20761 fprintf (file, "%d", i);
20762 return;
20763
20764 case 'J':
20765 /* Similar, but add one for shift count in rlinm for scc and pass
20766 scc flag to `ccr_bit'. */
20767 i = ccr_bit (x, 1);
20768 if (i == -1)
20769 output_operand_lossage ("invalid %%J code");
20770 else
20771 /* If we want bit 31, write a shift count of zero, not 32. */
20772 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20773 return;
20774
20775 case 'k':
20776 /* X must be a constant. Write the 1's complement of the
20777 constant. */
20778 if (! INT_P (x))
20779 output_operand_lossage ("invalid %%k value");
20780 else
20781 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20782 return;
20783
20784 case 'K':
20785 /* X must be a symbolic constant on ELF. Write an
20786 expression suitable for an 'addi' that adds in the low 16
20787 bits of the MEM. */
20788 if (GET_CODE (x) == CONST)
20789 {
20790 if (GET_CODE (XEXP (x, 0)) != PLUS
20791 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
20792 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20793 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
20794 output_operand_lossage ("invalid %%K value");
20795 }
20796 print_operand_address (file, x);
20797 fputs ("@l", file);
20798 return;
20799
20800 /* %l is output_asm_label. */
20801
20802 case 'L':
20803 /* Write second word of DImode or DFmode reference. Works on register
20804 or non-indexed memory only. */
20805 if (REG_P (x))
20806 fputs (reg_names[REGNO (x) + 1], file);
20807 else if (MEM_P (x))
20808 {
20809 machine_mode mode = GET_MODE (x);
20810 /* Handle possible auto-increment. Since it is pre-increment and
20811 we have already done it, we can just use an offset of one word. */
20812 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20813 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20814 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20815 UNITS_PER_WORD));
20816 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20817 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20818 UNITS_PER_WORD));
20819 else
20820 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20821 UNITS_PER_WORD),
20822 0));
20823
20824 if (small_data_operand (x, GET_MODE (x)))
20825 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20826 reg_names[SMALL_DATA_REG]);
20827 }
20828 return;
20829
20830 case 'N': /* Unused */
20831 /* Write the number of elements in the vector times 4. */
20832 if (GET_CODE (x) != PARALLEL)
20833 output_operand_lossage ("invalid %%N value");
20834 else
20835 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20836 return;
20837
20838 case 'O': /* Unused */
20839 /* Similar, but subtract 1 first. */
20840 if (GET_CODE (x) != PARALLEL)
20841 output_operand_lossage ("invalid %%O value");
20842 else
20843 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20844 return;
20845
20846 case 'p':
20847 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20848 if (! INT_P (x)
20849 || INTVAL (x) < 0
20850 || (i = exact_log2 (INTVAL (x))) < 0)
20851 output_operand_lossage ("invalid %%p value");
20852 else
20853 fprintf (file, "%d", i);
20854 return;
20855
20856 case 'P':
20857 /* The operand must be an indirect memory reference. The result
20858 is the register name. */
20859 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
20860 || REGNO (XEXP (x, 0)) >= 32)
20861 output_operand_lossage ("invalid %%P value");
20862 else
20863 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20864 return;
20865
20866 case 'q':
20867 /* This outputs the logical code corresponding to a boolean
20868 expression. The expression may have one or both operands
20869 negated (if one, only the first one). For condition register
20870 logical operations, it will also treat the negated
20871 CR codes as NOTs, but not handle NOTs of them. */
20872 {
20873 const char *const *t = 0;
20874 const char *s;
20875 enum rtx_code code = GET_CODE (x);
20876 static const char * const tbl[3][3] = {
20877 { "and", "andc", "nor" },
20878 { "or", "orc", "nand" },
20879 { "xor", "eqv", "xor" } };
20880
20881 if (code == AND)
20882 t = tbl[0];
20883 else if (code == IOR)
20884 t = tbl[1];
20885 else if (code == XOR)
20886 t = tbl[2];
20887 else
20888 output_operand_lossage ("invalid %%q value");
20889
20890 if (GET_CODE (XEXP (x, 0)) != NOT)
20891 s = t[0];
20892 else
20893 {
20894 if (GET_CODE (XEXP (x, 1)) == NOT)
20895 s = t[2];
20896 else
20897 s = t[1];
20898 }
20899
20900 fputs (s, file);
20901 }
20902 return;
20903
20904 case 'Q':
20905 if (! TARGET_MFCRF)
20906 return;
20907 fputc (',', file);
20908 /* FALLTHRU */
20909
20910 case 'R':
20911 /* X is a CR register. Print the mask for `mtcrf'. */
20912 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20913 output_operand_lossage ("invalid %%R value");
20914 else
20915 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20916 return;
20917
20918 case 's':
20919 /* Low 5 bits of 32 - value. */
20920 if (! INT_P (x))
20921 output_operand_lossage ("invalid %%s value");
20922 else
20923 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20924 return;
20925
20926 case 't':
20927 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20928 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
20929
20930 /* Bit 3 is OV bit. */
20931 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20932
20933 /* If we want bit 31, write a shift count of zero, not 32. */
20934 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20935 return;
20936
20937 case 'T':
20938 /* Print the symbolic name of a branch target register. */
20939 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
20940 && REGNO (x) != CTR_REGNO))
20941 output_operand_lossage ("invalid %%T value");
20942 else if (REGNO (x) == LR_REGNO)
20943 fputs ("lr", file);
20944 else
20945 fputs ("ctr", file);
20946 return;
20947
20948 case 'u':
20949 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20950 for use in unsigned operand. */
20951 if (! INT_P (x))
20952 {
20953 output_operand_lossage ("invalid %%u value");
20954 return;
20955 }
20956
20957 uval = INTVAL (x);
20958 if ((uval & 0xffff) == 0)
20959 uval >>= 16;
20960
20961 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20962 return;
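      /* Example (illustrative): %u on 0x12340000 prints 0x1234 (the high
	 half); on 0x1234 it also prints 0x1234 (the low half).  Exactly
	 one half is assumed to be non-zero.  */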
20963
20964 case 'v':
20965 /* High-order 16 bits of constant for use in signed operand. */
20966 if (! INT_P (x))
20967 output_operand_lossage ("invalid %%v value");
20968 else
20969 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20970 (INTVAL (x) >> 16) & 0xffff);
20971 return;
20972
20973 case 'U':
20974 /* Print `u' if this has an auto-increment or auto-decrement. */
20975 if (MEM_P (x)
20976 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20977 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20978 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
20979 putc ('u', file);
20980 return;
20981
20982 case 'V':
20983 /* Print the trap code for this operand. */
20984 switch (GET_CODE (x))
20985 {
20986 case EQ:
20987 fputs ("eq", file); /* 4 */
20988 break;
20989 case NE:
20990 fputs ("ne", file); /* 24 */
20991 break;
20992 case LT:
20993 fputs ("lt", file); /* 16 */
20994 break;
20995 case LE:
20996 fputs ("le", file); /* 20 */
20997 break;
20998 case GT:
20999 fputs ("gt", file); /* 8 */
21000 break;
21001 case GE:
21002 fputs ("ge", file); /* 12 */
21003 break;
21004 case LTU:
21005 fputs ("llt", file); /* 2 */
21006 break;
21007 case LEU:
21008 fputs ("lle", file); /* 6 */
21009 break;
21010 case GTU:
21011 fputs ("lgt", file); /* 1 */
21012 break;
21013 case GEU:
21014 fputs ("lge", file); /* 5 */
21015 break;
21016 default:
21017 gcc_unreachable ();
21018 }
21019 break;
21020
21021 case 'w':
21022 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21023 normally. */
21024 if (INT_P (x))
21025 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21026 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21027 else
21028 print_operand (file, x, 0);
21029 return;
21030
21031 case 'x':
21032 /* X is an FPR or Altivec register used in a VSX context. */
21033 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21034 output_operand_lossage ("invalid %%x value");
21035 else
21036 {
21037 int reg = REGNO (x);
21038 int vsx_reg = (FP_REGNO_P (reg)
21039 ? reg - 32
21040 : reg - FIRST_ALTIVEC_REGNO + 32);
21041
21042 #ifdef TARGET_REGNAMES
21043 if (TARGET_REGNAMES)
21044 fprintf (file, "%%vs%d", vsx_reg);
21045 else
21046 #endif
21047 fprintf (file, "%d", vsx_reg);
21048 }
21049 return;
21050
21051 case 'X':
21052 if (MEM_P (x)
21053 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21054 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21055 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21056 putc ('x', file);
21057 return;
21058
21059 case 'Y':
21060 /* Like 'L', for third word of TImode/PTImode. */
21061 if (REG_P (x))
21062 fputs (reg_names[REGNO (x) + 2], file);
21063 else if (MEM_P (x))
21064 {
21065 machine_mode mode = GET_MODE (x);
21066 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21067 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21068 output_address (mode, plus_constant (Pmode,
21069 XEXP (XEXP (x, 0), 0), 8));
21070 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21071 output_address (mode, plus_constant (Pmode,
21072 XEXP (XEXP (x, 0), 0), 8));
21073 else
21074 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21075 if (small_data_operand (x, GET_MODE (x)))
21076 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21077 reg_names[SMALL_DATA_REG]);
21078 }
21079 return;
21080
21081 case 'z':
21082 /* X is a SYMBOL_REF. Write out the name preceded by a
21083 period and without any trailing data in brackets. Used for function
21084 names. If we are configured for System V (or the embedded ABI) on
21085 the PowerPC, do not emit the period, since those systems do not use
21086 TOCs and the like. */
21087 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21088
21089 /* For macho, check to see if we need a stub. */
21090 if (TARGET_MACHO)
21091 {
21092 const char *name = XSTR (x, 0);
21093 #if TARGET_MACHO
21094 if (darwin_emit_branch_islands
21095 && MACHOPIC_INDIRECT
21096 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21097 name = machopic_indirection_name (x, /*stub_p=*/true);
21098 #endif
21099 assemble_name (file, name);
21100 }
21101 else if (!DOT_SYMBOLS)
21102 assemble_name (file, XSTR (x, 0));
21103 else
21104 rs6000_output_function_entry (file, XSTR (x, 0));
21105 return;
21106
21107 case 'Z':
21108 /* Like 'L', for last word of TImode/PTImode. */
21109 if (REG_P (x))
21110 fputs (reg_names[REGNO (x) + 3], file);
21111 else if (MEM_P (x))
21112 {
21113 machine_mode mode = GET_MODE (x);
21114 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21115 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21116 output_address (mode, plus_constant (Pmode,
21117 XEXP (XEXP (x, 0), 0), 12));
21118 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21119 output_address (mode, plus_constant (Pmode,
21120 XEXP (XEXP (x, 0), 0), 12));
21121 else
21122 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21123 if (small_data_operand (x, GET_MODE (x)))
21124 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21125 reg_names[SMALL_DATA_REG]);
21126 }
21127 return;
21128
21129 /* Print AltiVec memory operand. */
21130 case 'y':
21131 {
21132 rtx tmp;
21133
21134 gcc_assert (MEM_P (x));
21135
21136 tmp = XEXP (x, 0);
21137
21138 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21139 && GET_CODE (tmp) == AND
21140 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21141 && INTVAL (XEXP (tmp, 1)) == -16)
21142 tmp = XEXP (tmp, 0);
21143 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21144 && GET_CODE (tmp) == PRE_MODIFY)
21145 tmp = XEXP (tmp, 1);
21146 if (REG_P (tmp))
21147 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21148 else
21149 {
21150 if (GET_CODE (tmp) != PLUS
21151 || !REG_P (XEXP (tmp, 0))
21152 || !REG_P (XEXP (tmp, 1)))
21153 {
21154 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21155 break;
21156 }
21157
21158 if (REGNO (XEXP (tmp, 0)) == 0)
21159 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21160 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21161 else
21162 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21163 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21164 }
21165 break;
21166 }
21167
21168 case 0:
21169 if (REG_P (x))
21170 fprintf (file, "%s", reg_names[REGNO (x)]);
21171 else if (MEM_P (x))
21172 {
21173 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21174 know the width from the mode. */
21175 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21176 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21177 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21178 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21179 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21180 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21181 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21182 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21183 else
21184 output_address (GET_MODE (x), XEXP (x, 0));
21185 }
21186 else
21187 {
21188 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21189 /* This hack along with a corresponding hack in
21190 rs6000_output_addr_const_extra arranges to output addends
21191 where the assembler expects to find them. eg.
21192 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21193 without this hack would be output as "x@toc+4". We
21194 want "x+4@toc". */
21195 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21196 else
21197 output_addr_const (file, x);
21198 }
21199 return;
21200
21201 case '&':
21202 if (const char *name = get_some_local_dynamic_name ())
21203 assemble_name (file, name);
21204 else
21205 output_operand_lossage ("'%%&' used without any "
21206 "local dynamic TLS references");
21207 return;
21208
21209 default:
21210 output_operand_lossage ("invalid %%xn code");
21211 }
21212 }
21213 \f
21214 /* Print the address of an operand. */
21215
21216 void
21217 print_operand_address (FILE *file, rtx x)
21218 {
21219 if (REG_P (x))
21220 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21221 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21222 || GET_CODE (x) == LABEL_REF)
21223 {
21224 output_addr_const (file, x);
21225 if (small_data_operand (x, GET_MODE (x)))
21226 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21227 reg_names[SMALL_DATA_REG]);
21228 else
21229 gcc_assert (!TARGET_TOC);
21230 }
21231 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21232 && REG_P (XEXP (x, 1)))
21233 {
21234 if (REGNO (XEXP (x, 0)) == 0)
21235 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21236 reg_names[ REGNO (XEXP (x, 0)) ]);
21237 else
21238 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21239 reg_names[ REGNO (XEXP (x, 1)) ]);
21240 }
21241 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21242 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21243 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21244 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21245 #if TARGET_MACHO
21246 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21247 && CONSTANT_P (XEXP (x, 1)))
21248 {
21249 fprintf (file, "lo16(");
21250 output_addr_const (file, XEXP (x, 1));
21251 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21252 }
21253 #endif
21254 #if TARGET_ELF
21255 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21256 && CONSTANT_P (XEXP (x, 1)))
21257 {
21258 output_addr_const (file, XEXP (x, 1));
21259 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21260 }
21261 #endif
21262 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21263 {
21264 /* This hack along with a corresponding hack in
21265 rs6000_output_addr_const_extra arranges to output addends
21266 where the assembler expects to find them. eg.
21267 (lo_sum (reg 9)
21268 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21269 without this hack would be output as "x@toc+8@l(9)". We
21270 want "x+8@toc@l(9)". */
21271 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21272 if (GET_CODE (x) == LO_SUM)
21273 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21274 else
21275 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21276 }
21277 else
21278 output_addr_const (file, x);
21279 }
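/* Examples (illustrative, with the default bare register names):
   (reg 9) prints as "0(9)"; (plus (reg 9) (reg 10)) prints as "9,10",
   with the operands swapped when the first register is r0, since r0 in
   the RA slot of an address means literal zero; and
   (plus (reg 9) (const_int 16)) prints as "16(9)".  */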
21280 \f
21281 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21282
21283 static bool
21284 rs6000_output_addr_const_extra (FILE *file, rtx x)
21285 {
21286 if (GET_CODE (x) == UNSPEC)
21287 switch (XINT (x, 1))
21288 {
21289 case UNSPEC_TOCREL:
21290 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21291 && REG_P (XVECEXP (x, 0, 1))
21292 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21293 output_addr_const (file, XVECEXP (x, 0, 0));
21294 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21295 {
21296 if (INTVAL (tocrel_offset_oac) >= 0)
21297 fprintf (file, "+");
21298 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21299 }
21300 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21301 {
21302 putc ('-', file);
21303 assemble_name (file, toc_label_name);
21304 need_toc_init = 1;
21305 }
21306 else if (TARGET_ELF)
21307 fputs ("@toc", file);
21308 return true;
21309
21310 #if TARGET_MACHO
21311 case UNSPEC_MACHOPIC_OFFSET:
21312 output_addr_const (file, XVECEXP (x, 0, 0));
21313 putc ('-', file);
21314 machopic_output_function_base_name (file);
21315 return true;
21316 #endif
21317 }
21318 return false;
21319 }
21320 \f
21321 /* Target hook for assembling integer objects. The PowerPC version has
21322 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21323 is defined. It also needs to handle DI-mode objects on 64-bit
21324 targets. */
21325
21326 static bool
21327 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21328 {
21329 #ifdef RELOCATABLE_NEEDS_FIXUP
21330 /* Special handling for SI values. */
21331 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21332 {
21333 static int recurse = 0;
21334
21335 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21336 the .fixup section. Since the TOC section is already relocated, we
21337 don't need to mark it here. We used to skip the text section, but it
21338 should never be valid for relocated addresses to be placed in the text
21339 section. */
21340 if (DEFAULT_ABI == ABI_V4
21341 && (TARGET_RELOCATABLE || flag_pic > 1)
21342 && in_section != toc_section
21343 && !recurse
21344 && !CONST_SCALAR_INT_P (x)
21345 && CONSTANT_P (x))
21346 {
21347 char buf[256];
21348
21349 recurse = 1;
21350 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21351 fixuplabelno++;
21352 ASM_OUTPUT_LABEL (asm_out_file, buf);
21353 fprintf (asm_out_file, "\t.long\t(");
21354 output_addr_const (asm_out_file, x);
21355 fprintf (asm_out_file, ")@fixup\n");
21356 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21357 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21358 fprintf (asm_out_file, "\t.long\t");
21359 assemble_name (asm_out_file, buf);
21360 fprintf (asm_out_file, "\n\t.previous\n");
21361 recurse = 0;
21362 return true;
21363 }
21364 /* Remove initial .'s to turn a -mcall-aixdesc function
21365 address into the address of the descriptor, not the function
21366 itself. */
21367 else if (GET_CODE (x) == SYMBOL_REF
21368 && XSTR (x, 0)[0] == '.'
21369 && DEFAULT_ABI == ABI_AIX)
21370 {
21371 const char *name = XSTR (x, 0);
21372 while (*name == '.')
21373 name++;
21374
21375 fprintf (asm_out_file, "\t.long\t%s\n", name);
21376 return true;
21377 }
21378 }
21379 #endif /* RELOCATABLE_NEEDS_FIXUP */
21380 return default_assemble_integer (x, size, aligned_p);
21381 }
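/* Illustrative output for the -mrelocatable fixup path above:

	.LCP0:
		.long	(sym)@fixup
		.section ".fixup","aw"
		.align	2
		.long	.LCP0
		.previous

   The startup code walks the .fixup section and relocates each marked
   word at run time.  */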
21382
21383 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21384 /* Emit an assembler directive to set symbol visibility for DECL to
21385 VISIBILITY_TYPE. */
21386
21387 static void
21388 rs6000_assemble_visibility (tree decl, int vis)
21389 {
21390 if (TARGET_XCOFF)
21391 return;
21392
21393 /* Functions need to have their entry point symbol visibility set as
21394 well as their descriptor symbol visibility. */
21395 if (DEFAULT_ABI == ABI_AIX
21396 && DOT_SYMBOLS
21397 && TREE_CODE (decl) == FUNCTION_DECL)
21398 {
21399 static const char * const visibility_types[] = {
21400 NULL, "protected", "hidden", "internal"
21401 };
21402
21403 const char *name, *type;
21404
21405 name = ((* targetm.strip_name_encoding)
21406 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21407 type = visibility_types[vis];
21408
21409 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21410 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21411 }
21412 else
21413 default_assemble_visibility (decl, vis);
21414 }
21415 #endif
21416 \f
21417 enum rtx_code
21418 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21419 {
21420 /* Reversal of FP compares needs care -- an ordered compare
21421 becomes an unordered compare and vice versa. */
21422 if (mode == CCFPmode
21423 && (!flag_finite_math_only
21424 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21425 || code == UNEQ || code == LTGT))
21426 return reverse_condition_maybe_unordered (code);
21427 else
21428 return reverse_condition (code);
21429 }
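/* Example (illustrative): under CCFPmode, reversing GE yields UNLT rather
   than plain LT, so the reversed test is also true when the operands
   compare unordered; for integer CCmode, reverse_condition suffices and
   GE simply becomes LT.  */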
21430
21431 /* Generate a compare for CODE. Return a brand-new rtx that
21432 represents the result of the compare. */
21433
21434 static rtx
21435 rs6000_generate_compare (rtx cmp, machine_mode mode)
21436 {
21437 machine_mode comp_mode;
21438 rtx compare_result;
21439 enum rtx_code code = GET_CODE (cmp);
21440 rtx op0 = XEXP (cmp, 0);
21441 rtx op1 = XEXP (cmp, 1);
21442
21443 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21444 comp_mode = CCmode;
21445 else if (FLOAT_MODE_P (mode))
21446 comp_mode = CCFPmode;
21447 else if (code == GTU || code == LTU
21448 || code == GEU || code == LEU)
21449 comp_mode = CCUNSmode;
21450 else if ((code == EQ || code == NE)
21451 && unsigned_reg_p (op0)
21452 && (unsigned_reg_p (op1)
21453 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21454 /* These are unsigned values; perhaps there will be a later
21455 ordering compare that can be shared with this one. */
21456 comp_mode = CCUNSmode;
21457 else
21458 comp_mode = CCmode;
21459
21460 /* If we have an unsigned compare, make sure we don't have a signed value as
21461 an immediate. */
21462 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21463 && INTVAL (op1) < 0)
21464 {
21465 op0 = copy_rtx_if_shared (op0);
21466 op1 = force_reg (GET_MODE (op0), op1);
21467 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21468 }
21469
21470 /* First, the compare. */
21471 compare_result = gen_reg_rtx (comp_mode);
21472
21473 /* IEEE 128-bit support in VSX registers when we do not have hardware
21474 support. */
21475 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21476 {
21477 rtx libfunc = NULL_RTX;
21478 bool check_nan = false;
21479 rtx dest;
21480
21481 switch (code)
21482 {
21483 case EQ:
21484 case NE:
21485 libfunc = optab_libfunc (eq_optab, mode);
21486 break;
21487
21488 case GT:
21489 case GE:
21490 libfunc = optab_libfunc (ge_optab, mode);
21491 break;
21492
21493 case LT:
21494 case LE:
21495 libfunc = optab_libfunc (le_optab, mode);
21496 break;
21497
21498 case UNORDERED:
21499 case ORDERED:
21500 libfunc = optab_libfunc (unord_optab, mode);
21501 code = (code == UNORDERED) ? NE : EQ;
21502 break;
21503
21504 case UNGE:
21505 case UNGT:
21506 check_nan = true;
21507 libfunc = optab_libfunc (ge_optab, mode);
21508 code = (code == UNGE) ? GE : GT;
21509 break;
21510
21511 case UNLE:
21512 case UNLT:
21513 check_nan = true;
21514 libfunc = optab_libfunc (le_optab, mode);
21515 code = (code == UNLE) ? LE : LT;
21516 break;
21517
21518 case UNEQ:
21519 case LTGT:
21520 check_nan = true;
21521 libfunc = optab_libfunc (eq_optab, mode);
21522 code = (code == UNEQ) ? EQ : NE;
21523 break;
21524
21525 default:
21526 gcc_unreachable ();
21527 }
21528
21529 gcc_assert (libfunc);
21530
21531 if (!check_nan)
21532 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21533 SImode, op0, mode, op1, mode);
21534
21535 /* The library signals an exception for signalling NaNs, so we need to
21536 handle isgreater, etc. by first checking isordered. */
21537 else
21538 {
21539 rtx ne_rtx, normal_dest, unord_dest;
21540 rtx unord_func = optab_libfunc (unord_optab, mode);
21541 rtx join_label = gen_label_rtx ();
21542 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21543 rtx unord_cmp = gen_reg_rtx (comp_mode);
21544
21545
21546 /* Test for either value being a NaN. */
21547 gcc_assert (unord_func);
21548 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21549 SImode, op0, mode, op1, mode);
21550
21551 /* Set DEST to one (so the final DEST != 0 test yields true) if either
21552 value is a NaN, and jump to the join label. */
21553 dest = gen_reg_rtx (SImode);
21554 emit_move_insn (dest, const1_rtx);
21555 emit_insn (gen_rtx_SET (unord_cmp,
21556 gen_rtx_COMPARE (comp_mode, unord_dest,
21557 const0_rtx)));
21558
21559 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21560 emit_jump_insn (gen_rtx_SET (pc_rtx,
21561 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21562 join_ref,
21563 pc_rtx)));
21564
21565 /* Do the normal comparison, knowing that the values are not
21566 NaNs. */
21567 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21568 SImode, op0, mode, op1, mode);
21569
21570 emit_insn (gen_cstoresi4 (dest,
21571 gen_rtx_fmt_ee (code, SImode, normal_dest,
21572 const0_rtx),
21573 normal_dest, const0_rtx));
21574
21575 /* Join NaN and non-NaN paths. Compare dest against 0. */
21576 emit_label (join_label);
21577 code = NE;
21578 }
21579
21580 emit_insn (gen_rtx_SET (compare_result,
21581 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21582 }
21583
21584 else
21585 {
21586 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21587 CLOBBERs to match cmptf_internal2 pattern. */
21588 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21589 && FLOAT128_IBM_P (GET_MODE (op0))
21590 && TARGET_HARD_FLOAT)
21591 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21592 gen_rtvec (10,
21593 gen_rtx_SET (compare_result,
21594 gen_rtx_COMPARE (comp_mode, op0, op1)),
21595 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21596 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21597 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21598 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21599 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21600 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21601 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21602 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21603 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21604 else if (GET_CODE (op1) == UNSPEC
21605 && XINT (op1, 1) == UNSPEC_SP_TEST)
21606 {
21607 rtx op1b = XVECEXP (op1, 0, 0);
21608 comp_mode = CCEQmode;
21609 compare_result = gen_reg_rtx (CCEQmode);
21610 if (TARGET_64BIT)
21611 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21612 else
21613 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21614 }
21615 else
21616 emit_insn (gen_rtx_SET (compare_result,
21617 gen_rtx_COMPARE (comp_mode, op0, op1)));
21618 }
21619
21620 /* Some kinds of FP comparisons need an OR operation;
21621 under flag_finite_math_only we don't bother. */
21622 if (FLOAT_MODE_P (mode)
21623 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21624 && !flag_finite_math_only
21625 && (code == LE || code == GE
21626 || code == UNEQ || code == LTGT
21627 || code == UNGT || code == UNLT))
21628 {
21629 enum rtx_code or1, or2;
21630 rtx or1_rtx, or2_rtx, compare2_rtx;
21631 rtx or_result = gen_reg_rtx (CCEQmode);
21632
21633 switch (code)
21634 {
21635 case LE: or1 = LT; or2 = EQ; break;
21636 case GE: or1 = GT; or2 = EQ; break;
21637 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21638 case LTGT: or1 = LT; or2 = GT; break;
21639 case UNGT: or1 = UNORDERED; or2 = GT; break;
21640 case UNLT: or1 = UNORDERED; or2 = LT; break;
21641 default: gcc_unreachable ();
21642 }
21643 validate_condition_mode (or1, comp_mode);
21644 validate_condition_mode (or2, comp_mode);
21645 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21646 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21647 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21648 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21649 const_true_rtx);
21650 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21651
21652 compare_result = or_result;
21653 code = EQ;
21654 }
21655
21656 validate_condition_mode (code, GET_MODE (compare_result));
21657
21658 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
21659 }
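/* Note on the OR path above (illustrative): a floating-point LE cannot be
   tested as "not GT", since that would also be true for unordered
   operands.  Instead the LT and EQ bits are OR'ed together (a cror) into
   a CCEQ register and the final test becomes a simple EQ on that bit.  */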
21660
21661 \f
21662 /* Return the diagnostic message string if the binary operation OP is
21663 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21664
21665 static const char*
21666 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21667 const_tree type1,
21668 const_tree type2)
21669 {
21670 machine_mode mode1 = TYPE_MODE (type1);
21671 machine_mode mode2 = TYPE_MODE (type2);
21672
21673 /* For complex modes, use the inner type. */
21674 if (COMPLEX_MODE_P (mode1))
21675 mode1 = GET_MODE_INNER (mode1);
21676
21677 if (COMPLEX_MODE_P (mode2))
21678 mode2 = GET_MODE_INNER (mode2);
21679
21680 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21681 double to intermix unless -mfloat128-convert. */
21682 if (mode1 == mode2)
21683 return NULL;
21684
21685 if (!TARGET_FLOAT128_CVT)
21686 {
21687 if ((mode1 == KFmode && mode2 == IFmode)
21688 || (mode1 == IFmode && mode2 == KFmode))
21689 return N_("__float128 and __ibm128 cannot be used in the same "
21690 "expression");
21691
21692 if (TARGET_IEEEQUAD
21693 && ((mode1 == IFmode && mode2 == TFmode)
21694 || (mode1 == TFmode && mode2 == IFmode)))
21695 return N_("__ibm128 and long double cannot be used in the same "
21696 "expression");
21697
21698 if (!TARGET_IEEEQUAD
21699 && ((mode1 == KFmode && mode2 == TFmode)
21700 || (mode1 == TFmode && mode2 == KFmode)))
21701 return N_("__float128 and long double cannot be used in the same "
21702 "expression");
21703 }
21704
21705 return NULL;
21706 }
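/* For example (illustrative): with IBM extended long double,
   "__float128 x; long double y; ... x + y ..." is diagnosed with the
   message above unless -mfloat128-convert is given, since KFmode and
   TFmode are then distinct formats.  */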
21707
21708 \f
21709 /* Expand floating point conversion to/from __float128 and __ibm128. */
21710
21711 void
21712 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21713 {
21714 machine_mode dest_mode = GET_MODE (dest);
21715 machine_mode src_mode = GET_MODE (src);
21716 convert_optab cvt = unknown_optab;
21717 bool do_move = false;
21718 rtx libfunc = NULL_RTX;
21719 rtx dest2;
21720 typedef rtx (*rtx_2func_t) (rtx, rtx);
21721 rtx_2func_t hw_convert = (rtx_2func_t)0;
21722 size_t kf_or_tf;
21723
21724 struct hw_conv_t {
21725 rtx_2func_t from_df;
21726 rtx_2func_t from_sf;
21727 rtx_2func_t from_si_sign;
21728 rtx_2func_t from_si_uns;
21729 rtx_2func_t from_di_sign;
21730 rtx_2func_t from_di_uns;
21731 rtx_2func_t to_df;
21732 rtx_2func_t to_sf;
21733 rtx_2func_t to_si_sign;
21734 rtx_2func_t to_si_uns;
21735 rtx_2func_t to_di_sign;
21736 rtx_2func_t to_di_uns;
21737 } hw_conversions[2] = {
21738 /* Conversions to/from KFmode. */
21739 {
21740 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21741 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21742 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21743 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21744 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21745 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21746 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21747 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21748 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21749 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21750 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21751 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21752 },
21753
21754 /* Conversions to/from TFmode. */
21755 {
21756 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21757 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21758 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21759 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21760 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21761 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21762 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21763 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21764 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21765 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21766 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21767 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21768 },
21769 };
21770
21771 if (dest_mode == src_mode)
21772 gcc_unreachable ();
21773
21774 /* Eliminate memory operations. */
21775 if (MEM_P (src))
21776 src = force_reg (src_mode, src);
21777
21778 if (MEM_P (dest))
21779 {
21780 rtx tmp = gen_reg_rtx (dest_mode);
21781 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21782 rs6000_emit_move (dest, tmp, dest_mode);
21783 return;
21784 }
21785
21786 /* Convert to IEEE 128-bit floating point. */
21787 if (FLOAT128_IEEE_P (dest_mode))
21788 {
21789 if (dest_mode == KFmode)
21790 kf_or_tf = 0;
21791 else if (dest_mode == TFmode)
21792 kf_or_tf = 1;
21793 else
21794 gcc_unreachable ();
21795
21796 switch (src_mode)
21797 {
21798 case E_DFmode:
21799 cvt = sext_optab;
21800 hw_convert = hw_conversions[kf_or_tf].from_df;
21801 break;
21802
21803 case E_SFmode:
21804 cvt = sext_optab;
21805 hw_convert = hw_conversions[kf_or_tf].from_sf;
21806 break;
21807
21808 case E_KFmode:
21809 case E_IFmode:
21810 case E_TFmode:
21811 if (FLOAT128_IBM_P (src_mode))
21812 cvt = sext_optab;
21813 else
21814 do_move = true;
21815 break;
21816
21817 case E_SImode:
21818 if (unsigned_p)
21819 {
21820 cvt = ufloat_optab;
21821 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
21822 }
21823 else
21824 {
21825 cvt = sfloat_optab;
21826 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
21827 }
21828 break;
21829
21830 case E_DImode:
21831 if (unsigned_p)
21832 {
21833 cvt = ufloat_optab;
21834 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
21835 }
21836 else
21837 {
21838 cvt = sfloat_optab;
21839 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
21840 }
21841 break;
21842
21843 default:
21844 gcc_unreachable ();
21845 }
21846 }
21847
21848 /* Convert from IEEE 128-bit floating point. */
21849 else if (FLOAT128_IEEE_P (src_mode))
21850 {
21851 if (src_mode == KFmode)
21852 kf_or_tf = 0;
21853 else if (src_mode == TFmode)
21854 kf_or_tf = 1;
21855 else
21856 gcc_unreachable ();
21857
21858 switch (dest_mode)
21859 {
21860 case E_DFmode:
21861 cvt = trunc_optab;
21862 hw_convert = hw_conversions[kf_or_tf].to_df;
21863 break;
21864
21865 case E_SFmode:
21866 cvt = trunc_optab;
21867 hw_convert = hw_conversions[kf_or_tf].to_sf;
21868 break;
21869
21870 case E_KFmode:
21871 case E_IFmode:
21872 case E_TFmode:
21873 if (FLOAT128_IBM_P (dest_mode))
21874 cvt = trunc_optab;
21875 else
21876 do_move = true;
21877 break;
21878
21879 case E_SImode:
21880 if (unsigned_p)
21881 {
21882 cvt = ufix_optab;
21883 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
21884 }
21885 else
21886 {
21887 cvt = sfix_optab;
21888 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
21889 }
21890 break;
21891
21892 case E_DImode:
21893 if (unsigned_p)
21894 {
21895 cvt = ufix_optab;
21896 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
21897 }
21898 else
21899 {
21900 cvt = sfix_optab;
21901 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
21902 }
21903 break;
21904
21905 default:
21906 gcc_unreachable ();
21907 }
21908 }
21909
21910 /* Both IBM format. */
21911 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
21912 do_move = true;
21913
21914 else
21915 gcc_unreachable ();
21916
21917 /* Handle conversion between TFmode/KFmode/IFmode. */
21918 if (do_move)
21919 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
21920
21921 /* Handle conversion if we have hardware support. */
21922 else if (TARGET_FLOAT128_HW && hw_convert)
21923 emit_insn ((hw_convert) (dest, src));
21924
21925 /* Call an external function to do the conversion. */
21926 else if (cvt != unknown_optab)
21927 {
21928 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
21929 gcc_assert (libfunc != NULL_RTX);
21930
21931 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
21932 src, src_mode);
21933
21934 gcc_assert (dest2 != NULL_RTX);
21935 if (!rtx_equal_p (dest, dest2))
21936 emit_move_insn (dest, dest2);
21937 }
21938
21939 else
21940 gcc_unreachable ();
21941
21942 return;
21943 }
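/* Usage sketch (illustrative): converting KFmode to unsigned DImode
   without hardware IEEE 128-bit support takes the cvt = ufix_optab path
   and calls the libgcc routine registered for that conversion
   (conventionally __fixunskfdi); with -mfloat128-hardware the same
   conversion is the single insn emitted by gen_fixuns_kfdi2_hw from the
   table above.  */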
21944
21945 \f
21946 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
21947 can be used as that dest register. Return the dest register. */
21948
21949 rtx
21950 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
21951 {
21952 if (op2 == const0_rtx)
21953 return op1;
21954
21955 if (GET_CODE (scratch) == SCRATCH)
21956 scratch = gen_reg_rtx (mode);
21957
21958 if (logical_operand (op2, mode))
21959 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
21960 else
21961 emit_insn (gen_rtx_SET (scratch,
21962 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
21963
21964 return scratch;
21965 }
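/* Minimal sketch of the transformation (illustrative): for op1 == 0xff
   this emits (set scratch (xor op1 0xff)), turning the test into
   "scratch == 0"; constants that are not valid logical operands use
   (plus op1 (neg op2)) instead, with the same zero test.  */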
21966
21967 void
21968 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
21969 {
21970 rtx condition_rtx;
21971 machine_mode op_mode;
21972 enum rtx_code cond_code;
21973 rtx result = operands[0];
21974
21975 condition_rtx = rs6000_generate_compare (operands[1], mode);
21976 cond_code = GET_CODE (condition_rtx);
21977
21978 if (cond_code == NE
21979 || cond_code == GE || cond_code == LE
21980 || cond_code == GEU || cond_code == LEU
21981 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
21982 {
21983 rtx not_result = gen_reg_rtx (CCEQmode);
21984 rtx not_op, rev_cond_rtx;
21985 machine_mode cc_mode;
21986
21987 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
21988
21989 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
21990 SImode, XEXP (condition_rtx, 0), const0_rtx);
21991 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
21992 emit_insn (gen_rtx_SET (not_result, not_op));
21993 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
21994 }
21995
21996 op_mode = GET_MODE (XEXP (operands[1], 0));
21997 if (op_mode == VOIDmode)
21998 op_mode = GET_MODE (XEXP (operands[1], 1));
21999
22000 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22001 {
22002 PUT_MODE (condition_rtx, DImode);
22003 convert_move (result, condition_rtx, 0);
22004 }
22005 else
22006 {
22007 PUT_MODE (condition_rtx, SImode);
22008 emit_insn (gen_rtx_SET (result, condition_rtx));
22009 }
22010 }
22011
22012 /* Emit a conditional branch; OPERANDS[0] is the comparison and OPERANDS[3] is the label to branch to. */
22013
22014 void
22015 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22016 {
22017 rtx condition_rtx, loc_ref;
22018
22019 condition_rtx = rs6000_generate_compare (operands[0], mode);
22020 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22021 emit_jump_insn (gen_rtx_SET (pc_rtx,
22022 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22023 loc_ref, pc_rtx)));
22024 }
22025
22026 /* Return the string to output a conditional branch to LABEL, which is
22027 the operand template of the label, or NULL if the branch is really a
22028 conditional return.
22029
22030 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22031 condition code register and its mode specifies what kind of
22032 comparison we made.
22033
22034 REVERSED is nonzero if we should reverse the sense of the comparison.
22035
22036 INSN is the insn. */
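/* For example, an EQ test on cr0 predicted taken might come out as
   "beq+ 0,L42" (condition register fields print as bare numbers with
   the default reg_names), while a target out of reach of the short
   conditional displacement is emitted as the reversed test over an
   unconditional branch: "bne 0,$+8" followed by "b L42". */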
22037
22038 char *
22039 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22040 {
22041 static char string[64];
22042 enum rtx_code code = GET_CODE (op);
22043 rtx cc_reg = XEXP (op, 0);
22044 machine_mode mode = GET_MODE (cc_reg);
22045 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22046 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22047 int really_reversed = reversed ^ need_longbranch;
22048 char *s = string;
22049 const char *ccode;
22050 const char *pred;
22051 rtx note;
22052
22053 validate_condition_mode (code, mode);
22054
22055 /* Work out which way this really branches. We could always use
22056 reverse_condition_maybe_unordered here, but handling the integer
22057 case separately makes the resulting assembler clearer. */
22058 if (really_reversed)
22059 {
22060 /* Reversal of FP compares takes care -- an ordered compare
22061 becomes an unordered compare and vice versa. */
22062 if (mode == CCFPmode)
22063 code = reverse_condition_maybe_unordered (code);
22064 else
22065 code = reverse_condition (code);
22066 }
22067
22068 switch (code)
22069 {
22070 /* Not all of these are actually distinct opcodes, but
22071 we distinguish them for clarity of the resulting assembler. */
22072 case NE: case LTGT:
22073 ccode = "ne"; break;
22074 case EQ: case UNEQ:
22075 ccode = "eq"; break;
22076 case GE: case GEU:
22077 ccode = "ge"; break;
22078 case GT: case GTU: case UNGT:
22079 ccode = "gt"; break;
22080 case LE: case LEU:
22081 ccode = "le"; break;
22082 case LT: case LTU: case UNLT:
22083 ccode = "lt"; break;
22084 case UNORDERED: ccode = "un"; break;
22085 case ORDERED: ccode = "nu"; break;
22086 case UNGE: ccode = "nl"; break;
22087 case UNLE: ccode = "ng"; break;
22088 default:
22089 gcc_unreachable ();
22090 }
22091
22092 /* Maybe we have a guess as to how likely the branch is. */
22093 pred = "";
22094 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22095 if (note != NULL_RTX)
22096 {
22097 /* PROB is the difference from 50%. */
22098 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22099 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22100
22101 /* Only hint for highly probable/improbable branches on newer cpus when
22102 we have real profile data, as static prediction overrides processor
22103 dynamic prediction. For older cpus we may as well always hint, but
22104 assume not taken for branches that are very close to 50% as a
22105 mispredicted taken branch is more expensive than a
22106 mispredicted not-taken branch. */
22107 if (rs6000_always_hint
22108 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22109 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22110 && br_prob_note_reliable_p (note)))
22111 {
22112 if (abs (prob) > REG_BR_PROB_BASE / 20
22113 && ((prob > 0) ^ need_longbranch))
22114 pred = "+";
22115 else
22116 pred = "-";
22117 }
22118 }
22119
22120 if (label == NULL)
22121 s += sprintf (s, "b%slr%s ", ccode, pred);
22122 else
22123 s += sprintf (s, "b%s%s ", ccode, pred);
22124
22125 /* We need to escape any '%' characters in the reg_names string.
22126 Assume they'd only be the first character.... */
22127 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22128 *s++ = '%';
22129 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22130
22131 if (label != NULL)
22132 {
22133 /* If the branch distance was too far, we may have to use an
22134 unconditional branch to go the distance. */
22135 if (need_longbranch)
22136 s += sprintf (s, ",$+8\n\tb %s", label);
22137 else
22138 s += sprintf (s, ",%s", label);
22139 }
22140
22141 return string;
22142 }
22143
22144 /* Return insn for VSX or Altivec comparisons. */
22145
22146 static rtx
22147 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22148 {
22149 rtx mask;
22150 machine_mode mode = GET_MODE (op0);
22151
22152 switch (code)
22153 {
22154 default:
22155 break;
22156
22157 case GE:
22158 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22159 return NULL_RTX;
22160 /* FALLTHRU */
22161
22162 case EQ:
22163 case GT:
22164 case GTU:
22165 case ORDERED:
22166 case UNORDERED:
22167 case UNEQ:
22168 case LTGT:
22169 mask = gen_reg_rtx (mode);
22170 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22171 return mask;
22172 }
22173
22174 return NULL_RTX;
22175 }
22176
22177 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22178 DMODE is the expected destination mode. This is a recursive function. */
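/* For example, LT is handled as GT with the operands swapped, an
   integer GE as GT OR'ed with EQ, and NE as the complement of an EQ
   compare. */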
22179
22180 static rtx
22181 rs6000_emit_vector_compare (enum rtx_code rcode,
22182 rtx op0, rtx op1,
22183 machine_mode dmode)
22184 {
22185 rtx mask;
22186 bool swap_operands = false;
22187 bool try_again = false;
22188
22189 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22190 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22191
22192 /* See if the comparison works as is. */
22193 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22194 if (mask)
22195 return mask;
22196
22197 switch (rcode)
22198 {
22199 case LT:
22200 rcode = GT;
22201 swap_operands = true;
22202 try_again = true;
22203 break;
22204 case LTU:
22205 rcode = GTU;
22206 swap_operands = true;
22207 try_again = true;
22208 break;
22209 case NE:
22210 case UNLE:
22211 case UNLT:
22212 case UNGE:
22213 case UNGT:
22214 /* Invert condition and try again.
22215 e.g., A != B becomes ~(A==B). */
22216 {
22217 enum rtx_code rev_code;
22218 enum insn_code nor_code;
22219 rtx mask2;
22220
22221 rev_code = reverse_condition_maybe_unordered (rcode);
22222 if (rev_code == UNKNOWN)
22223 return NULL_RTX;
22224
22225 nor_code = optab_handler (one_cmpl_optab, dmode);
22226 if (nor_code == CODE_FOR_nothing)
22227 return NULL_RTX;
22228
22229 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22230 if (!mask2)
22231 return NULL_RTX;
22232
22233 mask = gen_reg_rtx (dmode);
22234 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22235 return mask;
22236 }
22237 break;
22238 case GE:
22239 case GEU:
22240 case LE:
22241 case LEU:
22242 /* Try GT/GTU/LT/LTU OR EQ. */
22243 {
22244 rtx c_rtx, eq_rtx;
22245 enum insn_code ior_code;
22246 enum rtx_code new_code;
22247
22248 switch (rcode)
22249 {
22250 case GE:
22251 new_code = GT;
22252 break;
22253
22254 case GEU:
22255 new_code = GTU;
22256 break;
22257
22258 case LE:
22259 new_code = LT;
22260 break;
22261
22262 case LEU:
22263 new_code = LTU;
22264 break;
22265
22266 default:
22267 gcc_unreachable ();
22268 }
22269
22270 ior_code = optab_handler (ior_optab, dmode);
22271 if (ior_code == CODE_FOR_nothing)
22272 return NULL_RTX;
22273
22274 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22275 if (!c_rtx)
22276 return NULL_RTX;
22277
22278 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22279 if (!eq_rtx)
22280 return NULL_RTX;
22281
22282 mask = gen_reg_rtx (dmode);
22283 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22284 return mask;
22285 }
22286 break;
22287 default:
22288 return NULL_RTX;
22289 }
22290
22291 if (try_again)
22292 {
22293 if (swap_operands)
22294 std::swap (op0, op1);
22295
22296 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22297 if (mask)
22298 return mask;
22299 }
22300
22301 /* You only get two chances. */
22302 return NULL_RTX;
22303 }
22304
22305 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22306 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22307 operands for the relation operation COND. */
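/* The compare produces a mask of all-ones or all-zeros per element, so
   the select itself is a per-element (mask ? op_true : op_false),
   emitted as an IF_THEN_ELSE intended for the vsel/xxsel patterns. */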
22308
22309 int
22310 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22311 rtx cond, rtx cc_op0, rtx cc_op1)
22312 {
22313 machine_mode dest_mode = GET_MODE (dest);
22314 machine_mode mask_mode = GET_MODE (cc_op0);
22315 enum rtx_code rcode = GET_CODE (cond);
22316 machine_mode cc_mode = CCmode;
22317 rtx mask;
22318 rtx cond2;
22319 bool invert_move = false;
22320
22321 if (VECTOR_UNIT_NONE_P (dest_mode))
22322 return 0;
22323
22324 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22325 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22326
22327 switch (rcode)
22328 {
22329 /* Swap operands if we can, and fall back to doing the operation as
22330 specified, and doing a NOR to invert the test. */
22331 case NE:
22332 case UNLE:
22333 case UNLT:
22334 case UNGE:
22335 case UNGT:
22336 /* Invert condition and try again.
22337 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22338 invert_move = true;
22339 rcode = reverse_condition_maybe_unordered (rcode);
22340 if (rcode == UNKNOWN)
22341 return 0;
22342 break;
22343
22344 case GE:
22345 case LE:
22346 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22347 {
22348 /* Invert condition to avoid compound test. */
22349 invert_move = true;
22350 rcode = reverse_condition (rcode);
22351 }
22352 break;
22353
22354 case GTU:
22355 case GEU:
22356 case LTU:
22357 case LEU:
22358 /* Mark unsigned tests with CCUNSmode. */
22359 cc_mode = CCUNSmode;
22360
22361 /* Invert condition to avoid compound test if necessary. */
22362 if (rcode == GEU || rcode == LEU)
22363 {
22364 invert_move = true;
22365 rcode = reverse_condition (rcode);
22366 }
22367 break;
22368
22369 default:
22370 break;
22371 }
22372
22373 /* Get the vector mask for the given relational operations. */
22374 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22375
22376 if (!mask)
22377 return 0;
22378
22379 if (invert_move)
22380 std::swap (op_true, op_false);
22381
22382 /* The comparison generates a -1/0 mask; optimize when the arms are the constants -1 and 0. */
22383 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22384 && (GET_CODE (op_true) == CONST_VECTOR
22385 || GET_CODE (op_false) == CONST_VECTOR))
22386 {
22387 rtx constant_0 = CONST0_RTX (dest_mode);
22388 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22389
22390 if (op_true == constant_m1 && op_false == constant_0)
22391 {
22392 emit_move_insn (dest, mask);
22393 return 1;
22394 }
22395
22396 else if (op_true == constant_0 && op_false == constant_m1)
22397 {
22398 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22399 return 1;
22400 }
22401
22402 /* If we can't use the vector comparison directly, perhaps we can use
22403 the mask for the true or false fields, instead of loading up a
22404 constant. */
22405 if (op_true == constant_m1)
22406 op_true = mask;
22407
22408 if (op_false == constant_0)
22409 op_false = mask;
22410 }
22411
22412 if (!REG_P (op_true) && !SUBREG_P (op_true))
22413 op_true = force_reg (dest_mode, op_true);
22414
22415 if (!REG_P (op_false) && !SUBREG_P (op_false))
22416 op_false = force_reg (dest_mode, op_false);
22417
22418 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22419 CONST0_RTX (dest_mode));
22420 emit_insn (gen_rtx_SET (dest,
22421 gen_rtx_IF_THEN_ELSE (dest_mode,
22422 cond2,
22423 op_true,
22424 op_false)));
22425 return 1;
22426 }
22427
22428 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22429 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of
22430 the last comparison is nonzero/true, FALSE_COND if it is zero/false. Return
22431 0 if the hardware has no such operation. */
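/* For example, (a >= b ? a : b) on DFmode values can be emitted as a
   single xsmaxcdp, and (a >= b ? b : a) as a single xsmincdp. */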
22432
22433 static int
22434 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22435 {
22436 enum rtx_code code = GET_CODE (op);
22437 rtx op0 = XEXP (op, 0);
22438 rtx op1 = XEXP (op, 1);
22439 machine_mode compare_mode = GET_MODE (op0);
22440 machine_mode result_mode = GET_MODE (dest);
22441 bool max_p = false;
22442
22443 if (result_mode != compare_mode)
22444 return 0;
22445
22446 if (code == GE || code == GT)
22447 max_p = true;
22448 else if (code == LE || code == LT)
22449 max_p = false;
22450 else
22451 return 0;
22452
22453 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22454 ;
22455
22456 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22457 max_p = !max_p;
22458
22459 else
22460 return 0;
22461
22462 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22463 return 1;
22464 }
22465
22466 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22467 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
22468 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22469 zero/false. Return 0 if the hardware has no such operation. */
22470
22471 static int
22472 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22473 {
22474 enum rtx_code code = GET_CODE (op);
22475 rtx op0 = XEXP (op, 0);
22476 rtx op1 = XEXP (op, 1);
22477 machine_mode result_mode = GET_MODE (dest);
22478 rtx compare_rtx;
22479 rtx cmove_rtx;
22480 rtx clobber_rtx;
22481
22482 if (!can_create_pseudo_p ())
22483 return 0;
22484
22485 switch (code)
22486 {
22487 case EQ:
22488 case GE:
22489 case GT:
22490 break;
22491
22492 case NE:
22493 case LT:
22494 case LE:
22495 code = swap_condition (code);
22496 std::swap (op0, op1);
22497 break;
22498
22499 default:
22500 return 0;
22501 }
22502
22503 /* Generate: [(parallel [(set (dest)
22504 (if_then_else (op (cmp1) (cmp2))
22505 (true)
22506 (false)))
22507 (clobber (scratch))])]. */
22508
22509 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22510 cmove_rtx = gen_rtx_SET (dest,
22511 gen_rtx_IF_THEN_ELSE (result_mode,
22512 compare_rtx,
22513 true_cond,
22514 false_cond));
22515
22516 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22517 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22518 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22519
22520 return 1;
22521 }
22522
22523 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to
22524 the operands of the last comparison is nonzero/true, FALSE_COND if
22525 it is zero/false. Return 0 if the hardware has no such operation. */
22526
22527 int
22528 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22529 {
22530 enum rtx_code code = GET_CODE (op);
22531 rtx op0 = XEXP (op, 0);
22532 rtx op1 = XEXP (op, 1);
22533 machine_mode compare_mode = GET_MODE (op0);
22534 machine_mode result_mode = GET_MODE (dest);
22535 rtx temp;
22536 bool is_against_zero;
22537
22538 /* These modes should always match. */
22539 if (GET_MODE (op1) != compare_mode
22540 /* In the isel case however, we can use a compare immediate, so
22541 op1 may be a small constant. */
22542 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22543 return 0;
22544 if (GET_MODE (true_cond) != result_mode)
22545 return 0;
22546 if (GET_MODE (false_cond) != result_mode)
22547 return 0;
22548
22549 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22550 if (TARGET_P9_MINMAX
22551 && (compare_mode == SFmode || compare_mode == DFmode)
22552 && (result_mode == SFmode || result_mode == DFmode))
22553 {
22554 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22555 return 1;
22556
22557 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22558 return 1;
22559 }
22560
22561 /* Don't allow using floating point comparisons for integer results for
22562 now. */
22563 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22564 return 0;
22565
22566 /* First, work out if the hardware can do this at all, or
22567 if it's too slow.... */
22568 if (!FLOAT_MODE_P (compare_mode))
22569 {
22570 if (TARGET_ISEL)
22571 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22572 return 0;
22573 }
22574
22575 is_against_zero = op1 == CONST0_RTX (compare_mode);
22576
22577 /* A floating-point subtract might overflow, underflow, or produce
22578 an inexact result, thus changing the floating-point flags, so it
22579 can't be generated if we care about that. It's safe if one side
22580 of the construct is zero, since then no subtract will be
22581 generated. */
22582 if (SCALAR_FLOAT_MODE_P (compare_mode)
22583 && flag_trapping_math && ! is_against_zero)
22584 return 0;
22585
22586 /* Eliminate half of the comparisons by reversing the condition and
22587 swapping the arms; this makes the remaining code simpler. */
22588 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22589 || code == LTGT || code == LT || code == UNLE)
22590 {
22591 code = reverse_condition_maybe_unordered (code);
22592 temp = true_cond;
22593 true_cond = false_cond;
22594 false_cond = temp;
22595 }
22596
22597 /* UNEQ and LTGT take four instructions for a comparison with zero,
22598 so it'll probably be faster to use a branch here too. */
22599 if (code == UNEQ && HONOR_NANS (compare_mode))
22600 return 0;
22601
22602 /* We're going to try to implement comparisons by performing
22603 a subtract, then comparing against zero. Unfortunately,
22604 Inf - Inf is NaN which is not zero, and so if we don't
22605 know that the operand is finite and the comparison
22606 would treat EQ differently from UNORDERED, we can't do it. */
22607 if (HONOR_INFINITIES (compare_mode)
22608 && code != GT && code != UNGE
22609 && (GET_CODE (op1) != CONST_DOUBLE
22610 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22611 /* Constructs of the form (a OP b ? a : b) are safe. */
22612 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22613 || (! rtx_equal_p (op0, true_cond)
22614 && ! rtx_equal_p (op1, true_cond))))
22615 return 0;
22616
22617 /* At this point we know we can use fsel. */
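/* fsel computes DEST = (OP0 >= 0.0 ? TRUE_COND : FALSE_COND), so the
   code below massages everything into a GE comparison against zero,
   negating OP0 or taking its absolute value as needed. */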
22618
22619 /* Reduce the comparison to a comparison against zero. */
22620 if (! is_against_zero)
22621 {
22622 temp = gen_reg_rtx (compare_mode);
22623 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22624 op0 = temp;
22625 op1 = CONST0_RTX (compare_mode);
22626 }
22627
22628 /* If we don't care about NaNs we can reduce some of the comparisons
22629 down to faster ones. */
22630 if (! HONOR_NANS (compare_mode))
22631 switch (code)
22632 {
22633 case GT:
22634 code = LE;
22635 temp = true_cond;
22636 true_cond = false_cond;
22637 false_cond = temp;
22638 break;
22639 case UNGE:
22640 code = GE;
22641 break;
22642 case UNEQ:
22643 code = EQ;
22644 break;
22645 default:
22646 break;
22647 }
22648
22649 /* Now, reduce everything down to a GE. */
22650 switch (code)
22651 {
22652 case GE:
22653 break;
22654
22655 case LE:
22656 temp = gen_reg_rtx (compare_mode);
22657 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22658 op0 = temp;
22659 break;
22660
22661 case ORDERED:
22662 temp = gen_reg_rtx (compare_mode);
22663 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22664 op0 = temp;
22665 break;
22666
22667 case EQ:
22668 temp = gen_reg_rtx (compare_mode);
22669 emit_insn (gen_rtx_SET (temp,
22670 gen_rtx_NEG (compare_mode,
22671 gen_rtx_ABS (compare_mode, op0))));
22672 op0 = temp;
22673 break;
22674
22675 case UNGE:
22676 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22677 temp = gen_reg_rtx (result_mode);
22678 emit_insn (gen_rtx_SET (temp,
22679 gen_rtx_IF_THEN_ELSE (result_mode,
22680 gen_rtx_GE (VOIDmode,
22681 op0, op1),
22682 true_cond, false_cond)));
22683 false_cond = true_cond;
22684 true_cond = temp;
22685
22686 temp = gen_reg_rtx (compare_mode);
22687 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22688 op0 = temp;
22689 break;
22690
22691 case GT:
22692 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22693 temp = gen_reg_rtx (result_mode);
22694 emit_insn (gen_rtx_SET (temp,
22695 gen_rtx_IF_THEN_ELSE (result_mode,
22696 gen_rtx_GE (VOIDmode,
22697 op0, op1),
22698 true_cond, false_cond)));
22699 true_cond = false_cond;
22700 false_cond = temp;
22701
22702 temp = gen_reg_rtx (compare_mode);
22703 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22704 op0 = temp;
22705 break;
22706
22707 default:
22708 gcc_unreachable ();
22709 }
22710
22711 emit_insn (gen_rtx_SET (dest,
22712 gen_rtx_IF_THEN_ELSE (result_mode,
22713 gen_rtx_GE (VOIDmode,
22714 op0, op1),
22715 true_cond, false_cond)));
22716 return 1;
22717 }
22718
22719 /* Same as above, but for ints (isel). */
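/* isel computes DEST = (CR bit ? TRUE_COND : FALSE_COND) and encodes
   r0 in the TRUE_COND slot as the constant zero, which is why
   TRUE_COND below is only forced into a register when it is not
   const0_rtx. */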
22720
22721 int
22722 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22723 {
22724 rtx condition_rtx, cr;
22725 machine_mode mode = GET_MODE (dest);
22726 enum rtx_code cond_code;
22727 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
22728 bool signedp;
22729
22730 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
22731 return 0;
22732
22733 /* We still have to do the compare, because isel doesn't do a
22734 compare; it just looks at the CRx bits set by a previous compare
22735 instruction. */
22736 condition_rtx = rs6000_generate_compare (op, mode);
22737 cond_code = GET_CODE (condition_rtx);
22738 cr = XEXP (condition_rtx, 0);
22739 signedp = GET_MODE (cr) == CCmode;
22740
22741 isel_func = (mode == SImode
22742 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
22743 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
22744
22745 switch (cond_code)
22746 {
22747 case LT: case GT: case LTU: case GTU: case EQ:
22748 /* isel handles these directly. */
22749 break;
22750
22751 default:
22752 /* We need to swap the sense of the comparison. */
22753 {
22754 std::swap (false_cond, true_cond);
22755 PUT_CODE (condition_rtx, reverse_condition (cond_code));
22756 }
22757 break;
22758 }
22759
22760 false_cond = force_reg (mode, false_cond);
22761 if (true_cond != const0_rtx)
22762 true_cond = force_reg (mode, true_cond);
22763
22764 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
22765
22766 return 1;
22767 }
22768
22769 void
22770 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
22771 {
22772 machine_mode mode = GET_MODE (op0);
22773 enum rtx_code c;
22774 rtx target;
22775
22776 /* VSX/altivec have direct min/max insns. */
22777 if ((code == SMAX || code == SMIN)
22778 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
22779 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
22780 {
22781 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
22782 return;
22783 }
22784
22785 if (code == SMAX || code == SMIN)
22786 c = GE;
22787 else
22788 c = GEU;
22789
22790 if (code == SMAX || code == UMAX)
22791 target = emit_conditional_move (dest, c, op0, op1, mode,
22792 op0, op1, mode, 0);
22793 else
22794 target = emit_conditional_move (dest, c, op0, op1, mode,
22795 op1, op0, mode, 0);
22796 gcc_assert (target);
22797 if (target != dest)
22798 emit_move_insn (dest, target);
22799 }
22800
22801 /* A subroutine of the atomic operation splitters. Jump to LABEL if
22802 COND is true. Mark the jump as unlikely to be taken. */
22803
22804 static void
22805 emit_unlikely_jump (rtx cond, rtx label)
22806 {
22807 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
22808 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
22809 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
22810 }
22811
22812 /* A subroutine of the atomic operation splitters. Emit a load-locked
22813 instruction in MODE. For QI/HImode, possibly use a pattern that includes
22814 the zero_extend operation. */
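/* The patterns used here expand to the larx family: lbarx, lharx,
   lwarx, ldarx or lqarx, as appropriate for MODE. */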
22815
22816 static void
22817 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
22818 {
22819 rtx (*fn) (rtx, rtx) = NULL;
22820
22821 switch (mode)
22822 {
22823 case E_QImode:
22824 fn = gen_load_lockedqi;
22825 break;
22826 case E_HImode:
22827 fn = gen_load_lockedhi;
22828 break;
22829 case E_SImode:
22830 if (GET_MODE (mem) == QImode)
22831 fn = gen_load_lockedqi_si;
22832 else if (GET_MODE (mem) == HImode)
22833 fn = gen_load_lockedhi_si;
22834 else
22835 fn = gen_load_lockedsi;
22836 break;
22837 case E_DImode:
22838 fn = gen_load_lockeddi;
22839 break;
22840 case E_TImode:
22841 fn = gen_load_lockedti;
22842 break;
22843 default:
22844 gcc_unreachable ();
22845 }
22846 emit_insn (fn (reg, mem));
22847 }
22848
22849 /* A subroutine of the atomic operation splitters. Emit a store-conditional
22850 instruction in MODE. */
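/* The patterns used here expand to the stcx. family: stbcx., sthcx.,
   stwcx., stdcx. or stqcx., as appropriate for MODE. */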
22851
22852 static void
22853 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
22854 {
22855 rtx (*fn) (rtx, rtx, rtx) = NULL;
22856
22857 switch (mode)
22858 {
22859 case E_QImode:
22860 fn = gen_store_conditionalqi;
22861 break;
22862 case E_HImode:
22863 fn = gen_store_conditionalhi;
22864 break;
22865 case E_SImode:
22866 fn = gen_store_conditionalsi;
22867 break;
22868 case E_DImode:
22869 fn = gen_store_conditionaldi;
22870 break;
22871 case E_TImode:
22872 fn = gen_store_conditionalti;
22873 break;
22874 default:
22875 gcc_unreachable ();
22876 }
22877
22878 /* Emit sync before stwcx. to address PPC405 Erratum. */
22879 if (PPC405_ERRATUM77)
22880 emit_insn (gen_hwsync ());
22881
22882 emit_insn (fn (res, mem, val));
22883 }
22884
22885 /* Expand barriers before and after a load_locked/store_cond sequence. */
22886
22887 static rtx
22888 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
22889 {
22890 rtx addr = XEXP (mem, 0);
22891
22892 if (!legitimate_indirect_address_p (addr, reload_completed)
22893 && !legitimate_indexed_address_p (addr, reload_completed))
22894 {
22895 addr = force_reg (Pmode, addr);
22896 mem = replace_equiv_address_nv (mem, addr);
22897 }
22898
22899 switch (model)
22900 {
22901 case MEMMODEL_RELAXED:
22902 case MEMMODEL_CONSUME:
22903 case MEMMODEL_ACQUIRE:
22904 break;
22905 case MEMMODEL_RELEASE:
22906 case MEMMODEL_ACQ_REL:
22907 emit_insn (gen_lwsync ());
22908 break;
22909 case MEMMODEL_SEQ_CST:
22910 emit_insn (gen_hwsync ());
22911 break;
22912 default:
22913 gcc_unreachable ();
22914 }
22915 return mem;
22916 }
22917
22918 static void
22919 rs6000_post_atomic_barrier (enum memmodel model)
22920 {
22921 switch (model)
22922 {
22923 case MEMMODEL_RELAXED:
22924 case MEMMODEL_CONSUME:
22925 case MEMMODEL_RELEASE:
22926 break;
22927 case MEMMODEL_ACQUIRE:
22928 case MEMMODEL_ACQ_REL:
22929 case MEMMODEL_SEQ_CST:
22930 emit_insn (gen_isync ());
22931 break;
22932 default:
22933 gcc_unreachable ();
22934 }
22935 }
22936
22937 /* A subroutine of the various atomic expanders. For sub-word operations,
22938 we must adjust things to operate on SImode. Given the original MEM,
22939 return a new aligned memory. Also build and return the quantities by
22940 which to shift and mask. */
22941
22942 static rtx
22943 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
22944 {
22945 rtx addr, align, shift, mask, mem;
22946 HOST_WIDE_INT shift_mask;
22947 machine_mode mode = GET_MODE (orig_mem);
22948
22949 /* For smaller modes, we have to implement this via SImode. */
22950 shift_mask = (mode == QImode ? 0x18 : 0x10);
22951
22952 addr = XEXP (orig_mem, 0);
22953 addr = force_reg (GET_MODE (addr), addr);
22954
22955 /* Aligned memory containing subword. Generate a new memory. We
22956 do not want any of the existing MEM_ATTR data, as we're now
22957 accessing memory outside the original object. */
22958 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
22959 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22960 mem = gen_rtx_MEM (SImode, align);
22961 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
22962 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
22963 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
22964
22965 /* Shift amount for subword relative to aligned word. */
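/* E.g. for a QImode access at address A this computes (A & 3) * 8 on
   little-endian, which is flipped below to 24 - (A & 3) * 8 on
   big-endian. */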
22966 shift = gen_reg_rtx (SImode);
22967 addr = gen_lowpart (SImode, addr);
22968 rtx tmp = gen_reg_rtx (SImode);
22969 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
22970 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
22971 if (BYTES_BIG_ENDIAN)
22972 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
22973 shift, 1, OPTAB_LIB_WIDEN);
22974 *pshift = shift;
22975
22976 /* Mask for insertion. */
22977 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
22978 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
22979 *pmask = mask;
22980
22981 return mem;
22982 }
22983
22984 /* A subroutine of the various atomic expanders. For sub-word operands,
22985 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
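/* The combination is (OLDVAL & ~MASK) | NEWVAL; NEWVAL is assumed to
   have already been shifted into position with no bits set outside
   MASK. */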
22986
22987 static rtx
22988 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
22989 {
22990 rtx x;
22991
22992 x = gen_reg_rtx (SImode);
22993 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
22994 gen_rtx_NOT (SImode, mask),
22995 oldval)));
22996
22997 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
22998
22999 return x;
23000 }
23001
23002 /* A subroutine of the various atomic expanders. For sub-word operands,
23003 extract WIDE to NARROW via SHIFT. */
23004
23005 static void
23006 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23007 {
23008 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23009 wide, 1, OPTAB_LIB_WIDEN);
23010 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23011 }
23012
23013 /* Expand an atomic compare and swap operation. */
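/* For a word-sized strong compare-and-swap the core of the generated
   sequence is roughly

	loop:	lwarx	retval,0,mem
		cmpw	cr0,retval,oldval
		bne-	cr0,fail
		stwcx.	newval,0,mem
		bne-	cr0,loop
	fail:

   bracketed by whatever barriers the memory model requires; a weak
   compare-and-swap omits the loop back to the lwarx. */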
23014
23015 void
23016 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23017 {
23018 rtx boolval, retval, mem, oldval, newval, cond;
23019 rtx label1, label2, x, mask, shift;
23020 machine_mode mode, orig_mode;
23021 enum memmodel mod_s, mod_f;
23022 bool is_weak;
23023
23024 boolval = operands[0];
23025 retval = operands[1];
23026 mem = operands[2];
23027 oldval = operands[3];
23028 newval = operands[4];
23029 is_weak = (INTVAL (operands[5]) != 0);
23030 mod_s = memmodel_base (INTVAL (operands[6]));
23031 mod_f = memmodel_base (INTVAL (operands[7]));
23032 orig_mode = mode = GET_MODE (mem);
23033
23034 mask = shift = NULL_RTX;
23035 if (mode == QImode || mode == HImode)
23036 {
23037 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23038 lwarx and use shift/mask operations. With power8, we need to do the
23039 comparison in SImode, but the store is still done in QI/HImode. */
23040 oldval = convert_modes (SImode, mode, oldval, 1);
23041
23042 if (!TARGET_SYNC_HI_QI)
23043 {
23044 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23045
23046 /* Shift and mask OLDVAL into position within the word. */
23047 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23048 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23049
23050 /* Shift and mask NEWVAL into position within the word. */
23051 newval = convert_modes (SImode, mode, newval, 1);
23052 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23053 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23054 }
23055
23056 /* Prepare to adjust the return value. */
23057 retval = gen_reg_rtx (SImode);
23058 mode = SImode;
23059 }
23060 else if (reg_overlap_mentioned_p (retval, oldval))
23061 oldval = copy_to_reg (oldval);
23062
23063 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23064 oldval = copy_to_mode_reg (mode, oldval);
23065
23066 if (reg_overlap_mentioned_p (retval, newval))
23067 newval = copy_to_reg (newval);
23068
23069 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23070
23071 label1 = NULL_RTX;
23072 if (!is_weak)
23073 {
23074 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23075 emit_label (XEXP (label1, 0));
23076 }
23077 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23078
23079 emit_load_locked (mode, retval, mem);
23080
23081 x = retval;
23082 if (mask)
23083 x = expand_simple_binop (SImode, AND, retval, mask,
23084 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23085
23086 cond = gen_reg_rtx (CCmode);
23087 /* If we have TImode, synthesize a comparison. */
23088 if (mode != TImode)
23089 x = gen_rtx_COMPARE (CCmode, x, oldval);
23090 else
23091 {
23092 rtx xor1_result = gen_reg_rtx (DImode);
23093 rtx xor2_result = gen_reg_rtx (DImode);
23094 rtx or_result = gen_reg_rtx (DImode);
23095 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23096 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23097 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23098 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23099
23100 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23101 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23102 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23103 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23104 }
23105
23106 emit_insn (gen_rtx_SET (cond, x));
23107
23108 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23109 emit_unlikely_jump (x, label2);
23110
23111 x = newval;
23112 if (mask)
23113 x = rs6000_mask_atomic_subword (retval, newval, mask);
23114
23115 emit_store_conditional (orig_mode, cond, mem, x);
23116
23117 if (!is_weak)
23118 {
23119 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23120 emit_unlikely_jump (x, label1);
23121 }
23122
23123 if (!is_mm_relaxed (mod_f))
23124 emit_label (XEXP (label2, 0));
23125
23126 rs6000_post_atomic_barrier (mod_s);
23127
23128 if (is_mm_relaxed (mod_f))
23129 emit_label (XEXP (label2, 0));
23130
23131 if (shift)
23132 rs6000_finish_atomic_subword (operands[1], retval, shift);
23133 else if (mode != GET_MODE (operands[1]))
23134 convert_move (operands[1], retval, 1);
23135
23136 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23137 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23138 emit_insn (gen_rtx_SET (boolval, x));
23139 }
23140
23141 /* Expand an atomic exchange operation. */
23142
23143 void
23144 rs6000_expand_atomic_exchange (rtx operands[])
23145 {
23146 rtx retval, mem, val, cond;
23147 machine_mode mode;
23148 enum memmodel model;
23149 rtx label, x, mask, shift;
23150
23151 retval = operands[0];
23152 mem = operands[1];
23153 val = operands[2];
23154 model = memmodel_base (INTVAL (operands[3]));
23155 mode = GET_MODE (mem);
23156
23157 mask = shift = NULL_RTX;
23158 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23159 {
23160 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23161
23162 /* Shift and mask VAL into position within the word. */
23163 val = convert_modes (SImode, mode, val, 1);
23164 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23165 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23166
23167 /* Prepare to adjust the return value. */
23168 retval = gen_reg_rtx (SImode);
23169 mode = SImode;
23170 }
23171
23172 mem = rs6000_pre_atomic_barrier (mem, model);
23173
23174 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23175 emit_label (XEXP (label, 0));
23176
23177 emit_load_locked (mode, retval, mem);
23178
23179 x = val;
23180 if (mask)
23181 x = rs6000_mask_atomic_subword (retval, val, mask);
23182
23183 cond = gen_reg_rtx (CCmode);
23184 emit_store_conditional (mode, cond, mem, x);
23185
23186 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23187 emit_unlikely_jump (x, label);
23188
23189 rs6000_post_atomic_barrier (model);
23190
23191 if (shift)
23192 rs6000_finish_atomic_subword (operands[0], retval, shift);
23193 }
23194
23195 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23196 to perform. MEM is the memory on which to operate. VAL is the second
23197 operand of the binary operator. BEFORE and AFTER are optional locations to
23198 return the value of MEM either before or after the operation. MODEL_RTX
23199 is a CONST_INT containing the memory model to use. */
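/* For a word-sized operation the core of the generated sequence is
   roughly

	loop:	lwarx	before,0,mem
		<code>	after,before,val
		stwcx.	after,0,mem
		bne-	cr0,loop

   again bracketed by the barriers the memory model requires. */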
23200
23201 void
23202 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23203 rtx orig_before, rtx orig_after, rtx model_rtx)
23204 {
23205 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23206 machine_mode mode = GET_MODE (mem);
23207 machine_mode store_mode = mode;
23208 rtx label, x, cond, mask, shift;
23209 rtx before = orig_before, after = orig_after;
23210
23211 mask = shift = NULL_RTX;
23212 /* On power8, we want to use SImode for the operation while still doing the
23213 memory access as a subword. On previous systems, operate on the containing
23214 aligned word and shift/mask to get at the proper byte or halfword. */
23215 if (mode == QImode || mode == HImode)
23216 {
23217 if (TARGET_SYNC_HI_QI)
23218 {
23219 val = convert_modes (SImode, mode, val, 1);
23220
23221 /* Prepare to adjust the return value. */
23222 before = gen_reg_rtx (SImode);
23223 if (after)
23224 after = gen_reg_rtx (SImode);
23225 mode = SImode;
23226 }
23227 else
23228 {
23229 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23230
23231 /* Shift and mask VAL into position within the word. */
23232 val = convert_modes (SImode, mode, val, 1);
23233 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23234 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23235
23236 switch (code)
23237 {
23238 case IOR:
23239 case XOR:
23240 /* We've already zero-extended VAL. That is sufficient to
23241 make certain that it does not affect other bits. */
23242 mask = NULL;
23243 break;
23244
23245 case AND:
23246 /* If we make certain that all of the other bits in VAL are
23247 set, that will be sufficient to not affect other bits. */
23248 x = gen_rtx_NOT (SImode, mask);
23249 x = gen_rtx_IOR (SImode, x, val);
23250 emit_insn (gen_rtx_SET (val, x));
23251 mask = NULL;
23252 break;
23253
23254 case NOT:
23255 case PLUS:
23256 case MINUS:
23257 /* These will all affect bits outside the field and need
23258 adjustment via MASK within the loop. */
23259 break;
23260
23261 default:
23262 gcc_unreachable ();
23263 }
23264
23265 /* Prepare to adjust the return value. */
23266 before = gen_reg_rtx (SImode);
23267 if (after)
23268 after = gen_reg_rtx (SImode);
23269 store_mode = mode = SImode;
23270 }
23271 }
23272
23273 mem = rs6000_pre_atomic_barrier (mem, model);
23274
23275 label = gen_label_rtx ();
23276 emit_label (label);
23277 label = gen_rtx_LABEL_REF (VOIDmode, label);
23278
23279 if (before == NULL_RTX)
23280 before = gen_reg_rtx (mode);
23281
23282 emit_load_locked (mode, before, mem);
23283
23284 if (code == NOT)
23285 {
23286 x = expand_simple_binop (mode, AND, before, val,
23287 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23288 after = expand_simple_unop (mode, NOT, x, after, 1);
23289 }
23290 else
23291 {
23292 after = expand_simple_binop (mode, code, before, val,
23293 after, 1, OPTAB_LIB_WIDEN);
23294 }
23295
23296 x = after;
23297 if (mask)
23298 {
23299 x = expand_simple_binop (SImode, AND, after, mask,
23300 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23301 x = rs6000_mask_atomic_subword (before, x, mask);
23302 }
23303 else if (store_mode != mode)
23304 x = convert_modes (store_mode, mode, x, 1);
23305
23306 cond = gen_reg_rtx (CCmode);
23307 emit_store_conditional (store_mode, cond, mem, x);
23308
23309 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23310 emit_unlikely_jump (x, label);
23311
23312 rs6000_post_atomic_barrier (model);
23313
23314 if (shift)
23315 {
23316 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23317 then do the calculations in a SImode register. */
23318 if (orig_before)
23319 rs6000_finish_atomic_subword (orig_before, before, shift);
23320 if (orig_after)
23321 rs6000_finish_atomic_subword (orig_after, after, shift);
23322 }
23323 else if (store_mode != mode)
23324 {
23325 /* QImode/HImode on machines with lbarx/lharx where we do the native
23326 operation and then do the calculations in a SImode register. */
23327 if (orig_before)
23328 convert_move (orig_before, before, 1);
23329 if (orig_after)
23330 convert_move (orig_after, after, 1);
23331 }
23332 else if (orig_after && after != orig_after)
23333 emit_move_insn (orig_after, after);
23334 }
23335
23336 /* Emit instructions to move SRC to DST. Called by splitters for
23337 multi-register moves. It will emit at most one instruction for
23338 each register that is accessed; that is, it won't emit li/lis pairs
23339 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23340 register. */
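/* For example, a TImode move between GPRs on a 64-bit target is split
   into two DImode moves, and an IBM long double (TFmode) move between
   FPRs into two DFmode moves. */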
23341
23342 void
23343 rs6000_split_multireg_move (rtx dst, rtx src)
23344 {
23345 /* The register number of the first register being moved. */
23346 int reg;
23347 /* The mode that is to be moved. */
23348 machine_mode mode;
23349 /* The mode that the move is being done in, and its size. */
23350 machine_mode reg_mode;
23351 int reg_mode_size;
23352 /* The number of registers that will be moved. */
23353 int nregs;
23354
23355 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23356 mode = GET_MODE (dst);
23357 nregs = hard_regno_nregs (reg, mode);
23358 if (FP_REGNO_P (reg))
23359 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23360 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23361 else if (ALTIVEC_REGNO_P (reg))
23362 reg_mode = V16QImode;
23363 else
23364 reg_mode = word_mode;
23365 reg_mode_size = GET_MODE_SIZE (reg_mode);
23366
23367 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23368
23369 /* TDmode residing in FP registers is special, since the ISA requires that
23370 the lower-numbered word of a register pair is always the most significant
23371 word, even in little-endian mode. This does not match the usual subreg
23372 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23373 the appropriate constituent registers "by hand" in little-endian mode.
23374
23375 Note we do not need to check for destructive overlap here since TDmode
23376 can only reside in even/odd register pairs. */
23377 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23378 {
23379 rtx p_src, p_dst;
23380 int i;
23381
23382 for (i = 0; i < nregs; i++)
23383 {
23384 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23385 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23386 else
23387 p_src = simplify_gen_subreg (reg_mode, src, mode,
23388 i * reg_mode_size);
23389
23390 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23391 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23392 else
23393 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23394 i * reg_mode_size);
23395
23396 emit_insn (gen_rtx_SET (p_dst, p_src));
23397 }
23398
23399 return;
23400 }
23401
23402 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23403 {
23404 /* Move register range backwards, if we might have destructive
23405 overlap. */
23406 int i;
23407 for (i = nregs - 1; i >= 0; i--)
23408 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23409 i * reg_mode_size),
23410 simplify_gen_subreg (reg_mode, src, mode,
23411 i * reg_mode_size)));
23412 }
23413 else
23414 {
23415 int i;
23416 int j = -1;
23417 bool used_update = false;
23418 rtx restore_basereg = NULL_RTX;
23419
23420 if (MEM_P (src) && INT_REGNO_P (reg))
23421 {
23422 rtx breg;
23423
23424 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23425 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23426 {
23427 rtx delta_rtx;
23428 breg = XEXP (XEXP (src, 0), 0);
23429 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23430 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23431 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23432 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23433 src = replace_equiv_address (src, breg);
23434 }
23435 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23436 {
23437 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23438 {
23439 rtx basereg = XEXP (XEXP (src, 0), 0);
23440 if (TARGET_UPDATE)
23441 {
23442 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23443 emit_insn (gen_rtx_SET (ndst,
23444 gen_rtx_MEM (reg_mode,
23445 XEXP (src, 0))));
23446 used_update = true;
23447 }
23448 else
23449 emit_insn (gen_rtx_SET (basereg,
23450 XEXP (XEXP (src, 0), 1)));
23451 src = replace_equiv_address (src, basereg);
23452 }
23453 else
23454 {
23455 rtx basereg = gen_rtx_REG (Pmode, reg);
23456 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23457 src = replace_equiv_address (src, basereg);
23458 }
23459 }
23460
23461 breg = XEXP (src, 0);
23462 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23463 breg = XEXP (breg, 0);
23464
23465 /* If the base register we are using to address memory is
23466 also a destination reg, then change that register last. */
23467 if (REG_P (breg)
23468 && REGNO (breg) >= REGNO (dst)
23469 && REGNO (breg) < REGNO (dst) + nregs)
23470 j = REGNO (breg) - REGNO (dst);
23471 }
23472 else if (MEM_P (dst) && INT_REGNO_P (reg))
23473 {
23474 rtx breg;
23475
23476 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23477 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23478 {
23479 rtx delta_rtx;
23480 breg = XEXP (XEXP (dst, 0), 0);
23481 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23482 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23483 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23484
23485 /* We have to update the breg before doing the store.
23486 Use store with update, if available. */
23487
23488 if (TARGET_UPDATE)
23489 {
23490 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23491 emit_insn (TARGET_32BIT
23492 ? (TARGET_POWERPC64
23493 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23494 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23495 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23496 used_update = true;
23497 }
23498 else
23499 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23500 dst = replace_equiv_address (dst, breg);
23501 }
23502 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23503 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23504 {
23505 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23506 {
23507 rtx basereg = XEXP (XEXP (dst, 0), 0);
23508 if (TARGET_UPDATE)
23509 {
23510 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23511 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23512 XEXP (dst, 0)),
23513 nsrc));
23514 used_update = true;
23515 }
23516 else
23517 emit_insn (gen_rtx_SET (basereg,
23518 XEXP (XEXP (dst, 0), 1)));
23519 dst = replace_equiv_address (dst, basereg);
23520 }
23521 else
23522 {
23523 rtx basereg = XEXP (XEXP (dst, 0), 0);
23524 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23525 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23526 && REG_P (basereg)
23527 && REG_P (offsetreg)
23528 && REGNO (basereg) != REGNO (offsetreg));
23529 if (REGNO (basereg) == 0)
23530 {
23531 rtx tmp = offsetreg;
23532 offsetreg = basereg;
23533 basereg = tmp;
23534 }
23535 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23536 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23537 dst = replace_equiv_address (dst, basereg);
23538 }
23539 }
23540 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23541 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23542 }
23543
23544 for (i = 0; i < nregs; i++)
23545 {
23546 /* Calculate index to next subword. */
23547 ++j;
23548 if (j == nregs)
23549 j = 0;
23550
23551 /* If compiler already emitted move of first word by
23552 store with update, no need to do anything. */
23553 if (j == 0 && used_update)
23554 continue;
23555
23556 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23557 j * reg_mode_size),
23558 simplify_gen_subreg (reg_mode, src, mode,
23559 j * reg_mode_size)));
23560 }
23561 if (restore_basereg != NULL_RTX)
23562 emit_insn (restore_basereg);
23563 }
23564 }
23565
23566 \f
23567 /* This page contains routines that are used to determine what the
23568 function prologue and epilogue code will do and write them out. */
23569
23570 /* Determine whether the REG is really used. */
23571
23572 static bool
23573 save_reg_p (int reg)
23574 {
23575 /* We need to mark the PIC offset register live for the same conditions
23576 as it is set up, or otherwise it won't be saved before we clobber it. */
23577
23578 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23579 {
23580 /* When calling eh_return, we must return true for all the cases
23581 where conditional_register_usage marks the PIC offset reg
23582 call used. */
23583 if (TARGET_TOC && TARGET_MINIMAL_TOC
23584 && (crtl->calls_eh_return
23585 || df_regs_ever_live_p (reg)
23586 || !constant_pool_empty_p ()))
23587 return true;
23588
23589 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
23590 && flag_pic)
23591 return true;
23592 }
23593
23594 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23595 }
23596
23597 /* Return the first fixed-point register that is required to be
23598 saved. 32 if none. */
23599
23600 int
23601 first_reg_to_save (void)
23602 {
23603 int first_reg;
23604
23605 /* Find lowest numbered live register. */
23606 for (first_reg = 13; first_reg <= 31; first_reg++)
23607 if (save_reg_p (first_reg))
23608 break;
23609
23610 #if TARGET_MACHO
23611 if (flag_pic
23612 && crtl->uses_pic_offset_table
23613 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
23614 return RS6000_PIC_OFFSET_TABLE_REGNUM;
23615 #endif
23616
23617 return first_reg;
23618 }
23619
23620 /* Similar, for FP regs. */
23621
23622 int
23623 first_fp_reg_to_save (void)
23624 {
23625 int first_reg;
23626
23627 /* Find lowest numbered live register. */
23628 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
23629 if (save_reg_p (first_reg))
23630 break;
23631
23632 return first_reg;
23633 }
23634
23635 /* Similar, for AltiVec regs. */
23636
23637 static int
23638 first_altivec_reg_to_save (void)
23639 {
23640 int i;
23641
23642 /* Stack frame remains as is unless we are in AltiVec ABI. */
23643 if (! TARGET_ALTIVEC_ABI)
23644 return LAST_ALTIVEC_REGNO + 1;
23645
23646 /* On Darwin, the unwind routines are compiled without
23647 TARGET_ALTIVEC, and use save_world to save/restore the
23648 altivec registers when necessary. */
23649 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23650 && ! TARGET_ALTIVEC)
23651 return FIRST_ALTIVEC_REGNO + 20;
23652
23653 /* Find lowest numbered live register. */
23654 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
23655 if (save_reg_p (i))
23656 break;
23657
23658 return i;
23659 }
23660
23661 /* Return a 32-bit mask of the AltiVec registers we need to set in
23662 VRSAVE. Bit n of the return value is 1 if Vn is live, with bits
23663 numbered IBM-style, i.e. bit 0 is the MSB of the 32-bit word. */
23664
23665 static unsigned int
23666 compute_vrsave_mask (void)
23667 {
23668 unsigned int i, mask = 0;
23669
23670 /* On Darwin, the unwind routines are compiled without
23671 TARGET_ALTIVEC, and use save_world to save/restore the
23672 call-saved altivec registers when necessary. */
23673 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23674 && ! TARGET_ALTIVEC)
23675 mask |= 0xFFF;
23676
23677 /* First, find out if we use _any_ altivec registers. */
23678 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
23679 if (df_regs_ever_live_p (i))
23680 mask |= ALTIVEC_REG_BIT (i);
23681
23682 if (mask == 0)
23683 return mask;
23684
23685 /* Next, remove the argument registers from the set. These must
23686 be in the VRSAVE mask set by the caller, so we don't need to add
23687 them in again. More importantly, the mask we compute here is
23688 used to generate CLOBBERs in the set_vrsave insn, and we do not
23689 wish the argument registers to die. */
23690 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
23691 mask &= ~ALTIVEC_REG_BIT (i);
23692
23693 /* Similarly, remove the return value from the set. */
23694 {
23695 bool yes = false;
23696 diddle_return_value (is_altivec_return_reg, &yes);
23697 if (yes)
23698 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
23699 }
23700
23701 return mask;
23702 }
23703
23704 /* For a very restricted set of circumstances, we can cut down the
23705 size of prologues/epilogues by calling our own save/restore-the-world
23706 routines. */
23707
23708 static void
23709 compute_save_world_info (rs6000_stack_t *info)
23710 {
23711 info->world_save_p = 1;
23712 info->world_save_p
23713 = (WORLD_SAVE_P (info)
23714 && DEFAULT_ABI == ABI_DARWIN
23715 && !cfun->has_nonlocal_label
23716 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
23717 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
23718 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
23719 && info->cr_save_p);
23720
23721 /* This will not work in conjunction with sibcalls. Make sure there
23722 are none. (This check is expensive, but seldom executed.) */
23723 if (WORLD_SAVE_P (info))
23724 {
23725 rtx_insn *insn;
23726 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
23727 if (CALL_P (insn) && SIBLING_CALL_P (insn))
23728 {
23729 info->world_save_p = 0;
23730 break;
23731 }
23732 }
23733
23734 if (WORLD_SAVE_P (info))
23735 {
23736 /* Even if we're not touching VRsave, make sure there's room on the
23737 stack for it, if it looks like we're calling SAVE_WORLD, which
23738 will attempt to save it. */
23739 info->vrsave_size = 4;
23740
23741 /* If we are going to save the world, we need to save the link register too. */
23742 info->lr_save_p = 1;
23743
23744 /* "Save" the VRsave register too if we're saving the world. */
23745 if (info->vrsave_mask == 0)
23746 info->vrsave_mask = compute_vrsave_mask ();
23747
23748 /* Because the Darwin register save/restore routines only handle
23749 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
23750 check. */
23751 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
23752 && (info->first_altivec_reg_save
23753 >= FIRST_SAVED_ALTIVEC_REGNO));
23754 }
23755
23756 return;
23757 }
23758
23759
23760 static void
23761 is_altivec_return_reg (rtx reg, void *xyes)
23762 {
23763 bool *yes = (bool *) xyes;
23764 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
23765 *yes = true;
23766 }
23767
23768 \f
23769 /* Return whether REG is a global user reg or has been specified by
23770 -ffixed-REG. We should not restore these, and so cannot use
23771 lmw or out-of-line restore functions if there are any. We also
23772 can't save them (well, emit frame notes for them), because frame
23773 unwinding during exception handling will restore saved registers. */
23774
23775 static bool
23776 fixed_reg_p (int reg)
23777 {
23778 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
23779 backend sets it, overriding anything the user might have given. */
23780 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
23781 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23782 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23783 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23784 return false;
23785
23786 return fixed_regs[reg];
23787 }
23788
23789 /* Determine the strategy for saving/restoring registers. */
23790
23791 enum {
23792 SAVE_MULTIPLE = 0x1,
23793 SAVE_INLINE_GPRS = 0x2,
23794 SAVE_INLINE_FPRS = 0x4,
23795 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
23796 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
23797 SAVE_INLINE_VRS = 0x20,
23798 REST_MULTIPLE = 0x100,
23799 REST_INLINE_GPRS = 0x200,
23800 REST_INLINE_FPRS = 0x400,
23801 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
23802 REST_INLINE_VRS = 0x1000
23803 };
23804
23805 static int
23806 rs6000_savres_strategy (rs6000_stack_t *info,
23807 bool using_static_chain_p)
23808 {
23809 int strategy = 0;
23810
23811 /* Select between in-line and out-of-line save and restore of regs.
23812 First, all the obvious cases where we don't use out-of-line. */
23813 if (crtl->calls_eh_return
23814 || cfun->machine->ra_need_lr)
23815 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
23816 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
23817 | SAVE_INLINE_VRS | REST_INLINE_VRS);
23818
23819 if (info->first_gp_reg_save == 32)
23820 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23821
23822 if (info->first_fp_reg_save == 64)
23823 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23824
23825 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
23826 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23827
23828 /* Define cutoff for using out-of-line functions to save registers. */
23829 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
23830 {
23831 if (!optimize_size)
23832 {
23833 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23834 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23835 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23836 }
23837 else
23838 {
23839 /* Prefer out-of-line restore if it will exit. */
23840 if (info->first_fp_reg_save > 61)
23841 strategy |= SAVE_INLINE_FPRS;
23842 if (info->first_gp_reg_save > 29)
23843 {
23844 if (info->first_fp_reg_save == 64)
23845 strategy |= SAVE_INLINE_GPRS;
23846 else
23847 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23848 }
23849 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
23850 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23851 }
23852 }
23853 else if (DEFAULT_ABI == ABI_DARWIN)
23854 {
23855 if (info->first_fp_reg_save > 60)
23856 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23857 if (info->first_gp_reg_save > 29)
23858 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23859 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23860 }
23861 else
23862 {
23863 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
23864 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
23865 || info->first_fp_reg_save > 61)
23866 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23867 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23868 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23869 }
23870
23871 /* Don't bother to try to save things out-of-line if r11 is occupied
23872 by the static chain. It would require too much fiddling and the
23873 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
23874 pointer on Darwin, and AIX uses r1 or r12. */
23875 if (using_static_chain_p
23876 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
23877 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
23878 | SAVE_INLINE_GPRS
23879 | SAVE_INLINE_VRS);
23880
23881 /* Don't ever restore fixed regs. That means we can't use the
23882 out-of-line register restore functions if a fixed reg is in the
23883 range of regs restored. */
23884 if (!(strategy & REST_INLINE_FPRS))
23885 for (int i = info->first_fp_reg_save; i < 64; i++)
23886 if (fixed_regs[i])
23887 {
23888 strategy |= REST_INLINE_FPRS;
23889 break;
23890 }
23891
23892 /* We can only use the out-of-line routines to restore fprs if we've
23893 saved all the registers from first_fp_reg_save in the prologue.
23894 Otherwise, we risk loading garbage. Of course, if we have saved
23895 out-of-line then we know we haven't skipped any fprs. */
23896 if ((strategy & SAVE_INLINE_FPRS)
23897 && !(strategy & REST_INLINE_FPRS))
23898 for (int i = info->first_fp_reg_save; i < 64; i++)
23899 if (!save_reg_p (i))
23900 {
23901 strategy |= REST_INLINE_FPRS;
23902 break;
23903 }
23904
23905 /* Similarly, for altivec regs. */
23906 if (!(strategy & REST_INLINE_VRS))
23907 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
23908 if (fixed_regs[i])
23909 {
23910 strategy |= REST_INLINE_VRS;
23911 break;
23912 }
23913
23914 if ((strategy & SAVE_INLINE_VRS)
23915 && !(strategy & REST_INLINE_VRS))
23916 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
23917 if (!save_reg_p (i))
23918 {
23919 strategy |= REST_INLINE_VRS;
23920 break;
23921 }
23922
23923 /* info->lr_save_p isn't yet set if the only reason lr needs to be
23924 saved is an out-of-line save or restore. Set up the value for
23925 the next test (excluding out-of-line gprs). */
23926 bool lr_save_p = (info->lr_save_p
23927 || !(strategy & SAVE_INLINE_FPRS)
23928 || !(strategy & SAVE_INLINE_VRS)
23929 || !(strategy & REST_INLINE_FPRS)
23930 || !(strategy & REST_INLINE_VRS));
23931
23932 if (TARGET_MULTIPLE
23933 && !TARGET_POWERPC64
23934 && info->first_gp_reg_save < 31
23935 && !(flag_shrink_wrap
23936 && flag_shrink_wrap_separate
23937 && optimize_function_for_speed_p (cfun)))
23938 {
23939 int count = 0;
23940 for (int i = info->first_gp_reg_save; i < 32; i++)
23941 if (save_reg_p (i))
23942 count++;
23943
23944 if (count <= 1)
23945 /* Don't use store multiple if only one reg needs to be
23946 saved. This can occur for example when the ABI_V4 pic reg
23947 (r30) needs to be saved to make calls, but r31 is not
23948 used. */
23949 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23950 else
23951 {
23952 /* Prefer store multiple for saves over out-of-line
23953 routines, since the store-multiple instruction will
23954 always be smaller. */
23955 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
23956
23957 /* The situation is more complicated with load multiple.
23958 We'd prefer to use the out-of-line routines for restores,
23959 since the "exit" out-of-line routines can handle the
23960 restore of LR and the frame teardown. However, it doesn't
23961 make sense to use the out-of-line routine if that is the
23962 only reason we'd need to save LR, and we can't use the
23963 "exit" out-of-line gpr restore if we have saved some
23964 fprs; in those cases it is advantageous to use load
23965 multiple when available. */
23966 if (info->first_fp_reg_save != 64 || !lr_save_p)
23967 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
23968 }
23969 }
23970
23971 /* Using the "exit" out-of-line routine does not improve code size
23972 if using it would require lr to be saved and if only saving one
23973 or two gprs. */
23974 else if (!lr_save_p && info->first_gp_reg_save > 29)
23975 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23976
23977 /* Don't ever restore fixed regs. */
23978 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
23979 for (int i = info->first_gp_reg_save; i < 32; i++)
23980 if (fixed_reg_p (i))
23981 {
23982 strategy |= REST_INLINE_GPRS;
23983 strategy &= ~REST_MULTIPLE;
23984 break;
23985 }
23986
23987 /* We can only use load multiple or the out-of-line routines to
23988 restore gprs if we've saved all the registers from
23989 first_gp_reg_save. Otherwise, we risk loading garbage.
23990 Of course, if we have saved out-of-line or used stmw then we know
23991 we haven't skipped any gprs. */
23992 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
23993 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
23994 for (int i = info->first_gp_reg_save; i < 32; i++)
23995 if (!save_reg_p (i))
23996 {
23997 strategy |= REST_INLINE_GPRS;
23998 strategy &= ~REST_MULTIPLE;
23999 break;
24000 }
24001
24002 if (TARGET_ELF && TARGET_64BIT)
24003 {
24004 if (!(strategy & SAVE_INLINE_FPRS))
24005 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24006 else if (!(strategy & SAVE_INLINE_GPRS)
24007 && info->first_fp_reg_save == 64)
24008 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24009 }
24010 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24011 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24012
24013 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24014 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24015
24016 return strategy;
24017 }
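
/* For illustration (a sketch, not guaranteed output): when the
   strategy clears SAVE_INLINE_GPRS on 64-bit ELF, the prologue
   branches to one of libgcc's out-of-line routines, roughly

	mflr 0
	bl _savegpr0_28		# save r28..r31, and LR via r0
	stdu 1,-144(1)

   while SAVE_MULTIPLE on a 32-bit target instead emits a single

	stmw 28,-16(1)

   The routine name and the offsets shown here are examples; the
   real choice depends on the ABI and the frame layout computed
   below.  */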
24018
24019 /* Calculate the stack information for the current function. This is
24020 complicated by having two separate calling sequences, the AIX calling
24021 sequence and the V.4 calling sequence.
24022
24023 AIX (and Darwin/Mac OS X) stack frames look like:
24024 32-bit 64-bit
24025 SP----> +---------------------------------------+
24026 | back chain to caller | 0 0
24027 +---------------------------------------+
24028 | saved CR | 4 8 (8-11)
24029 +---------------------------------------+
24030 | saved LR | 8 16
24031 +---------------------------------------+
24032 | reserved for compilers | 12 24
24033 +---------------------------------------+
24034 | reserved for binders | 16 32
24035 +---------------------------------------+
24036 | saved TOC pointer | 20 40
24037 +---------------------------------------+
24038 | Parameter save area (+padding*) (P) | 24 48
24039 +---------------------------------------+
24040 | Alloca space (A) | 24+P etc.
24041 +---------------------------------------+
24042 | Local variable space (L) | 24+P+A
24043 +---------------------------------------+
24044 | Float/int conversion temporary (X) | 24+P+A+L
24045 +---------------------------------------+
24046 | Save area for AltiVec registers (W) | 24+P+A+L+X
24047 +---------------------------------------+
24048 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24049 +---------------------------------------+
24050 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24051 +---------------------------------------+
24052 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24053 +---------------------------------------+
24054 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24055 +---------------------------------------+
24056 old SP->| back chain to caller's caller |
24057 +---------------------------------------+
24058
24059 * If the alloca area is present, the parameter save area is
24060 padded so that the former starts 16-byte aligned.
24061
24062 The required alignment for AIX configurations is two words (i.e., 8
24063 or 16 bytes).
24064
24065 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24066
24067 SP----> +---------------------------------------+
24068 | Back chain to caller | 0
24069 +---------------------------------------+
24070 | Save area for CR | 8
24071 +---------------------------------------+
24072 | Saved LR | 16
24073 +---------------------------------------+
24074 | Saved TOC pointer | 24
24075 +---------------------------------------+
24076 | Parameter save area (+padding*) (P) | 32
24077 +---------------------------------------+
24078 | Alloca space (A) | 32+P
24079 +---------------------------------------+
24080 | Local variable space (L) | 32+P+A
24081 +---------------------------------------+
24082 | Save area for AltiVec registers (W) | 32+P+A+L
24083 +---------------------------------------+
24084 | AltiVec alignment padding (Y) | 32+P+A+L+W
24085 +---------------------------------------+
24086 | Save area for GP registers (G) | 32+P+A+L+W+Y
24087 +---------------------------------------+
24088 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24089 +---------------------------------------+
24090 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24091 +---------------------------------------+
24092
24093 * If the alloca area is present, the parameter save area is
24094 padded so that the former starts 16-byte aligned.
24095
24096 V.4 stack frames look like:
24097
24098 SP----> +---------------------------------------+
24099 | back chain to caller | 0
24100 +---------------------------------------+
24101 | caller's saved LR | 4
24102 +---------------------------------------+
24103 | Parameter save area (+padding*) (P) | 8
24104 +---------------------------------------+
24105 | Alloca space (A) | 8+P
24106 +---------------------------------------+
24107 | Varargs save area (V) | 8+P+A
24108 +---------------------------------------+
24109 | Local variable space (L) | 8+P+A+V
24110 +---------------------------------------+
24111 | Float/int conversion temporary (X) | 8+P+A+V+L
24112 +---------------------------------------+
24113 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24114 +---------------------------------------+
24115 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24116 +---------------------------------------+
24117 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24118 +---------------------------------------+
24119 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24120 +---------------------------------------+
24121 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24122 +---------------------------------------+
24123 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24124 +---------------------------------------+
24125 old SP->| back chain to caller's caller |
24126 +---------------------------------------+
24127
24128 * If the alloca area is present and the required alignment is
24129 16 bytes, the parameter save area is padded so that the
24130 alloca area starts 16-byte aligned.
24131
24132 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24133 given. (But note below and in sysv4.h that we require only 8 and
24134 may round up the size of our stack frame anyway. The historical
24135 reason is early versions of powerpc-linux which didn't properly
24136 align the stack at program startup. A happy side-effect is that
24137 -mno-eabi libraries can be used with -meabi programs.)
24138
24139 The EABI configuration defaults to the V.4 layout. However,
24140 the stack alignment requirements may differ. If -mno-eabi is not
24141 given, the required stack alignment is 8 bytes; if -mno-eabi is
24142 given, the required alignment is 16 bytes. (But see V.4 comment
24143 above.) */
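
/* Worked example (illustrative): an ELFv2 function saving r30-r31
   and f31, with 16 bytes of locals and no alloca, gets

	fp_save_offset = -8	f31 at -8 from the incoming SP
	gp_save_offset = -24	r30 at -24, r31 at -16

   and a total_size of the 32-byte fixed header plus locals plus the
   save area, rounded up to ABI_STACK_BOUNDARY.  rs6000_stack_info
   below performs exactly this bookkeeping.  */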
24144
24145 #ifndef ABI_STACK_BOUNDARY
24146 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24147 #endif
24148
24149 static rs6000_stack_t *
24150 rs6000_stack_info (void)
24151 {
24152 /* We should never be called for thunks, we are not set up for that. */
24153 gcc_assert (!cfun->is_thunk);
24154
24155 rs6000_stack_t *info = &stack_info;
24156 int reg_size = TARGET_32BIT ? 4 : 8;
24157 int ehrd_size;
24158 int ehcr_size;
24159 int save_align;
24160 int first_gp;
24161 HOST_WIDE_INT non_fixed_size;
24162 bool using_static_chain_p;
24163
24164 if (reload_completed && info->reload_completed)
24165 return info;
24166
24167 memset (info, 0, sizeof (*info));
24168 info->reload_completed = reload_completed;
24169
24170 /* Select which calling sequence. */
24171 info->abi = DEFAULT_ABI;
24172
24173 /* Calculate which registers need to be saved & save area size. */
24174 info->first_gp_reg_save = first_reg_to_save ();
24175 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24176 even if it currently looks like we won't. Reload may need it to
24177 get at a constant; if so, it will have already created a constant
24178 pool entry for it. */
24179 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24180 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24181 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24182 && crtl->uses_const_pool
24183 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24184 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24185 else
24186 first_gp = info->first_gp_reg_save;
24187
24188 info->gp_size = reg_size * (32 - first_gp);
24189
24190 info->first_fp_reg_save = first_fp_reg_to_save ();
24191 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24192
24193 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24194 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24195 - info->first_altivec_reg_save);
24196
24197 /* Does this function call anything? */
24198 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24199
24200 /* Determine if we need to save the condition code registers. */
24201 if (save_reg_p (CR2_REGNO)
24202 || save_reg_p (CR3_REGNO)
24203 || save_reg_p (CR4_REGNO))
24204 {
24205 info->cr_save_p = 1;
24206 if (DEFAULT_ABI == ABI_V4)
24207 info->cr_size = reg_size;
24208 }
24209
24210 /* If the current function calls __builtin_eh_return, then we need
24211 to allocate stack space for registers that will hold data for
24212 the exception handler. */
24213 if (crtl->calls_eh_return)
24214 {
24215 unsigned int i;
24216 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24217 continue;
24218
24219 ehrd_size = i * UNITS_PER_WORD;
24220 }
24221 else
24222 ehrd_size = 0;
24223
24224 /* In the ELFv2 ABI, we also need to allocate space for separate
24225 CR field save areas if the function calls __builtin_eh_return. */
24226 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24227 {
24228 /* This hard-codes that we have three call-saved CR fields. */
24229 ehcr_size = 3 * reg_size;
24230 /* We do *not* use the regular CR save mechanism. */
24231 info->cr_save_p = 0;
24232 }
24233 else
24234 ehcr_size = 0;
24235
24236 /* Determine various sizes. */
24237 info->reg_size = reg_size;
24238 info->fixed_size = RS6000_SAVE_AREA;
24239 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24240 if (cfun->calls_alloca)
24241 info->parm_size =
24242 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24243 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24244 else
24245 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24246 TARGET_ALTIVEC ? 16 : 8);
24247 if (FRAME_GROWS_DOWNWARD)
24248 info->vars_size
24249 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24250 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24251 - (info->fixed_size + info->vars_size + info->parm_size);
24252
24253 if (TARGET_ALTIVEC_ABI)
24254 info->vrsave_mask = compute_vrsave_mask ();
24255
24256 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24257 info->vrsave_size = 4;
24258
24259 compute_save_world_info (info);
24260
24261 /* Calculate the offsets. */
24262 switch (DEFAULT_ABI)
24263 {
24264 case ABI_NONE:
24265 default:
24266 gcc_unreachable ();
24267
24268 case ABI_AIX:
24269 case ABI_ELFv2:
24270 case ABI_DARWIN:
24271 info->fp_save_offset = -info->fp_size;
24272 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24273
24274 if (TARGET_ALTIVEC_ABI)
24275 {
24276 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24277
24278 /* Align stack so vector save area is on a quadword boundary.
24279 The padding goes above the vectors. */
24280 if (info->altivec_size != 0)
24281 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24282
24283 info->altivec_save_offset = info->vrsave_save_offset
24284 - info->altivec_padding_size
24285 - info->altivec_size;
24286 gcc_assert (info->altivec_size == 0
24287 || info->altivec_save_offset % 16 == 0);
24288
24289 /* Adjust for AltiVec case. */
24290 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24291 }
24292 else
24293 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24294
24295 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24296 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24297 info->lr_save_offset = 2*reg_size;
24298 break;
24299
24300 case ABI_V4:
24301 info->fp_save_offset = -info->fp_size;
24302 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24303 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24304
24305 if (TARGET_ALTIVEC_ABI)
24306 {
24307 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24308
24309 /* Align stack so vector save area is on a quadword boundary. */
24310 if (info->altivec_size != 0)
24311 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24312
24313 info->altivec_save_offset = info->vrsave_save_offset
24314 - info->altivec_padding_size
24315 - info->altivec_size;
24316
24317 /* Adjust for AltiVec case. */
24318 info->ehrd_offset = info->altivec_save_offset;
24319 }
24320 else
24321 info->ehrd_offset = info->cr_save_offset;
24322
24323 info->ehrd_offset -= ehrd_size;
24324 info->lr_save_offset = reg_size;
24325 }
24326
24327 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24328 info->save_size = RS6000_ALIGN (info->fp_size
24329 + info->gp_size
24330 + info->altivec_size
24331 + info->altivec_padding_size
24332 + ehrd_size
24333 + ehcr_size
24334 + info->cr_size
24335 + info->vrsave_size,
24336 save_align);
24337
24338 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24339
24340 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24341 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24342
24343 /* Determine if we need to save the link register. */
24344 if (info->calls_p
24345 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24346 && crtl->profile
24347 && !TARGET_PROFILE_KERNEL)
24348 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24349 #ifdef TARGET_RELOCATABLE
24350 || (DEFAULT_ABI == ABI_V4
24351 && (TARGET_RELOCATABLE || flag_pic > 1)
24352 && !constant_pool_empty_p ())
24353 #endif
24354 || rs6000_ra_ever_killed ())
24355 info->lr_save_p = 1;
24356
24357 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24358 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24359 && call_used_regs[STATIC_CHAIN_REGNUM]);
24360 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24361
24362 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24363 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24364 || !(info->savres_strategy & SAVE_INLINE_VRS)
24365 || !(info->savres_strategy & REST_INLINE_GPRS)
24366 || !(info->savres_strategy & REST_INLINE_FPRS)
24367 || !(info->savres_strategy & REST_INLINE_VRS))
24368 info->lr_save_p = 1;
24369
24370 if (info->lr_save_p)
24371 df_set_regs_ever_live (LR_REGNO, true);
24372
24373 /* Determine if we need to allocate any stack frame:
24374
24375 For AIX we need to push the stack if a frame pointer is needed
24376 (because the stack might be dynamically adjusted), if we are
24377 debugging, if we make calls, or if the sum of fp_save, gp_save,
24378 and local variables is more than the space needed to save all
24379 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24380 + 18*8 = 288 (GPR13 reserved).
24381
24382 For V.4 we don't have the stack cushion that AIX uses, but assume
24383 that the debugger can handle stackless frames. */
24384
24385 if (info->calls_p)
24386 info->push_p = 1;
24387
24388 else if (DEFAULT_ABI == ABI_V4)
24389 info->push_p = non_fixed_size != 0;
24390
24391 else if (frame_pointer_needed)
24392 info->push_p = 1;
24393
24394 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24395 info->push_p = 1;
24396
24397 else
24398 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24399
24400 return info;
24401 }
24402
24403 static void
24404 debug_stack_info (rs6000_stack_t *info)
24405 {
24406 const char *abi_string;
24407
24408 if (! info)
24409 info = rs6000_stack_info ();
24410
24411 fprintf (stderr, "\nStack information for function %s:\n",
24412 ((current_function_decl && DECL_NAME (current_function_decl))
24413 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24414 : "<unknown>"));
24415
24416 switch (info->abi)
24417 {
24418 default: abi_string = "Unknown"; break;
24419 case ABI_NONE: abi_string = "NONE"; break;
24420 case ABI_AIX: abi_string = "AIX"; break;
24421 case ABI_ELFv2: abi_string = "ELFv2"; break;
24422 case ABI_DARWIN: abi_string = "Darwin"; break;
24423 case ABI_V4: abi_string = "V.4"; break;
24424 }
24425
24426 fprintf (stderr, "\tABI = %5s\n", abi_string);
24427
24428 if (TARGET_ALTIVEC_ABI)
24429 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24430
24431 if (info->first_gp_reg_save != 32)
24432 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24433
24434 if (info->first_fp_reg_save != 64)
24435 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24436
24437 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24438 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24439 info->first_altivec_reg_save);
24440
24441 if (info->lr_save_p)
24442 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24443
24444 if (info->cr_save_p)
24445 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24446
24447 if (info->vrsave_mask)
24448 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24449
24450 if (info->push_p)
24451 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24452
24453 if (info->calls_p)
24454 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24455
24456 if (info->gp_size)
24457 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24458
24459 if (info->fp_size)
24460 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24461
24462 if (info->altivec_size)
24463 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24464 info->altivec_save_offset);
24465
24466 if (info->vrsave_size)
24467 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24468 info->vrsave_save_offset);
24469
24470 if (info->lr_save_p)
24471 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24472
24473 if (info->cr_save_p)
24474 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24475
24476 if (info->varargs_save_offset)
24477 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24478
24479 if (info->total_size)
24480 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24481 info->total_size);
24482
24483 if (info->vars_size)
24484 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24485 info->vars_size);
24486
24487 if (info->parm_size)
24488 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24489
24490 if (info->fixed_size)
24491 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24492
24493 if (info->gp_size)
24494 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24495
24496 if (info->fp_size)
24497 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24498
24499 if (info->altivec_size)
24500 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24501
24502 if (info->vrsave_size)
24503 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24504
24505 if (info->altivec_padding_size)
24506 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24507 info->altivec_padding_size);
24508
24509 if (info->cr_size)
24510 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24511
24512 if (info->save_size)
24513 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24514
24515 if (info->reg_size != 4)
24516 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24517
24518 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24519
24520 fprintf (stderr, "\n");
24521 }
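
/* For reference, a dump from the function above (reachable with
   -mdebug=stack) looks something like

	Stack information for function foo:
		ABI = ELFv2
		first_gp_reg_save = 30
		lr_save_p = 1
		push_p = 1
		calls_p = 1
		gp_save_offset = -16
		total_size = 112
		gp_size = 16
		save_size = 16
		reg_size = 8

   The values here are invented for illustration; only nonzero
   fields are printed, as the guards above show.  */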
24522
24523 rtx
24524 rs6000_return_addr (int count, rtx frame)
24525 {
24526 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24527 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24528 if (count != 0
24529 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24530 {
24531 cfun->machine->ra_needs_full_frame = 1;
24532
24533 if (count == 0)
24534 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24535 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24536 frame = stack_pointer_rtx;
24537 rtx prev_frame_addr = memory_address (Pmode, frame);
24538 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24539 rtx lr_save_off = plus_constant (Pmode,
24540 prev_frame, RETURN_ADDRESS_OFFSET);
24541 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24542 return gen_rtx_MEM (Pmode, lr_save_addr);
24543 }
24544
24545 cfun->machine->ra_need_lr = 1;
24546 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24547 }
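
/* Example use (sketch): the builtin in

     void *f (void) { return __builtin_return_address (0); }

   reaches this function via RETURN_ADDR_RTX.  With count == 0 and no
   PIC complication it reduces to the LR value captured on entry;
   with count > 0 (or PIC on V.4/Darwin) it walks the back chain and
   loads the slot at RETURN_ADDRESS_OFFSET in the caller's frame.  */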
24548
24549 /* Say whether a function is a candidate for sibcall handling or not. */
24550
24551 static bool
24552 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24553 {
24554 tree fntype;
24555
24556 /* The sibcall epilogue may clobber the static chain register.
24557 ??? We could work harder and avoid that, but it's probably
24558 not worth the hassle in practice. */
24559 if (CALL_EXPR_STATIC_CHAIN (exp))
24560 return false;
24561
24562 if (decl)
24563 fntype = TREE_TYPE (decl);
24564 else
24565 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24566
24567 /* We can't do it if the called function has more vector parameters
24568 than the current function; there's nowhere to put the VRsave code. */
24569 if (TARGET_ALTIVEC_ABI
24570 && TARGET_ALTIVEC_VRSAVE
24571 && !(decl && decl == current_function_decl))
24572 {
24573 function_args_iterator args_iter;
24574 tree type;
24575 int nvreg = 0;
24576
24577 /* Functions with vector parameters are required to have a
24578 prototype, so the argument type info must be available
24579 here. */
24580 FOREACH_FUNCTION_ARGS (fntype, type, args_iter)
24581 if (TREE_CODE (type) == VECTOR_TYPE
24582 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24583 nvreg++;
24584
24585 FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
24586 if (TREE_CODE (type) == VECTOR_TYPE
24587 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24588 nvreg--;
24589
24590 if (nvreg > 0)
24591 return false;
24592 }
24593
24594 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24595 functions, because the callee may have a different TOC pointer to
24596 the caller and there's no way to ensure we restore the TOC when
24597 we return. With the secure-plt SYSV ABI we can't make non-local
24598 calls when -fpic/PIC because the plt call stubs use r30. */
24599 if (DEFAULT_ABI == ABI_DARWIN
24600 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24601 && decl
24602 && !DECL_EXTERNAL (decl)
24603 && !DECL_WEAK (decl)
24604 && (*targetm.binds_local_p) (decl))
24605 || (DEFAULT_ABI == ABI_V4
24606 && (!TARGET_SECURE_PLT
24607 || !flag_pic
24608 || (decl
24609 && (*targetm.binds_local_p) (decl)))))
24610 {
24611 tree attr_list = TYPE_ATTRIBUTES (fntype);
24612
24613 if (!lookup_attribute ("longcall", attr_list)
24614 || lookup_attribute ("shortcall", attr_list))
24615 return true;
24616 }
24617
24618 return false;
24619 }
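
/* Illustration: with -O2 -foptimize-sibling-calls on ELFv2,

     static int callee (int);
     int caller (int x) { return callee (x + 1); }

   satisfies the tests above (locally bound, no static chain, no
   extra vector arguments, no longcall attribute) and can become a
   plain branch, whereas a call to an undefined external function is
   rejected because the callee may need a different TOC pointer.  */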
24620
24621 static int
24622 rs6000_ra_ever_killed (void)
24623 {
24624 rtx_insn *top;
24625 rtx reg;
24626 rtx_insn *insn;
24627
24628 if (cfun->is_thunk)
24629 return 0;
24630
24631 if (cfun->machine->lr_save_state)
24632 return cfun->machine->lr_save_state - 1;
24633
24634 /* regs_ever_live has LR marked as used if any sibcalls are present,
24635 but this should not force saving and restoring in the
24636 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
24637 clobbers LR, so that is inappropriate. */
24638
24639 /* Also, the prologue can generate a store into LR that
24640 doesn't really count, like this:
24641
24642 move LR->R0
24643 bcl to set PIC register
24644 move LR->R31
24645 move R0->LR
24646
24647 When we're called from the epilogue, we need to avoid counting
24648 this as a store. */
24649
24650 push_topmost_sequence ();
24651 top = get_insns ();
24652 pop_topmost_sequence ();
24653 reg = gen_rtx_REG (Pmode, LR_REGNO);
24654
24655 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
24656 {
24657 if (INSN_P (insn))
24658 {
24659 if (CALL_P (insn))
24660 {
24661 if (!SIBLING_CALL_P (insn))
24662 return 1;
24663 }
24664 else if (find_regno_note (insn, REG_INC, LR_REGNO))
24665 return 1;
24666 else if (set_of (reg, insn) != NULL_RTX
24667 && !prologue_epilogue_contains (insn))
24668 return 1;
24669 }
24670 }
24671 return 0;
24672 }
24673 \f
24674 /* Emit instructions needed to load the TOC register.
24675 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
24676 and there is a constant pool, or for SVR4 -fpic. */
24677
24678 void
24679 rs6000_emit_load_toc_table (int fromprolog)
24680 {
24681 rtx dest;
24682 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
24683
24684 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
24685 {
24686 char buf[30];
24687 rtx lab, tmp1, tmp2, got;
24688
24689 lab = gen_label_rtx ();
24690 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
24691 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24692 if (flag_pic == 2)
24693 {
24694 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24695 need_toc_init = 1;
24696 }
24697 else
24698 got = rs6000_got_sym ();
24699 tmp1 = tmp2 = dest;
24700 if (!fromprolog)
24701 {
24702 tmp1 = gen_reg_rtx (Pmode);
24703 tmp2 = gen_reg_rtx (Pmode);
24704 }
24705 emit_insn (gen_load_toc_v4_PIC_1 (lab));
24706 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
24707 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
24708 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
24709 }
24710 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
24711 {
24712 emit_insn (gen_load_toc_v4_pic_si ());
24713 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24714 }
24715 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
24716 {
24717 char buf[30];
24718 rtx temp0 = (fromprolog
24719 ? gen_rtx_REG (Pmode, 0)
24720 : gen_reg_rtx (Pmode));
24721
24722 if (fromprolog)
24723 {
24724 rtx symF, symL;
24725
24726 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
24727 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24728
24729 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
24730 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24731
24732 emit_insn (gen_load_toc_v4_PIC_1 (symF));
24733 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24734 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
24735 }
24736 else
24737 {
24738 rtx tocsym, lab;
24739
24740 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24741 need_toc_init = 1;
24742 lab = gen_label_rtx ();
24743 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
24744 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24745 if (TARGET_LINK_STACK)
24746 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
24747 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
24748 }
24749 emit_insn (gen_addsi3 (dest, temp0, dest));
24750 }
24751 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
24752 {
24753 /* This is for AIX code running in non-PIC ELF32. */
24754 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24755
24756 need_toc_init = 1;
24757 emit_insn (gen_elf_high (dest, realsym));
24758 emit_insn (gen_elf_low (dest, dest, realsym));
24759 }
24760 else
24761 {
24762 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24763
24764 if (TARGET_32BIT)
24765 emit_insn (gen_load_toc_aix_si (dest));
24766 else
24767 emit_insn (gen_load_toc_aix_di (dest));
24768 }
24769 }
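
/* Assembly sketch of the simplest path above, ABI_V4 with
   flag_pic == 1 (-fpic):

	bl _GLOBAL_OFFSET_TABLE_@local-4	# load_toc_v4_pic_si
	mflr 30					# dest = LR

   The -fPIC and secure-plt variants instead build the GOT address
   from label differences; see the load_toc_v4_PIC_* patterns in
   rs6000.md.  */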
24770
24771 /* Emit instructions to restore the link register after determining where
24772 its value has been stored. */
24773
24774 void
24775 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
24776 {
24777 rs6000_stack_t *info = rs6000_stack_info ();
24778 rtx operands[2];
24779
24780 operands[0] = source;
24781 operands[1] = scratch;
24782
24783 if (info->lr_save_p)
24784 {
24785 rtx frame_rtx = stack_pointer_rtx;
24786 HOST_WIDE_INT sp_offset = 0;
24787 rtx tmp;
24788
24789 if (frame_pointer_needed
24790 || cfun->calls_alloca
24791 || info->total_size > 32767)
24792 {
24793 tmp = gen_frame_mem (Pmode, frame_rtx);
24794 emit_move_insn (operands[1], tmp);
24795 frame_rtx = operands[1];
24796 }
24797 else if (info->push_p)
24798 sp_offset = info->total_size;
24799
24800 tmp = plus_constant (Pmode, frame_rtx,
24801 info->lr_save_offset + sp_offset);
24802 tmp = gen_frame_mem (Pmode, tmp);
24803 emit_move_insn (tmp, operands[0]);
24804 }
24805 else
24806 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
24807
24808 /* Freeze lr_save_p. We've just emitted rtl that depends on the
24809 state of lr_save_p so any change from here on would be a bug. In
24810 particular, stop rs6000_ra_ever_killed from considering the SET
24811 of lr we may have added just above. */
24812 cfun->machine->lr_save_state = info->lr_save_p + 1;
24813 }
24814
24815 static GTY(()) alias_set_type set = -1;
24816
24817 alias_set_type
24818 get_TOC_alias_set (void)
24819 {
24820 if (set == -1)
24821 set = new_alias_set ();
24822 return set;
24823 }
24824
24825 /* This returns nonzero if the current function uses the TOC. This is
24826 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
24827 is generated by the ABI_V4 load_toc_* patterns.
24828 Return 2 instead of 1 if the load_toc_* pattern is in the function
24829 partition that doesn't start the function. */
24830 #if TARGET_ELF
24831 static int
24832 uses_TOC (void)
24833 {
24834 rtx_insn *insn;
24835 int ret = 1;
24836
24837 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
24838 {
24839 if (INSN_P (insn))
24840 {
24841 rtx pat = PATTERN (insn);
24842 int i;
24843
24844 if (GET_CODE (pat) == PARALLEL)
24845 for (i = 0; i < XVECLEN (pat, 0); i++)
24846 {
24847 rtx sub = XVECEXP (pat, 0, i);
24848 if (GET_CODE (sub) == USE)
24849 {
24850 sub = XEXP (sub, 0);
24851 if (GET_CODE (sub) == UNSPEC
24852 && XINT (sub, 1) == UNSPEC_TOC)
24853 return ret;
24854 }
24855 }
24856 }
24857 else if (crtl->has_bb_partition
24858 && NOTE_P (insn)
24859 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
24860 ret = 2;
24861 }
24862 return 0;
24863 }
24864 #endif
24865
24866 rtx
24867 create_TOC_reference (rtx symbol, rtx largetoc_reg)
24868 {
24869 rtx tocrel, tocreg, hi;
24870
24871 if (TARGET_DEBUG_ADDR)
24872 {
24873 if (GET_CODE (symbol) == SYMBOL_REF)
24874 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
24875 XSTR (symbol, 0));
24876 else
24877 {
24878 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
24879 GET_RTX_NAME (GET_CODE (symbol)));
24880 debug_rtx (symbol);
24881 }
24882 }
24883
24884 if (!can_create_pseudo_p ())
24885 df_set_regs_ever_live (TOC_REGISTER, true);
24886
24887 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
24888 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
24889 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
24890 return tocrel;
24891
24892 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
24893 if (largetoc_reg != NULL)
24894 {
24895 emit_move_insn (largetoc_reg, hi);
24896 hi = largetoc_reg;
24897 }
24898 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
24899 }
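
/* Sketch: with -mcmodel=small the UNSPEC_TOCREL returned above
   folds into a single TOC-relative access such as

	ld 9,var@toc(2)

   while the HIGH/LO_SUM pair built for the larger code models
   becomes

	addis 9,2,var@toc@ha
	ld 9,var@toc@l(9)

   Register 9 and the name "var" are arbitrary examples.  */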
24900
24901 /* Issue assembly directives that create a reference to the given DWARF
24902 FRAME_TABLE_LABEL from the current function section. */
24903 void
24904 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
24905 {
24906 fprintf (asm_out_file, "\t.ref %s\n",
24907 (* targetm.strip_name_encoding) (frame_table_label));
24908 }
24909 \f
24910 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
24911 and the change to the stack pointer. */
24912
24913 static void
24914 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
24915 {
24916 rtvec p;
24917 int i;
24918 rtx regs[3];
24919
24920 i = 0;
24921 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
24922 if (hard_frame_needed)
24923 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
24924 if (!(REGNO (fp) == STACK_POINTER_REGNUM
24925 || (hard_frame_needed
24926 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
24927 regs[i++] = fp;
24928
24929 p = rtvec_alloc (i);
24930 while (--i >= 0)
24931 {
24932 rtx mem = gen_frame_mem (BLKmode, regs[i]);
24933 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
24934 }
24935
24936 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
24937 }
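
/* Illustrative rationale (operands invented for the example):
   without the tie, nothing stops the scheduler from reordering a
   restore such as "ld 31,-8(1)" with the "addi 1,1,144" that
   releases the frame, reading memory below the stack pointer that
   an asynchronous signal could clobber.  The dummy BLKmode stores
   make every frame access appear to conflict with the stack
   pointer update.  */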
24938
24939 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
24940 and set the appropriate attributes for the generated insn. Return the
24941 first insn which adjusts the stack pointer or the last insn before
24942 the stack adjustment loop.
24943
24944 SIZE_INT is used to create the CFI note for the allocation.
24945
24946 SIZE_RTX is an rtx containing the size of the adjustment. Note that
24947 since stacks grow to lower addresses its runtime value is -SIZE_INT.
24948
24949 ORIG_SP contains the backchain value that must be stored at *sp. */
24950
24951 static rtx_insn *
24952 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
24953 {
24954 rtx_insn *insn;
24955
24956 rtx size_rtx = GEN_INT (-size_int);
24957 if (size_int > 32767)
24958 {
24959 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
24960 /* Need a note here so that try_split doesn't get confused. */
24961 if (get_last_insn () == NULL_RTX)
24962 emit_note (NOTE_INSN_DELETED);
24963 insn = emit_move_insn (tmp_reg, size_rtx);
24964 try_split (PATTERN (insn), insn, 0);
24965 size_rtx = tmp_reg;
24966 }
24967
24968 if (Pmode == SImode)
24969 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
24970 stack_pointer_rtx,
24971 size_rtx,
24972 orig_sp));
24973 else
24974 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
24975 stack_pointer_rtx,
24976 size_rtx,
24977 orig_sp));
24978 rtx par = PATTERN (insn);
24979 gcc_assert (GET_CODE (par) == PARALLEL);
24980 rtx set = XVECEXP (par, 0, 0);
24981 gcc_assert (GET_CODE (set) == SET);
24982 rtx mem = SET_DEST (set);
24983 gcc_assert (MEM_P (mem));
24984 MEM_NOTRAP_P (mem) = 1;
24985 set_mem_alias_set (mem, get_frame_alias_set ());
24986
24987 RTX_FRAME_RELATED_P (insn) = 1;
24988 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
24989 gen_rtx_SET (stack_pointer_rtx,
24990 gen_rtx_PLUS (Pmode,
24991 stack_pointer_rtx,
24992 GEN_INT (-size_int))));
24993
24994 /* Emit a blockage to ensure the allocation/probing insns are
24995 not optimized, combined, removed, etc. Add REG_STACK_CHECK
24996 note for similar reasons. */
24997 if (flag_stack_clash_protection)
24998 {
24999 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25000 emit_insn (gen_blockage ());
25001 }
25002
25003 return insn;
25004 }
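
/* Sketch of the 64-bit code the function above emits, in the two
   size ranges (sizes and registers are examples):

	stdu 1,-144(1)			size_int <= 32767

	lis 0,-2			size_int = 100000: load
	ori 0,0,31072			-100000 into r0, then
	stdux 1,1,0			allocate and store backchain

   Either way the store places ORIG_SP (the backchain) at the new
   *sp, as the ABI requires.  */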
25005
25006 static HOST_WIDE_INT
25007 get_stack_clash_protection_probe_interval (void)
25008 {
25009 return (HOST_WIDE_INT_1U
25010 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25011 }
25012
25013 static HOST_WIDE_INT
25014 get_stack_clash_protection_guard_size (void)
25015 {
25016 return (HOST_WIDE_INT_1U
25017 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25018 }
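
/* These --param values are log2 exponents: a probe-interval param
   of 12, for example, yields a 4 KiB probe interval (1 << 12), and
   a guard-size param of 16 a 64 KiB guard.  The numbers here are
   examples, not the configured defaults.  */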
25019
25020 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25021 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25022
25023 COPY_REG, if non-null, should contain a copy of the original
25024 stack pointer at exit from this function.
25025
25026 This is subtly different than the Ada probing in that it tries hard to
25027 prevent attacks that jump the stack guard. Thus it is never allowed to
25028 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25029 space without a suitable probe. */
25030 static rtx_insn *
25031 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25032 rtx copy_reg)
25033 {
25034 rtx orig_sp = copy_reg;
25035
25036 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25037
25038 /* Round the size down to a multiple of PROBE_INTERVAL. */
25039 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25040
25041 /* If a copy was explicitly requested,
25042 or the rounded size is not the same as the original size,
25043 or the rounded size is greater than a page,
25044 then we will need a copy of the original stack pointer. */
25045 if (rounded_size != orig_size
25046 || rounded_size > probe_interval
25047 || copy_reg)
25048 {
25049 /* If the caller did not request a copy of the incoming stack
25050 pointer, then we use r0 to hold the copy. */
25051 if (!copy_reg)
25052 orig_sp = gen_rtx_REG (Pmode, 0);
25053 emit_move_insn (orig_sp, stack_pointer_rtx);
25054 }
25055
25056 /* There are three cases here.
25057
25058 One is a single probe which is the most common and most efficiently
25059 implemented as it does not have to have a copy of the original
25060 stack pointer if there are no residuals.
25061
25062 Second is unrolled allocation/probes which we use if there's just
25063 a few of them. It needs to save the original stack pointer into a
25064 temporary for use as a source register in the allocation/probe.
25065
25066 Last is a loop. This is the most uncommon case and least efficient. */
25067 rtx_insn *retval = NULL;
25068 if (rounded_size == probe_interval)
25069 {
25070 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25071
25072 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25073 }
25074 else if (rounded_size <= 8 * probe_interval)
25075 {
25076 /* The ABI requires using the store with update insns to allocate
25077 space and store the backchain into the stack.
25078
25079 So we save the current stack pointer into a temporary, then
25080 emit the store-with-update insns to store the saved stack pointer
25081 into the right location in each new page. */
25082 for (int i = 0; i < rounded_size; i += probe_interval)
25083 {
25084 rtx_insn *insn
25085 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25086
25087 /* Save the first stack adjustment in RETVAL. */
25088 if (i == 0)
25089 retval = insn;
25090 }
25091
25092 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25093 }
25094 else
25095 {
25096 /* Compute the ending address. */
25097 rtx end_addr
25098 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25099 rtx rs = GEN_INT (-rounded_size);
25100 rtx_insn *insn;
25101 if (add_operand (rs, Pmode))
25102 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25103 else
25104 {
25105 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25106 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25107 stack_pointer_rtx));
25108 /* Describe the effect of INSN to the CFI engine. */
25109 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25110 gen_rtx_SET (end_addr,
25111 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25112 rs)));
25113 }
25114 RTX_FRAME_RELATED_P (insn) = 1;
25115
25116 /* Emit the loop. */
25117 if (TARGET_64BIT)
25118 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25119 stack_pointer_rtx, orig_sp,
25120 end_addr));
25121 else
25122 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25123 stack_pointer_rtx, orig_sp,
25124 end_addr));
25125 RTX_FRAME_RELATED_P (retval) = 1;
25126 /* Describe the effect of INSN to the CFI engine. */
25127 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25128 gen_rtx_SET (stack_pointer_rtx, end_addr));
25129
25130 /* Emit a blockage to ensure the allocation/probing insns are
25131 not optimized, combined, removed, etc. Other cases handle this
25132 within their call to rs6000_emit_allocate_stack_1. */
25133 emit_insn (gen_blockage ());
25134
25135 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25136 }
25137
25138 if (orig_size != rounded_size)
25139 {
25140 /* Allocate (and implicitly probe) any residual space. */
25141 HOST_WIDE_INT residual = orig_size - rounded_size;
25142
25143 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25144
25145 /* If the residual was the only allocation, then we can return the
25146 allocating insn. */
25147 if (!retval)
25148 retval = insn;
25149 }
25150
25151 return retval;
25152 }
25153
25154 /* Emit the correct code for allocating stack space, as insns.
25155 If COPY_REG, make sure a copy of the old frame is left there.
25156 The generated code may use hard register 0 as a temporary. */
25157
25158 static rtx_insn *
25159 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25160 {
25161 rtx_insn *insn;
25162 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25163 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25164 rtx todec = gen_int_mode (-size, Pmode);
25165
25166 if (INTVAL (todec) != -size)
25167 {
25168 warning (0, "stack frame too large");
25169 emit_insn (gen_trap ());
25170 return 0;
25171 }
25172
25173 if (crtl->limit_stack)
25174 {
25175 if (REG_P (stack_limit_rtx)
25176 && REGNO (stack_limit_rtx) > 1
25177 && REGNO (stack_limit_rtx) <= 31)
25178 {
25179 rtx_insn *insn
25180 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25181 gcc_assert (insn);
25182 emit_insn (insn);
25183 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25184 }
25185 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25186 && TARGET_32BIT
25187 && DEFAULT_ABI == ABI_V4
25188 && !flag_pic)
25189 {
25190 rtx toload = gen_rtx_CONST (VOIDmode,
25191 gen_rtx_PLUS (Pmode,
25192 stack_limit_rtx,
25193 GEN_INT (size)));
25194
25195 emit_insn (gen_elf_high (tmp_reg, toload));
25196 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25197 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25198 const0_rtx));
25199 }
25200 else
25201 warning (0, "stack limit expression is not supported");
25202 }
25203
25204 if (flag_stack_clash_protection)
25205 {
25206 if (size < get_stack_clash_protection_guard_size ())
25207 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25208 else
25209 {
25210 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25211 copy_reg);
25212
25213 /* If we asked for a copy with an offset, then we still need to add in
25214 the offset. */
25215 if (copy_reg && copy_off)
25216 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25217 return insn;
25218 }
25219 }
25220
25221 if (copy_reg)
25222 {
25223 if (copy_off != 0)
25224 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25225 else
25226 emit_move_insn (copy_reg, stack_reg);
25227 }
25228
25229 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25230 it now and set the alias set/attributes. The above gen_*_update
25231 calls will generate a PARALLEL with the MEM set being the first
25232 operation. */
25233 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25234 return insn;
25235 }
25236
25237 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25238
25239 #if PROBE_INTERVAL > 32768
25240 #error Cannot use indexed addressing mode for stack probing
25241 #endif
25242
25243 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25244 inclusive. These are offsets from the current stack pointer. */
25245
25246 static void
25247 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25248 {
25249 /* See if we have a constant small number of probes to generate. If so,
25250 that's the easy case. */
25251 if (first + size <= 32768)
25252 {
25253 HOST_WIDE_INT i;
25254
25255 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25256 it exceeds SIZE. If only one probe is needed, this will not
25257 generate any code. Then probe at FIRST + SIZE. */
25258 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25259 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25260 -(first + i)));
25261
25262 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25263 -(first + size)));
25264 }
25265
25266 /* Otherwise, do the same as above, but in a loop. Note that we must be
25267 extra careful with variables wrapping around because we might be at
25268 the very top (or the very bottom) of the address space and we have
25269 to be able to handle this case properly; in particular, we use an
25270 equality test for the loop condition. */
25271 else
25272 {
25273 HOST_WIDE_INT rounded_size;
25274 rtx r12 = gen_rtx_REG (Pmode, 12);
25275 rtx r0 = gen_rtx_REG (Pmode, 0);
25276
25277 /* Sanity check for the addressing mode we're going to use. */
25278 gcc_assert (first <= 32768);
25279
25280 /* Step 1: round SIZE to the previous multiple of the interval. */
25281
25282 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25283
25284
25285 /* Step 2: compute initial and final value of the loop counter. */
25286
25287 /* TEST_ADDR = SP + FIRST. */
25288 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25289 -first)));
25290
25291 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25292 if (rounded_size > 32768)
25293 {
25294 emit_move_insn (r0, GEN_INT (-rounded_size));
25295 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25296 }
25297 else
25298 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25299 -rounded_size)));
25300
25301
25302 /* Step 3: the loop
25303
25304 do
25305 {
25306 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25307 probe at TEST_ADDR
25308 }
25309 while (TEST_ADDR != LAST_ADDR)
25310
25311 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25312 until it is equal to ROUNDED_SIZE. */
25313
25314 if (TARGET_64BIT)
25315 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25316 else
25317 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25318
25319
25320 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25321 that SIZE is equal to ROUNDED_SIZE. */
25322
25323 if (size != rounded_size)
25324 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25325 }
25326 }
25327
25328 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25329 addresses, not offsets. */
25330
25331 static const char *
25332 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25333 {
25334 static int labelno = 0;
25335 char loop_lab[32];
25336 rtx xops[2];
25337
25338 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25339
25340 /* Loop. */
25341 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25342
25343 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25344 xops[0] = reg1;
25345 xops[1] = GEN_INT (-PROBE_INTERVAL);
25346 output_asm_insn ("addi %0,%0,%1", xops);
25347
25348 /* Probe at TEST_ADDR. */
25349 xops[1] = gen_rtx_REG (Pmode, 0);
25350 output_asm_insn ("stw %1,0(%0)", xops);
25351
25352 /* Test if TEST_ADDR == LAST_ADDR. */
25353 xops[1] = reg2;
25354 if (TARGET_64BIT)
25355 output_asm_insn ("cmpd 0,%0,%1", xops);
25356 else
25357 output_asm_insn ("cmpw 0,%0,%1", xops);
25358
25359 /* Branch. */
25360 fputs ("\tbne 0,", asm_out_file);
25361 assemble_name_raw (asm_out_file, loop_lab);
25362 fputc ('\n', asm_out_file);
25363
25364 return "";
25365 }
25366
25367 /* This function is called when rs6000_frame_related is processing
25368 SETs within a PARALLEL, and returns whether the REGNO save ought to
25369 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25370 for out-of-line register save functions, store multiple, and the
25371 Darwin world_save. They may contain registers that don't really
25372 need saving. */
25373
25374 static bool
25375 interesting_frame_related_regno (unsigned int regno)
25376 {
25377 /* Apparent saves of r0 are actually saves of LR. It doesn't make
25378 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25379 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25380 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25381 as frame related. */
25382 if (regno == 0)
25383 return true;
25384 /* If we see CR2 then we are here on a Darwin world save. Saves of
25385 CR2 signify the whole CR is being saved. This is a long-standing
25386 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25387 that CR needs to be saved. */
25388 if (regno == CR2_REGNO)
25389 return true;
25390 /* Omit frame info for any user-defined global regs. If frame info
25391 is supplied for them, frame unwinding will restore a user reg.
25392 Also omit frame info for any reg we don't need to save, as that
25393 bloats frame info and can cause problems with shrink wrapping.
25394 Since global regs won't be seen as needing to be saved, both of
25395 these conditions are covered by save_reg_p. */
25396 return save_reg_p (regno);
25397 }
25398
25399 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25400 addresses, not offsets.
25401
25402 REG2 contains the backchain that must be stored into *sp at each allocation.
25403
25404 This is subtly different than the Ada probing above in that it tries hard
25405 to prevent attacks that jump the stack guard. Thus, it is never allowed
25406 to allocate more than PROBE_INTERVAL bytes of stack space without a
25407 suitable probe. */
25408
25409 static const char *
25410 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25411 {
25412 static int labelno = 0;
25413 char loop_lab[32];
25414 rtx xops[3];
25415
25416 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25417
25418 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25419
25420 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25421
25422 /* This allocates and probes. */
25423 xops[0] = reg1;
25424 xops[1] = reg2;
25425 xops[2] = GEN_INT (-probe_interval);
25426 if (TARGET_64BIT)
25427 output_asm_insn ("stdu %1,%2(%0)", xops);
25428 else
25429 output_asm_insn ("stwu %1,%2(%0)", xops);
25430
25431 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25432 xops[0] = reg1;
25433 xops[1] = reg3;
25434 if (TARGET_64BIT)
25435 output_asm_insn ("cmpd 0,%0,%1", xops);
25436 else
25437 output_asm_insn ("cmpw 0,%0,%1", xops);
25438
25439 fputs ("\tbne 0,", asm_out_file);
25440 assemble_name_raw (asm_out_file, loop_lab);
25441 fputc ('\n', asm_out_file);
25442
25443 return "";
25444 }
25445
25446 /* Wrapper around the output_probe_stack_range routines. */
25447 const char *
25448 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25449 {
25450 if (flag_stack_clash_protection)
25451 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25452 else
25453 return output_probe_stack_range_1 (reg1, reg3);
25454 }
25455
25456 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25457 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25458 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25459 deduce these equivalences by itself so it wasn't necessary to hold
25460 its hand so much. Don't be tempted to always supply d2_f_d_e with
25461 the actual cfa register, i.e. r31 when we are using a hard frame
25462 pointer. That fails when saving regs off r1, and sched moves the
25463 r31 setup past the reg saves. */
25464
25465 static rtx_insn *
25466 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25467 rtx reg2, rtx repl2)
25468 {
25469 rtx repl;
25470
25471 if (REGNO (reg) == STACK_POINTER_REGNUM)
25472 {
25473 gcc_checking_assert (val == 0);
25474 repl = NULL_RTX;
25475 }
25476 else
25477 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25478 GEN_INT (val));
25479
25480 rtx pat = PATTERN (insn);
25481 if (!repl && !reg2)
25482 {
25483 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25484 if (GET_CODE (pat) == PARALLEL)
25485 for (int i = 0; i < XVECLEN (pat, 0); i++)
25486 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25487 {
25488 rtx set = XVECEXP (pat, 0, i);
25489
25490 if (!REG_P (SET_SRC (set))
25491 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25492 RTX_FRAME_RELATED_P (set) = 1;
25493 }
25494 RTX_FRAME_RELATED_P (insn) = 1;
25495 return insn;
25496 }
25497
25498 /* We expect that 'pat' is either a SET or a PARALLEL containing
25499 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25500 are important so they all have to be marked RTX_FRAME_RELATED_P.
25501 Call simplify_replace_rtx on the SETs rather than the whole insn
25502 so as to leave the other stuff alone (for example USE of r12). */
25503
25504 set_used_flags (pat);
25505 if (GET_CODE (pat) == SET)
25506 {
25507 if (repl)
25508 pat = simplify_replace_rtx (pat, reg, repl);
25509 if (reg2)
25510 pat = simplify_replace_rtx (pat, reg2, repl2);
25511 }
25512 else if (GET_CODE (pat) == PARALLEL)
25513 {
25514 pat = shallow_copy_rtx (pat);
25515 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25516
25517 for (int i = 0; i < XVECLEN (pat, 0); i++)
25518 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25519 {
25520 rtx set = XVECEXP (pat, 0, i);
25521
25522 if (repl)
25523 set = simplify_replace_rtx (set, reg, repl);
25524 if (reg2)
25525 set = simplify_replace_rtx (set, reg2, repl2);
25526 XVECEXP (pat, 0, i) = set;
25527
25528 if (!REG_P (SET_SRC (set))
25529 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25530 RTX_FRAME_RELATED_P (set) = 1;
25531 }
25532 }
25533 else
25534 gcc_unreachable ();
25535
25536 RTX_FRAME_RELATED_P (insn) = 1;
25537 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25538
25539 return insn;
25540 }
25541
25542 /* Return a PARALLEL rtx describing a VRSAVE set operation, with the
25543 appropriate CLOBBERs. */
25544
25545 static rtx
25546 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25547 {
25548 int nclobs, i;
25549 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25550 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25551
25552 clobs[0]
25553 = gen_rtx_SET (vrsave,
25554 gen_rtx_UNSPEC_VOLATILE (SImode,
25555 gen_rtvec (2, reg, vrsave),
25556 UNSPECV_SET_VRSAVE));
25557
25558 nclobs = 1;
25559
25560 /* We need to clobber the registers in the mask so the scheduler
25561 does not move sets to VRSAVE before sets of AltiVec registers.
25562
25563 However, if the function receives nonlocal gotos, reload will set
25564 all call-saved registers live. We will end up with:
25565
25566 (set (reg 999) (mem))
25567 (parallel [ (set (reg vrsave) (unspec blah))
25568 (clobber (reg 999))])
25569
25570 The clobber will cause the store into reg 999 to be dead, and
25571 flow will attempt to delete an epilogue insn. In this case, we
25572 need an unspec use/set of the register. */
25573
25574 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25575 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25576 {
25577 if (!epiloguep || call_used_regs [i])
25578 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25579 gen_rtx_REG (V4SImode, i));
25580 else
25581 {
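/* Use a set of the register to an unspec of itself rather than a
bare clobber, so that the register is not treated as dead (see the
nonlocal goto case described above). */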
25582 rtx reg = gen_rtx_REG (V4SImode, i);
25583
25584 clobs[nclobs++]
25585 = gen_rtx_SET (reg,
25586 gen_rtx_UNSPEC (V4SImode,
25587 gen_rtvec (1, reg), 27));
25588 }
25589 }
25590
25591 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25592
25593 for (i = 0; i < nclobs; ++i)
25594 XVECEXP (insn, 0, i) = clobs[i];
25595
25596 return insn;
25597 }
25598
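/* Generate a SET that stores REG at FRAME_REG + OFFSET when STORE is
true, or loads it from there when STORE is false, using a frame-related
MEM. gen_frame_load and gen_frame_store below are convenience wrappers. */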
25599 static rtx
25600 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25601 {
25602 rtx addr, mem;
25603
25604 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25605 mem = gen_frame_mem (GET_MODE (reg), addr);
25606 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25607 }
25608
25609 static rtx
25610 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25611 {
25612 return gen_frame_set (reg, frame_reg, offset, false);
25613 }
25614
25615 static rtx
25616 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25617 {
25618 return gen_frame_set (reg, frame_reg, offset, true);
25619 }
25620
25621 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes. Save
25622 REGNO into [FRAME_REG + OFFSET] in mode MODE; FRAME_REG_TO_SP is the offset from the stack pointer to FRAME_REG. */
25623
25624 static rtx_insn *
25625 emit_frame_save (rtx frame_reg, machine_mode mode,
25626 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25627 {
25628 rtx reg;
25629
25630 /* AltiVec modes need register indexed addressing, so they are only valid here when VSX offset-form stores are available. */
25631 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25632 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25633
25634 reg = gen_rtx_REG (mode, regno);
25635 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25636 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25637 NULL_RTX, NULL_RTX);
25638 }
25639
25640 /* Emit an offset memory reference suitable for a frame store, while
25641 converting to a valid addressing mode. */
25642
25643 static rtx
25644 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25645 {
25646 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25647 }
25648
25649 #ifndef TARGET_FIX_AND_CONTINUE
25650 #define TARGET_FIX_AND_CONTINUE 0
25651 #endif
25652
25653 /* The first saved register is really GPR 13 or 14, FPR 14 and VR 20; we need the smallest. */
25654 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25655 #define LAST_SAVRES_REGISTER 31
25656 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25657
25658 enum {
25659 SAVRES_LR = 0x1,
25660 SAVRES_SAVE = 0x2,
25661 SAVRES_REG = 0x0c,
25662 SAVRES_GPR = 0,
25663 SAVRES_FPR = 4,
25664 SAVRES_VR = 8
25665 };
25666
25667 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25668
25669 /* Temporary holding space for an out-of-line register save/restore
25670 routine name. */
25671 static char savres_routine_name[30];
25672
25673 /* Return the name for an out-of-line register save/restore routine.
25674 REGNO is the first register handled; SEL selects the register class and variant. */
25675
25676 static char *
25677 rs6000_savres_routine_name (int regno, int sel)
25678 {
25679 const char *prefix = "";
25680 const char *suffix = "";
25681
25682 /* Different targets are supposed to define
25683 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25684 routine name could be defined with:
25685
25686 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25687
25688 This is a nice idea in theory, but in reality, things are
25689 complicated in several ways:
25690
25691 - ELF targets have save/restore routines for GPRs.
25692
25693 - PPC64 ELF targets have routines for save/restore of GPRs that
25694 differ in what they do with the link register, so having a set
25695 prefix doesn't work. (We only use one of the save routines at
25696 the moment, though.)
25697
25698 - PPC32 ELF targets have "exit" versions of the restore routines
25699 that restore the link register and can save some extra space.
25700 These require an extra suffix. (There are also "tail" versions
25701 of the restore routines and "GOT" versions of the save routines,
25702 but we don't generate those at present. Same problems apply,
25703 though.)
25704
25705 We deal with all this by synthesizing our own prefix/suffix and
25706 using that for the simple sprintf call shown above. */
25707 if (DEFAULT_ABI == ABI_V4)
25708 {
25709 if (TARGET_64BIT)
25710 goto aix_names;
25711
25712 if ((sel & SAVRES_REG) == SAVRES_GPR)
25713 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25714 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25715 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25716 else if ((sel & SAVRES_REG) == SAVRES_VR)
25717 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25718 else
25719 abort ();
25720
25721 if ((sel & SAVRES_LR))
25722 suffix = "_x";
25723 }
25724 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25725 {
25726 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25727 /* No out-of-line save/restore routines for GPRs on AIX. */
25728 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25729 #endif
25730
25731 aix_names:
25732 if ((sel & SAVRES_REG) == SAVRES_GPR)
25733 prefix = ((sel & SAVRES_SAVE)
25734 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25735 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25736 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25737 {
25738 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25739 if ((sel & SAVRES_LR))
25740 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25741 else
25742 #endif
25743 {
25744 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25745 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25746 }
25747 }
25748 else if ((sel & SAVRES_REG) == SAVRES_VR)
25749 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25750 else
25751 abort ();
25752 }
25753
25754 if (DEFAULT_ABI == ABI_DARWIN)
25755 {
25756 /* The Darwin approach is (slightly) different, in order to be
25757 compatible with code generated by the system toolchain. There is a
25758 single symbol for the start of the save sequence, and the code here
25759 embeds an offset into that code on the basis of the first register
25760 to be saved. */
25761 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25762 if ((sel & SAVRES_REG) == SAVRES_GPR)
25763 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25764 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25765 (regno - 13) * 4, prefix, regno);
25766 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25767 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25768 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25769 else if ((sel & SAVRES_REG) == SAVRES_VR)
25770 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25771 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25772 else
25773 abort ();
25774 }
25775 else
25776 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25777
25778 return savres_routine_name;
25779 }
25780
25781 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
25782 SEL encodes the register class and the save/restore and LR variants. */
25783
25784 static rtx
25785 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25786 {
25787 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25788 ? info->first_gp_reg_save
25789 : (sel & SAVRES_REG) == SAVRES_FPR
25790 ? info->first_fp_reg_save - 32
25791 : (sel & SAVRES_REG) == SAVRES_VR
25792 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25793 : -1);
25794 rtx sym;
25795 int select = sel;
25796
25797 /* Don't generate bogus routine names. */
25798 gcc_assert (FIRST_SAVRES_REGISTER <= regno
25799 && regno <= LAST_SAVRES_REGISTER
25800 && select >= 0 && select <= 12);
25801
25802 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
25803
25804 if (sym == NULL)
25805 {
25806 char *name;
25807
25808 name = rs6000_savres_routine_name (regno, sel);
25809
25810 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
25811 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
25812 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
25813 }
25814
25815 return sym;
25816 }
25817
25818 /* Emit a sequence of insns, including a stack tie if needed, for
25819 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
25820 reset the stack pointer, but move the base of the frame into
25821 reg UPDT_REGNO for use by out-of-line register restore routines. */
25822
25823 static rtx
25824 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
25825 unsigned updt_regno)
25826 {
25827 /* If the frame base already has the desired value, there is nothing to do. */
25828 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
25829 return NULL_RTX;
25830
25831 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
25832
25833 /* This blockage is needed so that sched doesn't decide to move
25834 the sp change before the register restores. */
25835 if (DEFAULT_ABI == ABI_V4)
25836 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
25837 GEN_INT (frame_off)));
25838
25839 /* If we are restoring registers out-of-line, we will be using the
25840 "exit" variants of the restore routines, which will reset the
25841 stack for us. But we do need to point updt_reg into the
25842 right place for those routines. */
25843 if (frame_off != 0)
25844 return emit_insn (gen_add3_insn (updt_reg_rtx,
25845 frame_reg_rtx, GEN_INT (frame_off)));
25846 else
25847 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
25850 }
25851
25852 /* Return the register number used as a pointer by out-of-line
25853 save/restore functions. */
25854
25855 static inline unsigned
25856 ptr_regno_for_savres (int sel)
25857 {
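/* For AIX and ELFv2, the FPR routines and the LR-saving variants use the
stack pointer (r1) directly; the other routines take a pointer in r12.
For V.4 the pointer register is r11, as it is for Darwin except that the
Darwin FPR routines also use r1. */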
25858 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25859 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
25860 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
25861 }
25862
25863 /* Construct a parallel rtx describing the effect of a call to an
25864 out-of-line register save/restore routine, and emit the insn
25865 or jump_insn as appropriate. */
25866
25867 static rtx_insn *
25868 rs6000_emit_savres_rtx (rs6000_stack_t *info,
25869 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
25870 machine_mode reg_mode, int sel)
25871 {
25872 int i;
25873 int offset, start_reg, end_reg, n_regs, use_reg;
25874 int reg_size = GET_MODE_SIZE (reg_mode);
25875 rtx sym;
25876 rtvec p;
25877 rtx par;
25878 rtx_insn *insn;
25879
25880 offset = 0;
25881 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
25882 ? info->first_gp_reg_save
25883 : (sel & SAVRES_REG) == SAVRES_FPR
25884 ? info->first_fp_reg_save
25885 : (sel & SAVRES_REG) == SAVRES_VR
25886 ? info->first_altivec_reg_save
25887 : -1);
25888 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
25889 ? 32
25890 : (sel & SAVRES_REG) == SAVRES_FPR
25891 ? 64
25892 : (sel & SAVRES_REG) == SAVRES_VR
25893 ? LAST_ALTIVEC_REGNO + 1
25894 : -1);
25895 n_regs = end_reg - start_reg;
25896 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
25897 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
25898 + n_regs);
25899
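/* The PARALLEL built below contains: an optional (return) for the
LR-restoring variants; a clobber of LR; a use of the routine's
SYMBOL_REF; a use of the pointer register (for VRs, a clobber of it plus
a use of r0); one SET per register handled; and for the LR-saving
variants a store of r0 into the LR save slot. */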
25900 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25901 RTVEC_ELT (p, offset++) = ret_rtx;
25902
25903 RTVEC_ELT (p, offset++)
25904 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
25905
25906 sym = rs6000_savres_routine_sym (info, sel);
25907 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
25908
25909 use_reg = ptr_regno_for_savres (sel);
25910 if ((sel & SAVRES_REG) == SAVRES_VR)
25911 {
25912 /* Vector regs are saved/restored using [reg+reg] addressing. */
25913 RTVEC_ELT (p, offset++)
25914 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
25915 RTVEC_ELT (p, offset++)
25916 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
25917 }
25918 else
25919 RTVEC_ELT (p, offset++)
25920 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
25921
25922 for (i = 0; i < end_reg - start_reg; i++)
25923 RTVEC_ELT (p, i + offset)
25924 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
25925 frame_reg_rtx, save_area_offset + reg_size * i,
25926 (sel & SAVRES_SAVE) != 0);
25927
25928 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25929 RTVEC_ELT (p, i + offset)
25930 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
25931
25932 par = gen_rtx_PARALLEL (VOIDmode, p);
25933
25934 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25935 {
25936 insn = emit_jump_insn (par);
25937 JUMP_LABEL (insn) = ret_rtx;
25938 }
25939 else
25940 insn = emit_insn (par);
25941 return insn;
25942 }
25943
25944 /* Emit prologue code to store CR fields that need to be saved into REG. This
25945 function should only be called when moving the non-volatile CRs to REG, it
25946 is not a general purpose routine to move the entire set of CRs to REG.
25947 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
25948 volatile CRs. */
25949
25950 static void
25951 rs6000_emit_prologue_move_from_cr (rtx reg)
25952 {
25953 /* Only the ELFv2 ABI allows saving just a subset of the CR fields. */
25954 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
25955 {
25956 int i, cr_reg[8], count = 0;
25957
25958 /* Collect CR fields that must be saved. */
25959 for (i = 0; i < 8; i++)
25960 if (save_reg_p (CR0_REGNO + i))
25961 cr_reg[count++] = i;
25962
25963 /* If it's just a single one, use mfcrf. */
25964 if (count == 1)
25965 {
25966 rtvec p = rtvec_alloc (1);
25967 rtvec r = rtvec_alloc (2);
25968 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
25969 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
25970 RTVEC_ELT (p, 0)
25971 = gen_rtx_SET (reg,
25972 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
25973
25974 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
25975 return;
25976 }
25977
25978 /* ??? It might be better to handle the count == 2 or 3 cases here
25979 as well, using logical operations to combine the values. */
25980 }
25981
25982 emit_insn (gen_prologue_movesi_from_cr (reg));
25983 }
25984
25985 /* Return whether the split-stack arg pointer (r12) is used. */
25986
25987 static bool
25988 split_stack_arg_pointer_used_p (void)
25989 {
25990 /* If the pseudo holding the arg pointer is no longer a pseudo,
25991 then the arg pointer is used. */
25992 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
25993 && (!REG_P (cfun->machine->split_stack_arg_pointer)
25994 || (REGNO (cfun->machine->split_stack_arg_pointer)
25995 < FIRST_PSEUDO_REGISTER)))
25996 return true;
25997
25998 /* Unfortunately we also need to do some code scanning, since
25999 r12 may have been substituted for the pseudo. */
26000 rtx_insn *insn;
26001 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26002 FOR_BB_INSNS (bb, insn)
26003 if (NONDEBUG_INSN_P (insn))
26004 {
26005 /* A call destroys r12. */
26006 if (CALL_P (insn))
26007 return false;
26008
26009 df_ref use;
26010 FOR_EACH_INSN_USE (use, insn)
26011 {
26012 rtx x = DF_REF_REG (use);
26013 if (REG_P (x) && REGNO (x) == 12)
26014 return true;
26015 }
26016 df_ref def;
26017 FOR_EACH_INSN_DEF (def, insn)
26018 {
26019 rtx x = DF_REF_REG (def);
26020 if (REG_P (x) && REGNO (x) == 12)
26021 return false;
26022 }
26023 }
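/* r12 is neither used nor set in the body of the first block, so it
holds the arg pointer here iff it is live on exit from the block. */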
26024 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26025 }
26026
26027 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26028
26029 static bool
26030 rs6000_global_entry_point_needed_p (void)
26031 {
26032 /* Only needed for the ELFv2 ABI. */
26033 if (DEFAULT_ABI != ABI_ELFv2)
26034 return false;
26035
26036 /* With -msingle-pic-base, we assume the whole program shares the same
26037 TOC, so no global entry point prologues are needed anywhere. */
26038 if (TARGET_SINGLE_PIC_BASE)
26039 return false;
26040
26041 /* Ensure we have a global entry point for thunks. ??? We could
26042 avoid that if the target routine doesn't need a global entry point,
26043 but we do not know whether this is the case at this point. */
26044 if (cfun->is_thunk)
26045 return true;
26046
26047 /* For regular functions, rs6000_emit_prologue sets this flag if the
26048 routine ever uses the TOC pointer. */
26049 return cfun->machine->r2_setup_needed;
26050 }
26051
26052 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26053 static sbitmap
26054 rs6000_get_separate_components (void)
26055 {
26056 rs6000_stack_t *info = rs6000_stack_info ();
26057
26058 if (WORLD_SAVE_P (info))
26059 return NULL;
26060
26061 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26062 && !(info->savres_strategy & REST_MULTIPLE));
26063
26064 /* Component 0 is the save/restore of LR (done via GPR0).
26065 Component 2 is the save of the TOC (GPR2).
26066 Components 13..31 are the save/restore of GPR13..GPR31.
26067 Components 46..63 are the save/restore of FPR14..FPR31. */
26068
26069 cfun->machine->n_components = 64;
26070
26071 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26072 bitmap_clear (components);
26073
26074 int reg_size = TARGET_32BIT ? 4 : 8;
26075 int fp_reg_size = 8;
26076
26077 /* The GPRs we need saved to the frame. */
26078 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26079 && (info->savres_strategy & REST_INLINE_GPRS))
26080 {
26081 int offset = info->gp_save_offset;
26082 if (info->push_p)
26083 offset += info->total_size;
26084
26085 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26086 {
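/* A separately wrapped save/restore uses a single d-form store or
load, so the offset must fit in a signed 16-bit displacement. */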
26087 if (IN_RANGE (offset, -0x8000, 0x7fff)
26088 && save_reg_p (regno))
26089 bitmap_set_bit (components, regno);
26090
26091 offset += reg_size;
26092 }
26093 }
26094
26095 /* Don't mess with the hard frame pointer. */
26096 if (frame_pointer_needed)
26097 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26098
26099 /* Don't mess with the fixed TOC register. */
26100 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26101 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26102 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26103 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26104
26105 /* The FPRs we need saved to the frame. */
26106 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26107 && (info->savres_strategy & REST_INLINE_FPRS))
26108 {
26109 int offset = info->fp_save_offset;
26110 if (info->push_p)
26111 offset += info->total_size;
26112
26113 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26114 {
26115 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26116 bitmap_set_bit (components, regno);
26117
26118 offset += fp_reg_size;
26119 }
26120 }
26121
26122 /* Optimize LR save and restore if we can. This is component 0. Any
26123 out-of-line register save/restore routines need LR. */
26124 if (info->lr_save_p
26125 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26126 && (info->savres_strategy & SAVE_INLINE_GPRS)
26127 && (info->savres_strategy & REST_INLINE_GPRS)
26128 && (info->savres_strategy & SAVE_INLINE_FPRS)
26129 && (info->savres_strategy & REST_INLINE_FPRS)
26130 && (info->savres_strategy & SAVE_INLINE_VRS)
26131 && (info->savres_strategy & REST_INLINE_VRS))
26132 {
26133 int offset = info->lr_save_offset;
26134 if (info->push_p)
26135 offset += info->total_size;
26136 if (IN_RANGE (offset, -0x8000, 0x7fff))
26137 bitmap_set_bit (components, 0);
26138 }
26139
26140 /* Optimize saving the TOC. This is component 2. */
26141 if (cfun->machine->save_toc_in_prologue)
26142 bitmap_set_bit (components, 2);
26143
26144 return components;
26145 }
26146
26147 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26148 static sbitmap
26149 rs6000_components_for_bb (basic_block bb)
26150 {
26151 rs6000_stack_t *info = rs6000_stack_info ();
26152
26153 bitmap in = DF_LIVE_IN (bb);
26154 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26155 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26156
26157 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26158 bitmap_clear (components);
26159
26160 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26161
26162 /* GPRs. */
26163 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26164 if (bitmap_bit_p (in, regno)
26165 || bitmap_bit_p (gen, regno)
26166 || bitmap_bit_p (kill, regno))
26167 bitmap_set_bit (components, regno);
26168
26169 /* FPRs. */
26170 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26171 if (bitmap_bit_p (in, regno)
26172 || bitmap_bit_p (gen, regno)
26173 || bitmap_bit_p (kill, regno))
26174 bitmap_set_bit (components, regno);
26175
26176 /* The link register. */
26177 if (bitmap_bit_p (in, LR_REGNO)
26178 || bitmap_bit_p (gen, LR_REGNO)
26179 || bitmap_bit_p (kill, LR_REGNO))
26180 bitmap_set_bit (components, 0);
26181
26182 /* The TOC save. */
26183 if (bitmap_bit_p (in, TOC_REGNUM)
26184 || bitmap_bit_p (gen, TOC_REGNUM)
26185 || bitmap_bit_p (kill, TOC_REGNUM))
26186 bitmap_set_bit (components, 2);
26187
26188 return components;
26189 }
26190
26191 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26192 static void
26193 rs6000_disqualify_components (sbitmap components, edge e,
26194 sbitmap edge_components, bool /*is_prologue*/)
26195 {
26196 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26197 live where we want to place that code. */
26198 if (bitmap_bit_p (edge_components, 0)
26199 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26200 {
26201 if (dump_file)
26202 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26203 "on entry to bb %d\n", e->dest->index);
26204 bitmap_clear_bit (components, 0);
26205 }
26206 }
26207
26208 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26209 static void
26210 rs6000_emit_prologue_components (sbitmap components)
26211 {
26212 rs6000_stack_t *info = rs6000_stack_info ();
26213 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26214 ? HARD_FRAME_POINTER_REGNUM
26215 : STACK_POINTER_REGNUM);
26216
26217 machine_mode reg_mode = Pmode;
26218 int reg_size = TARGET_32BIT ? 4 : 8;
26219 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26220 int fp_reg_size = 8;
26221
26222 /* Prologue for LR. */
26223 if (bitmap_bit_p (components, 0))
26224 {
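/* Move LR to r0, recording in the unwind info that LR now lives in
r0; then store r0 into the save slot and record the save there. */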
26225 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26226 rtx reg = gen_rtx_REG (reg_mode, 0);
26227 rtx_insn *insn = emit_move_insn (reg, lr);
26228 RTX_FRAME_RELATED_P (insn) = 1;
26229 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26230
26231 int offset = info->lr_save_offset;
26232 if (info->push_p)
26233 offset += info->total_size;
26234
26235 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26236 RTX_FRAME_RELATED_P (insn) = 1;
26237 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26238 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26239 }
26240
26241 /* Prologue for TOC. */
26242 if (bitmap_bit_p (components, 2))
26243 {
26244 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26245 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26246 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26247 }
26248
26249 /* Prologue for the GPRs. */
26250 int offset = info->gp_save_offset;
26251 if (info->push_p)
26252 offset += info->total_size;
26253
26254 for (int i = info->first_gp_reg_save; i < 32; i++)
26255 {
26256 if (bitmap_bit_p (components, i))
26257 {
26258 rtx reg = gen_rtx_REG (reg_mode, i);
26259 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26260 RTX_FRAME_RELATED_P (insn) = 1;
26261 rtx set = copy_rtx (single_set (insn));
26262 add_reg_note (insn, REG_CFA_OFFSET, set);
26263 }
26264
26265 offset += reg_size;
26266 }
26267
26268 /* Prologue for the FPRs. */
26269 offset = info->fp_save_offset;
26270 if (info->push_p)
26271 offset += info->total_size;
26272
26273 for (int i = info->first_fp_reg_save; i < 64; i++)
26274 {
26275 if (bitmap_bit_p (components, i))
26276 {
26277 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26278 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26279 RTX_FRAME_RELATED_P (insn) = 1;
26280 rtx set = copy_rtx (single_set (insn));
26281 add_reg_note (insn, REG_CFA_OFFSET, set);
26282 }
26283
26284 offset += fp_reg_size;
26285 }
26286 }
26287
26288 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26289 static void
26290 rs6000_emit_epilogue_components (sbitmap components)
26291 {
26292 rs6000_stack_t *info = rs6000_stack_info ();
26293 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26294 ? HARD_FRAME_POINTER_REGNUM
26295 : STACK_POINTER_REGNUM);
26296
26297 machine_mode reg_mode = Pmode;
26298 int reg_size = TARGET_32BIT ? 4 : 8;
26299
26300 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26301 int fp_reg_size = 8;
26302
26303 /* Epilogue for the FPRs. */
26304 int offset = info->fp_save_offset;
26305 if (info->push_p)
26306 offset += info->total_size;
26307
26308 for (int i = info->first_fp_reg_save; i < 64; i++)
26309 {
26310 if (bitmap_bit_p (components, i))
26311 {
26312 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26313 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26314 RTX_FRAME_RELATED_P (insn) = 1;
26315 add_reg_note (insn, REG_CFA_RESTORE, reg);
26316 }
26317
26318 offset += fp_reg_size;
26319 }
26320
26321 /* Epilogue for the GPRs. */
26322 offset = info->gp_save_offset;
26323 if (info->push_p)
26324 offset += info->total_size;
26325
26326 for (int i = info->first_gp_reg_save; i < 32; i++)
26327 {
26328 if (bitmap_bit_p (components, i))
26329 {
26330 rtx reg = gen_rtx_REG (reg_mode, i);
26331 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26332 RTX_FRAME_RELATED_P (insn) = 1;
26333 add_reg_note (insn, REG_CFA_RESTORE, reg);
26334 }
26335
26336 offset += reg_size;
26337 }
26338
26339 /* Epilogue for LR. */
26340 if (bitmap_bit_p (components, 0))
26341 {
26342 int offset = info->lr_save_offset;
26343 if (info->push_p)
26344 offset += info->total_size;
26345
26346 rtx reg = gen_rtx_REG (reg_mode, 0);
26347 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26348
26349 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26350 insn = emit_move_insn (lr, reg);
26351 RTX_FRAME_RELATED_P (insn) = 1;
26352 add_reg_note (insn, REG_CFA_RESTORE, lr);
26353 }
26354 }
26355
26356 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26357 static void
26358 rs6000_set_handled_components (sbitmap components)
26359 {
26360 rs6000_stack_t *info = rs6000_stack_info ();
26361
26362 for (int i = info->first_gp_reg_save; i < 32; i++)
26363 if (bitmap_bit_p (components, i))
26364 cfun->machine->gpr_is_wrapped_separately[i] = true;
26365
26366 for (int i = info->first_fp_reg_save; i < 64; i++)
26367 if (bitmap_bit_p (components, i))
26368 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26369
26370 if (bitmap_bit_p (components, 0))
26371 cfun->machine->lr_is_wrapped_separately = true;
26372
26373 if (bitmap_bit_p (components, 2))
26374 cfun->machine->toc_is_wrapped_separately = true;
26375 }
26376
26377 /* VRSAVE is a bit vector representing which AltiVec registers
26378 are used. The OS uses this to determine which vector
26379 registers to save on a context switch. We need to save
26380 VRSAVE on the stack frame, add whatever AltiVec registers we
26381 used in this function, and do the corresponding magic in the
26382 epilogue. */
26383 static void
26384 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26385 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26386 {
26387 /* Get VRSAVE into a GPR. */
26388 rtx reg = gen_rtx_REG (SImode, save_regno);
26389 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26390 if (TARGET_MACHO)
26391 emit_insn (gen_get_vrsave_internal (reg));
26392 else
26393 emit_insn (gen_rtx_SET (reg, vrsave));
26394
26395 /* Save VRSAVE. */
26396 int offset = info->vrsave_save_offset + frame_off;
26397 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26398
26399 /* Include the registers in the mask. */
26400 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26401
26402 emit_insn (generate_set_vrsave (reg, info, 0));
26403 }
26404
26405 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26406 called, it left the arg pointer to the old stack in r29. Otherwise, the
26407 arg pointer is the top of the current frame. */
26408 static void
26409 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26410 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26411 {
26412 cfun->machine->split_stack_argp_used = true;
26413
26414 if (sp_adjust)
26415 {
26416 rtx r12 = gen_rtx_REG (Pmode, 12);
26417 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26418 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26419 emit_insn_before (set_r12, sp_adjust);
26420 }
26421 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26422 {
26423 rtx r12 = gen_rtx_REG (Pmode, 12);
26424 if (frame_off == 0)
26425 emit_move_insn (r12, frame_reg_rtx);
26426 else
26427 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26428 }
26429
26430 if (info->push_p)
26431 {
26432 rtx r12 = gen_rtx_REG (Pmode, 12);
26433 rtx r29 = gen_rtx_REG (Pmode, 29);
26434 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26435 rtx not_more = gen_label_rtx ();
26436 rtx jump;
26437
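/* cr7 still holds the result of the split-stack limit check: branch
around the copy when no new stack segment was allocated; otherwise
__morestack left the old stack's arg pointer in r29. */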
26438 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26439 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26440 gen_rtx_LABEL_REF (VOIDmode, not_more),
26441 pc_rtx);
26442 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26443 JUMP_LABEL (jump) = not_more;
26444 LABEL_NUSES (not_more) += 1;
26445 emit_move_insn (r12, r29);
26446 emit_label (not_more);
26447 }
26448 }
26449
26450 /* Emit function prologue as insns. */
26451
26452 void
26453 rs6000_emit_prologue (void)
26454 {
26455 rs6000_stack_t *info = rs6000_stack_info ();
26456 machine_mode reg_mode = Pmode;
26457 int reg_size = TARGET_32BIT ? 4 : 8;
26458 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26459 int fp_reg_size = 8;
26460 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26461 rtx frame_reg_rtx = sp_reg_rtx;
26462 unsigned int cr_save_regno;
26463 rtx cr_save_rtx = NULL_RTX;
26464 rtx_insn *insn;
26465 int strategy;
26466 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26467 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26468 && call_used_regs[STATIC_CHAIN_REGNUM]);
26469 int using_split_stack = (flag_split_stack
26470 && (lookup_attribute ("no_split_stack",
26471 DECL_ATTRIBUTES (cfun->decl))
26472 == NULL));
26473
26474 /* Offset to top of frame for frame_reg and sp respectively. */
26475 HOST_WIDE_INT frame_off = 0;
26476 HOST_WIDE_INT sp_off = 0;
26477 /* sp_adjust is the stack adjusting instruction, tracked so that the
26478 insn setting up the split-stack arg pointer can be emitted just
26479 prior to it, when r12 is not used here for other purposes. */
26480 rtx_insn *sp_adjust = 0;
26481
26482 #if CHECKING_P
26483 /* Track and check usage of r0, r11, r12. */
26484 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26485 #define START_USE(R) do \
26486 { \
26487 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26488 reg_inuse |= 1 << (R); \
26489 } while (0)
26490 #define END_USE(R) do \
26491 { \
26492 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26493 reg_inuse &= ~(1 << (R)); \
26494 } while (0)
26495 #define NOT_INUSE(R) do \
26496 { \
26497 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26498 } while (0)
26499 #else
26500 #define START_USE(R) do {} while (0)
26501 #define END_USE(R) do {} while (0)
26502 #define NOT_INUSE(R) do {} while (0)
26503 #endif
26504
26505 if (DEFAULT_ABI == ABI_ELFv2
26506 && !TARGET_SINGLE_PIC_BASE)
26507 {
26508 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26509
26510 /* With -mminimal-toc we may generate an extra use of r2 below. */
26511 if (TARGET_TOC && TARGET_MINIMAL_TOC
26512 && !constant_pool_empty_p ())
26513 cfun->machine->r2_setup_needed = true;
26514 }
26515
26516
26517 if (flag_stack_usage_info)
26518 current_function_static_stack_size = info->total_size;
26519
26520 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26521 {
26522 HOST_WIDE_INT size = info->total_size;
26523
26524 if (crtl->is_leaf && !cfun->calls_alloca)
26525 {
26526 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26527 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26528 size - get_stack_check_protect ());
26529 }
26530 else if (size > 0)
26531 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26532 }
26533
26534 if (TARGET_FIX_AND_CONTINUE)
26535 {
26536 /* GDB on Darwin arranges to forward a function from the old
26537 address by modifying the first 5 instructions of the function
26538 to branch to the overriding function. This is necessary to
26539 permit function pointers that point to the old function to
26540 actually forward to the new function. */
26541 emit_insn (gen_nop ());
26542 emit_insn (gen_nop ());
26543 emit_insn (gen_nop ());
26544 emit_insn (gen_nop ());
26545 emit_insn (gen_nop ());
26546 }
26547
26548 /* Handle world saves specially here. */
26549 if (WORLD_SAVE_P (info))
26550 {
26551 int i, j, sz;
26552 rtx treg;
26553 rtvec p;
26554 rtx reg0;
26555
26556 /* save_world expects lr in r0. */
26557 reg0 = gen_rtx_REG (Pmode, 0);
26558 if (info->lr_save_p)
26559 {
26560 insn = emit_move_insn (reg0,
26561 gen_rtx_REG (Pmode, LR_REGNO));
26562 RTX_FRAME_RELATED_P (insn) = 1;
26563 }
26564
26565 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26566 assumptions about the offsets of various bits of the stack
26567 frame. */
26568 gcc_assert (info->gp_save_offset == -220
26569 && info->fp_save_offset == -144
26570 && info->lr_save_offset == 8
26571 && info->cr_save_offset == 4
26572 && info->push_p
26573 && info->lr_save_p
26574 && (!crtl->calls_eh_return
26575 || info->ehrd_offset == -432)
26576 && info->vrsave_save_offset == -224
26577 && info->altivec_save_offset == -416);
26578
26579 treg = gen_rtx_REG (SImode, 11);
26580 emit_move_insn (treg, GEN_INT (-info->total_size));
26581
26582 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26583 in R11. It also clobbers R12, so beware! */
26584
26585 /* Preserve CR2 for save_world prologues. */
26586 sz = 5;
26587 sz += 32 - info->first_gp_reg_save;
26588 sz += 64 - info->first_fp_reg_save;
26589 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26590 p = rtvec_alloc (sz);
26591 j = 0;
26592 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26593 gen_rtx_REG (SImode,
26594 LR_REGNO));
26595 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26596 gen_rtx_SYMBOL_REF (Pmode,
26597 "*save_world"));
26598 /* We do floats first so that the instruction pattern matches
26599 properly. */
26600 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26601 RTVEC_ELT (p, j++)
26602 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26603 info->first_fp_reg_save + i),
26604 frame_reg_rtx,
26605 info->fp_save_offset + frame_off + 8 * i);
26606 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26607 RTVEC_ELT (p, j++)
26608 = gen_frame_store (gen_rtx_REG (V4SImode,
26609 info->first_altivec_reg_save + i),
26610 frame_reg_rtx,
26611 info->altivec_save_offset + frame_off + 16 * i);
26612 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26613 RTVEC_ELT (p, j++)
26614 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26615 frame_reg_rtx,
26616 info->gp_save_offset + frame_off + reg_size * i);
26617
26618 /* CR register traditionally saved as CR2. */
26619 RTVEC_ELT (p, j++)
26620 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26621 frame_reg_rtx, info->cr_save_offset + frame_off);
26622 /* Explain the use of R0. */
26623 if (info->lr_save_p)
26624 RTVEC_ELT (p, j++)
26625 = gen_frame_store (reg0,
26626 frame_reg_rtx, info->lr_save_offset + frame_off);
26627 /* Explain what happens to the stack pointer. */
26628 {
26629 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26630 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26631 }
26632
26633 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26634 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26635 treg, GEN_INT (-info->total_size));
26636 sp_off = frame_off = info->total_size;
26637 }
26638
26639 strategy = info->savres_strategy;
26640
26641 /* For V.4, update stack before we do any saving and set back pointer. */
26642 if (! WORLD_SAVE_P (info)
26643 && info->push_p
26644 && (DEFAULT_ABI == ABI_V4
26645 || crtl->calls_eh_return))
26646 {
26647 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26648 || !(strategy & SAVE_INLINE_GPRS)
26649 || !(strategy & SAVE_INLINE_VRS));
26650 int ptr_regno = -1;
26651 rtx ptr_reg = NULL_RTX;
26652 int ptr_off = 0;
26653
26654 if (info->total_size < 32767)
26655 frame_off = info->total_size;
26656 else if (need_r11)
26657 ptr_regno = 11;
26658 else if (info->cr_save_p
26659 || info->lr_save_p
26660 || info->first_fp_reg_save < 64
26661 || info->first_gp_reg_save < 32
26662 || info->altivec_size != 0
26663 || info->vrsave_size != 0
26664 || crtl->calls_eh_return)
26665 ptr_regno = 12;
26666 else
26667 {
26668 /* The prologue won't be saving any regs so there is no need
26669 to set up a frame register to access any frame save area.
26670 We also won't be using frame_off anywhere below, but set
26671 the correct value anyway to protect against future
26672 changes to this function. */
26673 frame_off = info->total_size;
26674 }
26675 if (ptr_regno != -1)
26676 {
26677 /* Set up the frame offset to that needed by the first
26678 out-of-line save function. */
26679 START_USE (ptr_regno);
26680 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26681 frame_reg_rtx = ptr_reg;
26682 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26683 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26684 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26685 ptr_off = info->gp_save_offset + info->gp_size;
26686 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26687 ptr_off = info->altivec_save_offset + info->altivec_size;
26688 frame_off = -ptr_off;
26689 }
26690 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26691 ptr_reg, ptr_off);
26692 if (REGNO (frame_reg_rtx) == 12)
26693 sp_adjust = 0;
26694 sp_off = info->total_size;
26695 if (frame_reg_rtx != sp_reg_rtx)
26696 rs6000_emit_stack_tie (frame_reg_rtx, false);
26697 }
26698
26699 /* If we use the link register, get it into r0. */
26700 if (!WORLD_SAVE_P (info) && info->lr_save_p
26701 && !cfun->machine->lr_is_wrapped_separately)
26702 {
26703 rtx addr, reg, mem;
26704
26705 reg = gen_rtx_REG (Pmode, 0);
26706 START_USE (0);
26707 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26708 RTX_FRAME_RELATED_P (insn) = 1;
26709
26710 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26711 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26712 {
26713 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26714 GEN_INT (info->lr_save_offset + frame_off));
26715 mem = gen_rtx_MEM (Pmode, addr);
26716 /* This MEM should not be in rs6000_sr_alias_set, because of
26717 __builtin_return_address. */
26718
26719 insn = emit_move_insn (mem, reg);
26720 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26721 NULL_RTX, NULL_RTX);
26722 END_USE (0);
26723 }
26724 }
26725
26726 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26727 r12 will be needed by out-of-line gpr save. */
26728 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26729 && !(strategy & (SAVE_INLINE_GPRS
26730 | SAVE_NOINLINE_GPRS_SAVES_LR))
26731 ? 11 : 12);
26732 if (!WORLD_SAVE_P (info)
26733 && info->cr_save_p
26734 && REGNO (frame_reg_rtx) != cr_save_regno
26735 && !(using_static_chain_p && cr_save_regno == 11)
26736 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26737 {
26738 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26739 START_USE (cr_save_regno);
26740 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26741 }
26742
26743 /* Do any required saving of FPRs, either inline or by calling an
26744 out-of-line routine, as the chosen save strategy dictates. */
26745 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26746 {
26747 int offset = info->fp_save_offset + frame_off;
26748 for (int i = info->first_fp_reg_save; i < 64; i++)
26749 {
26750 if (save_reg_p (i)
26751 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26752 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26753 sp_off - frame_off);
26754
26755 offset += fp_reg_size;
26756 }
26757 }
26758 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26759 {
26760 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26761 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26762 unsigned ptr_regno = ptr_regno_for_savres (sel);
26763 rtx ptr_reg = frame_reg_rtx;
26764
26765 if (REGNO (frame_reg_rtx) == ptr_regno)
26766 gcc_checking_assert (frame_off == 0);
26767 else
26768 {
26769 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26770 NOT_INUSE (ptr_regno);
26771 emit_insn (gen_add3_insn (ptr_reg,
26772 frame_reg_rtx, GEN_INT (frame_off)));
26773 }
26774 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26775 info->fp_save_offset,
26776 info->lr_save_offset,
26777 DFmode, sel);
26778 rs6000_frame_related (insn, ptr_reg, sp_off,
26779 NULL_RTX, NULL_RTX);
26780 if (lr)
26781 END_USE (0);
26782 }
26783
26784 /* Save GPRs. This is done as a PARALLEL if we are using
26785 the store-multiple instructions. */
26786 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26787 {
26788 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26789 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26790 unsigned ptr_regno = ptr_regno_for_savres (sel);
26791 rtx ptr_reg = frame_reg_rtx;
26792 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26793 int end_save = info->gp_save_offset + info->gp_size;
26794 int ptr_off;
26795
26796 if (ptr_regno == 12)
26797 sp_adjust = 0;
26798 if (!ptr_set_up)
26799 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26800
26801 /* Need to adjust r11 (r12) if we saved any FPRs. */
26802 if (end_save + frame_off != 0)
26803 {
26804 rtx offset = GEN_INT (end_save + frame_off);
26805
26806 if (ptr_set_up)
26807 frame_off = -end_save;
26808 else
26809 NOT_INUSE (ptr_regno);
26810 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26811 }
26812 else if (!ptr_set_up)
26813 {
26814 NOT_INUSE (ptr_regno);
26815 emit_move_insn (ptr_reg, frame_reg_rtx);
26816 }
26817 ptr_off = -end_save;
26818 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26819 info->gp_save_offset + ptr_off,
26820 info->lr_save_offset + ptr_off,
26821 reg_mode, sel);
26822 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26823 NULL_RTX, NULL_RTX);
26824 if (lr)
26825 END_USE (0);
26826 }
26827 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26828 {
26829 rtvec p;
26830 int i;
26831 p = rtvec_alloc (32 - info->first_gp_reg_save);
26832 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26833 RTVEC_ELT (p, i)
26834 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26835 frame_reg_rtx,
26836 info->gp_save_offset + frame_off + reg_size * i);
26837 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26838 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26839 NULL_RTX, NULL_RTX);
26840 }
26841 else if (!WORLD_SAVE_P (info))
26842 {
26843 int offset = info->gp_save_offset + frame_off;
26844 for (int i = info->first_gp_reg_save; i < 32; i++)
26845 {
26846 if (save_reg_p (i)
26847 && !cfun->machine->gpr_is_wrapped_separately[i])
26848 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
26849 sp_off - frame_off);
26850
26851 offset += reg_size;
26852 }
26853 }
26854
26855 if (crtl->calls_eh_return)
26856 {
26857 unsigned int i;
26858 rtvec p;
26859
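/* First count the EH data registers, to size the vector. */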
26860 for (i = 0; ; ++i)
26861 {
26862 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26863 if (regno == INVALID_REGNUM)
26864 break;
26865 }
26866
26867 p = rtvec_alloc (i);
26868
26869 for (i = 0; ; ++i)
26870 {
26871 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26872 if (regno == INVALID_REGNUM)
26873 break;
26874
26875 rtx set
26876 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
26877 sp_reg_rtx,
26878 info->ehrd_offset + sp_off + reg_size * (int) i);
26879 RTVEC_ELT (p, i) = set;
26880 RTX_FRAME_RELATED_P (set) = 1;
26881 }
26882
26883 insn = emit_insn (gen_blockage ());
26884 RTX_FRAME_RELATED_P (insn) = 1;
26885 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
26886 }
26887
26888 /* In the AIX ABI we need to make sure r2 is really saved. */
26889 if (TARGET_AIX && crtl->calls_eh_return)
26890 {
26891 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
26892 rtx join_insn, note;
26893 rtx_insn *save_insn;
26894 long toc_restore_insn;
26895
26896 tmp_reg = gen_rtx_REG (Pmode, 11);
26897 tmp_reg_si = gen_rtx_REG (SImode, 11);
26898 if (using_static_chain_p)
26899 {
26900 START_USE (0);
26901 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
26902 }
26903 else
26904 START_USE (11);
26905 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
26906 /* Peek at instruction to which this function returns. If it's
26907 restoring r2, then we know we've already saved r2. We can't
26908 unconditionally save r2 because the value we have will already
26909 be updated if we arrived at this function via a plt call or
26910 toc adjusting stub. */
26911 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
26912 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
26913 + RS6000_TOC_SAVE_SLOT);
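/* 0x80410000 is "lwz r2,0(r1)" and 0xE8410000 is "ld r2,0(r1)"; adding
RS6000_TOC_SAVE_SLOT gives the image of the expected TOC restore insn. */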
26914 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
26915 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
26916 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
26917 validate_condition_mode (EQ, CCUNSmode);
26918 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
26919 emit_insn (gen_rtx_SET (compare_result,
26920 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
26921 toc_save_done = gen_label_rtx ();
26922 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26923 gen_rtx_EQ (VOIDmode, compare_result,
26924 const0_rtx),
26925 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
26926 pc_rtx);
26927 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26928 JUMP_LABEL (jump) = toc_save_done;
26929 LABEL_NUSES (toc_save_done) += 1;
26930
26931 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
26932 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
26933 sp_off - frame_off);
26934
26935 emit_label (toc_save_done);
26936
26937 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
26938 have a CFG that has different saves along different paths.
26939 Move the note to a dummy blockage insn, which describes that
26940 R2 is unconditionally saved after the label. */
26941 /* ??? An alternate representation might be a special insn pattern
26942 containing both the branch and the store. That might give the
26943 code that minimizes the number of DW_CFA_advance opcodes more
26944 freedom in placing the annotations. */
26945 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
26946 if (note)
26947 remove_note (save_insn, note);
26948 else
26949 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
26950 copy_rtx (PATTERN (save_insn)), NULL_RTX);
26951 RTX_FRAME_RELATED_P (save_insn) = 0;
26952
26953 join_insn = emit_insn (gen_blockage ());
26954 REG_NOTES (join_insn) = note;
26955 RTX_FRAME_RELATED_P (join_insn) = 1;
26956
26957 if (using_static_chain_p)
26958 {
26959 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
26960 END_USE (0);
26961 }
26962 else
26963 END_USE (11);
26964 }
26965
26966 /* Save CR if we use any that must be preserved. */
26967 if (!WORLD_SAVE_P (info) && info->cr_save_p)
26968 {
26969 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26970 GEN_INT (info->cr_save_offset + frame_off));
26971 rtx mem = gen_frame_mem (SImode, addr);
26972
26973 /* If we didn't copy cr before, do so now using r0. */
26974 if (cr_save_rtx == NULL_RTX)
26975 {
26976 START_USE (0);
26977 cr_save_rtx = gen_rtx_REG (SImode, 0);
26978 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26979 }
26980
26981 /* Saving CR requires a two-instruction sequence: one instruction
26982 to move the CR to a general-purpose register, and a second
26983 instruction that stores the GPR to memory.
26984
26985 We do not emit any DWARF CFI records for the first of these,
26986 because we cannot properly represent the fact that CR is saved in
26987 a register. One reason is that we cannot express that multiple
26988 CR fields are saved; another reason is that on 64-bit, the size
26989 of the CR register in DWARF (4 bytes) differs from the size of
26990 a general-purpose register.
26991
26992 This means if any intervening instruction were to clobber one of
26993 the call-saved CR fields, we'd have incorrect CFI. To prevent
26994 this from happening, we mark the store to memory as a use of
26995 those CR fields, which prevents any such instruction from being
26996 scheduled in between the two instructions. */
26997 rtx crsave_v[9];
26998 int n_crsave = 0;
26999 int i;
27000
27001 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27002 for (i = 0; i < 8; i++)
27003 if (save_reg_p (CR0_REGNO + i))
27004 crsave_v[n_crsave++]
27005 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27006
27007 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27008 gen_rtvec_v (n_crsave, crsave_v)));
27009 END_USE (REGNO (cr_save_rtx));
27010
27011 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27012 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27013 so we need to construct a frame expression manually. */
27014 RTX_FRAME_RELATED_P (insn) = 1;
27015
27016 /* Update address to be stack-pointer relative, like
27017 rs6000_frame_related would do. */
27018 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27019 GEN_INT (info->cr_save_offset + sp_off));
27020 mem = gen_frame_mem (SImode, addr);
27021
27022 if (DEFAULT_ABI == ABI_ELFv2)
27023 {
27024 /* In the ELFv2 ABI we generate separate CFI records for each
27025 CR field that was actually saved. They all point to the
27026 same 32-bit stack slot. */
27027 rtx crframe[8];
27028 int n_crframe = 0;
27029
27030 for (i = 0; i < 8; i++)
27031 if (save_reg_p (CR0_REGNO + i))
27032 {
27033 crframe[n_crframe]
27034 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27035
27036 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27037 n_crframe++;
27038 }
27039
27040 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27041 gen_rtx_PARALLEL (VOIDmode,
27042 gen_rtvec_v (n_crframe, crframe)));
27043 }
27044 else
27045 {
27046 /* In other ABIs, by convention, we use a single CR regnum to
27047 represent the fact that all call-saved CR fields are saved.
27048 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27049 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27050 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27051 }
27052 }
27053
27054 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27055 *separate* slots if the routine calls __builtin_eh_return, so
27056 that they can be independently restored by the unwinder. */
27057 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27058 {
27059 int i, cr_off = info->ehcr_offset;
27060 rtx crsave;
27061
27062 /* ??? We might get better performance by using multiple mfocrf
27063 instructions. */
27064 crsave = gen_rtx_REG (SImode, 0);
27065 emit_insn (gen_prologue_movesi_from_cr (crsave));
27066
27067 for (i = 0; i < 8; i++)
27068 if (!call_used_regs[CR0_REGNO + i])
27069 {
27070 rtvec p = rtvec_alloc (2);
27071 RTVEC_ELT (p, 0)
27072 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27073 RTVEC_ELT (p, 1)
27074 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27075
27076 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27077
27078 RTX_FRAME_RELATED_P (insn) = 1;
27079 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27080 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27081 sp_reg_rtx, cr_off + sp_off));
27082
27083 cr_off += reg_size;
27084 }
27085 }
27086
27087 /* If we are emitting stack probes but allocating no stack, just
27088 note that in the dump file. */
27089 if (flag_stack_clash_protection
27090 && dump_file
27091 && !info->push_p)
27092 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27093
27094 /* Update stack and set back pointer unless this is V.4,
27095 for which it was done previously. */
27096 if (!WORLD_SAVE_P (info) && info->push_p
27097 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27098 {
27099 rtx ptr_reg = NULL;
27100 int ptr_off = 0;
27101
27102 /* If saving altivec regs we need to be able to address all save
27103 locations using a 16-bit offset. */
27104 if ((strategy & SAVE_INLINE_VRS) == 0
27105 || (info->altivec_size != 0
27106 && (info->altivec_save_offset + info->altivec_size - 16
27107 + info->total_size - frame_off) > 32767)
27108 || (info->vrsave_size != 0
27109 && (info->vrsave_save_offset
27110 + info->total_size - frame_off) > 32767))
27111 {
27112 int sel = SAVRES_SAVE | SAVRES_VR;
27113 unsigned ptr_regno = ptr_regno_for_savres (sel);
27114
27115 if (using_static_chain_p
27116 && ptr_regno == STATIC_CHAIN_REGNUM)
27117 ptr_regno = 12;
27118 if (REGNO (frame_reg_rtx) != ptr_regno)
27119 START_USE (ptr_regno);
27120 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27121 frame_reg_rtx = ptr_reg;
27122 ptr_off = info->altivec_save_offset + info->altivec_size;
27123 frame_off = -ptr_off;
27124 }
27125 else if (REGNO (frame_reg_rtx) == 1)
27126 frame_off = info->total_size;
27127 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27128 ptr_reg, ptr_off);
27129 if (REGNO (frame_reg_rtx) == 12)
27130 sp_adjust = 0;
27131 sp_off = info->total_size;
27132 if (frame_reg_rtx != sp_reg_rtx)
27133 rs6000_emit_stack_tie (frame_reg_rtx, false);
27134 }
27135
27136 /* Set frame pointer, if needed. */
27137 if (frame_pointer_needed)
27138 {
27139 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27140 sp_reg_rtx);
27141 RTX_FRAME_RELATED_P (insn) = 1;
27142 }
27143
27144 /* Save AltiVec registers if needed. Save here because the red zone does
27145 not always include AltiVec registers. */
27146 if (!WORLD_SAVE_P (info)
27147 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27148 {
27149 int end_save = info->altivec_save_offset + info->altivec_size;
27150 int ptr_off;
27151 /* Oddly, the vector save/restore functions point r0 at the end
27152 of the save area, then use r11 or r12 to load offsets for
27153 [reg+reg] addressing. */
27154 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27155 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27156 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27157
27158 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27159 NOT_INUSE (0);
27160 if (scratch_regno == 12)
27161 sp_adjust = 0;
27162 if (end_save + frame_off != 0)
27163 {
27164 rtx offset = GEN_INT (end_save + frame_off);
27165
27166 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27167 }
27168 else
27169 emit_move_insn (ptr_reg, frame_reg_rtx);
27170
27171 ptr_off = -end_save;
27172 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27173 info->altivec_save_offset + ptr_off,
27174 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27175 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27176 NULL_RTX, NULL_RTX);
27177 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27178 {
27179 /* The oddity mentioned above clobbered our frame reg. */
27180 emit_move_insn (frame_reg_rtx, ptr_reg);
27181 frame_off = ptr_off;
27182 }
27183 }
27184 else if (!WORLD_SAVE_P (info)
27185 && info->altivec_size != 0)
27186 {
27187 int i;
27188
27189 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27190 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27191 {
27192 rtx areg, savereg, mem;
27193 HOST_WIDE_INT offset;
27194
27195 offset = (info->altivec_save_offset + frame_off
27196 + 16 * (i - info->first_altivec_reg_save));
27197
27198 savereg = gen_rtx_REG (V4SImode, i);
27199
27200 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27201 {
27202 mem = gen_frame_mem (V4SImode,
27203 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27204 GEN_INT (offset)));
27205 insn = emit_insn (gen_rtx_SET (mem, savereg));
27206 areg = NULL_RTX;
27207 }
27208 else
27209 {
27210 NOT_INUSE (0);
27211 areg = gen_rtx_REG (Pmode, 0);
27212 emit_move_insn (areg, GEN_INT (offset));
27213
27214 /* AltiVec addressing mode is [reg+reg]. */
27215 mem = gen_frame_mem (V4SImode,
27216 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27217
27218 /* Rather than emitting a generic move, force use of the stvx
27219 instruction, which we always want on ISA 2.07 (power8) systems.
27220 In particular we don't want xxpermdi/stxvd2x for little
27221 endian. */
27222 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27223 }
27224
27225 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27226 areg, GEN_INT (offset));
27227 }
27228 }
27229
27230 /* VRSAVE is a bit vector representing which AltiVec registers
27231 are used. The OS uses this to determine which vector
27232 registers to save on a context switch. We need to save
27233 VRSAVE on the stack frame, add whatever AltiVec registers we
27234 used in this function, and do the corresponding magic in the
27235 epilogue. */
27236
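  /* For example (a sketch, assuming ALTIVEC_REG_BIT maps v0 to the
     most-significant bit): a function whose live AltiVec registers are
     v20 and v31 ORs 0x00000801 into the saved VRSAVE value.  */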
27237 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27238 {
27239 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27240 be using r12 as frame_reg_rtx and r11 as the static chain
27241 pointer for nested functions. */
27242 int save_regno = 12;
27243 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27244 && !using_static_chain_p)
27245 save_regno = 11;
27246 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27247 {
27248 save_regno = 11;
27249 if (using_static_chain_p)
27250 save_regno = 0;
27251 }
27252 NOT_INUSE (save_regno);
27253
27254 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27255 }
27256
27257 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27258 if (!TARGET_SINGLE_PIC_BASE
27259 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27260 && !constant_pool_empty_p ())
27261 || (DEFAULT_ABI == ABI_V4
27262 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27263 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27264 {
27265 /* If emit_load_toc_table will use the link register, we need to save
27266 it. We use R12 for this purpose because emit_load_toc_table
27267 can use register 0. This allows us to use a plain 'blr' to return
27268 from the procedure more often. */
27269 int save_LR_around_toc_setup = (TARGET_ELF
27270 && DEFAULT_ABI == ABI_V4
27271 && flag_pic
27272 && ! info->lr_save_p
27273 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27274 if (save_LR_around_toc_setup)
27275 {
27276 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27277 rtx tmp = gen_rtx_REG (Pmode, 12);
27278
27279 sp_adjust = 0;
27280 insn = emit_move_insn (tmp, lr);
27281 RTX_FRAME_RELATED_P (insn) = 1;
27282
27283 rs6000_emit_load_toc_table (TRUE);
27284
27285 insn = emit_move_insn (lr, tmp);
27286 add_reg_note (insn, REG_CFA_RESTORE, lr);
27287 RTX_FRAME_RELATED_P (insn) = 1;
27288 }
27289 else
27290 rs6000_emit_load_toc_table (TRUE);
27291 }
27292
27293 #if TARGET_MACHO
27294 if (!TARGET_SINGLE_PIC_BASE
27295 && DEFAULT_ABI == ABI_DARWIN
27296 && flag_pic && crtl->uses_pic_offset_table)
27297 {
27298 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27299 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27300
27301 /* Save and restore LR locally around this call (in R0). */
27302 if (!info->lr_save_p)
27303 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27304
27305 emit_insn (gen_load_macho_picbase (src));
27306
27307 emit_move_insn (gen_rtx_REG (Pmode,
27308 RS6000_PIC_OFFSET_TABLE_REGNUM),
27309 lr);
27310
27311 if (!info->lr_save_p)
27312 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27313 }
27314 #endif
27315
27316 /* If we need to, save the TOC register after doing the stack setup.
27317 Do not emit eh frame info for this save. The unwinder wants info,
27318 conceptually attached to instructions in this function, about
27319 register values in the caller of this function. This R2 may have
27320 already been changed from the value in the caller.
27321 We don't attempt to write accurate DWARF EH frame info for R2
27322 because code emitted by gcc for a (non-pointer) function call
27323 doesn't save and restore R2. Instead, R2 is managed out-of-line
27324    by a linker-generated PLT call stub when the function resides in
27325 a shared library. This behavior is costly to describe in DWARF,
27326 both in terms of the size of DWARF info and the time taken in the
27327 unwinder to interpret it. R2 changes, apart from the
27328 calls_eh_return case earlier in this function, are handled by
27329 linux-unwind.h frob_update_context. */
27330 if (rs6000_save_toc_in_prologue_p ()
27331 && !cfun->machine->toc_is_wrapped_separately)
27332 {
27333 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27334 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27335 }
27336
27337 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27338 if (using_split_stack && split_stack_arg_pointer_used_p ())
27339 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27340 }
27341
27342 /* Output .extern statements for the save/restore routines we use. */
27343
27344 static void
27345 rs6000_output_savres_externs (FILE *file)
27346 {
27347 rs6000_stack_t *info = rs6000_stack_info ();
27348
27349 if (TARGET_DEBUG_STACK)
27350 debug_stack_info (info);
27351
27352 /* Write .extern for any function we will call to save and restore
27353 fp values. */
27354 if (info->first_fp_reg_save < 64
27355 && !TARGET_MACHO
27356 && !TARGET_ELF)
27357 {
27358 char *name;
27359 int regno = info->first_fp_reg_save - 32;
27360
27361 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27362 {
27363 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27364 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27365 name = rs6000_savres_routine_name (regno, sel);
27366 fprintf (file, "\t.extern %s\n", name);
27367 }
27368 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27369 {
27370 bool lr = (info->savres_strategy
27371 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27372 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27373 name = rs6000_savres_routine_name (regno, sel);
27374 fprintf (file, "\t.extern %s\n", name);
27375 }
27376 }
27377 }
27378
27379 /* Write function prologue. */
27380
27381 static void
27382 rs6000_output_function_prologue (FILE *file)
27383 {
27384 if (!cfun->is_thunk)
27385 rs6000_output_savres_externs (file);
27386
27387 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27388 immediately after the global entry point label. */
27389 if (rs6000_global_entry_point_needed_p ())
27390 {
27391 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27392
27393 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27394
27395 if (TARGET_CMODEL != CMODEL_LARGE)
27396 {
27397	      /* In the small and medium code models, we assume the TOC is less
27398	         than 2 GB away from the text section, so it can be computed via
27399	         the following two-instruction sequence.  */
27400 char buf[256];
27401
27402 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27403 fprintf (file, "0:\taddis 2,12,.TOC.-");
27404 assemble_name (file, buf);
27405 fprintf (file, "@ha\n");
27406 fprintf (file, "\taddi 2,2,.TOC.-");
27407 assemble_name (file, buf);
27408 fprintf (file, "@l\n");
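	  /* A sketch of the resulting assembly, assuming the internal
	     label is .LCF0 (per the ELFv2 ABI, r12 holds the global
	     entry point address here):

		0:	addis 2,12,.TOC.-.LCF0@ha
			addi 2,2,.TOC.-.LCF0@l  */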
27409 }
27410 else
27411 {
27412 /* In the large code model, we allow arbitrary offsets between the
27413 TOC and the text section, so we have to load the offset from
27414 memory. The data field is emitted directly before the global
27415 entry point in rs6000_elf_declare_function_name. */
27416 char buf[256];
27417
27418 #ifdef HAVE_AS_ENTRY_MARKERS
27419 /* If supported by the linker, emit a marker relocation. If the
27420 total code size of the final executable or shared library
27421 happens to fit into 2 GB after all, the linker will replace
27422 this code sequence with the sequence for the small or medium
27423 code model. */
27424 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27425 #endif
27426 fprintf (file, "\tld 2,");
27427 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27428 assemble_name (file, buf);
27429 fprintf (file, "-");
27430 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27431 assemble_name (file, buf);
27432 fprintf (file, "(12)\n");
27433 fprintf (file, "\tadd 2,2,12\n");
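	  /* A sketch of the resulting assembly, assuming internal labels
	     .LCL0 (the TOC-offset word) and .LCF0:

		ld 2,.LCL0-.LCF0(12)
		add 2,2,12  */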
27434 }
27435
27436 fputs ("\t.localentry\t", file);
27437 assemble_name (file, name);
27438 fputs (",.-", file);
27439 assemble_name (file, name);
27440 fputs ("\n", file);
27441 }
27442
27443 /* Output -mprofile-kernel code. This needs to be done here instead of
27444 in output_function_profile since it must go after the ELFv2 ABI
27445 local entry point. */
27446 if (TARGET_PROFILE_KERNEL && crtl->profile)
27447 {
27448 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27449 gcc_assert (!TARGET_32BIT);
27450
27451 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27452
27453 /* In the ELFv2 ABI we have no compiler stack word. It must be
27454	 the responsibility of _mcount to preserve the static chain
27455 register if required. */
27456 if (DEFAULT_ABI != ABI_ELFv2
27457 && cfun->static_chain_decl != NULL)
27458 {
27459 asm_fprintf (file, "\tstd %s,24(%s)\n",
27460 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27461 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27462 asm_fprintf (file, "\tld %s,24(%s)\n",
27463 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27464 }
27465 else
27466 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27467 }
27468
27469 rs6000_pic_labelno++;
27470 }
27471
27472 /* -mprofile-kernel code calls mcount before the function prologue,
27473 so a profiled leaf function should stay a leaf function. */
27474 static bool
27475 rs6000_keep_leaf_when_profiled ()
27476 {
27477 return TARGET_PROFILE_KERNEL;
27478 }
27479
27480 /* Non-zero if vmx regs are restored before the frame pop, zero if
27481 we restore after the pop when possible. */
27482 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27483
27484 /* Restoring cr is a two-step process: loading a reg from the frame
27485 save, then moving the reg to cr. For ABI_V4 we must let the
27486 unwinder know that the stack location is no longer valid at or
27487 before the stack deallocation, but we can't emit a cfa_restore for
27488 cr at the stack deallocation like we do for other registers.
27489 The trouble is that it is possible for the move to cr to be
27490 scheduled after the stack deallocation. So say exactly where cr
27491 is located on each of the two insns. */
27492
27493 static rtx
27494 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27495 {
27496 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27497 rtx reg = gen_rtx_REG (SImode, regno);
27498 rtx_insn *insn = emit_move_insn (reg, mem);
27499
27500 if (!exit_func && DEFAULT_ABI == ABI_V4)
27501 {
27502 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27503 rtx set = gen_rtx_SET (reg, cr);
27504
27505 add_reg_note (insn, REG_CFA_REGISTER, set);
27506 RTX_FRAME_RELATED_P (insn) = 1;
27507 }
27508 return reg;
27509 }
27510
27511 /* Reload CR from REG. */
27512
27513 static void
27514 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27515 {
27516 int count = 0;
27517 int i;
27518
27519 if (using_mfcr_multiple)
27520 {
27521 for (i = 0; i < 8; i++)
27522 if (save_reg_p (CR0_REGNO + i))
27523 count++;
27524 gcc_assert (count);
27525 }
27526
27527 if (using_mfcr_multiple && count > 1)
27528 {
27529 rtx_insn *insn;
27530 rtvec p;
27531 int ndx;
27532
27533 p = rtvec_alloc (count);
27534
27535 ndx = 0;
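      /* Each SET built below means "move the bits for CR field i from
	 REG into that field".  The mask operand follows the mtcrf/mtocrf
	 FXM convention, in which the most-significant mask bit selects
	 CR0; for example, CR2 is selected by 1 << (7 - 2) == 0x20.  */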
27536 for (i = 0; i < 8; i++)
27537 if (save_reg_p (CR0_REGNO + i))
27538 {
27539 rtvec r = rtvec_alloc (2);
27540 RTVEC_ELT (r, 0) = reg;
27541 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27542 RTVEC_ELT (p, ndx) =
27543 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27544 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27545 ndx++;
27546 }
27547 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27548 gcc_assert (ndx == count);
27549
27550 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27551 CR field separately. */
27552 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27553 {
27554 for (i = 0; i < 8; i++)
27555 if (save_reg_p (CR0_REGNO + i))
27556 add_reg_note (insn, REG_CFA_RESTORE,
27557 gen_rtx_REG (SImode, CR0_REGNO + i));
27558
27559 RTX_FRAME_RELATED_P (insn) = 1;
27560 }
27561 }
27562 else
27563 for (i = 0; i < 8; i++)
27564 if (save_reg_p (CR0_REGNO + i))
27565 {
27566 rtx insn = emit_insn (gen_movsi_to_cr_one
27567 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27568
27569 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27570 CR field separately, attached to the insn that in fact
27571 restores this particular CR field. */
27572 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27573 {
27574 add_reg_note (insn, REG_CFA_RESTORE,
27575 gen_rtx_REG (SImode, CR0_REGNO + i));
27576
27577 RTX_FRAME_RELATED_P (insn) = 1;
27578 }
27579 }
27580
27581 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27582 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27583 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27584 {
27585 rtx_insn *insn = get_last_insn ();
27586 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27587
27588 add_reg_note (insn, REG_CFA_RESTORE, cr);
27589 RTX_FRAME_RELATED_P (insn) = 1;
27590 }
27591 }
27592
27593 /* Like cr, the move to lr instruction can be scheduled after the
27594 stack deallocation, but unlike cr, its stack frame save is still
27595 valid. So we only need to emit the cfa_restore on the correct
27596 instruction. */
27597
27598 static void
27599 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27600 {
27601 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27602 rtx reg = gen_rtx_REG (Pmode, regno);
27603
27604 emit_move_insn (reg, mem);
27605 }
27606
27607 static void
27608 restore_saved_lr (int regno, bool exit_func)
27609 {
27610 rtx reg = gen_rtx_REG (Pmode, regno);
27611 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27612 rtx_insn *insn = emit_move_insn (lr, reg);
27613
27614 if (!exit_func && flag_shrink_wrap)
27615 {
27616 add_reg_note (insn, REG_CFA_RESTORE, lr);
27617 RTX_FRAME_RELATED_P (insn) = 1;
27618 }
27619 }
27620
27621 static rtx
27622 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27623 {
27624 if (DEFAULT_ABI == ABI_ELFv2)
27625 {
27626 int i;
27627 for (i = 0; i < 8; i++)
27628 if (save_reg_p (CR0_REGNO + i))
27629 {
27630 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27631 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27632 cfa_restores);
27633 }
27634 }
27635 else if (info->cr_save_p)
27636 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27637 gen_rtx_REG (SImode, CR2_REGNO),
27638 cfa_restores);
27639
27640 if (info->lr_save_p)
27641 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27642 gen_rtx_REG (Pmode, LR_REGNO),
27643 cfa_restores);
27644 return cfa_restores;
27645 }
27646
27647 /* Return true if OFFSET from the stack pointer can be clobbered by
27648    signals.  V.4 has no stack cushion; the AIX ABIs keep 220 or 288
27649    bytes below the stack pointer that signals do not clobber.  */
27650
27651 static inline bool
27652 offset_below_red_zone_p (HOST_WIDE_INT offset)
27653 {
27654 return offset < (DEFAULT_ABI == ABI_V4
27655 ? 0
27656 : TARGET_32BIT ? -220 : -288);
27657 }
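/* For example, on a 64-bit AIX or ELFv2 target the 288 bytes just below
   the stack pointer form the red zone: an offset of -288 is still
   protected, while -289 and anything lower can be clobbered by a signal
   handler.  */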
27658
27659 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27660
27661 static void
27662 emit_cfa_restores (rtx cfa_restores)
27663 {
27664 rtx_insn *insn = get_last_insn ();
27665 rtx *loc = &REG_NOTES (insn);
27666
27667 while (*loc)
27668 loc = &XEXP (*loc, 1);
27669 *loc = cfa_restores;
27670 RTX_FRAME_RELATED_P (insn) = 1;
27671 }
27672
27673 /* Emit function epilogue as insns. */
27674
27675 void
27676 rs6000_emit_epilogue (int sibcall)
27677 {
27678 rs6000_stack_t *info;
27679 int restoring_GPRs_inline;
27680 int restoring_FPRs_inline;
27681 int using_load_multiple;
27682 int using_mtcr_multiple;
27683 int use_backchain_to_restore_sp;
27684 int restore_lr;
27685 int strategy;
27686 HOST_WIDE_INT frame_off = 0;
27687 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27688 rtx frame_reg_rtx = sp_reg_rtx;
27689 rtx cfa_restores = NULL_RTX;
27690 rtx insn;
27691 rtx cr_save_reg = NULL_RTX;
27692 machine_mode reg_mode = Pmode;
27693 int reg_size = TARGET_32BIT ? 4 : 8;
27694 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
27695 int fp_reg_size = 8;
27696 int i;
27697 bool exit_func;
27698 unsigned ptr_regno;
27699
27700 info = rs6000_stack_info ();
27701
27702 strategy = info->savres_strategy;
27703 using_load_multiple = strategy & REST_MULTIPLE;
27704 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27705 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27706 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
27707 || rs6000_tune == PROCESSOR_PPC603
27708 || rs6000_tune == PROCESSOR_PPC750
27709 || optimize_size);
27710 /* Restore via the backchain when we have a large frame, since this
27711 is more efficient than an addis, addi pair. The second condition
27712      here will not trigger at the moment; we don't actually need a
27713 frame pointer for alloca, but the generic parts of the compiler
27714 give us one anyway. */
27715 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27716 ? info->lr_save_offset
27717 : 0) > 32767
27718 || (cfun->calls_alloca
27719 && !frame_pointer_needed));
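  /* Restoring via the backchain is a single load of the caller's stack
     pointer from the backchain word at offset 0, e.g. on 64-bit

	ld 11,0(1)

     rather than materializing a large frame size with addis/addi.  */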
27720 restore_lr = (info->lr_save_p
27721 && (restoring_FPRs_inline
27722 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27723 && (restoring_GPRs_inline
27724 || info->first_fp_reg_save < 64)
27725 && !cfun->machine->lr_is_wrapped_separately);
27726
27727
27728 if (WORLD_SAVE_P (info))
27729 {
27730 int i, j;
27731 char rname[30];
27732 const char *alloc_rname;
27733 rtvec p;
27734
27735 /* eh_rest_world_r10 will return to the location saved in the LR
27736	 stack slot (which is not likely to be our caller).
27737 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27738 rest_world is similar, except any R10 parameter is ignored.
27739 The exception-handling stuff that was here in 2.95 is no
27740 longer necessary. */
27741
27742 p = rtvec_alloc (9
27743 + 32 - info->first_gp_reg_save
27744 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27745 + 63 + 1 - info->first_fp_reg_save);
27746
27747 strcpy (rname, ((crtl->calls_eh_return) ?
27748 "*eh_rest_world_r10" : "*rest_world"));
27749 alloc_rname = ggc_strdup (rname);
27750
27751 j = 0;
27752 RTVEC_ELT (p, j++) = ret_rtx;
27753 RTVEC_ELT (p, j++)
27754 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27755 /* The instruction pattern requires a clobber here;
27756 it is shared with the restVEC helper. */
27757 RTVEC_ELT (p, j++)
27758 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27759
27760 {
27761 /* CR register traditionally saved as CR2. */
27762 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27763 RTVEC_ELT (p, j++)
27764 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27765 if (flag_shrink_wrap)
27766 {
27767 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27768 gen_rtx_REG (Pmode, LR_REGNO),
27769 cfa_restores);
27770 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27771 }
27772 }
27773
27774 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27775 {
27776 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27777 RTVEC_ELT (p, j++)
27778 = gen_frame_load (reg,
27779 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27780 if (flag_shrink_wrap
27781 && save_reg_p (info->first_gp_reg_save + i))
27782 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27783 }
27784 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27785 {
27786 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27787 RTVEC_ELT (p, j++)
27788 = gen_frame_load (reg,
27789 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27790 if (flag_shrink_wrap
27791 && save_reg_p (info->first_altivec_reg_save + i))
27792 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27793 }
27794 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27795 {
27796 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27797 info->first_fp_reg_save + i);
27798 RTVEC_ELT (p, j++)
27799 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27800 if (flag_shrink_wrap
27801 && save_reg_p (info->first_fp_reg_save + i))
27802 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27803 }
27804 RTVEC_ELT (p, j++)
27805 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
27806 RTVEC_ELT (p, j++)
27807 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
27808 RTVEC_ELT (p, j++)
27809 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
27810 RTVEC_ELT (p, j++)
27811 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
27812 RTVEC_ELT (p, j++)
27813 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27814 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27815
27816 if (flag_shrink_wrap)
27817 {
27818 REG_NOTES (insn) = cfa_restores;
27819 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27820 RTX_FRAME_RELATED_P (insn) = 1;
27821 }
27822 return;
27823 }
27824
27825 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27826 if (info->push_p)
27827 frame_off = info->total_size;
27828
27829 /* Restore AltiVec registers if we must do so before adjusting the
27830 stack. */
27831 if (info->altivec_size != 0
27832 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27833 || (DEFAULT_ABI != ABI_V4
27834 && offset_below_red_zone_p (info->altivec_save_offset))))
27835 {
27836 int i;
27837 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27838
27839 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27840 if (use_backchain_to_restore_sp)
27841 {
27842 int frame_regno = 11;
27843
27844 if ((strategy & REST_INLINE_VRS) == 0)
27845 {
27846 /* Of r11 and r12, select the one not clobbered by an
27847 out-of-line restore function for the frame register. */
27848 frame_regno = 11 + 12 - scratch_regno;
27849 }
27850 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
27851 emit_move_insn (frame_reg_rtx,
27852 gen_rtx_MEM (Pmode, sp_reg_rtx));
27853 frame_off = 0;
27854 }
27855 else if (frame_pointer_needed)
27856 frame_reg_rtx = hard_frame_pointer_rtx;
27857
27858 if ((strategy & REST_INLINE_VRS) == 0)
27859 {
27860 int end_save = info->altivec_save_offset + info->altivec_size;
27861 int ptr_off;
27862 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27863 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27864
27865 if (end_save + frame_off != 0)
27866 {
27867 rtx offset = GEN_INT (end_save + frame_off);
27868
27869 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27870 }
27871 else
27872 emit_move_insn (ptr_reg, frame_reg_rtx);
27873
27874 ptr_off = -end_save;
27875 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27876 info->altivec_save_offset + ptr_off,
27877 0, V4SImode, SAVRES_VR);
27878 }
27879 else
27880 {
27881 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27882 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27883 {
27884 rtx addr, areg, mem, insn;
27885 rtx reg = gen_rtx_REG (V4SImode, i);
27886 HOST_WIDE_INT offset
27887 = (info->altivec_save_offset + frame_off
27888 + 16 * (i - info->first_altivec_reg_save));
27889
27890 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27891 {
27892 mem = gen_frame_mem (V4SImode,
27893 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27894 GEN_INT (offset)));
27895 insn = gen_rtx_SET (reg, mem);
27896 }
27897 else
27898 {
27899 areg = gen_rtx_REG (Pmode, 0);
27900 emit_move_insn (areg, GEN_INT (offset));
27901
27902 /* AltiVec addressing mode is [reg+reg]. */
27903 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27904 mem = gen_frame_mem (V4SImode, addr);
27905
27906 /* Rather than emitting a generic move, force use of the
27907 lvx instruction, which we always want. In particular we
27908 don't want lxvd2x/xxpermdi for little endian. */
27909 insn = gen_altivec_lvx_v4si_internal (reg, mem);
27910 }
27911
27912 (void) emit_insn (insn);
27913 }
27914 }
27915
27916 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27917 if (((strategy & REST_INLINE_VRS) == 0
27918 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
27919 && (flag_shrink_wrap
27920 || (offset_below_red_zone_p
27921 (info->altivec_save_offset
27922 + 16 * (i - info->first_altivec_reg_save))))
27923 && save_reg_p (i))
27924 {
27925 rtx reg = gen_rtx_REG (V4SImode, i);
27926 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27927 }
27928 }
27929
27930 /* Restore VRSAVE if we must do so before adjusting the stack. */
27931 if (info->vrsave_size != 0
27932 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27933 || (DEFAULT_ABI != ABI_V4
27934 && offset_below_red_zone_p (info->vrsave_save_offset))))
27935 {
27936 rtx reg;
27937
27938 if (frame_reg_rtx == sp_reg_rtx)
27939 {
27940 if (use_backchain_to_restore_sp)
27941 {
27942 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27943 emit_move_insn (frame_reg_rtx,
27944 gen_rtx_MEM (Pmode, sp_reg_rtx));
27945 frame_off = 0;
27946 }
27947 else if (frame_pointer_needed)
27948 frame_reg_rtx = hard_frame_pointer_rtx;
27949 }
27950
27951 reg = gen_rtx_REG (SImode, 12);
27952 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27953 info->vrsave_save_offset + frame_off));
27954
27955 emit_insn (generate_set_vrsave (reg, info, 1));
27956 }
27957
27958 insn = NULL_RTX;
27959 /* If we have a large stack frame, restore the old stack pointer
27960 using the backchain. */
27961 if (use_backchain_to_restore_sp)
27962 {
27963 if (frame_reg_rtx == sp_reg_rtx)
27964 {
27965 /* Under V.4, don't reset the stack pointer until after we're done
27966 loading the saved registers. */
27967 if (DEFAULT_ABI == ABI_V4)
27968 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27969
27970 insn = emit_move_insn (frame_reg_rtx,
27971 gen_rtx_MEM (Pmode, sp_reg_rtx));
27972 frame_off = 0;
27973 }
27974 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27975 && DEFAULT_ABI == ABI_V4)
27976 /* frame_reg_rtx has been set up by the altivec restore. */
27977 ;
27978 else
27979 {
27980 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
27981 frame_reg_rtx = sp_reg_rtx;
27982 }
27983 }
27984 /* If we have a frame pointer, we can restore the old stack pointer
27985 from it. */
27986 else if (frame_pointer_needed)
27987 {
27988 frame_reg_rtx = sp_reg_rtx;
27989 if (DEFAULT_ABI == ABI_V4)
27990 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27991 /* Prevent reordering memory accesses against stack pointer restore. */
27992 else if (cfun->calls_alloca
27993 || offset_below_red_zone_p (-info->total_size))
27994 rs6000_emit_stack_tie (frame_reg_rtx, true);
27995
27996 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
27997 GEN_INT (info->total_size)));
27998 frame_off = 0;
27999 }
28000 else if (info->push_p
28001 && DEFAULT_ABI != ABI_V4
28002 && !crtl->calls_eh_return)
28003 {
28004 /* Prevent reordering memory accesses against stack pointer restore. */
28005 if (cfun->calls_alloca
28006 || offset_below_red_zone_p (-info->total_size))
28007 rs6000_emit_stack_tie (frame_reg_rtx, false);
28008 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28009 GEN_INT (info->total_size)));
28010 frame_off = 0;
28011 }
28012 if (insn && frame_reg_rtx == sp_reg_rtx)
28013 {
28014 if (cfa_restores)
28015 {
28016 REG_NOTES (insn) = cfa_restores;
28017 cfa_restores = NULL_RTX;
28018 }
28019 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28020 RTX_FRAME_RELATED_P (insn) = 1;
28021 }
28022
28023 /* Restore AltiVec registers if we have not done so already. */
28024 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28025 && info->altivec_size != 0
28026 && (DEFAULT_ABI == ABI_V4
28027 || !offset_below_red_zone_p (info->altivec_save_offset)))
28028 {
28029 int i;
28030
28031 if ((strategy & REST_INLINE_VRS) == 0)
28032 {
28033 int end_save = info->altivec_save_offset + info->altivec_size;
28034 int ptr_off;
28035 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28036 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28037 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28038
28039 if (end_save + frame_off != 0)
28040 {
28041 rtx offset = GEN_INT (end_save + frame_off);
28042
28043 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28044 }
28045 else
28046 emit_move_insn (ptr_reg, frame_reg_rtx);
28047
28048 ptr_off = -end_save;
28049 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28050 info->altivec_save_offset + ptr_off,
28051 0, V4SImode, SAVRES_VR);
28052 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28053 {
28054 /* Frame reg was clobbered by out-of-line save. Restore it
28055 from ptr_reg, and if we are calling out-of-line gpr or
28056 fpr restore set up the correct pointer and offset. */
28057 unsigned newptr_regno = 1;
28058 if (!restoring_GPRs_inline)
28059 {
28060 bool lr = info->gp_save_offset + info->gp_size == 0;
28061 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28062 newptr_regno = ptr_regno_for_savres (sel);
28063 end_save = info->gp_save_offset + info->gp_size;
28064 }
28065 else if (!restoring_FPRs_inline)
28066 {
28067 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28068 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28069 newptr_regno = ptr_regno_for_savres (sel);
28070 end_save = info->fp_save_offset + info->fp_size;
28071 }
28072
28073 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28074 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28075
28076 if (end_save + ptr_off != 0)
28077 {
28078 rtx offset = GEN_INT (end_save + ptr_off);
28079
28080 frame_off = -end_save;
28081 if (TARGET_32BIT)
28082 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28083 ptr_reg, offset));
28084 else
28085 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28086 ptr_reg, offset));
28087 }
28088 else
28089 {
28090 frame_off = ptr_off;
28091 emit_move_insn (frame_reg_rtx, ptr_reg);
28092 }
28093 }
28094 }
28095 else
28096 {
28097 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28098 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28099 {
28100 rtx addr, areg, mem, insn;
28101 rtx reg = gen_rtx_REG (V4SImode, i);
28102 HOST_WIDE_INT offset
28103 = (info->altivec_save_offset + frame_off
28104 + 16 * (i - info->first_altivec_reg_save));
28105
28106 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28107 {
28108 mem = gen_frame_mem (V4SImode,
28109 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28110 GEN_INT (offset)));
28111 insn = gen_rtx_SET (reg, mem);
28112 }
28113 else
28114 {
28115 areg = gen_rtx_REG (Pmode, 0);
28116 emit_move_insn (areg, GEN_INT (offset));
28117
28118 /* AltiVec addressing mode is [reg+reg]. */
28119 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28120 mem = gen_frame_mem (V4SImode, addr);
28121
28122 /* Rather than emitting a generic move, force use of the
28123 lvx instruction, which we always want. In particular we
28124 don't want lxvd2x/xxpermdi for little endian. */
28125 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28126 }
28127
28128 (void) emit_insn (insn);
28129 }
28130 }
28131
28132 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28133 if (((strategy & REST_INLINE_VRS) == 0
28134 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28135 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28136 && save_reg_p (i))
28137 {
28138 rtx reg = gen_rtx_REG (V4SImode, i);
28139 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28140 }
28141 }
28142
28143 /* Restore VRSAVE if we have not done so already. */
28144 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28145 && info->vrsave_size != 0
28146 && (DEFAULT_ABI == ABI_V4
28147 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28148 {
28149 rtx reg;
28150
28151 reg = gen_rtx_REG (SImode, 12);
28152 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28153 info->vrsave_save_offset + frame_off));
28154
28155 emit_insn (generate_set_vrsave (reg, info, 1));
28156 }
28157
28158 /* If we exit by an out-of-line restore function on ABI_V4 then that
28159 function will deallocate the stack, so we don't need to worry
28160 about the unwinder restoring cr from an invalid stack frame
28161 location. */
28162 exit_func = (!restoring_FPRs_inline
28163 || (!restoring_GPRs_inline
28164 && info->first_fp_reg_save == 64));
28165
28166 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28167 *separate* slots if the routine calls __builtin_eh_return, so
28168 that they can be independently restored by the unwinder. */
28169 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28170 {
28171 int i, cr_off = info->ehcr_offset;
28172
28173 for (i = 0; i < 8; i++)
28174 if (!call_used_regs[CR0_REGNO + i])
28175 {
28176 rtx reg = gen_rtx_REG (SImode, 0);
28177 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28178 cr_off + frame_off));
28179
28180 insn = emit_insn (gen_movsi_to_cr_one
28181 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28182
28183 if (!exit_func && flag_shrink_wrap)
28184 {
28185 add_reg_note (insn, REG_CFA_RESTORE,
28186 gen_rtx_REG (SImode, CR0_REGNO + i));
28187
28188 RTX_FRAME_RELATED_P (insn) = 1;
28189 }
28190
28191 cr_off += reg_size;
28192 }
28193 }
28194
28195 /* Get the old lr if we saved it. If we are restoring registers
28196 out-of-line, then the out-of-line routines can do this for us. */
28197 if (restore_lr && restoring_GPRs_inline)
28198 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28199
28200 /* Get the old cr if we saved it. */
28201 if (info->cr_save_p)
28202 {
28203 unsigned cr_save_regno = 12;
28204
28205 if (!restoring_GPRs_inline)
28206 {
28207 /* Ensure we don't use the register used by the out-of-line
28208	     gpr restore below.  */
28209 bool lr = info->gp_save_offset + info->gp_size == 0;
28210 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28211 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28212
28213 if (gpr_ptr_regno == 12)
28214 cr_save_regno = 11;
28215 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28216 }
28217 else if (REGNO (frame_reg_rtx) == 12)
28218 cr_save_regno = 11;
28219
28220 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28221 info->cr_save_offset + frame_off,
28222 exit_func);
28223 }
28224
28225 /* Set LR here to try to overlap restores below. */
28226 if (restore_lr && restoring_GPRs_inline)
28227 restore_saved_lr (0, exit_func);
28228
28229 /* Load exception handler data registers, if needed. */
28230 if (crtl->calls_eh_return)
28231 {
28232 unsigned int i, regno;
28233
28234 if (TARGET_AIX)
28235 {
28236 rtx reg = gen_rtx_REG (reg_mode, 2);
28237 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28238 frame_off + RS6000_TOC_SAVE_SLOT));
28239 }
28240
28241 for (i = 0; ; ++i)
28242 {
28243 rtx mem;
28244
28245 regno = EH_RETURN_DATA_REGNO (i);
28246 if (regno == INVALID_REGNUM)
28247 break;
28248
28249 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28250 info->ehrd_offset + frame_off
28251 + reg_size * (int) i);
28252
28253 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28254 }
28255 }
28256
28257 /* Restore GPRs. This is done as a PARALLEL if we are using
28258 the load-multiple instructions. */
28259 if (!restoring_GPRs_inline)
28260 {
28261 /* We are jumping to an out-of-line function. */
28262 rtx ptr_reg;
28263 int end_save = info->gp_save_offset + info->gp_size;
28264 bool can_use_exit = end_save == 0;
28265 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28266 int ptr_off;
28267
28268 /* Emit stack reset code if we need it. */
28269 ptr_regno = ptr_regno_for_savres (sel);
28270 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28271 if (can_use_exit)
28272 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28273 else if (end_save + frame_off != 0)
28274 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28275 GEN_INT (end_save + frame_off)));
28276 else if (REGNO (frame_reg_rtx) != ptr_regno)
28277 emit_move_insn (ptr_reg, frame_reg_rtx);
28278 if (REGNO (frame_reg_rtx) == ptr_regno)
28279 frame_off = -end_save;
28280
28281 if (can_use_exit && info->cr_save_p)
28282 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28283
28284 ptr_off = -end_save;
28285 rs6000_emit_savres_rtx (info, ptr_reg,
28286 info->gp_save_offset + ptr_off,
28287 info->lr_save_offset + ptr_off,
28288 reg_mode, sel);
28289 }
28290 else if (using_load_multiple)
28291 {
28292 rtvec p;
28293 p = rtvec_alloc (32 - info->first_gp_reg_save);
28294 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28295 RTVEC_ELT (p, i)
28296 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28297 frame_reg_rtx,
28298 info->gp_save_offset + frame_off + reg_size * i);
28299 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28300 }
28301 else
28302 {
28303 int offset = info->gp_save_offset + frame_off;
28304 for (i = info->first_gp_reg_save; i < 32; i++)
28305 {
28306 if (save_reg_p (i)
28307 && !cfun->machine->gpr_is_wrapped_separately[i])
28308 {
28309 rtx reg = gen_rtx_REG (reg_mode, i);
28310 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28311 }
28312
28313 offset += reg_size;
28314 }
28315 }
28316
28317 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28318 {
28319 /* If the frame pointer was used then we can't delay emitting
28320 a REG_CFA_DEF_CFA note. This must happen on the insn that
28321 restores the frame pointer, r31. We may have already emitted
28322	 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28323 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28324 be harmless if emitted. */
28325 if (frame_pointer_needed)
28326 {
28327 insn = get_last_insn ();
28328 add_reg_note (insn, REG_CFA_DEF_CFA,
28329 plus_constant (Pmode, frame_reg_rtx, frame_off));
28330 RTX_FRAME_RELATED_P (insn) = 1;
28331 }
28332
28333 /* Set up cfa_restores. We always need these when
28334 shrink-wrapping. If not shrink-wrapping then we only need
28335 the cfa_restore when the stack location is no longer valid.
28336 The cfa_restores must be emitted on or before the insn that
28337 invalidates the stack, and of course must not be emitted
28338 before the insn that actually does the restore. The latter
28339 is why it is a bad idea to emit the cfa_restores as a group
28340 on the last instruction here that actually does a restore:
28341	 that insn may be reordered with respect to others doing
28342 restores. */
28343 if (flag_shrink_wrap
28344 && !restoring_GPRs_inline
28345 && info->first_fp_reg_save == 64)
28346 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28347
28348 for (i = info->first_gp_reg_save; i < 32; i++)
28349 if (save_reg_p (i)
28350 && !cfun->machine->gpr_is_wrapped_separately[i])
28351 {
28352 rtx reg = gen_rtx_REG (reg_mode, i);
28353 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28354 }
28355 }
28356
28357 if (!restoring_GPRs_inline
28358 && info->first_fp_reg_save == 64)
28359 {
28360 /* We are jumping to an out-of-line function. */
28361 if (cfa_restores)
28362 emit_cfa_restores (cfa_restores);
28363 return;
28364 }
28365
28366 if (restore_lr && !restoring_GPRs_inline)
28367 {
28368 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28369 restore_saved_lr (0, exit_func);
28370 }
28371
28372 /* Restore fpr's if we need to do it without calling a function. */
28373 if (restoring_FPRs_inline)
28374 {
28375 int offset = info->fp_save_offset + frame_off;
28376 for (i = info->first_fp_reg_save; i < 64; i++)
28377 {
28378 if (save_reg_p (i)
28379 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28380 {
28381 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28382 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28383 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28384 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28385 cfa_restores);
28386 }
28387
28388 offset += fp_reg_size;
28389 }
28390 }
28391
28392 /* If we saved cr, restore it here. Just those that were used. */
28393 if (info->cr_save_p)
28394 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28395
28396 /* If this is V.4, unwind the stack pointer after all of the loads
28397 have been done, or set up r11 if we are restoring fp out of line. */
28398 ptr_regno = 1;
28399 if (!restoring_FPRs_inline)
28400 {
28401 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28402 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28403 ptr_regno = ptr_regno_for_savres (sel);
28404 }
28405
28406 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28407 if (REGNO (frame_reg_rtx) == ptr_regno)
28408 frame_off = 0;
28409
28410 if (insn && restoring_FPRs_inline)
28411 {
28412 if (cfa_restores)
28413 {
28414 REG_NOTES (insn) = cfa_restores;
28415 cfa_restores = NULL_RTX;
28416 }
28417 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28418 RTX_FRAME_RELATED_P (insn) = 1;
28419 }
28420
28421 if (crtl->calls_eh_return)
28422 {
28423 rtx sa = EH_RETURN_STACKADJ_RTX;
28424 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28425 }
28426
28427 if (!sibcall && restoring_FPRs_inline)
28428 {
28429 if (cfa_restores)
28430 {
28431 /* We can't hang the cfa_restores off a simple return,
28432 since the shrink-wrap code sometimes uses an existing
28433 return. This means there might be a path from
28434 pre-prologue code to this return, and dwarf2cfi code
28435 wants the eh_frame unwinder state to be the same on
28436 all paths to any point. So we need to emit the
28437 cfa_restores before the return. For -m64 we really
28438 don't need epilogue cfa_restores at all, except for
28439	     this irritating dwarf2cfi shrink-wrap
28440	     requirement; the stack red zone means eh_frame info
28441 from the prologue telling the unwinder to restore
28442 from the stack is perfectly good right to the end of
28443 the function. */
28444 emit_insn (gen_blockage ());
28445 emit_cfa_restores (cfa_restores);
28446 cfa_restores = NULL_RTX;
28447 }
28448
28449 emit_jump_insn (targetm.gen_simple_return ());
28450 }
28451
28452 if (!sibcall && !restoring_FPRs_inline)
28453 {
28454 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28455 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28456 int elt = 0;
28457 RTVEC_ELT (p, elt++) = ret_rtx;
28458 if (lr)
28459 RTVEC_ELT (p, elt++)
28460 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28461
28462 /* We have to restore more than two FP registers, so branch to the
28463 restore function. It will return to our caller. */
28464 int i;
28465 int reg;
28466 rtx sym;
28467
28468 if (flag_shrink_wrap)
28469 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28470
28471 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28472 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28473 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28474 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28475
28476 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28477 {
28478 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28479
28480 RTVEC_ELT (p, elt++)
28481 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28482 if (flag_shrink_wrap
28483 && save_reg_p (info->first_fp_reg_save + i))
28484 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28485 }
28486
28487 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28488 }
28489
28490 if (cfa_restores)
28491 {
28492 if (sibcall)
28493 /* Ensure the cfa_restores are hung off an insn that won't
28494 be reordered above other restores. */
28495 emit_insn (gen_blockage ());
28496
28497 emit_cfa_restores (cfa_restores);
28498 }
28499 }
28500
28501 /* Write function epilogue. */
28502
28503 static void
28504 rs6000_output_function_epilogue (FILE *file)
28505 {
28506 #if TARGET_MACHO
28507 macho_branch_islands ();
28508
28509 {
28510 rtx_insn *insn = get_last_insn ();
28511 rtx_insn *deleted_debug_label = NULL;
28512
28513 /* Mach-O doesn't support labels at the end of objects, so if
28514 it looks like we might want one, take special action.
28515
28516 First, collect any sequence of deleted debug labels. */
28517 while (insn
28518 && NOTE_P (insn)
28519 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28520 {
28521	   /* For NOTE_INSN_DELETED_DEBUG_LABEL notes, don't insert a
28522	      nop; instead set their CODE_LABEL_NUMBER to -1, otherwise
28523	      there would be code generation differences
28524	      between -g and -g0.  */
28525 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28526 deleted_debug_label = insn;
28527 insn = PREV_INSN (insn);
28528 }
28529
28530 /* Second, if we have:
28531 label:
28532 barrier
28533 then this needs to be detected, so skip past the barrier. */
28534
28535 if (insn && BARRIER_P (insn))
28536 insn = PREV_INSN (insn);
28537
28538 /* Up to now we've only seen notes or barriers. */
28539 if (insn)
28540 {
28541 if (LABEL_P (insn)
28542 || (NOTE_P (insn)
28543 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28544 /* Trailing label: <barrier>. */
28545 fputs ("\tnop\n", file);
28546 else
28547 {
28548 /* Lastly, see if we have a completely empty function body. */
28549 while (insn && ! INSN_P (insn))
28550 insn = PREV_INSN (insn);
28551 /* If we don't find any insns, we've got an empty function body;
28552         i.e. completely empty, without a return or branch.  This is
28553         taken as the case where a function body has been removed
28554         because it contains an inline __builtin_unreachable().  GCC
28555         states that reaching __builtin_unreachable() means UB, so we're
28556 not obliged to do anything special; however, we want
28557 non-zero-sized function bodies. To meet this, and help the
28558 user out, let's trap the case. */
28559 if (insn == NULL)
28560 fputs ("\ttrap\n", file);
28561 }
28562 }
28563 else if (deleted_debug_label)
28564 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28565 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28566 CODE_LABEL_NUMBER (insn) = -1;
28567 }
28568 #endif
28569
28570 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28571 on its format.
28572
28573 We don't output a traceback table if -finhibit-size-directive was
28574 used. The documentation for -finhibit-size-directive reads
28575 ``don't output a @code{.size} assembler directive, or anything
28576 else that would cause trouble if the function is split in the
28577 middle, and the two halves are placed at locations far apart in
28578 memory.'' The traceback table has this property, since it
28579 includes the offset from the start of the function to the
28580 traceback table itself.
28581
28582      System V.4 PowerPC (and the embedded ABI derived from it) uses a
28583      different traceback table.  */
28584 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28585 && ! flag_inhibit_size_directive
28586 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28587 {
28588 const char *fname = NULL;
28589 const char *language_string = lang_hooks.name;
28590 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28591 int i;
28592 int optional_tbtab;
28593 rs6000_stack_t *info = rs6000_stack_info ();
28594
28595 if (rs6000_traceback == traceback_full)
28596 optional_tbtab = 1;
28597 else if (rs6000_traceback == traceback_part)
28598 optional_tbtab = 0;
28599 else
28600 optional_tbtab = !optimize_size && !TARGET_ELF;
28601
28602 if (optional_tbtab)
28603 {
28604 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28605 while (*fname == '.') /* V.4 encodes . in the name */
28606 fname++;
28607
28608 /* Need label immediately before tbtab, so we can compute
28609 its offset from the function start. */
28610 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28611 ASM_OUTPUT_LABEL (file, fname);
28612 }
28613
28614 /* The .tbtab pseudo-op can only be used for the first eight
28615 expressions, since it can't handle the possibly variable
28616 length fields that follow. However, if you omit the optional
28617 fields, the assembler outputs zeros for all optional fields
28618	       anyway, giving each variable-length field its minimum length
28619	       (as defined in sys/debug.h).  Thus we cannot use the .tbtab
28620	       pseudo-op at all.  */
28621
28622 /* An all-zero word flags the start of the tbtab, for debuggers
28623 that have to find it by searching forward from the entry
28624 point or from the current pc. */
28625 fputs ("\t.long 0\n", file);
28626
28627 /* Tbtab format type. Use format type 0. */
28628 fputs ("\t.byte 0,", file);
28629
28630 /* Language type. Unfortunately, there does not seem to be any
28631 official way to discover the language being compiled, so we
28632 use language_string.
28633 C is 0. Fortran is 1. Ada is 3. C++ is 9.
28634 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28635 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
28636 numbers either, so for now use 0. */
28637 if (lang_GNU_C ()
28638 || ! strcmp (language_string, "GNU GIMPLE")
28639 || ! strcmp (language_string, "GNU Go")
28640 || ! strcmp (language_string, "GNU D")
28641 || ! strcmp (language_string, "libgccjit"))
28642 i = 0;
28643 else if (! strcmp (language_string, "GNU F77")
28644 || lang_GNU_Fortran ())
28645 i = 1;
28646 else if (! strcmp (language_string, "GNU Ada"))
28647 i = 3;
28648 else if (lang_GNU_CXX ()
28649 || ! strcmp (language_string, "GNU Objective-C++"))
28650 i = 9;
28651 else if (! strcmp (language_string, "GNU Java"))
28652 i = 13;
28653 else if (! strcmp (language_string, "GNU Objective-C"))
28654 i = 14;
28655 else
28656 gcc_unreachable ();
28657 fprintf (file, "%d,", i);
28658
28659 /* 8 single bit fields: global linkage (not set for C extern linkage,
28660 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28661 from start of procedure stored in tbtab, internal function, function
28662 has controlled storage, function has no toc, function uses fp,
28663 function logs/aborts fp operations. */
28664 /* Assume that fp operations are used if any fp reg must be saved. */
28665 fprintf (file, "%d,",
28666 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28667
28668 /* 6 bitfields: function is interrupt handler, name present in
28669 proc table, function calls alloca, on condition directives
28670 (controls stack walks, 3 bits), saves condition reg, saves
28671 link reg. */
28672 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28673 set up as a frame pointer, even when there is no alloca call. */
28674 fprintf (file, "%d,",
28675 ((optional_tbtab << 6)
28676 | ((optional_tbtab & frame_pointer_needed) << 5)
28677 | (info->cr_save_p << 1)
28678 | (info->lr_save_p)));
28679
28680 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28681 (6 bits). */
28682 fprintf (file, "%d,",
28683 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28684
28685 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28686 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28687
28688 if (optional_tbtab)
28689 {
28690 /* Compute the parameter info from the function decl argument
28691 list. */
28692 tree decl;
28693 int next_parm_info_bit = 31;
28694
28695 for (decl = DECL_ARGUMENTS (current_function_decl);
28696 decl; decl = DECL_CHAIN (decl))
28697 {
28698 rtx parameter = DECL_INCOMING_RTL (decl);
28699 machine_mode mode = GET_MODE (parameter);
28700
28701 if (GET_CODE (parameter) == REG)
28702 {
28703 if (SCALAR_FLOAT_MODE_P (mode))
28704 {
28705 int bits;
28706
28707 float_parms++;
28708
28709 switch (mode)
28710 {
28711 case E_SFmode:
28712 case E_SDmode:
28713 bits = 0x2;
28714 break;
28715
28716 case E_DFmode:
28717 case E_DDmode:
28718 case E_TFmode:
28719 case E_TDmode:
28720 case E_IFmode:
28721 case E_KFmode:
28722 bits = 0x3;
28723 break;
28724
28725 default:
28726 gcc_unreachable ();
28727 }
28728
28729		      /* If only one bit will fit, don't OR in this entry.  */
28730 if (next_parm_info_bit > 0)
28731 parm_info |= (bits << (next_parm_info_bit - 1));
28732 next_parm_info_bit -= 2;
28733 }
28734 else
28735 {
28736 fixed_parms += ((GET_MODE_SIZE (mode)
28737 + (UNITS_PER_WORD - 1))
28738 / UNITS_PER_WORD);
28739 next_parm_info_bit -= 1;
28740 }
28741 }
28742 }
28743 }
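      /* A worked example of the packing above, assuming the argument
	 list (double, int, float) with the double and float arriving in
	 FPRs and the int in a GPR: the double stores 0b11 at bits 31:30,
	 the int consumes one bit (left as 0 for fixed), and the float
	 stores 0b10 at bits 28:27, giving parm_info == 0xd0000000 and
	 fixed_parms == 1 on a 64-bit target.  */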
28744
28745 /* Number of fixed point parameters. */
28746 /* This is actually the number of words of fixed point parameters; thus
28747       an 8-byte struct counts as 2, and thus the maximum value is 8.  */
28748 fprintf (file, "%d,", fixed_parms);
28749
28750 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28751 all on stack. */
28752 /* This is actually the number of fp registers that hold parameters;
28753 and thus the maximum value is 13. */
28754 /* Set parameters on stack bit if parameters are not in their original
28755 registers, regardless of whether they are on the stack? Xlc
28756 seems to set the bit when not optimizing. */
28757 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28758
28759 if (optional_tbtab)
28760 {
28761 /* Optional fields follow. Some are variable length. */
28762
28763	 /* Parameter types, left-adjusted bit fields: 0 fixed, 10 single
28764 float, 11 double float. */
28765 /* There is an entry for each parameter in a register, in the order
28766 that they occur in the parameter list. Any intervening arguments
28767 on the stack are ignored. If the list overflows a long (max
28768 possible length 34 bits) then completely leave off all elements
28769 that don't fit. */
28770 /* Only emit this long if there was at least one parameter. */
28771 if (fixed_parms || float_parms)
28772 fprintf (file, "\t.long %d\n", parm_info);
28773
28774 /* Offset from start of code to tb table. */
28775 fputs ("\t.long ", file);
28776 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28777 RS6000_OUTPUT_BASENAME (file, fname);
28778 putc ('-', file);
28779 rs6000_output_function_entry (file, fname);
28780 putc ('\n', file);
28781
28782 /* Interrupt handler mask. */
28783 /* Omit this long, since we never set the interrupt handler bit
28784 above. */
28785
28786 /* Number of CTL (controlled storage) anchors. */
28787 /* Omit this long, since the has_ctl bit is never set above. */
28788
28789 /* Displacement into stack of each CTL anchor. */
28790 /* Omit this list of longs, because there are no CTL anchors. */
28791
28792 /* Length of function name. */
28793 if (*fname == '*')
28794 ++fname;
28795 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28796
28797 /* Function name. */
28798 assemble_string (fname, strlen (fname));
28799
28800 /* Register for alloca automatic storage; this is always reg 31.
28801 Only emit this if the alloca bit was set above. */
28802 if (frame_pointer_needed)
28803 fputs ("\t.byte 31\n", file);
28804
28805 fputs ("\t.align 2\n", file);
28806 }
28807 }
28808
28809 /* Arrange to define .LCTOC1 label, if not already done. */
28810 if (need_toc_init)
28811 {
28812 need_toc_init = 0;
28813 if (!toc_initialized)
28814 {
28815 switch_to_section (toc_section);
28816 switch_to_section (current_function_section ());
28817 }
28818 }
28819 }
28820
28821 /* -fsplit-stack support. */
28822
28823 /* A SYMBOL_REF for __morestack. */
28824 static GTY(()) rtx morestack_ref;
28825
28826 static rtx
28827 gen_add3_const (rtx rt, rtx ra, long c)
28828 {
28829 if (TARGET_64BIT)
28830 return gen_adddi3 (rt, ra, GEN_INT (c));
28831 else
28832 return gen_addsi3 (rt, ra, GEN_INT (c));
28833 }
28834
28835 /* Emit -fsplit-stack prologue, which goes before the regular function
28836 prologue (at local entry point in the case of ELFv2). */
28837
28838 void
28839 rs6000_expand_split_stack_prologue (void)
28840 {
28841 rs6000_stack_t *info = rs6000_stack_info ();
28842 unsigned HOST_WIDE_INT allocate;
28843 long alloc_hi, alloc_lo;
28844 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
28845 rtx_insn *insn;
28846
28847 gcc_assert (flag_split_stack && reload_completed);
28848
28849 if (!info->push_p)
28850 return;
28851
28852 if (global_regs[29])
28853 {
28854 error ("%qs uses register r29", "-fsplit-stack");
28855 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
28856 "conflicts with %qD", global_regs_decl[29]);
28857 }
28858
28859 allocate = info->total_size;
28860 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
28861 {
28862       sorry ("stack frame larger than 2G is not supported for -fsplit-stack");
28863 return;
28864 }
28865 if (morestack_ref == NULL_RTX)
28866 {
28867 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
28868 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
28869 | SYMBOL_FLAG_FUNCTION);
28870 }
28871
28872 r0 = gen_rtx_REG (Pmode, 0);
28873 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28874 r12 = gen_rtx_REG (Pmode, 12);
28875 emit_insn (gen_load_split_stack_limit (r0));
28876 /* Always emit two insns here to calculate the requested stack,
28877 so that the linker can edit them when adjusting size for calling
28878 non-split-stack code. */
28879 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
28880 alloc_lo = -allocate - alloc_hi;
28881 if (alloc_hi != 0)
28882 {
28883 emit_insn (gen_add3_const (r12, r1, alloc_hi));
28884 if (alloc_lo != 0)
28885 emit_insn (gen_add3_const (r12, r12, alloc_lo));
28886 else
28887 emit_insn (gen_nop ());
28888 }
28889 else
28890 {
28891 emit_insn (gen_add3_const (r12, r1, alloc_lo));
28892 emit_insn (gen_nop ());
28893 }
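  /* An illustrative example (not exact assembler output): for
     ALLOCATE == 0x12340 the split above gives alloc_hi == -0x10000
     and alloc_lo == -0x2340, so the linker-editable pair is

	 addis 12,1,-1
	 addi  12,12,-9024

     while a frame that fits in 16 bits pairs one add with a nop.  */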
28894
28895 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
28896 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
28897 ok_label = gen_label_rtx ();
28898 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28899 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
28900 gen_rtx_LABEL_REF (VOIDmode, ok_label),
28901 pc_rtx);
28902 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28903 JUMP_LABEL (insn) = ok_label;
28904 /* Mark the jump as very likely to be taken. */
28905 add_reg_br_prob_note (insn, profile_probability::very_likely ());
28906
28907 lr = gen_rtx_REG (Pmode, LR_REGNO);
28908 insn = emit_move_insn (r0, lr);
28909 RTX_FRAME_RELATED_P (insn) = 1;
28910 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
28911 RTX_FRAME_RELATED_P (insn) = 1;
28912
28913 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
28914 const0_rtx, const0_rtx));
28915 call_fusage = NULL_RTX;
28916 use_reg (&call_fusage, r12);
28917 /* Say the call uses r0, even though it doesn't, to stop regrename
28918 from twiddling with the insns saving lr, trashing args for cfun.
28919 The insns restoring lr are similarly protected by making
28920 split_stack_return use r0. */
28921 use_reg (&call_fusage, r0);
28922 add_function_usage_to (insn, call_fusage);
28923 /* Indicate that this function can't jump to non-local gotos. */
28924 make_reg_eh_region_note_nothrow_nononlocal (insn);
28925 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
28926 insn = emit_move_insn (lr, r0);
28927 add_reg_note (insn, REG_CFA_RESTORE, lr);
28928 RTX_FRAME_RELATED_P (insn) = 1;
28929 emit_insn (gen_split_stack_return ());
28930
28931 emit_label (ok_label);
28932 LABEL_NUSES (ok_label) = 1;
28933 }
28934
28935 /* Return the internal arg pointer used for function incoming
28936 arguments. When -fsplit-stack, the arg pointer is r12 so we need
28937 to copy it to a pseudo in order for it to be preserved over calls
28938 and suchlike. We'd really like to use a pseudo here for the
28939 internal arg pointer but data-flow analysis is not prepared to
28940 accept pseudos as live at the beginning of a function. */
28941
28942 static rtx
28943 rs6000_internal_arg_pointer (void)
28944 {
28945 if (flag_split_stack
28946 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
28947 == NULL))
28949 {
28950 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
28951 {
28952 rtx pat;
28953
28954 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
28955 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
28956
28957 /* Put the pseudo initialization right after the note at the
28958 beginning of the function. */
28959 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
28960 gen_rtx_REG (Pmode, 12));
28961 push_topmost_sequence ();
28962 emit_insn_after (pat, get_insns ());
28963 pop_topmost_sequence ();
28964 }
28965 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
28966 FIRST_PARM_OFFSET (current_function_decl));
28967 return copy_to_reg (ret);
28968 }
28969 return virtual_incoming_args_rtx;
28970 }
28971
28972 /* We may have to tell the dataflow pass that the split stack prologue
28973 is initializing a register. */
28974
28975 static void
28976 rs6000_live_on_entry (bitmap regs)
28977 {
28978 if (flag_split_stack)
28979 bitmap_set_bit (regs, 12);
28980 }
28981
28982 /* Emit -fsplit-stack dynamic stack allocation space check. */
28983
28984 void
28985 rs6000_split_stack_space_check (rtx size, rtx label)
28986 {
28987 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28988 rtx limit = gen_reg_rtx (Pmode);
28989 rtx requested = gen_reg_rtx (Pmode);
28990 rtx cmp = gen_reg_rtx (CCUNSmode);
28991 rtx jump;
28992
28993 emit_insn (gen_load_split_stack_limit (limit));
28994 if (CONST_INT_P (size))
28995 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
28996 else
28997 {
28998 size = force_reg (Pmode, size);
28999 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29000 }
29001 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29002 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29003 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29004 gen_rtx_LABEL_REF (VOIDmode, label),
29005 pc_rtx);
29006 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29007 JUMP_LABEL (jump) = label;
29008 }
29009 \f
29010 /* A C compound statement that outputs the assembler code for a thunk
29011 function, used to implement C++ virtual function calls with
29012 multiple inheritance. The thunk acts as a wrapper around a virtual
29013 function, adjusting the implicit object parameter before handing
29014 control off to the real function.
29015
29016 First, emit code to add the integer DELTA to the location that
29017 contains the incoming first argument. Assume that this argument
29018 contains a pointer, and is the one used to pass the `this' pointer
29019 in C++. This is the incoming argument *before* the function
29020 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29021 values of all other incoming arguments.
29022
29023 After the addition, emit code to jump to FUNCTION, which is a
29024 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29025 not touch the return address. Hence returning from FUNCTION will
29026 return to whoever called the current `thunk'.
29027
29028 The effect must be as if FUNCTION had been called directly with the
29029 adjusted first argument. This macro is responsible for emitting
29030 all of the code for a thunk function; output_function_prologue()
29031 and output_function_epilogue() are not invoked.
29032
29033 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29034 been extracted from it.) It might possibly be useful on some
29035 targets, but probably not.
29036
29037 If you do not define this macro, the target-independent code in the
29038 C++ frontend will generate a less efficient heavyweight thunk that
29039 calls FUNCTION instead of jumping to it. The generic approach does
29040 not support varargs. */
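/* Roughly equivalent C for the sequence emitted below (a sketch, not
   the actual RTL; the vtable pointer is loaded through r12 and FUNCTION
   is reached by a sibcall, so nothing returns here):

       this += DELTA;
       if (VCALL_OFFSET != 0)
	 this += *(ptrdiff_t *) (*(char **) this + VCALL_OFFSET);
       goto FUNCTION;  */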
29041
29042 static void
29043 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29044 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29045 tree function)
29046 {
29047 rtx this_rtx, funexp;
29048 rtx_insn *insn;
29049
29050 reload_completed = 1;
29051 epilogue_completed = 1;
29052
29053 /* Mark the end of the (empty) prologue. */
29054 emit_note (NOTE_INSN_PROLOGUE_END);
29055
29056 /* Find the "this" pointer. If the function returns a structure,
29057 the structure return pointer is in r3. */
29058 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29059 this_rtx = gen_rtx_REG (Pmode, 4);
29060 else
29061 this_rtx = gen_rtx_REG (Pmode, 3);
29062
29063 /* Apply the constant offset, if required. */
29064 if (delta)
29065 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29066
29067 /* Apply the offset from the vtable, if required. */
29068 if (vcall_offset)
29069 {
29070 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29071 rtx tmp = gen_rtx_REG (Pmode, 12);
29072
29073 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29074 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29075 {
29076 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29077 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29078 }
29079 else
29080 {
29081 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29082
29083 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29084 }
29085 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29086 }
29087
29088 /* Generate a tail call to the target function. */
29089 if (!TREE_USED (function))
29090 {
29091 assemble_external (function);
29092 TREE_USED (function) = 1;
29093 }
29094 funexp = XEXP (DECL_RTL (function), 0);
29095 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29096
29097 #if TARGET_MACHO
29098 if (MACHOPIC_INDIRECT)
29099 funexp = machopic_indirect_call_target (funexp);
29100 #endif
29101
29102 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29103 generate sibcall RTL explicitly. */
29104 insn = emit_call_insn (
29105 gen_rtx_PARALLEL (VOIDmode,
29106 gen_rtvec (3,
29107 gen_rtx_CALL (VOIDmode,
29108 funexp, const0_rtx),
29109 gen_rtx_USE (VOIDmode, const0_rtx),
29110 simple_return_rtx)));
29111 SIBLING_CALL_P (insn) = 1;
29112 emit_barrier ();
29113
29114 /* Run just enough of rest_of_compilation to get the insns emitted.
29115 There's not really enough bulk here to make other passes such as
29116      instruction scheduling worthwhile.  Note that use_thunk calls
29117 assemble_start_function and assemble_end_function. */
29118 insn = get_insns ();
29119 shorten_branches (insn);
29120 final_start_function (insn, file, 1);
29121 final (insn, file, 1);
29122 final_end_function ();
29123
29124 reload_completed = 0;
29125 epilogue_completed = 0;
29126 }
29127 \f
29128 /* A quick summary of the various types of 'constant-pool tables'
29129 under PowerPC:
29130
29131 Target Flags Name One table per
29132 AIX (none) AIX TOC object file
29133 AIX -mfull-toc AIX TOC object file
29134 AIX -mminimal-toc AIX minimal TOC translation unit
29135 SVR4/EABI (none) SVR4 SDATA object file
29136 SVR4/EABI -fpic SVR4 pic object file
29137 SVR4/EABI -fPIC SVR4 PIC translation unit
29138 SVR4/EABI -mrelocatable EABI TOC function
29139 SVR4/EABI -maix AIX TOC object file
29140 SVR4/EABI -maix -mminimal-toc
29141 AIX minimal TOC translation unit
29142
29143 Name Reg. Set by entries contains:
29144 made by addrs? fp? sum?
29145
29146 AIX TOC 2 crt0 as Y option option
29147 AIX minimal TOC 30 prolog gcc Y Y option
29148 SVR4 SDATA 13 crt0 gcc N Y N
29149 SVR4 pic 30 prolog ld Y not yet N
29150 SVR4 PIC 30 prolog gcc Y option option
29151 EABI TOC 30 prolog gcc Y option option
29152
29153 */
29154
29155 /* Hash functions for the hash table. */
29156
29157 static unsigned
29158 rs6000_hash_constant (rtx k)
29159 {
29160 enum rtx_code code = GET_CODE (k);
29161 machine_mode mode = GET_MODE (k);
29162 unsigned result = (code << 3) ^ mode;
29163 const char *format;
29164 int flen, fidx;
29165
29166 format = GET_RTX_FORMAT (code);
29167 flen = strlen (format);
29168 fidx = 0;
29169
29170 switch (code)
29171 {
29172 case LABEL_REF:
29173 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29174
29175 case CONST_WIDE_INT:
29176 {
29177 int i;
29178 flen = CONST_WIDE_INT_NUNITS (k);
29179 for (i = 0; i < flen; i++)
29180 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29181 return result;
29182 }
29183
29184 case CONST_DOUBLE:
29185 if (mode != VOIDmode)
29186 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29187 flen = 2;
29188 break;
29189
29190 case CODE_LABEL:
29191 fidx = 3;
29192 break;
29193
29194 default:
29195 break;
29196 }
29197
29198 for (; fidx < flen; fidx++)
29199 switch (format[fidx])
29200 {
29201 case 's':
29202 {
29203 unsigned i, len;
29204 const char *str = XSTR (k, fidx);
29205 len = strlen (str);
29206 result = result * 613 + len;
29207 for (i = 0; i < len; i++)
29208 result = result * 613 + (unsigned) str[i];
29209 break;
29210 }
29211 case 'u':
29212 case 'e':
29213 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29214 break;
29215 case 'i':
29216 case 'n':
29217 result = result * 613 + (unsigned) XINT (k, fidx);
29218 break;
29219 case 'w':
29220 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29221 result = result * 613 + (unsigned) XWINT (k, fidx);
29222 else
29223 {
29224 size_t i;
29225 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29226 result = result * 613 + (unsigned) (XWINT (k, fidx)
29227 >> CHAR_BIT * i);
29228 }
29229 break;
29230 case '0':
29231 break;
29232 default:
29233 gcc_unreachable ();
29234 }
29235
29236 return result;
29237 }
29238
29239 hashval_t
29240 toc_hasher::hash (toc_hash_struct *thc)
29241 {
29242 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29243 }
29244
29245 /* Compare H1 and H2 for equivalence. */
29246
29247 bool
29248 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29249 {
29250 rtx r1 = h1->key;
29251 rtx r2 = h2->key;
29252
29253 if (h1->key_mode != h2->key_mode)
29254 return 0;
29255
29256 return rtx_equal_p (r1, r2);
29257 }
29258
29259 /* These are the names given by the C++ front-end to vtables, and
29260 vtable-like objects. Ideally, this logic should not be here;
29261 instead, there should be some programmatic way of inquiring as
29262 to whether or not an object is a vtable. */
29263
29264 #define VTABLE_NAME_P(NAME) \
29265 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29266 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29267 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29268 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29269 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
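/* For example, in the Itanium C++ ABI mangling GCC uses, "_ZTV1C" names
   the vtable for a class C, "_ZTI1C" its type_info, "_ZTT1C" its VTT and
   "_ZTC..." a construction vtable; "_vt." is the old GNU v2 prefix.  */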
29270
29271 #ifdef NO_DOLLAR_IN_LABEL
29272 /* Return a GGC-allocated character string translating dollar signs in
29273 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
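/* For instance, "Foo$Bar$1" becomes "Foo_Bar_1"; a name containing no
   dollar sign, or one that starts with '$', is returned unchanged.  */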
29274
29275 const char *
29276 rs6000_xcoff_strip_dollar (const char *name)
29277 {
29278 char *strip, *p;
29279 const char *q;
29280 size_t len;
29281
29282 q = (const char *) strchr (name, '$');
29283
29284 if (q == 0 || q == name)
29285 return name;
29286
29287 len = strlen (name);
29288 strip = XALLOCAVEC (char, len + 1);
29289 strcpy (strip, name);
29290 p = strip + (q - name);
29291 while (p)
29292 {
29293 *p = '_';
29294 p = strchr (p + 1, '$');
29295 }
29296
29297 return ggc_alloc_string (strip, len);
29298 }
29299 #endif
29300
29301 void
29302 rs6000_output_symbol_ref (FILE *file, rtx x)
29303 {
29304 const char *name = XSTR (x, 0);
29305
29306 /* Currently C++ toc references to vtables can be emitted before it
29307 is decided whether the vtable is public or private. If this is
29308 the case, then the linker will eventually complain that there is
29309 a reference to an unknown section. Thus, for vtables only,
29310 we emit the TOC reference to reference the identifier and not the
29311 symbol. */
29312 if (VTABLE_NAME_P (name))
29313 {
29314 RS6000_OUTPUT_BASENAME (file, name);
29315 }
29316 else
29317 assemble_name (file, name);
29318 }
29319
29320 /* Output a TOC entry. We derive the entry name from what is being
29321 written. */
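/* For example (illustrative; label numbers vary): on 64-bit AIX the
   DFmode constant 1.0 is emitted below roughly as

	LC..7:
		.tc FD_3ff00000_0[TC],0x3ff0000000000000

   whereas with -mminimal-toc only the raw doubleword is emitted.  */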
29322
29323 void
29324 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29325 {
29326 char buf[256];
29327 const char *name = buf;
29328 rtx base = x;
29329 HOST_WIDE_INT offset = 0;
29330
29331 gcc_assert (!TARGET_NO_TOC);
29332
29333 /* When the linker won't eliminate them, don't output duplicate
29334 TOC entries (this happens on AIX if there is any kind of TOC,
29335 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29336 CODE_LABELs. */
29337 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29338 {
29339 struct toc_hash_struct *h;
29340
29341 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29342 time because GGC is not initialized at that point. */
29343 if (toc_hash_table == NULL)
29344 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29345
29346 h = ggc_alloc<toc_hash_struct> ();
29347 h->key = x;
29348 h->key_mode = mode;
29349 h->labelno = labelno;
29350
29351 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29352 if (*found == NULL)
29353 *found = h;
29354 else /* This is indeed a duplicate.
29355 Set this label equal to that label. */
29356 {
29357 fputs ("\t.set ", file);
29358 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29359 fprintf (file, "%d,", labelno);
29360 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29361 fprintf (file, "%d\n", ((*found)->labelno));
29362
29363 #ifdef HAVE_AS_TLS
29364 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29365 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29366 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29367 {
29368 fputs ("\t.set ", file);
29369 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29370 fprintf (file, "%d,", labelno);
29371 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29372 fprintf (file, "%d\n", ((*found)->labelno));
29373 }
29374 #endif
29375 return;
29376 }
29377 }
29378
29379 /* If we're going to put a double constant in the TOC, make sure it's
29380 aligned properly when strict alignment is on. */
29381 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29382 && STRICT_ALIGNMENT
29383 && GET_MODE_BITSIZE (mode) >= 64
29384       && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29385     ASM_OUTPUT_ALIGN (file, 3);
29387
29388 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29389
29390 /* Handle FP constants specially. Note that if we have a minimal
29391 TOC, things we put here aren't actually in the TOC, so we can allow
29392 FP constants. */
29393   if (GET_CODE (x) == CONST_DOUBLE
29394       && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29395 	  || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29396 {
29397 long k[4];
29398
29399 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29400 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29401 else
29402 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29403
29404 if (TARGET_64BIT)
29405 {
29406 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29407 fputs (DOUBLE_INT_ASM_OP, file);
29408 else
29409 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29410 k[0] & 0xffffffff, k[1] & 0xffffffff,
29411 k[2] & 0xffffffff, k[3] & 0xffffffff);
29412 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29413 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29414 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29415 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29416 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29417 return;
29418 }
29419 else
29420 {
29421 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29422 fputs ("\t.long ", file);
29423 else
29424 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29425 k[0] & 0xffffffff, k[1] & 0xffffffff,
29426 k[2] & 0xffffffff, k[3] & 0xffffffff);
29427 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29428 k[0] & 0xffffffff, k[1] & 0xffffffff,
29429 k[2] & 0xffffffff, k[3] & 0xffffffff);
29430 return;
29431 }
29432 }
29433   else if (GET_CODE (x) == CONST_DOUBLE
29434 	   && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29435 {
29436 long k[2];
29437
29438 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29439 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29440 else
29441 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29442
29443 if (TARGET_64BIT)
29444 {
29445 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29446 fputs (DOUBLE_INT_ASM_OP, file);
29447 else
29448 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29449 k[0] & 0xffffffff, k[1] & 0xffffffff);
29450 fprintf (file, "0x%lx%08lx\n",
29451 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29452 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29453 return;
29454 }
29455 else
29456 {
29457 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29458 fputs ("\t.long ", file);
29459 else
29460 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29461 k[0] & 0xffffffff, k[1] & 0xffffffff);
29462 fprintf (file, "0x%lx,0x%lx\n",
29463 k[0] & 0xffffffff, k[1] & 0xffffffff);
29464 return;
29465 }
29466 }
29467   else if (GET_CODE (x) == CONST_DOUBLE
29468 	   && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29469 {
29470 long l;
29471
29472 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29473 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29474 else
29475 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29476
29477 if (TARGET_64BIT)
29478 {
29479 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29480 fputs (DOUBLE_INT_ASM_OP, file);
29481 else
29482 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29483 if (WORDS_BIG_ENDIAN)
29484 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29485 else
29486 fprintf (file, "0x%lx\n", l & 0xffffffff);
29487 return;
29488 }
29489 else
29490 {
29491 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29492 fputs ("\t.long ", file);
29493 else
29494 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29495 fprintf (file, "0x%lx\n", l & 0xffffffff);
29496 return;
29497 }
29498 }
29499 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29500 {
29501 unsigned HOST_WIDE_INT low;
29502 HOST_WIDE_INT high;
29503
29504 low = INTVAL (x) & 0xffffffff;
29505 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29506
29507 /* TOC entries are always Pmode-sized, so when big-endian
29508 smaller integer constants in the TOC need to be padded.
29509 (This is still a win over putting the constants in
29510 a separate constant pool, because then we'd have
29511 to have both a TOC entry _and_ the actual constant.)
29512
29513 For a 32-bit target, CONST_INT values are loaded and shifted
29514 entirely within `low' and can be stored in one TOC entry. */
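      /* Illustrative example: on a 64-bit big-endian target an SImode
	 constant 0x1234 is shifted into the high half here, so the TOC
	 doubleword emitted is 0x0000123400000000 and a 32-bit load from
	 the start of the slot sees the value.  */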
29515
29516       /* Handling wider constants would be easy, but none arise now.  */
29517 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29518
29519 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29520 {
29521 low |= high << 32;
29522 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29523 high = (HOST_WIDE_INT) low >> 32;
29524 low &= 0xffffffff;
29525 }
29526
29527 if (TARGET_64BIT)
29528 {
29529 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29530 fputs (DOUBLE_INT_ASM_OP, file);
29531 else
29532 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29533 (long) high & 0xffffffff, (long) low & 0xffffffff);
29534 fprintf (file, "0x%lx%08lx\n",
29535 (long) high & 0xffffffff, (long) low & 0xffffffff);
29536 return;
29537 }
29538 else
29539 {
29540 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29541 {
29542 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29543 fputs ("\t.long ", file);
29544 else
29545 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29546 (long) high & 0xffffffff, (long) low & 0xffffffff);
29547 fprintf (file, "0x%lx,0x%lx\n",
29548 (long) high & 0xffffffff, (long) low & 0xffffffff);
29549 }
29550 else
29551 {
29552 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29553 fputs ("\t.long ", file);
29554 else
29555 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29556 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29557 }
29558 return;
29559 }
29560 }
29561
29562 if (GET_CODE (x) == CONST)
29563 {
29564 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29565 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29566
29567 base = XEXP (XEXP (x, 0), 0);
29568 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29569 }
29570
29571 switch (GET_CODE (base))
29572 {
29573 case SYMBOL_REF:
29574 name = XSTR (base, 0);
29575 break;
29576
29577 case LABEL_REF:
29578 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29579 CODE_LABEL_NUMBER (XEXP (base, 0)));
29580 break;
29581
29582 case CODE_LABEL:
29583 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29584 break;
29585
29586 default:
29587 gcc_unreachable ();
29588 }
29589
29590 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29591 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29592 else
29593 {
29594 fputs ("\t.tc ", file);
29595 RS6000_OUTPUT_BASENAME (file, name);
29596
29597 if (offset < 0)
29598 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29599 else if (offset)
29600 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29601
29602 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29603 after other TOC symbols, reducing overflow of small TOC access
29604 to [TC] symbols. */
29605 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29606 ? "[TE]," : "[TC],", file);
29607 }
29608
29609 /* Currently C++ toc references to vtables can be emitted before it
29610 is decided whether the vtable is public or private. If this is
29611 the case, then the linker will eventually complain that there is
29612 a TOC reference to an unknown section. Thus, for vtables only,
29613 we emit the TOC reference to reference the symbol and not the
29614 section. */
29615 if (VTABLE_NAME_P (name))
29616 {
29617 RS6000_OUTPUT_BASENAME (file, name);
29618 if (offset < 0)
29619 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29620 else if (offset > 0)
29621 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29622 }
29623 else
29624 output_addr_const (file, x);
29625
29626 #if HAVE_AS_TLS
29627 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29628 {
29629 switch (SYMBOL_REF_TLS_MODEL (base))
29630 {
29631 case 0:
29632 break;
29633 case TLS_MODEL_LOCAL_EXEC:
29634 fputs ("@le", file);
29635 break;
29636 case TLS_MODEL_INITIAL_EXEC:
29637 fputs ("@ie", file);
29638 break;
29639 /* Use global-dynamic for local-dynamic. */
29640 case TLS_MODEL_GLOBAL_DYNAMIC:
29641 case TLS_MODEL_LOCAL_DYNAMIC:
29642 putc ('\n', file);
29643 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29644 fputs ("\t.tc .", file);
29645 RS6000_OUTPUT_BASENAME (file, name);
29646 fputs ("[TC],", file);
29647 output_addr_const (file, x);
29648 fputs ("@m", file);
29649 break;
29650 default:
29651 gcc_unreachable ();
29652 }
29653 }
29654 #endif
29655
29656 putc ('\n', file);
29657 }
29658 \f
29659 /* Output an assembler pseudo-op to write an ASCII string of N characters
29660 starting at P to FILE.
29661
29662 On the RS/6000, we have to do this using the .byte operation and
29663 write out special characters outside the quoted string.
29664 Also, the assembler is broken; very long strings are truncated,
29665 so we must artificially break them up early. */
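/* For example (illustrative), given the seven bytes of "He\"llo\n" this
   emits

	.byte "He""llo"
	.byte 10

   doubling the quote inside the string and printing the unprintable
   newline in decimal on its own directive.  */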
29666
29667 void
29668 output_ascii (FILE *file, const char *p, int n)
29669 {
29670 char c;
29671 int i, count_string;
29672 const char *for_string = "\t.byte \"";
29673 const char *for_decimal = "\t.byte ";
29674 const char *to_close = NULL;
29675
29676 count_string = 0;
29677 for (i = 0; i < n; i++)
29678 {
29679 c = *p++;
29680 if (c >= ' ' && c < 0177)
29681 {
29682 if (for_string)
29683 fputs (for_string, file);
29684 putc (c, file);
29685
29686 /* Write two quotes to get one. */
29687 if (c == '"')
29688 {
29689 putc (c, file);
29690 ++count_string;
29691 }
29692
29693 for_string = NULL;
29694 for_decimal = "\"\n\t.byte ";
29695 to_close = "\"\n";
29696 ++count_string;
29697
29698 if (count_string >= 512)
29699 {
29700 fputs (to_close, file);
29701
29702 for_string = "\t.byte \"";
29703 for_decimal = "\t.byte ";
29704 to_close = NULL;
29705 count_string = 0;
29706 }
29707 }
29708 else
29709 {
29710 if (for_decimal)
29711 fputs (for_decimal, file);
29712 fprintf (file, "%d", c);
29713
29714 for_string = "\n\t.byte \"";
29715 for_decimal = ", ";
29716 to_close = "\n";
29717 count_string = 0;
29718 }
29719 }
29720
29721 /* Now close the string if we have written one. Then end the line. */
29722 if (to_close)
29723 fputs (to_close, file);
29724 }
29725 \f
29726 /* Generate a unique section name for FILENAME for a section type
29727 represented by SECTION_DESC. Output goes into BUF.
29728
29729 SECTION_DESC can be any string, as long as it is different for each
29730 possible section type.
29731
29732 We name the section in the same manner as xlc. The name begins with an
29733 underscore followed by the filename (after stripping any leading directory
29734 names) with the last period replaced by the string SECTION_DESC. If
29735 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29736 the name. */
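/* For example (illustrative): FILENAME "src/my-file.c" with
   SECTION_DESC ".bss_" produces "_myfile.bss_"; the directory part and
   non-alphanumeric characters are dropped and the last period is
   replaced by the descriptor.  */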
29737
29738 void
29739 rs6000_gen_section_name (char **buf, const char *filename,
29740 const char *section_desc)
29741 {
29742 const char *q, *after_last_slash, *last_period = 0;
29743 char *p;
29744 int len;
29745
29746 after_last_slash = filename;
29747 for (q = filename; *q; q++)
29748 {
29749 if (*q == '/')
29750 after_last_slash = q + 1;
29751 else if (*q == '.')
29752 last_period = q;
29753 }
29754
29755 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29756 *buf = (char *) xmalloc (len);
29757
29758 p = *buf;
29759 *p++ = '_';
29760
29761 for (q = after_last_slash; *q; q++)
29762 {
29763 if (q == last_period)
29764 {
29765 strcpy (p, section_desc);
29766 p += strlen (section_desc);
29767 break;
29768 }
29769
29770 else if (ISALNUM (*q))
29771 *p++ = *q;
29772 }
29773
29774 if (last_period == 0)
29775 strcpy (p, section_desc);
29776 else
29777 *p = '\0';
29778 }
29779 \f
29780 /* Emit profile function. */
29781
29782 void
29783 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29784 {
29785 /* Non-standard profiling for kernels, which just saves LR then calls
29786 _mcount without worrying about arg saves. The idea is to change
29787 the function prologue as little as possible as it isn't easy to
29788 account for arg save/restore code added just for _mcount. */
29789 if (TARGET_PROFILE_KERNEL)
29790 return;
29791
29792 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29793 {
29794 #ifndef NO_PROFILE_COUNTERS
29795 # define NO_PROFILE_COUNTERS 0
29796 #endif
29797 if (NO_PROFILE_COUNTERS)
29798 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29799 LCT_NORMAL, VOIDmode);
29800 else
29801 {
29802 char buf[30];
29803 const char *label_name;
29804 rtx fun;
29805
29806 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29807 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29808 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29809
29810 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29811 LCT_NORMAL, VOIDmode, fun, Pmode);
29812 }
29813 }
29814 else if (DEFAULT_ABI == ABI_DARWIN)
29815 {
29816 const char *mcount_name = RS6000_MCOUNT;
29817 int caller_addr_regno = LR_REGNO;
29818
29819 /* Be conservative and always set this, at least for now. */
29820 crtl->uses_pic_offset_table = 1;
29821
29822 #if TARGET_MACHO
29823 /* For PIC code, set up a stub and collect the caller's address
29824 from r0, which is where the prologue puts it. */
29825 if (MACHOPIC_INDIRECT
29826 && crtl->uses_pic_offset_table)
29827 caller_addr_regno = 0;
29828 #endif
29829 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29830 LCT_NORMAL, VOIDmode,
29831 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
29832 }
29833 }
29834
29835 /* Write function profiler code. */
29836
29837 void
29838 output_function_profiler (FILE *file, int labelno)
29839 {
29840 char buf[100];
29841
29842 switch (DEFAULT_ABI)
29843 {
29844 default:
29845 gcc_unreachable ();
29846
29847 case ABI_V4:
29848 if (!TARGET_32BIT)
29849 {
29850 warning (0, "no profiling of 64-bit code for this ABI");
29851 return;
29852 }
29853 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29854 fprintf (file, "\tmflr %s\n", reg_names[0]);
29855 if (NO_PROFILE_COUNTERS)
29856 {
29857 asm_fprintf (file, "\tstw %s,4(%s)\n",
29858 reg_names[0], reg_names[1]);
29859 }
29860 else if (TARGET_SECURE_PLT && flag_pic)
29861 {
29862 if (TARGET_LINK_STACK)
29863 {
29864 char name[32];
29865 get_ppc476_thunk_name (name);
29866 asm_fprintf (file, "\tbl %s\n", name);
29867 }
29868 else
29869 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
29870 asm_fprintf (file, "\tstw %s,4(%s)\n",
29871 reg_names[0], reg_names[1]);
29872 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29873 asm_fprintf (file, "\taddis %s,%s,",
29874 reg_names[12], reg_names[12]);
29875 assemble_name (file, buf);
29876 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
29877 assemble_name (file, buf);
29878 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
29879 }
29880 else if (flag_pic == 1)
29881 {
29882 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
29883 asm_fprintf (file, "\tstw %s,4(%s)\n",
29884 reg_names[0], reg_names[1]);
29885 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29886 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
29887 assemble_name (file, buf);
29888 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
29889 }
29890 else if (flag_pic > 1)
29891 {
29892 asm_fprintf (file, "\tstw %s,4(%s)\n",
29893 reg_names[0], reg_names[1]);
29894 /* Now, we need to get the address of the label. */
29895 if (TARGET_LINK_STACK)
29896 {
29897 char name[32];
29898 get_ppc476_thunk_name (name);
29899 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
29900 assemble_name (file, buf);
29901 fputs ("-.\n1:", file);
29902 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29903 asm_fprintf (file, "\taddi %s,%s,4\n",
29904 reg_names[11], reg_names[11]);
29905 }
29906 else
29907 {
29908 fputs ("\tbcl 20,31,1f\n\t.long ", file);
29909 assemble_name (file, buf);
29910 fputs ("-.\n1:", file);
29911 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29912 }
29913 asm_fprintf (file, "\tlwz %s,0(%s)\n",
29914 reg_names[0], reg_names[11]);
29915 asm_fprintf (file, "\tadd %s,%s,%s\n",
29916 reg_names[0], reg_names[0], reg_names[11]);
29917 }
29918 else
29919 {
29920 asm_fprintf (file, "\tlis %s,", reg_names[12]);
29921 assemble_name (file, buf);
29922 fputs ("@ha\n", file);
29923 asm_fprintf (file, "\tstw %s,4(%s)\n",
29924 reg_names[0], reg_names[1]);
29925 asm_fprintf (file, "\tla %s,", reg_names[0]);
29926 assemble_name (file, buf);
29927 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
29928 }
29929
29930 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
29931 fprintf (file, "\tbl %s%s\n",
29932 RS6000_MCOUNT, flag_pic ? "@plt" : "");
29933 break;
29934
29935 case ABI_AIX:
29936 case ABI_ELFv2:
29937 case ABI_DARWIN:
29938 /* Don't do anything, done in output_profile_hook (). */
29939 break;
29940 }
29941 }
29942
29943 \f
29944
29945 /* The following variable records the last issued insn.  */
29946
29947 static rtx_insn *last_scheduled_insn;
29948
29949 /* The following variable helps to balance issuing of load and
29950    store instructions.  */
29951
29952 static int load_store_pendulum;
29953
29954 /* The following variable helps pair divide insns during scheduling. */
29955 static int divide_cnt;
29956 /* The following variable helps pair and alternate vector and vector load
29957 insns during scheduling. */
29958 static int vec_pairing;
29959
29960
29961 /* Power4 load update and store update instructions are cracked into a
29962 load or store and an integer insn which are executed in the same cycle.
29963 Branches have their own dispatch slot which does not count against the
29964 GCC issue rate, but it changes the program flow so there are no other
29965 instructions to issue in this cycle. */
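/* For example (a sketch of rs6000_variable_issue_1 below): with
   MORE == 4 issue slots left, a cracked insn on a dispatch-group CPU
   leaves 2, a microcoded insn ends the group and leaves 0, and any
   other recognized insn leaves 3.  */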
29966
29967 static int
29968 rs6000_variable_issue_1 (rtx_insn *insn, int more)
29969 {
29970 last_scheduled_insn = insn;
29971 if (GET_CODE (PATTERN (insn)) == USE
29972 || GET_CODE (PATTERN (insn)) == CLOBBER)
29973 {
29974 cached_can_issue_more = more;
29975 return cached_can_issue_more;
29976 }
29977
29978 if (insn_terminates_group_p (insn, current_group))
29979 {
29980 cached_can_issue_more = 0;
29981 return cached_can_issue_more;
29982 }
29983
29984   /* If the insn has no reservation, leave MORE unchanged.  */
29985 if (recog_memoized (insn) < 0)
29986 return more;
29987
29988 if (rs6000_sched_groups)
29989 {
29990 if (is_microcoded_insn (insn))
29991 cached_can_issue_more = 0;
29992 else if (is_cracked_insn (insn))
29993 cached_can_issue_more = more > 2 ? more - 2 : 0;
29994 else
29995 cached_can_issue_more = more - 1;
29996
29997 return cached_can_issue_more;
29998 }
29999
30000 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30001 return 0;
30002
30003 cached_can_issue_more = more - 1;
30004 return cached_can_issue_more;
30005 }
30006
30007 static int
30008 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30009 {
30010 int r = rs6000_variable_issue_1 (insn, more);
30011 if (verbose)
30012 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30013 return r;
30014 }
30015
30016 /* Adjust the cost of a scheduling dependency.  Return the new cost of
30017    a DEP_TYPE dependency of INSN on DEP_INSN.  COST is the current cost.  */
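/* For instance (illustrative of the POWER6 handling below): when
   DEP_INSN is a load whose result feeds the address generation of INSN,
   the cost returned is 4, or 6 if the load also sign-extends.  */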
30018
30019 static int
30020 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30021 unsigned int)
30022 {
30023 enum attr_type attr_type;
30024
30025 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30026 return cost;
30027
30028 switch (dep_type)
30029 {
30030 case REG_DEP_TRUE:
30031 {
30032 /* Data dependency; DEP_INSN writes a register that INSN reads
30033 some cycles later. */
30034
30035 /* Separate a load from a narrower, dependent store. */
30036 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30037 && GET_CODE (PATTERN (insn)) == SET
30038 && GET_CODE (PATTERN (dep_insn)) == SET
30039 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30040 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30041 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30042 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30043 return cost + 14;
30044
30045 attr_type = get_attr_type (insn);
30046
30047 switch (attr_type)
30048 {
30049 case TYPE_JMPREG:
30050 /* Tell the first scheduling pass about the latency between
30051 a mtctr and bctr (and mtlr and br/blr). The first
30052 scheduling pass will not know about this latency since
30053 the mtctr instruction, which has the latency associated
30054 to it, will be generated by reload. */
30055 return 4;
30056 case TYPE_BRANCH:
30057 /* Leave some extra cycles between a compare and its
30058 dependent branch, to inhibit expensive mispredicts. */
30059 if ((rs6000_tune == PROCESSOR_PPC603
30060 || rs6000_tune == PROCESSOR_PPC604
30061 || rs6000_tune == PROCESSOR_PPC604e
30062 || rs6000_tune == PROCESSOR_PPC620
30063 || rs6000_tune == PROCESSOR_PPC630
30064 || rs6000_tune == PROCESSOR_PPC750
30065 || rs6000_tune == PROCESSOR_PPC7400
30066 || rs6000_tune == PROCESSOR_PPC7450
30067 || rs6000_tune == PROCESSOR_PPCE5500
30068 || rs6000_tune == PROCESSOR_PPCE6500
30069 || rs6000_tune == PROCESSOR_POWER4
30070 || rs6000_tune == PROCESSOR_POWER5
30071 || rs6000_tune == PROCESSOR_POWER7
30072 || rs6000_tune == PROCESSOR_POWER8
30073 || rs6000_tune == PROCESSOR_POWER9
30074 || rs6000_tune == PROCESSOR_CELL)
30075 && recog_memoized (dep_insn)
30076 && (INSN_CODE (dep_insn) >= 0))
30077
30078 switch (get_attr_type (dep_insn))
30079 {
30080 case TYPE_CMP:
30081 case TYPE_FPCOMPARE:
30082 case TYPE_CR_LOGICAL:
30083 return cost + 2;
30084 case TYPE_EXTS:
30085 case TYPE_MUL:
30086 if (get_attr_dot (dep_insn) == DOT_YES)
30087 return cost + 2;
30088 else
30089 break;
30090 case TYPE_SHIFT:
30091 if (get_attr_dot (dep_insn) == DOT_YES
30092 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30093 return cost + 2;
30094 else
30095 break;
30096 default:
30097 break;
30098 }
30099 break;
30100
30101 case TYPE_STORE:
30102 case TYPE_FPSTORE:
30103 if ((rs6000_tune == PROCESSOR_POWER6)
30104 && recog_memoized (dep_insn)
30105 && (INSN_CODE (dep_insn) >= 0))
30106 {
30107
30108 if (GET_CODE (PATTERN (insn)) != SET)
30109 /* If this happens, we have to extend this to schedule
30110 optimally. Return default for now. */
30111 return cost;
30112
30113 /* Adjust the cost for the case where the value written
30114 by a fixed point operation is used as the address
30115 gen value on a store. */
30116 switch (get_attr_type (dep_insn))
30117 {
30118 case TYPE_LOAD:
30119 case TYPE_CNTLZ:
30120 {
30121 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30122 return get_attr_sign_extend (dep_insn)
30123 == SIGN_EXTEND_YES ? 6 : 4;
30124 break;
30125 }
30126 case TYPE_SHIFT:
30127 {
30128 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30129 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30130 6 : 3;
30131 break;
30132 }
30133 case TYPE_INTEGER:
30134 case TYPE_ADD:
30135 case TYPE_LOGICAL:
30136 case TYPE_EXTS:
30137 case TYPE_INSERT:
30138 {
30139 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30140 return 3;
30141 break;
30142 }
30143 case TYPE_STORE:
30144 case TYPE_FPLOAD:
30145 case TYPE_FPSTORE:
30146 {
30147 if (get_attr_update (dep_insn) == UPDATE_YES
30148 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30149 return 3;
30150 break;
30151 }
30152 case TYPE_MUL:
30153 {
30154 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30155 return 17;
30156 break;
30157 }
30158 case TYPE_DIV:
30159 {
30160 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30161 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30162 break;
30163 }
30164 default:
30165 break;
30166 }
30167 }
30168 break;
30169
30170 case TYPE_LOAD:
30171 if ((rs6000_tune == PROCESSOR_POWER6)
30172 && recog_memoized (dep_insn)
30173 && (INSN_CODE (dep_insn) >= 0))
30174 {
30175
30176 /* Adjust the cost for the case where the value written
30177 by a fixed point instruction is used within the address
30178 	     gen portion of a subsequent load(u)(x).  */
30179 switch (get_attr_type (dep_insn))
30180 {
30181 case TYPE_LOAD:
30182 case TYPE_CNTLZ:
30183 {
30184 if (set_to_load_agen (dep_insn, insn))
30185 return get_attr_sign_extend (dep_insn)
30186 == SIGN_EXTEND_YES ? 6 : 4;
30187 break;
30188 }
30189 case TYPE_SHIFT:
30190 {
30191 if (set_to_load_agen (dep_insn, insn))
30192 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30193 6 : 3;
30194 break;
30195 }
30196 case TYPE_INTEGER:
30197 case TYPE_ADD:
30198 case TYPE_LOGICAL:
30199 case TYPE_EXTS:
30200 case TYPE_INSERT:
30201 {
30202 if (set_to_load_agen (dep_insn, insn))
30203 return 3;
30204 break;
30205 }
30206 case TYPE_STORE:
30207 case TYPE_FPLOAD:
30208 case TYPE_FPSTORE:
30209 {
30210 if (get_attr_update (dep_insn) == UPDATE_YES
30211 && set_to_load_agen (dep_insn, insn))
30212 return 3;
30213 break;
30214 }
30215 case TYPE_MUL:
30216 {
30217 if (set_to_load_agen (dep_insn, insn))
30218 return 17;
30219 break;
30220 }
30221 case TYPE_DIV:
30222 {
30223 if (set_to_load_agen (dep_insn, insn))
30224 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30225 break;
30226 }
30227 default:
30228 break;
30229 }
30230 }
30231 break;
30232
30233 case TYPE_FPLOAD:
30234 if ((rs6000_tune == PROCESSOR_POWER6)
30235 && get_attr_update (insn) == UPDATE_NO
30236 && recog_memoized (dep_insn)
30237 && (INSN_CODE (dep_insn) >= 0)
30238 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30239 return 2;
30240
30241 default:
30242 break;
30243 }
30244
30245 /* Fall out to return default cost. */
30246 }
30247 break;
30248
30249 case REG_DEP_OUTPUT:
30250 /* Output dependency; DEP_INSN writes a register that INSN writes some
30251 cycles later. */
30252 if ((rs6000_tune == PROCESSOR_POWER6)
30253 && recog_memoized (dep_insn)
30254 && (INSN_CODE (dep_insn) >= 0))
30255 {
30256 attr_type = get_attr_type (insn);
30257
30258 switch (attr_type)
30259 {
30260 case TYPE_FP:
30261 case TYPE_FPSIMPLE:
30262 if (get_attr_type (dep_insn) == TYPE_FP
30263 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30264 return 1;
30265 break;
30266 case TYPE_FPLOAD:
30267 if (get_attr_update (insn) == UPDATE_NO
30268 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30269 return 2;
30270 break;
30271 default:
30272 break;
30273 }
30274 }
30275 /* Fall through, no cost for output dependency. */
30276 /* FALLTHRU */
30277
30278 case REG_DEP_ANTI:
30279 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30280 cycles later. */
30281 return 0;
30282
30283 default:
30284 gcc_unreachable ();
30285 }
30286
30287 return cost;
30288 }
30289
30290 /* Debug version of rs6000_adjust_cost. */
30291
30292 static int
30293 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30294 int cost, unsigned int dw)
30295 {
30296 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30297
30298 if (ret != cost)
30299 {
30300 const char *dep;
30301
30302 switch (dep_type)
30303 {
30304 	default:	     dep = "unknown dependency";  break;
30305 	case REG_DEP_TRUE:   dep = "data dependency";     break;
30306 	case REG_DEP_OUTPUT: dep = "output dependency";   break;
30307 	case REG_DEP_ANTI:   dep = "anti dependency";     break;
30308 }
30309
30310 fprintf (stderr,
30311 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30312 "%s, insn:\n", ret, cost, dep);
30313
30314 debug_rtx (insn);
30315 }
30316
30317 return ret;
30318 }
30319
30320 /* Return true if INSN is microcoded.
30321    Return false otherwise.  */
30322
30323 static bool
30324 is_microcoded_insn (rtx_insn *insn)
30325 {
30326 if (!insn || !NONDEBUG_INSN_P (insn)
30327 || GET_CODE (PATTERN (insn)) == USE
30328 || GET_CODE (PATTERN (insn)) == CLOBBER)
30329 return false;
30330
30331 if (rs6000_tune == PROCESSOR_CELL)
30332 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30333
30334 if (rs6000_sched_groups
30335 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30336 {
30337 enum attr_type type = get_attr_type (insn);
30338 if ((type == TYPE_LOAD
30339 && get_attr_update (insn) == UPDATE_YES
30340 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30341 || ((type == TYPE_LOAD || type == TYPE_STORE)
30342 && get_attr_update (insn) == UPDATE_YES
30343 && get_attr_indexed (insn) == INDEXED_YES)
30344 || type == TYPE_MFCR)
30345 return true;
30346 }
30347
30348 return false;
30349 }
30350
30351 /* The function returns true if INSN is cracked into 2 instructions
30352 by the processor (and therefore occupies 2 issue slots). */
30353
30354 static bool
30355 is_cracked_insn (rtx_insn *insn)
30356 {
30357 if (!insn || !NONDEBUG_INSN_P (insn)
30358 || GET_CODE (PATTERN (insn)) == USE
30359 || GET_CODE (PATTERN (insn)) == CLOBBER)
30360 return false;
30361
30362 if (rs6000_sched_groups
30363 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30364 {
30365 enum attr_type type = get_attr_type (insn);
30366 if ((type == TYPE_LOAD
30367 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30368 && get_attr_update (insn) == UPDATE_NO)
30369 || (type == TYPE_LOAD
30370 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30371 && get_attr_update (insn) == UPDATE_YES
30372 && get_attr_indexed (insn) == INDEXED_NO)
30373 || (type == TYPE_STORE
30374 && get_attr_update (insn) == UPDATE_YES
30375 && get_attr_indexed (insn) == INDEXED_NO)
30376 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30377 && get_attr_update (insn) == UPDATE_YES)
30378 || (type == TYPE_CR_LOGICAL
30379 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30380 || (type == TYPE_EXTS
30381 && get_attr_dot (insn) == DOT_YES)
30382 || (type == TYPE_SHIFT
30383 && get_attr_dot (insn) == DOT_YES
30384 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30385 || (type == TYPE_MUL
30386 && get_attr_dot (insn) == DOT_YES)
30387 || type == TYPE_DIV
30388 || (type == TYPE_INSERT
30389 && get_attr_size (insn) == SIZE_32))
30390 return true;
30391 }
30392
30393 return false;
30394 }
30395
30396 /* The function returns true if INSN can be issued only from
30397 the branch slot. */
30398
30399 static bool
30400 is_branch_slot_insn (rtx_insn *insn)
30401 {
30402 if (!insn || !NONDEBUG_INSN_P (insn)
30403 || GET_CODE (PATTERN (insn)) == USE
30404 || GET_CODE (PATTERN (insn)) == CLOBBER)
30405 return false;
30406
30407 if (rs6000_sched_groups)
30408 {
30409 enum attr_type type = get_attr_type (insn);
30410 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30411 return true;
30412 return false;
30413 }
30414
30415 return false;
30416 }
30417
30418 /* Return true if OUT_INSN sets a value that is used in the address
30419    generation computation of IN_INSN.  */
30420 static bool
30421 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30422 {
30423 rtx out_set, in_set;
30424
30425 /* For performance reasons, only handle the simple case where
30426 both loads are a single_set. */
30427 out_set = single_set (out_insn);
30428 if (out_set)
30429 {
30430 in_set = single_set (in_insn);
30431 if (in_set)
30432 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30433 }
30434
30435 return false;
30436 }
30437
30438 /* Try to determine base/offset/size parts of the given MEM.
30439    Return true if successful, false if not all of the values could
30440    be determined.
30441
30442 This function only looks for REG or REG+CONST address forms.
30443 REG+REG address form will return false. */
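/* For example (illustrative): a DImode MEM whose address is
   (plus (reg 9) (const_int 16)) yields *BASE = r9, *OFFSET = 16 and
   *SIZE = 8, while an indexed reg+reg address makes this return
   false.  */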
30444
30445 static bool
30446 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30447 HOST_WIDE_INT *size)
30448 {
30449 rtx addr_rtx;
30450   if (MEM_SIZE_KNOWN_P (mem))
30451 *size = MEM_SIZE (mem);
30452 else
30453 return false;
30454
30455 addr_rtx = (XEXP (mem, 0));
30456 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30457 addr_rtx = XEXP (addr_rtx, 1);
30458
30459 *offset = 0;
30460 while (GET_CODE (addr_rtx) == PLUS
30461 && CONST_INT_P (XEXP (addr_rtx, 1)))
30462 {
30463 *offset += INTVAL (XEXP (addr_rtx, 1));
30464 addr_rtx = XEXP (addr_rtx, 0);
30465 }
30466 if (!REG_P (addr_rtx))
30467 return false;
30468
30469 *base = addr_rtx;
30470 return true;
30471 }
30472
30473 /* Return true if the target storage location of MEM1 is adjacent
30474    to the target storage location of MEM2.  */
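/* For example (illustrative): two 8-byte accesses at r9+0 and r9+8
   satisfy off1 + size1 == off2 and so are adjacent; 8-byte accesses at
   r9+0 and r9+4 are caught by the overlap test further below.  */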
30476
30477 static bool
30478 adjacent_mem_locations (rtx mem1, rtx mem2)
30479 {
30480 rtx reg1, reg2;
30481 HOST_WIDE_INT off1, size1, off2, size2;
30482
30483 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30484 && get_memref_parts (mem2, &reg2, &off2, &size2))
30485 return ((REGNO (reg1) == REGNO (reg2))
30486 && ((off1 + size1 == off2)
30487 || (off2 + size2 == off1)));
30488
30489 return false;
30490 }
30491
30492 /* This function returns true if it can be determined that the two MEM
30493 locations overlap by at least 1 byte based on base reg/offset/size. */
30494
30495 static bool
30496 mem_locations_overlap (rtx mem1, rtx mem2)
30497 {
30498 rtx reg1, reg2;
30499 HOST_WIDE_INT off1, size1, off2, size2;
30500
30501 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30502 && get_memref_parts (mem2, &reg2, &off2, &size2))
30503 return ((REGNO (reg1) == REGNO (reg2))
30504 && (((off1 <= off2) && (off1 + size1 > off2))
30505 || ((off2 <= off1) && (off2 + size2 > off1))));
30506
30507 return false;
30508 }
30509
30510 /* Update the integer scheduling priority INSN_PRIORITY (INSN).
30511    Increase the priority to execute INSN earlier, reduce the priority
30512    to execute INSN later.  Targets that do not need to adjust
30513    priorities need not define this hook.  */
30515
30516 static int
30517 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30518 {
30519 rtx load_mem, str_mem;
30520 /* On machines (like the 750) which have asymmetric integer units,
30521 where one integer unit can do multiply and divides and the other
30522 can't, reduce the priority of multiply/divide so it is scheduled
30523 before other integer operations. */
30524
30525 #if 0
30526 if (! INSN_P (insn))
30527 return priority;
30528
30529 if (GET_CODE (PATTERN (insn)) == USE)
30530 return priority;
30531
30532 switch (rs6000_tune) {
30533 case PROCESSOR_PPC750:
30534 switch (get_attr_type (insn))
30535 {
30536 default:
30537 break;
30538
30539 case TYPE_MUL:
30540 case TYPE_DIV:
30541 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30542 priority, priority);
30543 if (priority >= 0 && priority < 0x01000000)
30544 priority >>= 3;
30545 break;
30546 }
30547 }
30548 #endif
30549
30550 if (insn_must_be_first_in_group (insn)
30551 && reload_completed
30552 && current_sched_info->sched_max_insns_priority
30553 && rs6000_sched_restricted_insns_priority)
30554 {
30555
30556 /* Prioritize insns that can be dispatched only in the first
30557 dispatch slot. */
30558 if (rs6000_sched_restricted_insns_priority == 1)
30559 /* Attach highest priority to insn. This means that in
30560 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30561 precede 'priority' (critical path) considerations. */
30562 return current_sched_info->sched_max_insns_priority;
30563 else if (rs6000_sched_restricted_insns_priority == 2)
30564 /* Increase priority of insn by a minimal amount. This means that in
30565 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30566 considerations precede dispatch-slot restriction considerations. */
30567 return (priority + 1);
30568 }
30569
30570 if (rs6000_tune == PROCESSOR_POWER6
30571 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30572 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30573 /* Attach highest priority to insn if the scheduler has just issued two
30574 stores and this instruction is a load, or two loads and this instruction
30575 is a store. Power6 wants loads and stores scheduled alternately
30576      when possible.  */
30577 return current_sched_info->sched_max_insns_priority;
30578
30579 return priority;
30580 }
30581
30582 /* Return true if the instruction is nonpipelined on the Cell. */
30583 static bool
30584 is_nonpipeline_insn (rtx_insn *insn)
30585 {
30586 enum attr_type type;
30587 if (!insn || !NONDEBUG_INSN_P (insn)
30588 || GET_CODE (PATTERN (insn)) == USE
30589 || GET_CODE (PATTERN (insn)) == CLOBBER)
30590 return false;
30591
30592 type = get_attr_type (insn);
30593 if (type == TYPE_MUL
30594 || type == TYPE_DIV
30595 || type == TYPE_SDIV
30596 || type == TYPE_DDIV
30597 || type == TYPE_SSQRT
30598 || type == TYPE_DSQRT
30599 || type == TYPE_MFCR
30600 || type == TYPE_MFCRF
30601 || type == TYPE_MFJMPR)
30602 {
30603 return true;
30604 }
30605 return false;
30606 }
30607
30608
30609 /* Return how many instructions the machine can issue per cycle. */
30610
30611 static int
30612 rs6000_issue_rate (void)
30613 {
30614 /* Unless scheduling for register pressure, use issue rate of 1 for
30615 first scheduling pass to decrease degradation. */
30616 if (!reload_completed && !flag_sched_pressure)
30617 return 1;
30618
30619 switch (rs6000_tune) {
30620 case PROCESSOR_RS64A:
30621 case PROCESSOR_PPC601: /* ? */
30622 case PROCESSOR_PPC7450:
30623 return 3;
30624 case PROCESSOR_PPC440:
30625 case PROCESSOR_PPC603:
30626 case PROCESSOR_PPC750:
30627 case PROCESSOR_PPC7400:
30628 case PROCESSOR_PPC8540:
30629 case PROCESSOR_PPC8548:
30630 case PROCESSOR_CELL:
30631 case PROCESSOR_PPCE300C2:
30632 case PROCESSOR_PPCE300C3:
30633 case PROCESSOR_PPCE500MC:
30634 case PROCESSOR_PPCE500MC64:
30635 case PROCESSOR_PPCE5500:
30636 case PROCESSOR_PPCE6500:
30637 case PROCESSOR_TITAN:
30638 return 2;
30639 case PROCESSOR_PPC476:
30640 case PROCESSOR_PPC604:
30641 case PROCESSOR_PPC604e:
30642 case PROCESSOR_PPC620:
30643 case PROCESSOR_PPC630:
30644 return 4;
30645 case PROCESSOR_POWER4:
30646 case PROCESSOR_POWER5:
30647 case PROCESSOR_POWER6:
30648 case PROCESSOR_POWER7:
30649 return 5;
30650 case PROCESSOR_POWER8:
30651 return 7;
30652 case PROCESSOR_POWER9:
30653 return 6;
30654 default:
30655 return 1;
30656 }
30657 }
30658
30659 /* Return how many instructions to look ahead for better insn
30660 scheduling. */
30661
30662 static int
30663 rs6000_use_sched_lookahead (void)
30664 {
30665 switch (rs6000_tune)
30666 {
30667 case PROCESSOR_PPC8540:
30668 case PROCESSOR_PPC8548:
30669 return 4;
30670
30671 case PROCESSOR_CELL:
30672 return (reload_completed ? 8 : 0);
30673
30674 default:
30675 return 0;
30676 }
30677 }
30678
30679 /* We are choosing an insn from the ready queue. Return zero if INSN can
30680 be chosen. */
30681 static int
30682 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30683 {
30684 if (ready_index == 0)
30685 return 0;
30686
30687 if (rs6000_tune != PROCESSOR_CELL)
30688 return 0;
30689
30690 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30691
30692 if (!reload_completed
30693 || is_nonpipeline_insn (insn)
30694 || is_microcoded_insn (insn))
30695 return 1;
30696
30697 return 0;
30698 }
30699
30700 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30701 and return true. */
30702
30703 static bool
30704 find_mem_ref (rtx pat, rtx *mem_ref)
30705 {
30706 const char * fmt;
30707 int i, j;
30708
30709 /* stack_tie does not produce any real memory traffic. */
30710 if (tie_operand (pat, VOIDmode))
30711 return false;
30712
30713 if (GET_CODE (pat) == MEM)
30714 {
30715 *mem_ref = pat;
30716 return true;
30717 }
30718
30719 /* Recursively process the pattern. */
30720 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30721
30722 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30723 {
30724 if (fmt[i] == 'e')
30725 {
30726 if (find_mem_ref (XEXP (pat, i), mem_ref))
30727 return true;
30728 }
30729 else if (fmt[i] == 'E')
30730 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30731 {
30732 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30733 return true;
30734 }
30735 }
30736
30737 return false;
30738 }
30739
30740 /* Determine if PAT is a PATTERN of a load insn. */
30741
30742 static bool
30743 is_load_insn1 (rtx pat, rtx *load_mem)
30744 {
30745 if (!pat)
30746 return false;
30747
30748 if (GET_CODE (pat) == SET)
30749 return find_mem_ref (SET_SRC (pat), load_mem);
30750
30751 if (GET_CODE (pat) == PARALLEL)
30752 {
30753 int i;
30754
30755 for (i = 0; i < XVECLEN (pat, 0); i++)
30756 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30757 return true;
30758 }
30759
30760 return false;
30761 }
30762
30763 /* Determine if INSN loads from memory. */
30764
30765 static bool
30766 is_load_insn (rtx insn, rtx *load_mem)
30767 {
30768 if (!insn || !INSN_P (insn))
30769 return false;
30770
30771 if (CALL_P (insn))
30772 return false;
30773
30774 return is_load_insn1 (PATTERN (insn), load_mem);
30775 }
30776
30777 /* Determine if PAT is a PATTERN of a store insn. */
30778
30779 static bool
30780 is_store_insn1 (rtx pat, rtx *str_mem)
30781 {
30782 if (!pat)
30783 return false;
30784
30785 if (GET_CODE (pat) == SET)
30786 return find_mem_ref (SET_DEST (pat), str_mem);
30787
30788 if (GET_CODE (pat) == PARALLEL)
30789 {
30790 int i;
30791
30792 for (i = 0; i < XVECLEN (pat, 0); i++)
30793 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30794 return true;
30795 }
30796
30797 return false;
30798 }
30799
30800 /* Determine if INSN stores to memory. */
30801
30802 static bool
30803 is_store_insn (rtx insn, rtx *str_mem)
30804 {
30805 if (!insn || !INSN_P (insn))
30806 return false;
30807
30808 return is_store_insn1 (PATTERN (insn), str_mem);
30809 }
30810
30811 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30812
30813 static bool
30814 is_power9_pairable_vec_type (enum attr_type type)
30815 {
30816 switch (type)
30817 {
30818 case TYPE_VECSIMPLE:
30819 case TYPE_VECCOMPLEX:
30820 case TYPE_VECDIV:
30821 case TYPE_VECCMP:
30822 case TYPE_VECPERM:
30823 case TYPE_VECFLOAT:
30824 case TYPE_VECFDIV:
30825 case TYPE_VECDOUBLE:
30826 return true;
30827 default:
30828 break;
30829 }
30830 return false;
30831 }
30832
30833 /* Returns whether the dependence between INSN and NEXT is considered
30834 costly by the given target. */
30835
30836 static bool
30837 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
30838 {
30839 rtx insn;
30840 rtx next;
30841 rtx load_mem, str_mem;
30842
30843 /* If the flag is not enabled, no dependence is considered costly;
30844 allow all dependent insns in the same group.
30845 This is the most aggressive option. */
30846 if (rs6000_sched_costly_dep == no_dep_costly)
30847 return false;
30848
30849 /* If the flag is set to 1, a dependence is always considered costly;
30850 do not allow dependent instructions in the same group.
30851 This is the most conservative option. */
30852 if (rs6000_sched_costly_dep == all_deps_costly)
30853 return true;
30854
30855 insn = DEP_PRO (dep);
30856 next = DEP_CON (dep);
30857
30858 if (rs6000_sched_costly_dep == store_to_load_dep_costly
30859 && is_load_insn (next, &load_mem)
30860 && is_store_insn (insn, &str_mem))
30861 /* Prevent load after store in the same group. */
30862 return true;
30863
30864 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
30865 && is_load_insn (next, &load_mem)
30866 && is_store_insn (insn, &str_mem)
30867 && DEP_TYPE (dep) == REG_DEP_TRUE
30868 && mem_locations_overlap(str_mem, load_mem))
30869 /* Prevent load after store in the same group if it is a true
30870 dependence. */
30871 return true;
30872
30873 /* The flag is set to X; dependences with latency >= X are considered costly,
30874 and will not be scheduled in the same group. */
30875 if (rs6000_sched_costly_dep <= max_dep_latency
30876 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
30877 return true;
30878
30879 return false;
30880 }
30881
30882 /* Return the next insn after INSN that is found before TAIL is reached,
30883 skipping any "non-active" insns - insns that will not actually occupy
30884 an issue slot. Return NULL_RTX if such an insn is not found. */
30885
30886 static rtx_insn *
30887 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
30888 {
30889 if (insn == NULL_RTX || insn == tail)
30890 return NULL;
30891
30892 while (1)
30893 {
30894 insn = NEXT_INSN (insn);
30895 if (insn == NULL_RTX || insn == tail)
30896 return NULL;
30897
30898 if (CALL_P (insn)
30899 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
30900 || (NONJUMP_INSN_P (insn)
30901 && GET_CODE (PATTERN (insn)) != USE
30902 && GET_CODE (PATTERN (insn)) != CLOBBER
30903 && INSN_CODE (insn) != CODE_FOR_stack_tie))
30904 break;
30905 }
30906 return insn;
30907 }
30908
30909 /* Do Power9 specific sched_reorder2 reordering of ready list. */
30910
30911 static int
30912 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
30913 {
30914 int pos;
30915 int i;
30916 rtx_insn *tmp;
30917 enum attr_type type, type2;
30918
30919 type = get_attr_type (last_scheduled_insn);
30920
30921 /* Try to issue fixed-point divides back-to-back in pairs so they will be
30922 routed to separate execution units and execute in parallel. */
30923 if (type == TYPE_DIV && divide_cnt == 0)
30924 {
30925 /* First divide has been scheduled. */
30926 divide_cnt = 1;
30927
30928 /* Scan the ready list looking for another divide; if found, move it
30929 to the end of the list so it is chosen next. */
30930 pos = lastpos;
30931 while (pos >= 0)
30932 {
30933 if (recog_memoized (ready[pos]) >= 0
30934 && get_attr_type (ready[pos]) == TYPE_DIV)
30935 {
30936 tmp = ready[pos];
30937 for (i = pos; i < lastpos; i++)
30938 ready[i] = ready[i + 1];
30939 ready[lastpos] = tmp;
30940 break;
30941 }
30942 pos--;
30943 }
30944 }
30945 else
30946 {
30947 /* Last insn was the 2nd divide or not a divide; reset the counter. */
30948 divide_cnt = 0;
30949
30950 /* The best dispatch throughput for vector and vector load insns can be
30951 achieved by interleaving a vector and vector load such that they'll
30952 dispatch to the same superslice. If this pairing cannot be achieved
30953 then it is best to pair vector insns together and vector load insns
30954 together.
30955
30956 To aid in this pairing, vec_pairing maintains the current state with
30957 the following values:
30958
30959 0 : Initial state, no vecload/vector pairing has been started.
30960
30961 1 : A vecload or vector insn has been issued and a candidate for
30962 pairing has been found and moved to the end of the ready
30963 list. */
30964 if (type == TYPE_VECLOAD)
30965 {
30966 /* Issued a vecload. */
30967 if (vec_pairing == 0)
30968 {
30969 int vecload_pos = -1;
30970 /* We issued a single vecload; look for a vector insn to pair it
30971 with. If one isn't found, try to pair another vecload. */
30972 pos = lastpos;
30973 while (pos >= 0)
30974 {
30975 if (recog_memoized (ready[pos]) >= 0)
30976 {
30977 type2 = get_attr_type (ready[pos]);
30978 if (is_power9_pairable_vec_type (type2))
30979 {
30980 /* Found a vector insn to pair with, move it to the
30981 end of the ready list so it is scheduled next. */
30982 tmp = ready[pos];
30983 for (i = pos; i < lastpos; i++)
30984 ready[i] = ready[i + 1];
30985 ready[lastpos] = tmp;
30986 vec_pairing = 1;
30987 return cached_can_issue_more;
30988 }
30989 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
30990 /* Remember position of first vecload seen. */
30991 vecload_pos = pos;
30992 }
30993 pos--;
30994 }
30995 if (vecload_pos >= 0)
30996 {
30997 /* Didn't find a vector to pair with but did find a vecload,
30998 move it to the end of the ready list. */
30999 tmp = ready[vecload_pos];
31000 for (i = vecload_pos; i < lastpos; i++)
31001 ready[i] = ready[i + 1];
31002 ready[lastpos] = tmp;
31003 vec_pairing = 1;
31004 return cached_can_issue_more;
31005 }
31006 }
31007 }
31008 else if (is_power9_pairable_vec_type (type))
31009 {
31010 /* Issued a vector operation. */
31011 if (vec_pairing == 0)
31012 {
31013 int vec_pos = -1;
31014 /* We issued a single vector insn; look for a vecload to pair it
31015 with. If one isn't found, try to pair another vector. */
31016 pos = lastpos;
31017 while (pos >= 0)
31018 {
31019 if (recog_memoized (ready[pos]) >= 0)
31020 {
31021 type2 = get_attr_type (ready[pos]);
31022 if (type2 == TYPE_VECLOAD)
31023 {
31024 /* Found a vecload insn to pair with, move it to the
31025 end of the ready list so it is scheduled next. */
31026 tmp = ready[pos];
31027 for (i = pos; i < lastpos; i++)
31028 ready[i] = ready[i + 1];
31029 ready[lastpos] = tmp;
31030 vec_pairing = 1;
31031 return cached_can_issue_more;
31032 }
31033 else if (is_power9_pairable_vec_type (type2)
31034 && vec_pos == -1)
31035 /* Remember position of first vector insn seen. */
31036 vec_pos = pos;
31037 }
31038 pos--;
31039 }
31040 if (vec_pos >= 0)
31041 {
31042 /* Didn't find a vecload to pair with but did find a vector
31043 insn, move it to the end of the ready list. */
31044 tmp = ready[vec_pos];
31045 for (i = vec_pos; i < lastpos; i++)
31046 ready[i] = ready[i + 1];
31047 ready[lastpos] = tmp;
31048 vec_pairing = 1;
31049 return cached_can_issue_more;
31050 }
31051 }
31052 }
31053
31054 /* We've either finished a vec/vecload pair, couldn't find an insn to
31055 continue the current pair, or the last insn had nothing to do
31056 with pairing. In any case, reset the state. */
31057 vec_pairing = 0;
31058 }
31059
31060 return cached_can_issue_more;
31061 }
31062
31063 /* We are about to begin issuing insns for this clock cycle. */
31064
31065 static int
31066 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31067 rtx_insn **ready ATTRIBUTE_UNUSED,
31068 int *pn_ready ATTRIBUTE_UNUSED,
31069 int clock_var ATTRIBUTE_UNUSED)
31070 {
31071 int n_ready = *pn_ready;
31072
31073 if (sched_verbose)
31074 fprintf (dump, "// rs6000_sched_reorder :\n");
31075
31076 /* Reorder the ready list if the insn that would be scheduled next
31077 is a nonpipelined insn. */
31078 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31079 {
31080 if (is_nonpipeline_insn (ready[n_ready - 1])
31081 && (recog_memoized (ready[n_ready - 2]) > 0))
31082 /* Simply swap first two insns. */
31083 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31084 }
31085
31086 if (rs6000_tune == PROCESSOR_POWER6)
31087 load_store_pendulum = 0;
31088
31089 return rs6000_issue_rate ();
31090 }
31091
31092 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31093
31094 static int
31095 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31096 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31097 {
31098 if (sched_verbose)
31099 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31100
31101 /* For Power6, we need to handle some special cases to try to keep the
31102 store queue from overflowing and triggering expensive flushes.
31103
31104 This code monitors how load and store instructions are being issued
31105 and skews the ready list one way or the other to increase the likelihood
31106 that a desired instruction is issued at the proper time.
31107
31108 A couple of things are done. First, we maintain a "load_store_pendulum"
31109 to track the current state of load/store issue.
31110
31111 - If the pendulum is at zero, then no loads or stores have been
31112 issued in the current cycle so we do nothing.
31113
31114 - If the pendulum is 1, then a single load has been issued in this
31115 cycle and we attempt to locate another load in the ready list to
31116 issue with it.
31117
31118 - If the pendulum is -2, then two stores have already been
31119 issued in this cycle, so we increase the priority of the first load
31120 in the ready list to increase its likelihood of being chosen first
31121 in the next cycle.
31122
31123 - If the pendulum is -1, then a single store has been issued in this
31124 cycle and we attempt to locate another store in the ready list to
31125 issue with it, preferring a store to an adjacent memory location to
31126 facilitate store pairing in the store queue.
31127
31128 - If the pendulum is 2, then two loads have already been
31129 issued in this cycle, so we increase the priority of the first store
31130 in the ready list to increase its likelihood of being chosen first
31131 in the next cycle.
31132
31133 - If the pendulum < -2 or > 2, then do nothing.
31134
31135 Note: This code covers the most common scenarios. There exist
31136 non-load/store instructions which make use of the LSU and which
31137 would need to be accounted for to strictly model the behavior
31138 of the machine. Those instructions are currently unaccounted
31139 for, to help minimize the compile-time overhead of this code.
31140 */
31141 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31142 {
31143 int pos;
31144 int i;
31145 rtx_insn *tmp;
31146 rtx load_mem, str_mem;
31147
31148 if (is_store_insn (last_scheduled_insn, &str_mem))
31149 /* Issuing a store, swing the load_store_pendulum to the left. */
31150 load_store_pendulum--;
31151 else if (is_load_insn (last_scheduled_insn, &load_mem))
31152 /* Issuing a load, swing the load_store_pendulum to the right. */
31153 load_store_pendulum++;
31154 else
31155 return cached_can_issue_more;
31156
31157 /* If the pendulum is balanced, or there is only one instruction on
31158 the ready list, then all is well, so return. */
31159 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31160 return cached_can_issue_more;
31161
31162 if (load_store_pendulum == 1)
31163 {
31164 /* A load has been issued in this cycle. Scan the ready list
31165 for another load to issue with it. */
31166 pos = *pn_ready - 1;
31167
31168 while (pos >= 0)
31169 {
31170 if (is_load_insn (ready[pos], &load_mem))
31171 {
31172 /* Found a load. Move it to the head of the ready list,
31173 and adjust its priority so that it is more likely to
31174 stay there. */
31175 tmp = ready[pos];
31176 for (i = pos; i < *pn_ready - 1; i++)
31177 ready[i] = ready[i + 1];
31178 ready[*pn_ready - 1] = tmp;
31179
31180 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31181 INSN_PRIORITY (tmp)++;
31182 break;
31183 }
31184 pos--;
31185 }
31186 }
31187 else if (load_store_pendulum == -2)
31188 {
31189 /* Two stores have been issued in this cycle. Increase the
31190 priority of the first load in the ready list to favor it for
31191 issuing in the next cycle. */
31192 pos = *pn_ready - 1;
31193
31194 while (pos >= 0)
31195 {
31196 if (is_load_insn (ready[pos], &load_mem)
31197 && !sel_sched_p ()
31198 && INSN_PRIORITY_KNOWN (ready[pos]))
31199 {
31200 INSN_PRIORITY (ready[pos])++;
31201
31202 /* Adjust the pendulum to account for the fact that a load
31203 was found and increased in priority. This is to prevent
31204 increasing the priority of multiple loads. */
31205 load_store_pendulum--;
31206
31207 break;
31208 }
31209 pos--;
31210 }
31211 }
31212 else if (load_store_pendulum == -1)
31213 {
31214 /* A store has been issued in this cycle. Scan the ready list for
31215 another store to issue with it, preferring a store to an adjacent
31216 memory location. */
31217 int first_store_pos = -1;
31218
31219 pos = *pn_ready - 1;
31220
31221 while (pos >= 0)
31222 {
31223 if (is_store_insn (ready[pos], &str_mem))
31224 {
31225 rtx str_mem2;
31226 /* Maintain the index of the first store found on the
31227 list. */
31228 if (first_store_pos == -1)
31229 first_store_pos = pos;
31230
31231 if (is_store_insn (last_scheduled_insn, &str_mem2)
31232 && adjacent_mem_locations (str_mem, str_mem2))
31233 {
31234 /* Found an adjacent store. Move it to the head of the
31235 ready list, and adjust its priority so that it is
31236 more likely to stay there. */
31237 tmp = ready[pos];
31238 for (i = pos; i < *pn_ready - 1; i++)
31239 ready[i] = ready[i + 1];
31240 ready[*pn_ready - 1] = tmp;
31241
31242 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31243 INSN_PRIORITY (tmp)++;
31244
31245 first_store_pos = -1;
31246
31247 break;
31248 }
31249 }
31250 pos--;
31251 }
31252
31253 if (first_store_pos >= 0)
31254 {
31255 /* An adjacent store wasn't found, but a non-adjacent store was,
31256 so move the non-adjacent store to the front of the ready
31257 list, and adjust its priority so that it is more likely to
31258 stay there. */
31259 tmp = ready[first_store_pos];
31260 for (i = first_store_pos; i < *pn_ready - 1; i++)
31261 ready[i] = ready[i + 1];
31262 ready[*pn_ready - 1] = tmp;
31263 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31264 INSN_PRIORITY (tmp)++;
31265 }
31266 }
31267 else if (load_store_pendulum == 2)
31268 {
31269 /* Two loads have been issued in this cycle. Increase the priority
31270 of the first store in the ready list to favor it for issuing in
31271 the next cycle. */
31272 pos = *pn_ready - 1;
31273
31274 while (pos >= 0)
31275 {
31276 if (is_store_insn (ready[pos], &str_mem)
31277 && !sel_sched_p ()
31278 && INSN_PRIORITY_KNOWN (ready[pos]))
31279 {
31280 INSN_PRIORITY (ready[pos])++;
31281
31282 /* Adjust the pendulum to account for the fact that a store
31283 was found and increased in priority. This is to prevent
31284 increasing the priority of multiple stores. */
31285 load_store_pendulum++;
31286
31287 break;
31288 }
31289 pos--;
31290 }
31291 }
31292 }
31293
31294 /* Do Power9 dependent reordering if necessary. */
31295 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31296 && recog_memoized (last_scheduled_insn) >= 0)
31297 return power9_sched_reorder2 (ready, *pn_ready - 1);
31298
31299 return cached_can_issue_more;
31300 }
31301
31302 /* Return whether the presence of INSN causes a dispatch group termination
31303 of group WHICH_GROUP.
31304
31305 If WHICH_GROUP == current_group, this function will return true if INSN
31306 causes the termination of the current group (i.e., the dispatch group to
31307 which INSN belongs). This means that INSN will be the last insn in the
31308 group it belongs to.
31309
31310 If WHICH_GROUP == previous_group, this function will return true if INSN
31311 causes the termination of the previous group (i.e., the dispatch group that
31312 precedes the group to which INSN belongs). This means that INSN will be
31313 the first insn in the group it belongs to. */
31314
31315 static bool
31316 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31317 {
31318 bool first, last;
31319
31320 if (! insn)
31321 return false;
31322
31323 first = insn_must_be_first_in_group (insn);
31324 last = insn_must_be_last_in_group (insn);
31325
31326 if (first && last)
31327 return true;
31328
31329 if (which_group == current_group)
31330 return last;
31331 else if (which_group == previous_group)
31332 return first;
31333
31334 return false;
31335 }
31336
31337
31338 static bool
31339 insn_must_be_first_in_group (rtx_insn *insn)
31340 {
31341 enum attr_type type;
31342
31343 if (!insn
31344 || NOTE_P (insn)
31345 || DEBUG_INSN_P (insn)
31346 || GET_CODE (PATTERN (insn)) == USE
31347 || GET_CODE (PATTERN (insn)) == CLOBBER)
31348 return false;
31349
31350 switch (rs6000_tune)
31351 {
31352 case PROCESSOR_POWER5:
31353 if (is_cracked_insn (insn))
31354 return true;
31355 /* FALLTHRU */
31356 case PROCESSOR_POWER4:
31357 if (is_microcoded_insn (insn))
31358 return true;
31359
31360 if (!rs6000_sched_groups)
31361 return false;
31362
31363 type = get_attr_type (insn);
31364
31365 switch (type)
31366 {
31367 case TYPE_MFCR:
31368 case TYPE_MFCRF:
31369 case TYPE_MTCR:
31370 case TYPE_CR_LOGICAL:
31371 case TYPE_MTJMPR:
31372 case TYPE_MFJMPR:
31373 case TYPE_DIV:
31374 case TYPE_LOAD_L:
31375 case TYPE_STORE_C:
31376 case TYPE_ISYNC:
31377 case TYPE_SYNC:
31378 return true;
31379 default:
31380 break;
31381 }
31382 break;
31383 case PROCESSOR_POWER6:
31384 type = get_attr_type (insn);
31385
31386 switch (type)
31387 {
31388 case TYPE_EXTS:
31389 case TYPE_CNTLZ:
31390 case TYPE_TRAP:
31391 case TYPE_MUL:
31392 case TYPE_INSERT:
31393 case TYPE_FPCOMPARE:
31394 case TYPE_MFCR:
31395 case TYPE_MTCR:
31396 case TYPE_MFJMPR:
31397 case TYPE_MTJMPR:
31398 case TYPE_ISYNC:
31399 case TYPE_SYNC:
31400 case TYPE_LOAD_L:
31401 case TYPE_STORE_C:
31402 return true;
31403 case TYPE_SHIFT:
31404 if (get_attr_dot (insn) == DOT_NO
31405 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31406 return true;
31407 else
31408 break;
31409 case TYPE_DIV:
31410 if (get_attr_size (insn) == SIZE_32)
31411 return true;
31412 else
31413 break;
31414 case TYPE_LOAD:
31415 case TYPE_STORE:
31416 case TYPE_FPLOAD:
31417 case TYPE_FPSTORE:
31418 if (get_attr_update (insn) == UPDATE_YES)
31419 return true;
31420 else
31421 break;
31422 default:
31423 break;
31424 }
31425 break;
31426 case PROCESSOR_POWER7:
31427 type = get_attr_type (insn);
31428
31429 switch (type)
31430 {
31431 case TYPE_CR_LOGICAL:
31432 case TYPE_MFCR:
31433 case TYPE_MFCRF:
31434 case TYPE_MTCR:
31435 case TYPE_DIV:
31436 case TYPE_ISYNC:
31437 case TYPE_LOAD_L:
31438 case TYPE_STORE_C:
31439 case TYPE_MFJMPR:
31440 case TYPE_MTJMPR:
31441 return true;
31442 case TYPE_MUL:
31443 case TYPE_SHIFT:
31444 case TYPE_EXTS:
31445 if (get_attr_dot (insn) == DOT_YES)
31446 return true;
31447 else
31448 break;
31449 case TYPE_LOAD:
31450 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31451 || get_attr_update (insn) == UPDATE_YES)
31452 return true;
31453 else
31454 break;
31455 case TYPE_STORE:
31456 case TYPE_FPLOAD:
31457 case TYPE_FPSTORE:
31458 if (get_attr_update (insn) == UPDATE_YES)
31459 return true;
31460 else
31461 break;
31462 default:
31463 break;
31464 }
31465 break;
31466 case PROCESSOR_POWER8:
31467 type = get_attr_type (insn);
31468
31469 switch (type)
31470 {
31471 case TYPE_CR_LOGICAL:
31472 case TYPE_MFCR:
31473 case TYPE_MFCRF:
31474 case TYPE_MTCR:
31475 case TYPE_SYNC:
31476 case TYPE_ISYNC:
31477 case TYPE_LOAD_L:
31478 case TYPE_STORE_C:
31479 case TYPE_VECSTORE:
31480 case TYPE_MFJMPR:
31481 case TYPE_MTJMPR:
31482 return true;
31483 case TYPE_SHIFT:
31484 case TYPE_EXTS:
31485 case TYPE_MUL:
31486 if (get_attr_dot (insn) == DOT_YES)
31487 return true;
31488 else
31489 break;
31490 case TYPE_LOAD:
31491 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31492 || get_attr_update (insn) == UPDATE_YES)
31493 return true;
31494 else
31495 break;
31496 case TYPE_STORE:
31497 if (get_attr_update (insn) == UPDATE_YES
31498 && get_attr_indexed (insn) == INDEXED_YES)
31499 return true;
31500 else
31501 break;
31502 default:
31503 break;
31504 }
31505 break;
31506 default:
31507 break;
31508 }
31509
31510 return false;
31511 }
31512
31513 static bool
31514 insn_must_be_last_in_group (rtx_insn *insn)
31515 {
31516 enum attr_type type;
31517
31518 if (!insn
31519 || NOTE_P (insn)
31520 || DEBUG_INSN_P (insn)
31521 || GET_CODE (PATTERN (insn)) == USE
31522 || GET_CODE (PATTERN (insn)) == CLOBBER)
31523 return false;
31524
31525 switch (rs6000_tune) {
31526 case PROCESSOR_POWER4:
31527 case PROCESSOR_POWER5:
31528 if (is_microcoded_insn (insn))
31529 return true;
31530
31531 if (is_branch_slot_insn (insn))
31532 return true;
31533
31534 break;
31535 case PROCESSOR_POWER6:
31536 type = get_attr_type (insn);
31537
31538 switch (type)
31539 {
31540 case TYPE_EXTS:
31541 case TYPE_CNTLZ:
31542 case TYPE_TRAP:
31543 case TYPE_MUL:
31544 case TYPE_FPCOMPARE:
31545 case TYPE_MFCR:
31546 case TYPE_MTCR:
31547 case TYPE_MFJMPR:
31548 case TYPE_MTJMPR:
31549 case TYPE_ISYNC:
31550 case TYPE_SYNC:
31551 case TYPE_LOAD_L:
31552 case TYPE_STORE_C:
31553 return true;
31554 case TYPE_SHIFT:
31555 if (get_attr_dot (insn) == DOT_NO
31556 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31557 return true;
31558 else
31559 break;
31560 case TYPE_DIV:
31561 if (get_attr_size (insn) == SIZE_32)
31562 return true;
31563 else
31564 break;
31565 default:
31566 break;
31567 }
31568 break;
31569 case PROCESSOR_POWER7:
31570 type = get_attr_type (insn);
31571
31572 switch (type)
31573 {
31574 case TYPE_ISYNC:
31575 case TYPE_SYNC:
31576 case TYPE_LOAD_L:
31577 case TYPE_STORE_C:
31578 return true;
31579 case TYPE_LOAD:
31580 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31581 && get_attr_update (insn) == UPDATE_YES)
31582 return true;
31583 else
31584 break;
31585 case TYPE_STORE:
31586 if (get_attr_update (insn) == UPDATE_YES
31587 && get_attr_indexed (insn) == INDEXED_YES)
31588 return true;
31589 else
31590 break;
31591 default:
31592 break;
31593 }
31594 break;
31595 case PROCESSOR_POWER8:
31596 type = get_attr_type (insn);
31597
31598 switch (type)
31599 {
31600 case TYPE_MFCR:
31601 case TYPE_MTCR:
31602 case TYPE_ISYNC:
31603 case TYPE_SYNC:
31604 case TYPE_LOAD_L:
31605 case TYPE_STORE_C:
31606 return true;
31607 case TYPE_LOAD:
31608 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31609 && get_attr_update (insn) == UPDATE_YES)
31610 return true;
31611 else
31612 break;
31613 case TYPE_STORE:
31614 if (get_attr_update (insn) == UPDATE_YES
31615 && get_attr_indexed (insn) == INDEXED_YES)
31616 return true;
31617 else
31618 break;
31619 default:
31620 break;
31621 }
31622 break;
31623 default:
31624 break;
31625 }
31626
31627 return false;
31628 }
31629
31630 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31631 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31632
31633 static bool
31634 is_costly_group (rtx *group_insns, rtx next_insn)
31635 {
31636 int i;
31637 int issue_rate = rs6000_issue_rate ();
31638
31639 for (i = 0; i < issue_rate; i++)
31640 {
31641 sd_iterator_def sd_it;
31642 dep_t dep;
31643 rtx insn = group_insns[i];
31644
31645 if (!insn)
31646 continue;
31647
31648 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31649 {
31650 rtx next = DEP_CON (dep);
31651
31652 if (next == next_insn
31653 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31654 return true;
31655 }
31656 }
31657
31658 return false;
31659 }
31660
31661 /* Helper for the function redefine_groups.
31662 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31663 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31664 to keep it "far" (in a separate group) from GROUP_INSNS, following
31665 one of the following schemes, depending on the value of the flag
31666 -minsert-sched-nops = X:
31667 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31668 in order to force NEXT_INSN into a separate group.
31669 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31670 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31671 insertion (has a group just ended, how many vacant issue slots remain in the
31672 last group, and how many dispatch groups were encountered so far). */
31673
31674 static int
31675 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31676 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31677 int *group_count)
31678 {
31679 rtx nop;
31680 bool force;
31681 int issue_rate = rs6000_issue_rate ();
31682 bool end = *group_end;
31683 int i;
31684
31685 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31686 return can_issue_more;
31687
31688 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31689 return can_issue_more;
31690
31691 force = is_costly_group (group_insns, next_insn);
31692 if (!force)
31693 return can_issue_more;
31694
31695 if (sched_verbose > 6)
31696 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
31697 *group_count ,can_issue_more);
31698
31699 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31700 {
31701 if (*group_end)
31702 can_issue_more = 0;
31703
31704 /* Since only a branch can be issued in the last issue_slot, it is
31705 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31706 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31707 in this case the last nop will start a new group and the branch
31708 will be forced to the new group. */
31709 if (can_issue_more && !is_branch_slot_insn (next_insn))
31710 can_issue_more--;
31711
31712 /* Do we have a special group ending nop? */
31713 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
31714 || rs6000_tune == PROCESSOR_POWER8)
31715 {
31716 nop = gen_group_ending_nop ();
31717 emit_insn_before (nop, next_insn);
31718 can_issue_more = 0;
31719 }
31720 else
31721 while (can_issue_more > 0)
31722 {
31723 nop = gen_nop ();
31724 emit_insn_before (nop, next_insn);
31725 can_issue_more--;
31726 }
31727
31728 *group_end = true;
31729 return 0;
31730 }
31731
31732 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31733 {
31734 int n_nops = rs6000_sched_insert_nops;
31735
31736 /* Nops can't be issued from the branch slot, so the effective
31737 issue_rate for nops is 'issue_rate - 1'. */
31738 if (can_issue_more == 0)
31739 can_issue_more = issue_rate;
31740 can_issue_more--;
31741 if (can_issue_more == 0)
31742 {
31743 can_issue_more = issue_rate - 1;
31744 (*group_count)++;
31745 end = true;
31746 for (i = 0; i < issue_rate; i++)
31747 {
31748 group_insns[i] = 0;
31749 }
31750 }
31751
31752 while (n_nops > 0)
31753 {
31754 nop = gen_nop ();
31755 emit_insn_before (nop, next_insn);
31756 if (can_issue_more == issue_rate - 1) /* new group begins */
31757 end = false;
31758 can_issue_more--;
31759 if (can_issue_more == 0)
31760 {
31761 can_issue_more = issue_rate - 1;
31762 (*group_count)++;
31763 end = true;
31764 for (i = 0; i < issue_rate; i++)
31765 {
31766 group_insns[i] = 0;
31767 }
31768 }
31769 n_nops--;
31770 }
31771
31772 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31773 can_issue_more++;
31774
31775 /* Is next_insn going to start a new group? */
31776 *group_end
31777 = (end
31778 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31779 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31780 || (can_issue_more < issue_rate
31781 && insn_terminates_group_p (next_insn, previous_group)));
31782 if (*group_end && end)
31783 (*group_count)--;
31784
31785 if (sched_verbose > 6)
31786 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31787 *group_count, can_issue_more);
31788 return can_issue_more;
31789 }
31790
31791 return can_issue_more;
31792 }
31793
31794 /* This function tries to synchronize the dispatch groups that the compiler "sees"
31795 with the dispatch groups that the processor dispatcher is expected to
31796 form in practice. It tries to achieve this synchronization by forcing the
31797 estimated processor grouping on the compiler (as opposed to the function
31798 'pad_groups' which tries to force the scheduler's grouping on the processor).
31799
31800 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31801 examines the (estimated) dispatch groups that will be formed by the processor
31802 dispatcher. It marks these group boundaries to reflect the estimated
31803 processor grouping, overriding the grouping that the scheduler had marked.
31804 Depending on the value of the flag '-minsert-sched-nops' this function can
31805 force certain insns into separate groups or force a certain distance between
31806 them by inserting nops, for example, if there exists a "costly dependence"
31807 between the insns.
31808
31809 The function estimates the group boundaries that the processor will form as
31810 follows: It keeps track of how many vacant issue slots are available after
31811 each insn. A subsequent insn will start a new group if one of the following
31812 4 cases applies:
31813 - no more vacant issue slots remain in the current dispatch group.
31814 - only the last issue slot, which is the branch slot, is vacant, but the next
31815 insn is not a branch.
31816 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
31817 which means that a cracked insn (which occupies two issue slots) can't be
31818 issued in this group.
31819 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31820 start a new group. */
31821
31822 static int
31823 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31824 rtx_insn *tail)
31825 {
31826 rtx_insn *insn, *next_insn;
31827 int issue_rate;
31828 int can_issue_more;
31829 int slot, i;
31830 bool group_end;
31831 int group_count = 0;
31832 rtx *group_insns;
31833
31834 /* Initialize. */
31835 issue_rate = rs6000_issue_rate ();
31836 group_insns = XALLOCAVEC (rtx, issue_rate);
31837 for (i = 0; i < issue_rate; i++)
31838 {
31839 group_insns[i] = 0;
31840 }
31841 can_issue_more = issue_rate;
31842 slot = 0;
31843 insn = get_next_active_insn (prev_head_insn, tail);
31844 group_end = false;
31845
31846 while (insn != NULL_RTX)
31847 {
31848 slot = (issue_rate - can_issue_more);
31849 group_insns[slot] = insn;
31850 can_issue_more =
31851 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31852 if (insn_terminates_group_p (insn, current_group))
31853 can_issue_more = 0;
31854
31855 next_insn = get_next_active_insn (insn, tail);
31856 if (next_insn == NULL_RTX)
31857 return group_count + 1;
31858
31859 /* Is next_insn going to start a new group? */
31860 group_end
31861 = (can_issue_more == 0
31862 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31863 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31864 || (can_issue_more < issue_rate
31865 && insn_terminates_group_p (next_insn, previous_group)));
31866
31867 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
31868 next_insn, &group_end, can_issue_more,
31869 &group_count);
31870
31871 if (group_end)
31872 {
31873 group_count++;
31874 can_issue_more = 0;
31875 for (i = 0; i < issue_rate; i++)
31876 {
31877 group_insns[i] = 0;
31878 }
31879 }
31880
31881 if (GET_MODE (next_insn) == TImode && can_issue_more)
31882 PUT_MODE (next_insn, VOIDmode);
31883 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
31884 PUT_MODE (next_insn, TImode);
31885
31886 insn = next_insn;
31887 if (can_issue_more == 0)
31888 can_issue_more = issue_rate;
31889 } /* while */
31890
31891 return group_count;
31892 }
31893
31894 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
31895 dispatch group boundaries that the scheduler had marked. Pad with nops
31896 any dispatch groups which have vacant issue slots, in order to force the
31897 scheduler's grouping on the processor dispatcher. The function
31898 returns the number of dispatch groups found. */
31899
31900 static int
31901 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31902 rtx_insn *tail)
31903 {
31904 rtx_insn *insn, *next_insn;
31905 rtx nop;
31906 int issue_rate;
31907 int can_issue_more;
31908 int group_end;
31909 int group_count = 0;
31910
31911 /* Initialize issue_rate. */
31912 issue_rate = rs6000_issue_rate ();
31913 can_issue_more = issue_rate;
31914
31915 insn = get_next_active_insn (prev_head_insn, tail);
31916 next_insn = get_next_active_insn (insn, tail);
31917
31918 while (insn != NULL_RTX)
31919 {
31920 can_issue_more =
31921 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31922
31923 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
31924
31925 if (next_insn == NULL_RTX)
31926 break;
31927
31928 if (group_end)
31929 {
31930 /* If the scheduler had marked group termination at this location
31931 (between insn and next_insn), and neither insn nor next_insn will
31932 force group termination on its own, pad the group with nops to
31933 force the termination here. */
31934 if (can_issue_more
31935 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
31936 && !insn_terminates_group_p (insn, current_group)
31937 && !insn_terminates_group_p (next_insn, previous_group))
31938 {
31939 if (!is_branch_slot_insn (next_insn))
31940 can_issue_more--;
31941
31942 while (can_issue_more)
31943 {
31944 nop = gen_nop ();
31945 emit_insn_before (nop, next_insn);
31946 can_issue_more--;
31947 }
31948 }
31949
31950 can_issue_more = issue_rate;
31951 group_count++;
31952 }
31953
31954 insn = next_insn;
31955 next_insn = get_next_active_insn (insn, tail);
31956 }
31957
31958 return group_count;
31959 }
31960
31961 /* We're beginning a new block. Initialize data structures as necessary. */
31962
31963 static void
31964 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
31965 int sched_verbose ATTRIBUTE_UNUSED,
31966 int max_ready ATTRIBUTE_UNUSED)
31967 {
31968 last_scheduled_insn = NULL;
31969 load_store_pendulum = 0;
31970 divide_cnt = 0;
31971 vec_pairing = 0;
31972 }
31973
31974 /* The following function is called at the end of scheduling BB.
31975 After reload, it inserts nops to enforce insn group bundling. */
31976
31977 static void
31978 rs6000_sched_finish (FILE *dump, int sched_verbose)
31979 {
31980 int n_groups;
31981
31982 if (sched_verbose)
31983 fprintf (dump, "=== Finishing schedule.\n");
31984
31985 if (reload_completed && rs6000_sched_groups)
31986 {
31987 /* Do not run sched_finish hook when selective scheduling enabled. */
31988 if (sel_sched_p ())
31989 return;
31990
31991 if (rs6000_sched_insert_nops == sched_finish_none)
31992 return;
31993
31994 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
31995 n_groups = pad_groups (dump, sched_verbose,
31996 current_sched_info->prev_head,
31997 current_sched_info->next_tail);
31998 else
31999 n_groups = redefine_groups (dump, sched_verbose,
32000 current_sched_info->prev_head,
32001 current_sched_info->next_tail);
32002
32003 if (sched_verbose >= 6)
32004 {
32005 fprintf (dump, "ngroups = %d\n", n_groups);
32006 print_rtl (dump, current_sched_info->prev_head);
32007 fprintf (dump, "Done finish_sched\n");
32008 }
32009 }
32010 }
32011
32012 struct rs6000_sched_context
32013 {
32014 short cached_can_issue_more;
32015 rtx_insn *last_scheduled_insn;
32016 int load_store_pendulum;
32017 int divide_cnt;
32018 int vec_pairing;
32019 };
32020
32021 typedef struct rs6000_sched_context rs6000_sched_context_def;
32022 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32023
32024 /* Allocate storage for a new scheduling context. */
32025 static void *
32026 rs6000_alloc_sched_context (void)
32027 {
32028 return xmalloc (sizeof (rs6000_sched_context_def));
32029 }
32030
32031 /* If CLEAN_P is true, initialize _SC with clean data;
32032 otherwise initialize it from the global context. */
32033 static void
32034 rs6000_init_sched_context (void *_sc, bool clean_p)
32035 {
32036 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32037
32038 if (clean_p)
32039 {
32040 sc->cached_can_issue_more = 0;
32041 sc->last_scheduled_insn = NULL;
32042 sc->load_store_pendulum = 0;
32043 sc->divide_cnt = 0;
32044 sc->vec_pairing = 0;
32045 }
32046 else
32047 {
32048 sc->cached_can_issue_more = cached_can_issue_more;
32049 sc->last_scheduled_insn = last_scheduled_insn;
32050 sc->load_store_pendulum = load_store_pendulum;
32051 sc->divide_cnt = divide_cnt;
32052 sc->vec_pairing = vec_pairing;
32053 }
32054 }
32055
32056 /* Sets the global scheduling context to the one pointed to by _SC. */
32057 static void
32058 rs6000_set_sched_context (void *_sc)
32059 {
32060 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32061
32062 gcc_assert (sc != NULL);
32063
32064 cached_can_issue_more = sc->cached_can_issue_more;
32065 last_scheduled_insn = sc->last_scheduled_insn;
32066 load_store_pendulum = sc->load_store_pendulum;
32067 divide_cnt = sc->divide_cnt;
32068 vec_pairing = sc->vec_pairing;
32069 }
32070
32071 /* Free _SC. */
32072 static void
32073 rs6000_free_sched_context (void *_sc)
32074 {
32075 gcc_assert (_sc != NULL);
32076
32077 free (_sc);
32078 }
32079
32080 static bool
32081 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32082 {
32083 switch (get_attr_type (insn))
32084 {
32085 case TYPE_DIV:
32086 case TYPE_SDIV:
32087 case TYPE_DDIV:
32088 case TYPE_VECDIV:
32089 case TYPE_SSQRT:
32090 case TYPE_DSQRT:
32091 return false;
32092
32093 default:
32094 return true;
32095 }
32096 }
32097 \f
32098 /* Return the length in units of the trampoline for entering a nested function. */
32099
32100 int
32101 rs6000_trampoline_size (void)
32102 {
32103 int ret = 0;
32104
32105 switch (DEFAULT_ABI)
32106 {
32107 default:
32108 gcc_unreachable ();
32109
32110 case ABI_AIX:
32111 ret = (TARGET_32BIT) ? 12 : 24;
32112 break;
32113
32114 case ABI_ELFv2:
32115 gcc_assert (!TARGET_32BIT);
32116 ret = 32;
32117 break;
32118
32119 case ABI_DARWIN:
32120 case ABI_V4:
32121 ret = (TARGET_32BIT) ? 40 : 48;
32122 break;
32123 }
32124
32125 return ret;
32126 }
32127
32128 /* Emit RTL insns to initialize the variable parts of a trampoline.
32129 FNADDR is an RTX for the address of the function's pure code.
32130 CXT is an RTX for the static chain value for the function. */
32131
32132 static void
32133 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32134 {
32135 int regsize = (TARGET_32BIT) ? 4 : 8;
32136 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32137 rtx ctx_reg = force_reg (Pmode, cxt);
32138 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32139
32140 switch (DEFAULT_ABI)
32141 {
32142 default:
32143 gcc_unreachable ();
32144
32145 /* Under AIX, just build the 3-word function descriptor. */
32146 case ABI_AIX:
32147 {
32148 rtx fnmem, fn_reg, toc_reg;
32149
32150 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32151 error ("you cannot take the address of a nested function if you use "
32152 "the %qs option", "-mno-pointers-to-nested-functions");
32153
32154 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32155 fn_reg = gen_reg_rtx (Pmode);
32156 toc_reg = gen_reg_rtx (Pmode);
32157
32158 /* Macro to shorten the code expansions below. */
32159 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32160
32161 m_tramp = replace_equiv_address (m_tramp, addr);
32162
32163 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32164 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32165 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32166 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32167 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32168
32169 # undef MEM_PLUS
32170 }
32171 break;
32172
32173 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32174 case ABI_ELFv2:
32175 case ABI_DARWIN:
32176 case ABI_V4:
32177 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32178 LCT_NORMAL, VOIDmode,
32179 addr, Pmode,
32180 GEN_INT (rs6000_trampoline_size ()), SImode,
32181 fnaddr, Pmode,
32182 ctx_reg, Pmode);
32183 break;
32184 }
32185 }
32186
32187 \f
32188 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32189 identifier as an argument, so the front end shouldn't look it up. */
32190
32191 static bool
32192 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32193 {
32194 return is_attribute_p ("altivec", attr_id);
32195 }
32196
32197 /* Handle the "altivec" attribute. The attribute may have
32198 arguments as follows:
32199
32200 __attribute__((altivec(vector__)))
32201 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32202 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32203
32204 and may appear more than once (e.g., 'vector bool char') in a
32205 given declaration. */
32206
32207 static tree
32208 rs6000_handle_altivec_attribute (tree *node,
32209 tree name ATTRIBUTE_UNUSED,
32210 tree args,
32211 int flags ATTRIBUTE_UNUSED,
32212 bool *no_add_attrs)
32213 {
32214 tree type = *node, result = NULL_TREE;
32215 machine_mode mode;
32216 int unsigned_p;
32217 char altivec_type
32218 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32219 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32220 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32221 : '?');
32222
32223 while (POINTER_TYPE_P (type)
32224 || TREE_CODE (type) == FUNCTION_TYPE
32225 || TREE_CODE (type) == METHOD_TYPE
32226 || TREE_CODE (type) == ARRAY_TYPE)
32227 type = TREE_TYPE (type);
32228
32229 mode = TYPE_MODE (type);
32230
32231 /* Check for invalid AltiVec type qualifiers. */
32232 if (type == long_double_type_node)
32233 error ("use of %<long double%> in AltiVec types is invalid");
32234 else if (type == boolean_type_node)
32235 error ("use of boolean types in AltiVec types is invalid");
32236 else if (TREE_CODE (type) == COMPLEX_TYPE)
32237 error ("use of %<complex%> in AltiVec types is invalid");
32238 else if (DECIMAL_FLOAT_MODE_P (mode))
32239 error ("use of decimal floating point types in AltiVec types is invalid");
32240 else if (!TARGET_VSX)
32241 {
32242 if (type == long_unsigned_type_node || type == long_integer_type_node)
32243 {
32244 if (TARGET_64BIT)
32245 error ("use of %<long%> in AltiVec types is invalid for "
32246 "64-bit code without %qs", "-mvsx");
32247 else if (rs6000_warn_altivec_long)
32248 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32249 "use %<int%>");
32250 }
32251 else if (type == long_long_unsigned_type_node
32252 || type == long_long_integer_type_node)
32253 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32254 "-mvsx");
32255 else if (type == double_type_node)
32256 error ("use of %<double%> in AltiVec types is invalid without %qs",
32257 "-mvsx");
32258 }
32259
32260 switch (altivec_type)
32261 {
32262 case 'v':
32263 unsigned_p = TYPE_UNSIGNED (type);
32264 switch (mode)
32265 {
32266 case E_TImode:
32267 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32268 break;
32269 case E_DImode:
32270 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32271 break;
32272 case E_SImode:
32273 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32274 break;
32275 case E_HImode:
32276 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32277 break;
32278 case E_QImode:
32279 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32280 break;
32281 case E_SFmode: result = V4SF_type_node; break;
32282 case E_DFmode: result = V2DF_type_node; break;
32283 /* If the user says 'vector int bool', we may be handed the 'bool'
32284 attribute _before_ the 'vector' attribute, and so select the
32285 proper type in the 'b' case below. */
32286 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32287 case E_V2DImode: case E_V2DFmode:
32288 result = type;
32289 default: break;
32290 }
32291 break;
32292 case 'b':
32293 switch (mode)
32294 {
32295 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32296 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32297 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32298 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32299 default: break;
32300 }
32301 break;
32302 case 'p':
32303 switch (mode)
32304 {
32305 case E_V8HImode: result = pixel_V8HI_type_node;
32306 default: break;
32307 }
32308 default: break;
32309 }
32310
32311 /* Propagate qualifiers attached to the element type
32312 onto the vector type. */
32313 if (result && result != type && TYPE_QUALS (type))
32314 result = build_qualified_type (result, TYPE_QUALS (type));
32315
32316 *no_add_attrs = true; /* No need to hang on to the attribute. */
32317
32318 if (result)
32319 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32320
32321 return NULL_TREE;
32322 }
32323
32324 /* AltiVec defines five built-in scalar types that serve as vector
32325 elements; we must teach the compiler how to mangle them. The 128-bit
32326 floating point mangling is target-specific as well. */
32327
32328 static const char *
32329 rs6000_mangle_type (const_tree type)
32330 {
32331 type = TYPE_MAIN_VARIANT (type);
32332
32333 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32334 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32335 return NULL;
32336
32337 if (type == bool_char_type_node) return "U6__boolc";
32338 if (type == bool_short_type_node) return "U6__bools";
32339 if (type == pixel_type_node) return "u7__pixel";
32340 if (type == bool_int_type_node) return "U6__booli";
32341 if (type == bool_long_long_type_node) return "U6__boolx";
32342
32343 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32344 return "g";
32345 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32346 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32347
32348 /* For all other types, use the default mangling. */
32349 return NULL;
32350 }
32351
32352 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32353 struct attribute_spec.handler. */
32354
32355 static tree
32356 rs6000_handle_longcall_attribute (tree *node, tree name,
32357 tree args ATTRIBUTE_UNUSED,
32358 int flags ATTRIBUTE_UNUSED,
32359 bool *no_add_attrs)
32360 {
32361 if (TREE_CODE (*node) != FUNCTION_TYPE
32362 && TREE_CODE (*node) != FIELD_DECL
32363 && TREE_CODE (*node) != TYPE_DECL)
32364 {
32365 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32366 name);
32367 *no_add_attrs = true;
32368 }
32369
32370 return NULL_TREE;
32371 }
32372
32373 /* Set longcall attributes on all functions declared when
32374 rs6000_default_long_calls is true. */
32375 static void
32376 rs6000_set_default_type_attributes (tree type)
32377 {
32378 if (rs6000_default_long_calls
32379 && (TREE_CODE (type) == FUNCTION_TYPE
32380 || TREE_CODE (type) == METHOD_TYPE))
32381 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32382 NULL_TREE,
32383 TYPE_ATTRIBUTES (type));
32384
32385 #if TARGET_MACHO
32386 darwin_set_default_type_attributes (type);
32387 #endif
32388 }
32389
32390 /* Return a reference suitable for calling a function with the
32391 longcall attribute. */
32392
32393 rtx
32394 rs6000_longcall_ref (rtx call_ref)
32395 {
32396 const char *call_name;
32397 tree node;
32398
32399 if (GET_CODE (call_ref) != SYMBOL_REF)
32400 return call_ref;
32401
32402 /* System V adds '.' to the internal name, so skip them. */
32403 call_name = XSTR (call_ref, 0);
32404 if (*call_name == '.')
32405 {
32406 while (*call_name == '.')
32407 call_name++;
32408
32409 node = get_identifier (call_name);
32410 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32411 }
32412
32413 return force_reg (Pmode, call_ref);
32414 }
32415 \f
32416 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32417 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32418 #endif
32419
32420 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32421 struct attribute_spec.handler. */
32422 static tree
32423 rs6000_handle_struct_attribute (tree *node, tree name,
32424 tree args ATTRIBUTE_UNUSED,
32425 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32426 {
32427 tree *type = NULL;
32428 if (DECL_P (*node))
32429 {
32430 if (TREE_CODE (*node) == TYPE_DECL)
32431 type = &TREE_TYPE (*node);
32432 }
32433 else
32434 type = node;
32435
32436 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32437 || TREE_CODE (*type) == UNION_TYPE)))
32438 {
32439 warning (OPT_Wattributes, "%qE attribute ignored", name);
32440 *no_add_attrs = true;
32441 }
32442
32443 else if ((is_attribute_p ("ms_struct", name)
32444 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32445 || ((is_attribute_p ("gcc_struct", name)
32446 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32447 {
32448 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32449 name);
32450 *no_add_attrs = true;
32451 }
32452
32453 return NULL_TREE;
32454 }
32455
32456 static bool
32457 rs6000_ms_bitfield_layout_p (const_tree record_type)
32458 {
32459 return (TARGET_USE_MS_BITFIELD_LAYOUT
32460 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32461 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32462 }
32463 \f
32464 #ifdef USING_ELFOS_H
32465
32466 /* A get_unnamed_section callback, used for switching to toc_section. */
32467
32468 static void
32469 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32470 {
32471 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32472 && TARGET_MINIMAL_TOC)
32473 {
32474 if (!toc_initialized)
32475 {
32476 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32477 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32478 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32479 fprintf (asm_out_file, "\t.tc ");
32480 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32481 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32482 fprintf (asm_out_file, "\n");
32483
32484 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32485 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32486 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32487 fprintf (asm_out_file, " = .+32768\n");
32488 toc_initialized = 1;
32489 }
32490 else
32491 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32492 }
32493 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32494 {
32495 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32496 if (!toc_initialized)
32497 {
32498 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32499 toc_initialized = 1;
32500 }
32501 }
32502 else
32503 {
32504 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32505 if (!toc_initialized)
32506 {
32507 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32508 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32509 fprintf (asm_out_file, " = .+32768\n");
32510 toc_initialized = 1;
32511 }
32512 }
32513 }
32514
32515 /* Implement TARGET_ASM_INIT_SECTIONS. */
32516
32517 static void
32518 rs6000_elf_asm_init_sections (void)
32519 {
32520 toc_section
32521 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32522
32523 sdata2_section
32524 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32525 SDATA2_SECTION_ASM_OP);
32526 }
32527
32528 /* Implement TARGET_SELECT_RTX_SECTION. */
32529
32530 static section *
32531 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32532 unsigned HOST_WIDE_INT align)
32533 {
32534 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32535 return toc_section;
32536 else
32537 return default_elf_select_rtx_section (mode, x, align);
32538 }
32539 \f
32540 /* For a SYMBOL_REF, set generic flags and then perform some
32541 target-specific processing.
32542
32543 When the AIX ABI is requested on a non-AIX system, replace the
32544 function name with the real name (with a leading .) rather than the
32545 function descriptor name. This saves a lot of overriding code to
32546 read the prefixes. */
32547
32548 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32549 static void
32550 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32551 {
32552 default_encode_section_info (decl, rtl, first);
32553
32554 if (first
32555 && TREE_CODE (decl) == FUNCTION_DECL
32556 && !TARGET_AIX
32557 && DEFAULT_ABI == ABI_AIX)
32558 {
32559 rtx sym_ref = XEXP (rtl, 0);
32560 size_t len = strlen (XSTR (sym_ref, 0));
32561 char *str = XALLOCAVEC (char, len + 2);
32562 str[0] = '.';
32563 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32564 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32565 }
32566 }
32567
32568 static inline bool
32569 compare_section_name (const char *section, const char *templ)
32570 {
32571 int len;
32572
32573 len = strlen (templ);
32574 return (strncmp (section, templ, len) == 0
32575 && (section[len] == 0 || section[len] == '.'));
32576 }
32577
32578 bool
32579 rs6000_elf_in_small_data_p (const_tree decl)
32580 {
32581 if (rs6000_sdata == SDATA_NONE)
32582 return false;
32583
32584 /* We want to merge strings, so we never consider them small data. */
32585 if (TREE_CODE (decl) == STRING_CST)
32586 return false;
32587
32588 /* Functions are never in the small data area. */
32589 if (TREE_CODE (decl) == FUNCTION_DECL)
32590 return false;
32591
32592 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32593 {
32594 const char *section = DECL_SECTION_NAME (decl);
32595 if (compare_section_name (section, ".sdata")
32596 || compare_section_name (section, ".sdata2")
32597 || compare_section_name (section, ".gnu.linkonce.s")
32598 || compare_section_name (section, ".sbss")
32599 || compare_section_name (section, ".sbss2")
32600 || compare_section_name (section, ".gnu.linkonce.sb")
32601 || strcmp (section, ".PPC.EMB.sdata0") == 0
32602 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32603 return true;
32604 }
32605 else
32606 {
32607 /* If we are told not to put readonly data in sdata, then don't. */
32608 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
32609 && !rs6000_readonly_in_sdata)
32610 return false;
32611
32612 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32613
32614 if (size > 0
32615 && size <= g_switch_value
32616 	  /* If it's not public, and we're not going to reference it via the
32617 	     small data area anyway, there's no need to put it there.  */
32618 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32619 return true;
32620 }
32621
32622 return false;
32623 }
32624
32625 #endif /* USING_ELFOS_H */
32626 \f
32627 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32628
32629 static bool
32630 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32631 {
32632 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32633 }
32634
32635 /* Do not place thread-local symbols refs in the object blocks. */
32636
32637 static bool
32638 rs6000_use_blocks_for_decl_p (const_tree decl)
32639 {
32640 return !DECL_THREAD_LOCAL_P (decl);
32641 }
32642 \f
32643 /* Return a REG that occurs in ADDR with coefficient 1.
32644 ADDR can be effectively incremented by incrementing REG.
32645
32646 r0 is special and we must not select it as an address
32647 register by this routine since our caller will try to
32648 increment the returned register via an "la" instruction. */
32649
32650 rtx
32651 find_addr_reg (rtx addr)
32652 {
32653 while (GET_CODE (addr) == PLUS)
32654 {
32655 if (GET_CODE (XEXP (addr, 0)) == REG
32656 && REGNO (XEXP (addr, 0)) != 0)
32657 addr = XEXP (addr, 0);
32658 else if (GET_CODE (XEXP (addr, 1)) == REG
32659 && REGNO (XEXP (addr, 1)) != 0)
32660 addr = XEXP (addr, 1);
32661 else if (CONSTANT_P (XEXP (addr, 0)))
32662 addr = XEXP (addr, 1);
32663 else if (CONSTANT_P (XEXP (addr, 1)))
32664 addr = XEXP (addr, 0);
32665 else
32666 gcc_unreachable ();
32667 }
32668 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32669 return addr;
32670 }
32671
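/* For example, given ADDR of the form (plus (reg 9) (const_int 4)),
   the loop above selects (reg 9): constant operands are skipped, and
   register 0 is rejected because "la" would read it as the literal
   value zero.  */
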
32672 void
32673 rs6000_fatal_bad_address (rtx op)
32674 {
32675 fatal_insn ("bad address", op);
32676 }
32677
32678 #if TARGET_MACHO
32679
32680 typedef struct branch_island_d {
32681 tree function_name;
32682 tree label_name;
32683 int line_number;
32684 } branch_island;
32685
32686
32687 static vec<branch_island, va_gc> *branch_islands;
32688
32689 /* Remember to generate a branch island for far calls to the given
32690 function. */
32691
32692 static void
32693 add_compiler_branch_island (tree label_name, tree function_name,
32694 int line_number)
32695 {
32696 branch_island bi = {function_name, label_name, line_number};
32697 vec_safe_push (branch_islands, bi);
32698 }
32699
32700 /* Generate far-jump branch islands for everything recorded in
32701 branch_islands. Invoked immediately after the last instruction of
32702 the epilogue has been emitted; the branch islands must be appended
32703 to, and contiguous with, the function body. Mach-O stubs are
32704 generated in machopic_output_stub(). */
32705
32706 static void
32707 macho_branch_islands (void)
32708 {
32709 char tmp_buf[512];
32710
32711 while (!vec_safe_is_empty (branch_islands))
32712 {
32713 branch_island *bi = &branch_islands->last ();
32714 const char *label = IDENTIFIER_POINTER (bi->label_name);
32715 const char *name = IDENTIFIER_POINTER (bi->function_name);
32716 char name_buf[512];
32717 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32718 if (name[0] == '*' || name[0] == '&')
32719 strcpy (name_buf, name+1);
32720 else
32721 {
32722 name_buf[0] = '_';
32723 strcpy (name_buf+1, name);
32724 }
32725 strcpy (tmp_buf, "\n");
32726 strcat (tmp_buf, label);
32727 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32728 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32729 dbxout_stabd (N_SLINE, bi->line_number);
32730 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32731 if (flag_pic)
32732 {
32733 if (TARGET_LINK_STACK)
32734 {
32735 char name[32];
32736 get_ppc476_thunk_name (name);
32737 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32738 strcat (tmp_buf, name);
32739 strcat (tmp_buf, "\n");
32740 strcat (tmp_buf, label);
32741 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32742 }
32743 else
32744 {
32745 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32746 strcat (tmp_buf, label);
32747 strcat (tmp_buf, "_pic\n");
32748 strcat (tmp_buf, label);
32749 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32750 }
32751
32752 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32753 strcat (tmp_buf, name_buf);
32754 strcat (tmp_buf, " - ");
32755 strcat (tmp_buf, label);
32756 strcat (tmp_buf, "_pic)\n");
32757
32758 strcat (tmp_buf, "\tmtlr r0\n");
32759
32760 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32761 strcat (tmp_buf, name_buf);
32762 strcat (tmp_buf, " - ");
32763 strcat (tmp_buf, label);
32764 strcat (tmp_buf, "_pic)\n");
32765
32766 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32767 }
32768 else
32769 {
32770 strcat (tmp_buf, ":\nlis r12,hi16(");
32771 strcat (tmp_buf, name_buf);
32772 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32773 strcat (tmp_buf, name_buf);
32774 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32775 }
32776 output_asm_insn (tmp_buf, 0);
32777 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32778 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32779 dbxout_stabd (N_SLINE, bi->line_number);
32780 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32781 branch_islands->pop ();
32782 }
32783 }
32784
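/* For illustration, the PIC sequence built above (without
   TARGET_LINK_STACK) for an island labeled "L42" that targets
   function "_foo" comes out roughly as:

   L42:
	mflr r0
	bcl 20,31,L42_pic
   L42_pic:
	mflr r11
	addis r11,r11,ha16(_foo - L42_pic)
	mtlr r0
	addi r12,r11,lo16(_foo - L42_pic)
	mtctr r12
	bctr

   i.e. the island materializes the address of _foo in r12 and
   branches through the count register, restoring LR on the way.  */
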
32785 /* NO_PREVIOUS_DEF checks whether the function name is already in the
32786    branch island list.  */
32787
32788 static int
32789 no_previous_def (tree function_name)
32790 {
32791 branch_island *bi;
32792 unsigned ix;
32793
32794 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32795 if (function_name == bi->function_name)
32796 return 0;
32797 return 1;
32798 }
32799
32800 /* GET_PREV_LABEL gets the label name from the previous definition of
32801 the function. */
32802
32803 static tree
32804 get_prev_label (tree function_name)
32805 {
32806 branch_island *bi;
32807 unsigned ix;
32808
32809 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32810 if (function_name == bi->function_name)
32811 return bi->label_name;
32812 return NULL_TREE;
32813 }
32814
32815 /* INSN is a function call.  OPERANDS[DEST_OPERAND_NUMBER] is the routine
32816    we are calling; OPERANDS[COOKIE_OPERAND_NUMBER] holds the call cookie
32817    flags, such as CALL_LONG, which select the far-call sequence via a
32818    branch island.  */
32819
32820 char *
32821 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
32822 int cookie_operand_number)
32823 {
32824 static char buf[256];
32825 if (darwin_emit_branch_islands
32826 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
32827 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
32828 {
32829 tree labelname;
32830 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
32831
32832 if (no_previous_def (funname))
32833 {
32834 rtx label_rtx = gen_label_rtx ();
32835 char *label_buf, temp_buf[256];
32836 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32837 CODE_LABEL_NUMBER (label_rtx));
32838 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32839 labelname = get_identifier (label_buf);
32840 add_compiler_branch_island (labelname, funname, insn_line (insn));
32841 }
32842 else
32843 labelname = get_prev_label (funname);
32844
32845 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
32846 instruction will reach 'foo', otherwise link as 'bl L42'".
32847 "L42" should be a 'branch island', that will do a far jump to
32848 'foo'. Branch islands are generated in
32849 macho_branch_islands(). */
32850 sprintf (buf, "jbsr %%z%d,%.246s",
32851 dest_operand_number, IDENTIFIER_POINTER (labelname));
32852 }
32853 else
32854 sprintf (buf, "bl %%z%d", dest_operand_number);
32855 return buf;
32856 }
32857
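/* So a long call to "foo" is emitted as, say, "jbsr _foo,L42", with
   L42 recorded for macho_branch_islands above, while an ordinary
   call is just "bl _foo".  */
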
32858 /* Generate PIC and indirect symbol stubs. */
32859
32860 void
32861 machopic_output_stub (FILE *file, const char *symb, const char *stub)
32862 {
32863 unsigned int length;
32864 char *symbol_name, *lazy_ptr_name;
32865 char *local_label_0;
32866 static int label = 0;
32867
32868 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
32869 symb = (*targetm.strip_name_encoding) (symb);
32870
32871
32872 length = strlen (symb);
32873 symbol_name = XALLOCAVEC (char, length + 32);
32874 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
32875
32876 lazy_ptr_name = XALLOCAVEC (char, length + 32);
32877 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
32878
32879 if (flag_pic == 2)
32880 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
32881 else
32882 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
32883
32884 if (flag_pic == 2)
32885 {
32886 fprintf (file, "\t.align 5\n");
32887
32888 fprintf (file, "%s:\n", stub);
32889 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32890
32891 label++;
32892 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
32893 sprintf (local_label_0, "\"L%011d$spb\"", label);
32894
32895 fprintf (file, "\tmflr r0\n");
32896 if (TARGET_LINK_STACK)
32897 {
32898 char name[32];
32899 get_ppc476_thunk_name (name);
32900 fprintf (file, "\tbl %s\n", name);
32901 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32902 }
32903 else
32904 {
32905 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
32906 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32907 }
32908 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
32909 lazy_ptr_name, local_label_0);
32910 fprintf (file, "\tmtlr r0\n");
32911 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
32912 (TARGET_64BIT ? "ldu" : "lwzu"),
32913 lazy_ptr_name, local_label_0);
32914 fprintf (file, "\tmtctr r12\n");
32915 fprintf (file, "\tbctr\n");
32916 }
32917 else
32918 {
32919 fprintf (file, "\t.align 4\n");
32920
32921 fprintf (file, "%s:\n", stub);
32922 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32923
32924 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
32925 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
32926 (TARGET_64BIT ? "ldu" : "lwzu"),
32927 lazy_ptr_name);
32928 fprintf (file, "\tmtctr r12\n");
32929 fprintf (file, "\tbctr\n");
32930 }
32931
32932 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
32933 fprintf (file, "%s:\n", lazy_ptr_name);
32934 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32935 fprintf (file, "%sdyld_stub_binding_helper\n",
32936 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
32937 }
32938
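/* For a non-PIC 32-bit stub for "foo", the output has this shape
   (the stub and lazy-pointer names are illustrative; the real ones
   come from GEN_SYMBOL_NAME_FOR_SYMBOL and
   GEN_LAZY_PTR_NAME_FOR_SYMBOL):

	.align 4
   L_foo$stub:
	.indirect_symbol _foo
	lis r11,ha16(L_foo$lazy_ptr)
	lwzu r12,lo16(L_foo$lazy_ptr)(r11)
	mtctr r12
	bctr

   and then, in the lazy-symbol-pointer section:

   L_foo$lazy_ptr:
	.indirect_symbol _foo
	.long	dyld_stub_binding_helper

   The lazy pointer initially resolves to dyld_stub_binding_helper,
   which binds the real address on first use and patches the
   pointer.  */
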
32939 /* Legitimize PIC addresses. If the address is already
32940 position-independent, we return ORIG. Newly generated
32941    position-independent addresses go into a reg.  This is REG if
32942    nonzero, otherwise we allocate register(s) as necessary.  */
32943
32944 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
32945
32946 rtx
32947 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
32948 rtx reg)
32949 {
32950 rtx base, offset;
32951
32952 if (reg == NULL && !reload_completed)
32953 reg = gen_reg_rtx (Pmode);
32954
32955 if (GET_CODE (orig) == CONST)
32956 {
32957 rtx reg_temp;
32958
32959 if (GET_CODE (XEXP (orig, 0)) == PLUS
32960 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
32961 return orig;
32962
32963 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
32964
32965 /* Use a different reg for the intermediate value, as
32966 it will be marked UNCHANGING. */
32967 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
32968 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
32969 Pmode, reg_temp);
32970 offset =
32971 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
32972 Pmode, reg);
32973
32974 if (GET_CODE (offset) == CONST_INT)
32975 {
32976 if (SMALL_INT (offset))
32977 return plus_constant (Pmode, base, INTVAL (offset));
32978 else if (!reload_completed)
32979 offset = force_reg (Pmode, offset);
32980 else
32981 {
32982 rtx mem = force_const_mem (Pmode, orig);
32983 return machopic_legitimize_pic_address (mem, Pmode, reg);
32984 }
32985 }
32986 return gen_rtx_PLUS (Pmode, base, offset);
32987 }
32988
32989 /* Fall back on generic machopic code. */
32990 return machopic_legitimize_pic_address (orig, mode, reg);
32991 }
32992
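/* E.g. legitimizing (const (plus (symbol_ref "x") (const_int 8)))
   puts the PIC address of "x" in a register R and, since 8 is a
   SMALL_INT, returns (plus R (const_int 8)); larger offsets are
   forced into a register or pushed out to the constant pool as
   above.  */
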
32993 /* Output a .machine directive for the Darwin assembler, and call
32994 the generic start_file routine. */
32995
32996 static void
32997 rs6000_darwin_file_start (void)
32998 {
32999 static const struct
33000 {
33001 const char *arg;
33002 const char *name;
33003 HOST_WIDE_INT if_set;
33004 } mapping[] = {
33005 { "ppc64", "ppc64", MASK_64BIT },
33006 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33007 { "power4", "ppc970", 0 },
33008 { "G5", "ppc970", 0 },
33009 { "7450", "ppc7450", 0 },
33010 { "7400", "ppc7400", MASK_ALTIVEC },
33011 { "G4", "ppc7400", 0 },
33012 { "750", "ppc750", 0 },
33013 { "740", "ppc750", 0 },
33014 { "G3", "ppc750", 0 },
33015 { "604e", "ppc604e", 0 },
33016 { "604", "ppc604", 0 },
33017 { "603e", "ppc603", 0 },
33018 { "603", "ppc603", 0 },
33019 { "601", "ppc601", 0 },
33020 { NULL, "ppc", 0 } };
33021 const char *cpu_id = "";
33022 size_t i;
33023
33024 rs6000_file_start ();
33025 darwin_file_start ();
33026
33027 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33028
33029 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33030 cpu_id = rs6000_default_cpu;
33031
33032 if (global_options_set.x_rs6000_cpu_index)
33033 cpu_id = processor_target_table[rs6000_cpu_index].name;
33034
33035 /* Look through the mapping array. Pick the first name that either
33036 matches the argument, has a bit set in IF_SET that is also set
33037 in the target flags, or has a NULL name. */
33038
33039 i = 0;
33040 while (mapping[i].arg != NULL
33041 && strcmp (mapping[i].arg, cpu_id) != 0
33042 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33043 i++;
33044
33045 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33046 }
33047
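/* For instance, -mcpu=G5 maps to ".machine ppc970" via the table
   above, while a compile that names no listed CPU but enables
   AltiVec falls through to ".machine ppc7400".  */
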
33048 #endif /* TARGET_MACHO */
33049
33050 #if TARGET_ELF
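/* Implement TARGET_ASM_RELOC_RW_MASK.  Return the mask of relocation
   kinds (by convention 1 = relative, 2 = absolute) that force data
   into a writable section: everything under -fpic, absolute
   relocations for the AIX and ELFv2 ABIs, and nothing otherwise.  */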
33051 static int
33052 rs6000_elf_reloc_rw_mask (void)
33053 {
33054 if (flag_pic)
33055 return 3;
33056 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33057 return 2;
33058 else
33059 return 0;
33060 }
33061
33062 /* Record an element in the table of global constructors. SYMBOL is
33063 a SYMBOL_REF of the function to be called; PRIORITY is a number
33064 between 0 and MAX_INIT_PRIORITY.
33065
33066 This differs from default_named_section_asm_out_constructor in
33067 that we have special handling for -mrelocatable. */
33068
33069 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33070 static void
33071 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33072 {
33073 const char *section = ".ctors";
33074 char buf[18];
33075
33076 if (priority != DEFAULT_INIT_PRIORITY)
33077 {
33078 sprintf (buf, ".ctors.%.5u",
33079 /* Invert the numbering so the linker puts us in the proper
33080 order; constructors are run from right to left, and the
33081 linker sorts in increasing order. */
33082 MAX_INIT_PRIORITY - priority);
33083 section = buf;
33084 }
33085
33086 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33087 assemble_align (POINTER_SIZE);
33088
33089 if (DEFAULT_ABI == ABI_V4
33090 && (TARGET_RELOCATABLE || flag_pic > 1))
33091 {
33092 fputs ("\t.long (", asm_out_file);
33093 output_addr_const (asm_out_file, symbol);
33094 fputs (")@fixup\n", asm_out_file);
33095 }
33096 else
33097 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33098 }
33099
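/* For example, a constructor with priority 100 is placed in section
   ".ctors.65435" (MAX_INIT_PRIORITY, 65535, minus 100), and under
   -mrelocatable its entry is emitted as "\t.long (symbol)@fixup"
   instead of a plain pointer.  */
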
33100 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33101 static void
33102 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33103 {
33104 const char *section = ".dtors";
33105 char buf[18];
33106
33107 if (priority != DEFAULT_INIT_PRIORITY)
33108 {
33109 sprintf (buf, ".dtors.%.5u",
33110 /* Invert the numbering so the linker puts us in the proper
33111 	       order; destructors are run from left to right, and the
33112 linker sorts in increasing order. */
33113 MAX_INIT_PRIORITY - priority);
33114 section = buf;
33115 }
33116
33117 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33118 assemble_align (POINTER_SIZE);
33119
33120 if (DEFAULT_ABI == ABI_V4
33121 && (TARGET_RELOCATABLE || flag_pic > 1))
33122 {
33123 fputs ("\t.long (", asm_out_file);
33124 output_addr_const (asm_out_file, symbol);
33125 fputs (")@fixup\n", asm_out_file);
33126 }
33127 else
33128 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33129 }
33130
33131 void
33132 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33133 {
33134 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33135 {
33136 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33137 ASM_OUTPUT_LABEL (file, name);
33138 fputs (DOUBLE_INT_ASM_OP, file);
33139 rs6000_output_function_entry (file, name);
33140 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33141 if (DOT_SYMBOLS)
33142 {
33143 fputs ("\t.size\t", file);
33144 assemble_name (file, name);
33145 fputs (",24\n\t.type\t.", file);
33146 assemble_name (file, name);
33147 fputs (",@function\n", file);
33148 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33149 {
33150 fputs ("\t.globl\t.", file);
33151 assemble_name (file, name);
33152 putc ('\n', file);
33153 }
33154 }
33155 else
33156 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33157 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33158 rs6000_output_function_entry (file, name);
33159 fputs (":\n", file);
33160 return;
33161 }
33162
33163 int uses_toc;
33164 if (DEFAULT_ABI == ABI_V4
33165 && (TARGET_RELOCATABLE || flag_pic > 1)
33166 && !TARGET_SECURE_PLT
33167 && (!constant_pool_empty_p () || crtl->profile)
33168 && (uses_toc = uses_TOC ()))
33169 {
33170 char buf[256];
33171
33172 if (uses_toc == 2)
33173 switch_to_other_text_partition ();
33174 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33175
33176 fprintf (file, "\t.long ");
33177 assemble_name (file, toc_label_name);
33178 need_toc_init = 1;
33179 putc ('-', file);
33180 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33181 assemble_name (file, buf);
33182 putc ('\n', file);
33183 if (uses_toc == 2)
33184 switch_to_other_text_partition ();
33185 }
33186
33187 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33188 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33189
33190 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33191 {
33192 char buf[256];
33193
33194 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33195
33196 fprintf (file, "\t.quad .TOC.-");
33197 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33198 assemble_name (file, buf);
33199 putc ('\n', file);
33200 }
33201
33202 if (DEFAULT_ABI == ABI_AIX)
33203 {
33204 const char *desc_name, *orig_name;
33205
33206 orig_name = (*targetm.strip_name_encoding) (name);
33207 desc_name = orig_name;
33208 while (*desc_name == '.')
33209 desc_name++;
33210
33211 if (TREE_PUBLIC (decl))
33212 fprintf (file, "\t.globl %s\n", desc_name);
33213
33214 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33215 fprintf (file, "%s:\n", desc_name);
33216 fprintf (file, "\t.long %s\n", orig_name);
33217 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33218 fputs ("\t.long 0\n", file);
33219 fprintf (file, "\t.previous\n");
33220 }
33221 ASM_OUTPUT_LABEL (file, name);
33222 }
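
/* With ELFv1 on 64-bit, the block above emits a function descriptor
   along these lines for a public function "foo":

	.section	".opd","aw"
	.align 3
   foo:
	.quad	.foo,.TOC.@tocbase,0
	.previous
	.size	foo,24
	.type	.foo,@function
	.globl	.foo
   .foo:

   so "foo" names the three-doubleword descriptor while ".foo" labels
   the code entry point itself.  */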
33223
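/* Implement TARGET_ASM_FILE_END on ELF targets: emit .gnu_attribute
   directives describing the floating-point, vector and struct-return
   ABI in use, plus the exec-stack, split-stack and CPU-builtin
   markers.  For example, a hard-float compile that passes IBM
   128-bit long double gets fp = 1 | 1*4 = 5, i.e.
   "\t.gnu_attribute 4, 5".  */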
33224 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33225 static void
33226 rs6000_elf_file_end (void)
33227 {
33228 #ifdef HAVE_AS_GNU_ATTRIBUTE
33229 /* ??? The value emitted depends on options active at file end.
33230 Assume anyone using #pragma or attributes that might change
33231 options knows what they are doing. */
33232 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33233 && rs6000_passes_float)
33234 {
33235 int fp;
33236
33237 if (TARGET_HARD_FLOAT)
33238 fp = 1;
33239 else
33240 fp = 2;
33241 if (rs6000_passes_long_double)
33242 {
33243 if (!TARGET_LONG_DOUBLE_128)
33244 fp |= 2 * 4;
33245 else if (TARGET_IEEEQUAD)
33246 fp |= 3 * 4;
33247 else
33248 fp |= 1 * 4;
33249 }
33250 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33251 }
33252 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33253 {
33254 if (rs6000_passes_vector)
33255 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33256 (TARGET_ALTIVEC_ABI ? 2 : 1));
33257 if (rs6000_returns_struct)
33258 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33259 aix_struct_return ? 2 : 1);
33260 }
33261 #endif
33262 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33263 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33264 file_end_indicate_exec_stack ();
33265 #endif
33266
33267 if (flag_split_stack)
33268 file_end_indicate_split_stack ();
33269
33270 if (cpu_builtin_p)
33271 {
33272 /* We have expanded a CPU builtin, so we need to emit a reference to
33273 the special symbol that LIBC uses to declare it supports the
33274 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33275 switch_to_section (data_section);
33276 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33277 fprintf (asm_out_file, "\t%s %s\n",
33278 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33279 }
33280 }
33281 #endif
33282
33283 #if TARGET_XCOFF
33284
33285 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33286 #define HAVE_XCOFF_DWARF_EXTRAS 0
33287 #endif
33288
33289 static enum unwind_info_type
33290 rs6000_xcoff_debug_unwind_info (void)
33291 {
33292 return UI_NONE;
33293 }
33294
33295 static void
33296 rs6000_xcoff_asm_output_anchor (rtx symbol)
33297 {
33298 char buffer[100];
33299
33300 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33301 SYMBOL_REF_BLOCK_OFFSET (symbol));
33302 fprintf (asm_out_file, "%s", SET_ASM_OP);
33303 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33304 fprintf (asm_out_file, ",");
33305 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33306 fprintf (asm_out_file, "\n");
33307 }
33308
33309 static void
33310 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33311 {
33312 fputs (GLOBAL_ASM_OP, stream);
33313 RS6000_OUTPUT_BASENAME (stream, name);
33314 putc ('\n', stream);
33315 }
33316
33317 /* A get_unnamed_section callback, used for read-only sections.
33318    DIRECTIVE points to the section name string.  */
33319
33320 static void
33321 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33322 {
33323 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33324 *(const char *const *) directive,
33325 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33326 }
33327
33328 /* Likewise for read-write sections. */
33329
33330 static void
33331 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33332 {
33333 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33334 *(const char *const *) directive,
33335 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33336 }
33337
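/* Likewise for thread-local data sections, which use the TL mapping
   class.  */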
33338 static void
33339 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33340 {
33341 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33342 *(const char *const *) directive,
33343 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33344 }
33345
33346 /* A get_unnamed_section callback, used for switching to toc_section. */
33347
33348 static void
33349 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33350 {
33351 if (TARGET_MINIMAL_TOC)
33352 {
33353 /* toc_section is always selected at least once from
33354 rs6000_xcoff_file_start, so this is guaranteed to
33355 always be defined once and only once in each file. */
33356 if (!toc_initialized)
33357 {
33358 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33359 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33360 toc_initialized = 1;
33361 }
33362 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33363 (TARGET_32BIT ? "" : ",3"));
33364 }
33365 else
33366 fputs ("\t.toc\n", asm_out_file);
33367 }
33368
33369 /* Implement TARGET_ASM_INIT_SECTIONS. */
33370
33371 static void
33372 rs6000_xcoff_asm_init_sections (void)
33373 {
33374 read_only_data_section
33375 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33376 &xcoff_read_only_section_name);
33377
33378 private_data_section
33379 = get_unnamed_section (SECTION_WRITE,
33380 rs6000_xcoff_output_readwrite_section_asm_op,
33381 &xcoff_private_data_section_name);
33382
33383 tls_data_section
33384 = get_unnamed_section (SECTION_TLS,
33385 rs6000_xcoff_output_tls_section_asm_op,
33386 &xcoff_tls_data_section_name);
33387
33388 tls_private_data_section
33389 = get_unnamed_section (SECTION_TLS,
33390 rs6000_xcoff_output_tls_section_asm_op,
33391 &xcoff_private_data_section_name);
33392
33393 read_only_private_data_section
33394 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33395 &xcoff_private_data_section_name);
33396
33397 toc_section
33398 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33399
33400 readonly_data_section = read_only_data_section;
33401 }
33402
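/* Implement TARGET_ASM_RELOC_RW_MASK.  AIX code is always considered
   position-independent, so any relocated datum must live in a
   read-write section.  */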
33403 static int
33404 rs6000_xcoff_reloc_rw_mask (void)
33405 {
33406 return 3;
33407 }
33408
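/* Implement TARGET_ASM_NAMED_SECTION.  Map the section FLAGS onto an
   XCOFF storage-mapping class and emit a .csect directive; e.g. a
   writable named section "foo" with 8-byte alignment comes out as
   "\t.csect foo[RW],3", where the final operand is the log2
   alignment stored in the SECTION_ENTSIZE bits by
   rs6000_xcoff_section_type_flags below.  */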
33409 static void
33410 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33411 tree decl ATTRIBUTE_UNUSED)
33412 {
33413 int smclass;
33414 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33415
33416 if (flags & SECTION_EXCLUDE)
33417 smclass = 4;
33418 else if (flags & SECTION_DEBUG)
33419 {
33420 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33421 return;
33422 }
33423 else if (flags & SECTION_CODE)
33424 smclass = 0;
33425 else if (flags & SECTION_TLS)
33426 smclass = 3;
33427 else if (flags & SECTION_WRITE)
33428 smclass = 2;
33429 else
33430 smclass = 1;
33431
33432 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33433 (flags & SECTION_CODE) ? "." : "",
33434 name, suffix[smclass], flags & SECTION_ENTSIZE);
33435 }
33436
33437 #define IN_NAMED_SECTION(DECL) \
33438 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33439 && DECL_SECTION_NAME (DECL) != NULL)
33440
33441 static section *
33442 rs6000_xcoff_select_section (tree decl, int reloc,
33443 unsigned HOST_WIDE_INT align)
33444 {
33445 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33446      a named section.  */
33447 if (align > BIGGEST_ALIGNMENT)
33448 {
33449 resolve_unique_section (decl, reloc, true);
33450 if (IN_NAMED_SECTION (decl))
33451 return get_named_section (decl, NULL, reloc);
33452 }
33453
33454 if (decl_readonly_section (decl, reloc))
33455 {
33456 if (TREE_PUBLIC (decl))
33457 return read_only_data_section;
33458 else
33459 return read_only_private_data_section;
33460 }
33461 else
33462 {
33463 #if HAVE_AS_TLS
33464 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33465 {
33466 if (TREE_PUBLIC (decl))
33467 return tls_data_section;
33468 else if (bss_initializer_p (decl))
33469 {
33470 /* Convert to COMMON to emit in BSS. */
33471 DECL_COMMON (decl) = 1;
33472 return tls_comm_section;
33473 }
33474 else
33475 return tls_private_data_section;
33476 }
33477 else
33478 #endif
33479 if (TREE_PUBLIC (decl))
33480 return data_section;
33481 else
33482 return private_data_section;
33483 }
33484 }
33485
33486 static void
33487 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33488 {
33489 const char *name;
33490
33491 /* Use select_section for private data and uninitialized data with
33492 alignment <= BIGGEST_ALIGNMENT. */
33493 if (!TREE_PUBLIC (decl)
33494 || DECL_COMMON (decl)
33495 || (DECL_INITIAL (decl) == NULL_TREE
33496 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33497 || DECL_INITIAL (decl) == error_mark_node
33498 || (flag_zero_initialized_in_bss
33499 && initializer_zerop (DECL_INITIAL (decl))))
33500 return;
33501
33502 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33503 name = (*targetm.strip_name_encoding) (name);
33504 set_decl_section_name (decl, name);
33505 }
33506
33507 /* Select the section for a constant in the constant pool.
33508
33509 On RS/6000, all constants are in the private read-only data area.
33510 However, if this is being placed in the TOC it must be output as a
33511 toc entry. */
33512
33513 static section *
33514 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33515 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33516 {
33517 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33518 return toc_section;
33519 else
33520 return read_only_private_data_section;
33521 }
33522
33523 /* Remove any trailing [DS] or the like from the symbol name. */
33524
33525 static const char *
33526 rs6000_xcoff_strip_name_encoding (const char *name)
33527 {
33528 size_t len;
33529 if (*name == '*')
33530 name++;
33531 len = strlen (name);
33532 if (name[len - 1] == ']')
33533 return ggc_alloc_string (name, len - 4);
33534 else
33535 return name;
33536 }
33537
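/* So "foo[DS]" strips to "foo", a leading '*' is dropped, and names
   with no trailing mapping class are returned unchanged.  */
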
33538 /* Section attributes. AIX is always PIC. */
33539
33540 static unsigned int
33541 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33542 {
33543 unsigned int align;
33544 unsigned int flags = default_section_type_flags (decl, name, reloc);
33545
33546   /* Align to at least the word size (MIN_UNITS_PER_WORD bytes).  */
33547 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33548 align = MIN_UNITS_PER_WORD;
33549 else
33550 /* Increase alignment of large objects if not already stricter. */
33551 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33552 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33553 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33554
33555 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33556 }
33557
33558 /* Output at beginning of assembler file.
33559
33560 Initialize the section names for the RS/6000 at this point.
33561
33562 Specify filename, including full path, to assembler.
33563
33564 We want to go into the TOC section so at least one .toc will be emitted.
33565 Also, in order to output proper .bs/.es pairs, we need at least one static
33566 [RW] section emitted.
33567
33568 Finally, declare mcount when profiling to make the assembler happy. */
33569
33570 static void
33571 rs6000_xcoff_file_start (void)
33572 {
33573 rs6000_gen_section_name (&xcoff_bss_section_name,
33574 main_input_filename, ".bss_");
33575 rs6000_gen_section_name (&xcoff_private_data_section_name,
33576 main_input_filename, ".rw_");
33577 rs6000_gen_section_name (&xcoff_read_only_section_name,
33578 main_input_filename, ".ro_");
33579 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33580 main_input_filename, ".tls_");
33581 rs6000_gen_section_name (&xcoff_tbss_section_name,
33582 main_input_filename, ".tbss_[UL]");
33583
33584 fputs ("\t.file\t", asm_out_file);
33585 output_quoted_string (asm_out_file, main_input_filename);
33586 fputc ('\n', asm_out_file);
33587 if (write_symbols != NO_DEBUG)
33588 switch_to_section (private_data_section);
33589 switch_to_section (toc_section);
33590 switch_to_section (text_section);
33591 if (profile_flag)
33592 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33593 rs6000_file_start ();
33594 }
33595
33596 /* Output at end of assembler file.
33597 On the RS/6000, referencing data should automatically pull in text. */
33598
33599 static void
33600 rs6000_xcoff_file_end (void)
33601 {
33602 switch_to_section (text_section);
33603 fputs ("_section_.text:\n", asm_out_file);
33604 switch_to_section (data_section);
33605 fputs (TARGET_32BIT
33606 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33607 asm_out_file);
33608 }
33609
33610 struct declare_alias_data
33611 {
33612 FILE *file;
33613 bool function_descriptor;
33614 };
33615
33616 /* Declare alias N.  A helper for symtab_node::call_for_symbol_and_aliases.  */
33617
33618 static bool
33619 rs6000_declare_alias (struct symtab_node *n, void *d)
33620 {
33621 struct declare_alias_data *data = (struct declare_alias_data *)d;
33622   /* Main symbol is output specially, because the varasm machinery does part
33623      of the job for us; we do not need to declare .globl/.lglobl and such.  */
33624 if (!n->alias || n->weakref)
33625 return false;
33626
33627 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33628 return false;
33629
33630   /* Prevent assemble_alias from trying to use the .set pseudo-op, which
33631      does not behave as the middle-end expects.  */
33632 TREE_ASM_WRITTEN (n->decl) = true;
33633
33634 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33635 char *buffer = (char *) alloca (strlen (name) + 2);
33636 char *p;
33637 int dollar_inside = 0;
33638
33639 strcpy (buffer, name);
33640 p = strchr (buffer, '$');
33641 while (p) {
33642 *p = '_';
33643 dollar_inside++;
33644 p = strchr (p + 1, '$');
33645 }
33646 if (TREE_PUBLIC (n->decl))
33647 {
33648 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33649 {
33650 if (dollar_inside) {
33651 if (data->function_descriptor)
33652 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33653 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33654 }
33655 if (data->function_descriptor)
33656 {
33657 fputs ("\t.globl .", data->file);
33658 RS6000_OUTPUT_BASENAME (data->file, buffer);
33659 putc ('\n', data->file);
33660 }
33661 fputs ("\t.globl ", data->file);
33662 RS6000_OUTPUT_BASENAME (data->file, buffer);
33663 putc ('\n', data->file);
33664 }
33665 #ifdef ASM_WEAKEN_DECL
33666 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33667 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33668 #endif
33669 }
33670 else
33671 {
33672 if (dollar_inside)
33673 {
33674 if (data->function_descriptor)
33675 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33676 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33677 }
33678 if (data->function_descriptor)
33679 {
33680 fputs ("\t.lglobl .", data->file);
33681 RS6000_OUTPUT_BASENAME (data->file, buffer);
33682 putc ('\n', data->file);
33683 }
33684 fputs ("\t.lglobl ", data->file);
33685 RS6000_OUTPUT_BASENAME (data->file, buffer);
33686 putc ('\n', data->file);
33687 }
33688 if (data->function_descriptor)
33689 fputs (".", data->file);
33690 RS6000_OUTPUT_BASENAME (data->file, buffer);
33691 fputs (":\n", data->file);
33692 return false;
33693 }
33694
33695
33696 #ifdef HAVE_GAS_HIDDEN
33697 /* Helper function to calculate visibility of a DECL
33698 and return the value as a const string. */
33699
33700 static const char *
33701 rs6000_xcoff_visibility (tree decl)
33702 {
33703 static const char * const visibility_types[] = {
33704 "", ",protected", ",hidden", ",internal"
33705 };
33706
33707 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33708 return visibility_types[vis];
33709 }
33710 #endif
33711
33712
33713 /* This function produces the initial definition of a function name.
33714 On the RS/6000, we need to place an extra '.' in the function name and
33715 output the function descriptor.
33716 Dollar signs are converted to underscores.
33717
33718 The csect for the function will have already been created when
33719 text_section was selected. We do have to go back to that csect, however.
33720
33721    The third and fourth parameters to the .function pseudo-op (2 and 0
33722    below) are placeholders which no longer have any use.
33723
33724    Because the AIX assembler's .set command has unexpected semantics, we output
33725 all aliases as alternative labels in front of the definition. */
33726
33727 void
33728 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33729 {
33730 char *buffer = (char *) alloca (strlen (name) + 1);
33731 char *p;
33732 int dollar_inside = 0;
33733 struct declare_alias_data data = {file, false};
33734
33735 strcpy (buffer, name);
33736 p = strchr (buffer, '$');
33737 while (p) {
33738 *p = '_';
33739 dollar_inside++;
33740 p = strchr (p + 1, '$');
33741 }
33742 if (TREE_PUBLIC (decl))
33743 {
33744 if (!RS6000_WEAK || !DECL_WEAK (decl))
33745 {
33746 if (dollar_inside) {
33747 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33748 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33749 }
33750 fputs ("\t.globl .", file);
33751 RS6000_OUTPUT_BASENAME (file, buffer);
33752 #ifdef HAVE_GAS_HIDDEN
33753 fputs (rs6000_xcoff_visibility (decl), file);
33754 #endif
33755 putc ('\n', file);
33756 }
33757 }
33758 else
33759 {
33760 if (dollar_inside) {
33761 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33762 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33763 }
33764 fputs ("\t.lglobl .", file);
33765 RS6000_OUTPUT_BASENAME (file, buffer);
33766 putc ('\n', file);
33767 }
33768 fputs ("\t.csect ", file);
33769 RS6000_OUTPUT_BASENAME (file, buffer);
33770 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33771 RS6000_OUTPUT_BASENAME (file, buffer);
33772 fputs (":\n", file);
33773 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33774 &data, true);
33775 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33776 RS6000_OUTPUT_BASENAME (file, buffer);
33777 fputs (", TOC[tc0], 0\n", file);
33778 in_section = NULL;
33779 switch_to_section (function_section (decl));
33780 putc ('.', file);
33781 RS6000_OUTPUT_BASENAME (file, buffer);
33782 fputs (":\n", file);
33783 data.function_descriptor = true;
33784 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33785 &data, true);
33786 if (!DECL_IGNORED_P (decl))
33787 {
33788 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33789 xcoffout_declare_function (file, decl, buffer);
33790 else if (write_symbols == DWARF2_DEBUG)
33791 {
33792 name = (*targetm.strip_name_encoding) (name);
33793 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33794 }
33795 }
33796 return;
33797 }
33798
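/* For a public 32-bit function "foo", the code above produces output
   of this shape:

	.globl .foo
	.csect foo[DS]
   foo:
	.long .foo, TOC[tc0], 0
	.csect .text[PR]
   .foo:

   with any aliases emitted as additional labels next to "foo:" and
   ".foo:" by rs6000_declare_alias.  */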
33799
33800 /* Output assembly language to globalize a symbol from a DECL,
33801 possibly with visibility. */
33802
33803 void
33804 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
33805 {
33806 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
33807 fputs (GLOBAL_ASM_OP, stream);
33808 RS6000_OUTPUT_BASENAME (stream, name);
33809 #ifdef HAVE_GAS_HIDDEN
33810 fputs (rs6000_xcoff_visibility (decl), stream);
33811 #endif
33812 putc ('\n', stream);
33813 }
33814
33815 /* Output assembly language to define a symbol as COMMON from a DECL,
33816 possibly with visibility. */
33817
33818 void
33819 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
33820 tree decl ATTRIBUTE_UNUSED,
33821 const char *name,
33822 unsigned HOST_WIDE_INT size,
33823 unsigned HOST_WIDE_INT align)
33824 {
33825 unsigned HOST_WIDE_INT align2 = 2;
33826
33827 if (align > 32)
33828 align2 = floor_log2 (align / BITS_PER_UNIT);
33829 else if (size > 4)
33830 align2 = 3;
33831
33832 fputs (COMMON_ASM_OP, stream);
33833 RS6000_OUTPUT_BASENAME (stream, name);
33834
33835 fprintf (stream,
33836 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
33837 size, align2);
33838
33839 #ifdef HAVE_GAS_HIDDEN
33840 if (decl != NULL)
33841 fputs (rs6000_xcoff_visibility (decl), stream);
33842 #endif
33843 putc ('\n', stream);
33844 }
33845
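/* For instance, an 8-byte object with 64-bit alignment gets
   align2 = 3 and is emitted as ".comm name,8,3", followed by the
   visibility suffix if any.  */
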
33846 /* This function produces the initial definition of an object (variable) name.
33847    Because the AIX assembler's .set command has unexpected semantics, we output
33848 all aliases as alternative labels in front of the definition. */
33849
33850 void
33851 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
33852 {
33853 struct declare_alias_data data = {file, false};
33854 RS6000_OUTPUT_BASENAME (file, name);
33855 fputs (":\n", file);
33856 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33857 &data, true);
33858 }
33859
33860 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'.  */
33861
33862 void
33863 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
33864 {
33865 fputs (integer_asm_op (size, FALSE), file);
33866 assemble_name (file, label);
33867 fputs ("-$", file);
33868 }
33869
33870 /* Output a symbol offset relative to the dbase for the current object.
33871 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
33872 signed offsets.
33873
33874 __gcc_unwind_dbase is embedded in all executables/libraries through
33875 libgcc/config/rs6000/crtdbase.S. */
33876
33877 void
33878 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
33879 {
33880 fputs (integer_asm_op (size, FALSE), file);
33881 assemble_name (file, label);
33882 fputs("-__gcc_unwind_dbase", file);
33883 }
33884
33885 #ifdef HAVE_AS_TLS
33886 static void
33887 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
33888 {
33889 rtx symbol;
33890 int flags;
33891 const char *symname;
33892
33893 default_encode_section_info (decl, rtl, first);
33894
33895 /* Careful not to prod global register variables. */
33896 if (!MEM_P (rtl))
33897 return;
33898 symbol = XEXP (rtl, 0);
33899 if (GET_CODE (symbol) != SYMBOL_REF)
33900 return;
33901
33902 flags = SYMBOL_REF_FLAGS (symbol);
33903
33904 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33905 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
33906
33907 SYMBOL_REF_FLAGS (symbol) = flags;
33908
33909 /* Append mapping class to extern decls. */
33910 symname = XSTR (symbol, 0);
33911 if (decl /* sync condition with assemble_external () */
33912 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
33913 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
33914 || TREE_CODE (decl) == FUNCTION_DECL)
33915 && symname[strlen (symname) - 1] != ']')
33916 {
33917 char *newname = (char *) alloca (strlen (symname) + 5);
33918 strcpy (newname, symname);
33919 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
33920 ? "[DS]" : "[UA]"));
33921 XSTR (symbol, 0) = ggc_strdup (newname);
33922 }
33923 }
33924 #endif /* HAVE_AS_TLS */
33925 #endif /* TARGET_XCOFF */
33926
33927 void
33928 rs6000_asm_weaken_decl (FILE *stream, tree decl,
33929 const char *name, const char *val)
33930 {
33931 fputs ("\t.weak\t", stream);
33932 RS6000_OUTPUT_BASENAME (stream, name);
33933 if (decl && TREE_CODE (decl) == FUNCTION_DECL
33934 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
33935 {
33936 if (TARGET_XCOFF)
33937 fputs ("[DS]", stream);
33938 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
33939 if (TARGET_XCOFF)
33940 fputs (rs6000_xcoff_visibility (decl), stream);
33941 #endif
33942 fputs ("\n\t.weak\t.", stream);
33943 RS6000_OUTPUT_BASENAME (stream, name);
33944 }
33945 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
33946 if (TARGET_XCOFF)
33947 fputs (rs6000_xcoff_visibility (decl), stream);
33948 #endif
33949 fputc ('\n', stream);
33950 if (val)
33951 {
33952 #ifdef ASM_OUTPUT_DEF
33953 ASM_OUTPUT_DEF (stream, name, val);
33954 #endif
33955 if (decl && TREE_CODE (decl) == FUNCTION_DECL
33956 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
33957 {
33958 fputs ("\t.set\t.", stream);
33959 RS6000_OUTPUT_BASENAME (stream, name);
33960 fputs (",.", stream);
33961 RS6000_OUTPUT_BASENAME (stream, val);
33962 fputc ('\n', stream);
33963 }
33964 }
33965 }
33966
33967
33968 /* Return true if INSN should not be copied. */
33969
33970 static bool
33971 rs6000_cannot_copy_insn_p (rtx_insn *insn)
33972 {
33973 return recog_memoized (insn) >= 0
33974 && get_attr_cannot_copy (insn);
33975 }
33976
33977 /* Compute a (partial) cost for rtx X. Return true if the complete
33978 cost has been computed, and false if subexpressions should be
33979 scanned. In either case, *TOTAL contains the cost result. */
33980
33981 static bool
33982 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
33983 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
33984 {
33985 int code = GET_CODE (x);
33986
33987 switch (code)
33988 {
33989 /* On the RS/6000, if it is valid in the insn, it is free. */
33990 case CONST_INT:
33991 if (((outer_code == SET
33992 || outer_code == PLUS
33993 || outer_code == MINUS)
33994 && (satisfies_constraint_I (x)
33995 || satisfies_constraint_L (x)))
33996 || (outer_code == AND
33997 && (satisfies_constraint_K (x)
33998 || (mode == SImode
33999 ? satisfies_constraint_L (x)
34000 : satisfies_constraint_J (x))))
34001 || ((outer_code == IOR || outer_code == XOR)
34002 && (satisfies_constraint_K (x)
34003 || (mode == SImode
34004 ? satisfies_constraint_L (x)
34005 : satisfies_constraint_J (x))))
34006 || outer_code == ASHIFT
34007 || outer_code == ASHIFTRT
34008 || outer_code == LSHIFTRT
34009 || outer_code == ROTATE
34010 || outer_code == ROTATERT
34011 || outer_code == ZERO_EXTRACT
34012 || (outer_code == MULT
34013 && satisfies_constraint_I (x))
34014 || ((outer_code == DIV || outer_code == UDIV
34015 || outer_code == MOD || outer_code == UMOD)
34016 && exact_log2 (INTVAL (x)) >= 0)
34017 || (outer_code == COMPARE
34018 && (satisfies_constraint_I (x)
34019 || satisfies_constraint_K (x)))
34020 || ((outer_code == EQ || outer_code == NE)
34021 && (satisfies_constraint_I (x)
34022 || satisfies_constraint_K (x)
34023 || (mode == SImode
34024 ? satisfies_constraint_L (x)
34025 : satisfies_constraint_J (x))))
34026 || (outer_code == GTU
34027 && satisfies_constraint_I (x))
34028 || (outer_code == LTU
34029 && satisfies_constraint_P (x)))
34030 {
34031 *total = 0;
34032 return true;
34033 }
34034 else if ((outer_code == PLUS
34035 && reg_or_add_cint_operand (x, VOIDmode))
34036 || (outer_code == MINUS
34037 && reg_or_sub_cint_operand (x, VOIDmode))
34038 || ((outer_code == SET
34039 || outer_code == IOR
34040 || outer_code == XOR)
34041 && (INTVAL (x)
34042 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34043 {
34044 *total = COSTS_N_INSNS (1);
34045 return true;
34046 }
34047 /* FALLTHRU */
34048
34049 case CONST_DOUBLE:
34050 case CONST_WIDE_INT:
34051 case CONST:
34052 case HIGH:
34053 case SYMBOL_REF:
34054 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34055 return true;
34056
34057 case MEM:
34058 /* When optimizing for size, MEM should be slightly more expensive
34059 	 than generating the address, e.g., (plus (reg) (const)).
34060 L1 cache latency is about two instructions. */
34061 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34062 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34063 *total += COSTS_N_INSNS (100);
34064 return true;
34065
34066 case LABEL_REF:
34067 *total = 0;
34068 return true;
34069
34070 case PLUS:
34071 case MINUS:
34072 if (FLOAT_MODE_P (mode))
34073 *total = rs6000_cost->fp;
34074 else
34075 *total = COSTS_N_INSNS (1);
34076 return false;
34077
34078 case MULT:
34079 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34080 && satisfies_constraint_I (XEXP (x, 1)))
34081 {
34082 if (INTVAL (XEXP (x, 1)) >= -256
34083 && INTVAL (XEXP (x, 1)) <= 255)
34084 *total = rs6000_cost->mulsi_const9;
34085 else
34086 *total = rs6000_cost->mulsi_const;
34087 }
34088 else if (mode == SFmode)
34089 *total = rs6000_cost->fp;
34090 else if (FLOAT_MODE_P (mode))
34091 *total = rs6000_cost->dmul;
34092 else if (mode == DImode)
34093 *total = rs6000_cost->muldi;
34094 else
34095 *total = rs6000_cost->mulsi;
34096 return false;
34097
34098 case FMA:
34099 if (mode == SFmode)
34100 *total = rs6000_cost->fp;
34101 else
34102 *total = rs6000_cost->dmul;
34103 break;
34104
34105 case DIV:
34106 case MOD:
34107 if (FLOAT_MODE_P (mode))
34108 {
34109 *total = mode == DFmode ? rs6000_cost->ddiv
34110 : rs6000_cost->sdiv;
34111 return false;
34112 }
34113 /* FALLTHRU */
34114
34115 case UDIV:
34116 case UMOD:
34117 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34118 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34119 {
34120 if (code == DIV || code == MOD)
34121 /* Shift, addze */
34122 *total = COSTS_N_INSNS (2);
34123 else
34124 /* Shift */
34125 *total = COSTS_N_INSNS (1);
34126 }
34127 else
34128 {
34129 if (GET_MODE (XEXP (x, 1)) == DImode)
34130 *total = rs6000_cost->divdi;
34131 else
34132 *total = rs6000_cost->divsi;
34133 }
34134 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34135 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34136 *total += COSTS_N_INSNS (2);
34137 return false;
34138
34139 case CTZ:
34140 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34141 return false;
34142
34143 case FFS:
34144 *total = COSTS_N_INSNS (4);
34145 return false;
34146
34147 case POPCOUNT:
34148 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34149 return false;
34150
34151 case PARITY:
34152 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34153 return false;
34154
34155 case NOT:
34156 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34157 *total = 0;
34158 else
34159 *total = COSTS_N_INSNS (1);
34160 return false;
34161
34162 case AND:
34163 if (CONST_INT_P (XEXP (x, 1)))
34164 {
34165 rtx left = XEXP (x, 0);
34166 rtx_code left_code = GET_CODE (left);
34167
34168 /* rotate-and-mask: 1 insn. */
34169 if ((left_code == ROTATE
34170 || left_code == ASHIFT
34171 || left_code == LSHIFTRT)
34172 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34173 {
34174 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34175 if (!CONST_INT_P (XEXP (left, 1)))
34176 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34177 *total += COSTS_N_INSNS (1);
34178 return true;
34179 }
34180
34181 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34182 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34183 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34184 || (val & 0xffff) == val
34185 || (val & 0xffff0000) == val
34186 || ((val & 0xffff) == 0 && mode == SImode))
34187 {
34188 *total = rtx_cost (left, mode, AND, 0, speed);
34189 *total += COSTS_N_INSNS (1);
34190 return true;
34191 }
34192
34193 /* 2 insns. */
34194 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34195 {
34196 *total = rtx_cost (left, mode, AND, 0, speed);
34197 *total += COSTS_N_INSNS (2);
34198 return true;
34199 }
34200 }
34201
34202 *total = COSTS_N_INSNS (1);
34203 return false;
34204
34205 case IOR:
34206 /* FIXME */
34207 *total = COSTS_N_INSNS (1);
34208 return true;
34209
34210 case CLZ:
34211 case XOR:
34212 case ZERO_EXTRACT:
34213 *total = COSTS_N_INSNS (1);
34214 return false;
34215
34216 case ASHIFT:
34217       /* The EXTSWSLI instruction combines the sign extend and the shift.
34218 	 Don't count them separately within the insn.  */
34219 if (TARGET_EXTSWSLI && mode == DImode
34220 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34221 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34222 {
34223 *total = 0;
34224 return false;
34225 }
34226 /* fall through */
34227
34228 case ASHIFTRT:
34229 case LSHIFTRT:
34230 case ROTATE:
34231 case ROTATERT:
34232 /* Handle mul_highpart. */
34233 if (outer_code == TRUNCATE
34234 && GET_CODE (XEXP (x, 0)) == MULT)
34235 {
34236 if (mode == DImode)
34237 *total = rs6000_cost->muldi;
34238 else
34239 *total = rs6000_cost->mulsi;
34240 return true;
34241 }
34242 else if (outer_code == AND)
34243 *total = 0;
34244 else
34245 *total = COSTS_N_INSNS (1);
34246 return false;
34247
34248 case SIGN_EXTEND:
34249 case ZERO_EXTEND:
34250 if (GET_CODE (XEXP (x, 0)) == MEM)
34251 *total = 0;
34252 else
34253 *total = COSTS_N_INSNS (1);
34254 return false;
34255
34256 case COMPARE:
34257 case NEG:
34258 case ABS:
34259 if (!FLOAT_MODE_P (mode))
34260 {
34261 *total = COSTS_N_INSNS (1);
34262 return false;
34263 }
34264 /* FALLTHRU */
34265
34266 case FLOAT:
34267 case UNSIGNED_FLOAT:
34268 case FIX:
34269 case UNSIGNED_FIX:
34270 case FLOAT_TRUNCATE:
34271 *total = rs6000_cost->fp;
34272 return false;
34273
34274 case FLOAT_EXTEND:
34275 if (mode == DFmode)
34276 *total = rs6000_cost->sfdf_convert;
34277 else
34278 *total = rs6000_cost->fp;
34279 return false;
34280
34281 case UNSPEC:
34282 switch (XINT (x, 1))
34283 {
34284 case UNSPEC_FRSP:
34285 *total = rs6000_cost->fp;
34286 return true;
34287
34288 default:
34289 break;
34290 }
34291 break;
34292
34293 case CALL:
34294 case IF_THEN_ELSE:
34295 if (!speed)
34296 {
34297 *total = COSTS_N_INSNS (1);
34298 return true;
34299 }
34300 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34301 {
34302 *total = rs6000_cost->fp;
34303 return false;
34304 }
34305 break;
34306
34307 case NE:
34308 case EQ:
34309 case GTU:
34310 case LTU:
34311 /* Carry bit requires mode == Pmode.
34312 NEG or PLUS already counted so only add one. */
34313 if (mode == Pmode
34314 && (outer_code == NEG || outer_code == PLUS))
34315 {
34316 *total = COSTS_N_INSNS (1);
34317 return true;
34318 }
34319 /* FALLTHRU */
34320
34321 case GT:
34322 case LT:
34323 case UNORDERED:
34324 if (outer_code == SET)
34325 {
34326 if (XEXP (x, 1) == const0_rtx)
34327 {
34328 *total = COSTS_N_INSNS (2);
34329 return true;
34330 }
34331 else
34332 {
34333 *total = COSTS_N_INSNS (3);
34334 return false;
34335 }
34336 }
34337 /* CC COMPARE. */
34338 if (outer_code == COMPARE)
34339 {
34340 *total = 0;
34341 return true;
34342 }
34343 break;
34344
34345 default:
34346 break;
34347 }
34348
34349 return false;
34350 }
34351
34352 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */
34353
34354 static bool
34355 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34356 int opno, int *total, bool speed)
34357 {
34358 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34359
34360 fprintf (stderr,
34361 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34362 "opno = %d, total = %d, speed = %s, x:\n",
34363 ret ? "complete" : "scan inner",
34364 GET_MODE_NAME (mode),
34365 GET_RTX_NAME (outer_code),
34366 opno,
34367 *total,
34368 speed ? "true" : "false");
34369
34370 debug_rtx (x);
34371
34372 return ret;
34373 }
34374
34375 static int
34376 rs6000_insn_cost (rtx_insn *insn, bool speed)
34377 {
34378 if (recog_memoized (insn) < 0)
34379 return 0;
34380
34381 if (!speed)
34382 return get_attr_length (insn);
34383
34384 int cost = get_attr_cost (insn);
34385 if (cost > 0)
34386 return cost;
34387
34388 int n = get_attr_length (insn) / 4;
34389 enum attr_type type = get_attr_type (insn);
34390
34391 switch (type)
34392 {
34393 case TYPE_LOAD:
34394 case TYPE_FPLOAD:
34395 case TYPE_VECLOAD:
34396 cost = COSTS_N_INSNS (n + 1);
34397 break;
34398
34399 case TYPE_MUL:
34400 switch (get_attr_size (insn))
34401 {
34402 case SIZE_8:
34403 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34404 break;
34405 case SIZE_16:
34406 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34407 break;
34408 case SIZE_32:
34409 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34410 break;
34411 case SIZE_64:
34412 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34413 break;
34414 default:
34415 gcc_unreachable ();
34416 }
34417 break;
34418 case TYPE_DIV:
34419 switch (get_attr_size (insn))
34420 {
34421 case SIZE_32:
34422 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34423 break;
34424 case SIZE_64:
34425 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34426 break;
34427 default:
34428 gcc_unreachable ();
34429 }
34430 break;
34431
34432 case TYPE_FP:
34433 cost = n * rs6000_cost->fp;
34434 break;
34435 case TYPE_DMUL:
34436 cost = n * rs6000_cost->dmul;
34437 break;
34438 case TYPE_SDIV:
34439 cost = n * rs6000_cost->sdiv;
34440 break;
34441 case TYPE_DDIV:
34442 cost = n * rs6000_cost->ddiv;
34443 break;
34444
34445 case TYPE_SYNC:
34446 case TYPE_LOAD_L:
34447 case TYPE_MFCR:
34448 case TYPE_MFCRF:
34449 cost = COSTS_N_INSNS (n + 2);
34450 break;
34451
34452 default:
34453 cost = COSTS_N_INSNS (n);
34454 }
34455
34456 return cost;
34457 }
34458
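/* For example, an 8-byte (two instruction) TYPE_LOAD insn costs
   COSTS_N_INSNS (3) when optimizing for speed: one unit per machine
   instruction plus one extra for load latency.  */
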
34459 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34460
34461 static int
34462 rs6000_debug_address_cost (rtx x, machine_mode mode,
34463 addr_space_t as, bool speed)
34464 {
34465 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34466
34467 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34468 ret, speed ? "true" : "false");
34469 debug_rtx (x);
34470
34471 return ret;
34472 }
34473
34474
34475 /* A C expression returning the cost of moving data from a register of class
34476 CLASS1 to one of CLASS2. */
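/* As a worked example of the units used below: the base cost of a
   register move is 2 per instruction, so a DImode GPR-to-GPR move on a
   32-bit target (two GPRs) returns 2 * 2 = 4. */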
34477
34478 static int
34479 rs6000_register_move_cost (machine_mode mode,
34480 reg_class_t from, reg_class_t to)
34481 {
34482 int ret;
34483
34484 if (TARGET_DEBUG_COST)
34485 dbg_cost_ctrl++;
34486
34487 /* Moves from/to GENERAL_REGS. */
34488 if (reg_classes_intersect_p (to, GENERAL_REGS)
34489 || reg_classes_intersect_p (from, GENERAL_REGS))
34490 {
34491 reg_class_t rclass = from;
34492
34493 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34494 rclass = to;
34495
34496 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34497 ret = (rs6000_memory_move_cost (mode, rclass, false)
34498 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34499
34500 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34501 shift. */
34502 else if (rclass == CR_REGS)
34503 ret = 4;
34504
34505 /* For those processors that have slow LR/CTR moves, make them more
34506 expensive than memory in order to bias spills to memory. */
34507 else if ((rs6000_tune == PROCESSOR_POWER6
34508 || rs6000_tune == PROCESSOR_POWER7
34509 || rs6000_tune == PROCESSOR_POWER8
34510 || rs6000_tune == PROCESSOR_POWER9)
34511 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34512 ret = 6 * hard_regno_nregs (0, mode);
34513
34514 else
34515 /* A move will cost one instruction per GPR moved. */
34516 ret = 2 * hard_regno_nregs (0, mode);
34517 }
34518
34519 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34520 else if (VECTOR_MEM_VSX_P (mode)
34521 && reg_classes_intersect_p (to, VSX_REGS)
34522 && reg_classes_intersect_p (from, VSX_REGS))
34523 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34524
34525 /* Moving between two similar registers is just one instruction. */
34526 else if (reg_classes_intersect_p (to, from))
34527 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34528
34529 /* Everything else has to go through GENERAL_REGS. */
34530 else
34531 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34532 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34533
34534 if (TARGET_DEBUG_COST)
34535 {
34536 if (dbg_cost_ctrl == 1)
34537 fprintf (stderr,
34538 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34539 ret, GET_MODE_NAME (mode), reg_class_names[from],
34540 reg_class_names[to]);
34541 dbg_cost_ctrl--;
34542 }
34543
34544 return ret;
34545 }
34546
34547 /* A C expression returning the cost of moving data of MODE from a register to
34548 or from memory. */
34549
34550 static int
34551 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34552 bool in ATTRIBUTE_UNUSED)
34553 {
34554 int ret;
34555
34556 if (TARGET_DEBUG_COST)
34557 dbg_cost_ctrl++;
34558
34559 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34560 ret = 4 * hard_regno_nregs (0, mode);
34561 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34562 || reg_classes_intersect_p (rclass, VSX_REGS)))
34563 ret = 4 * hard_regno_nregs (32, mode);
34564 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34565 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34566 else
34567 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34568
34569 if (TARGET_DEBUG_COST)
34570 {
34571 if (dbg_cost_ctrl == 1)
34572 fprintf (stderr,
34573 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34574 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34575 dbg_cost_ctrl--;
34576 }
34577
34578 return ret;
34579 }
34580
34581 /* Return the decl of a target-specific builtin that implements the
34582 reciprocal of the function FNDECL, or NULL_TREE if not available. */
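/* E.g. with -mrecip in effect for V2DFmode, a computation of
   1.0 / __builtin_vsx_xvsqrtdp (x) can be replaced with the reciprocal
   square root estimate builtin returned here (the builtin name in this
   example is illustrative). */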
34583
34584 static tree
34585 rs6000_builtin_reciprocal (tree fndecl)
34586 {
34587 switch (DECL_FUNCTION_CODE (fndecl))
34588 {
34589 case VSX_BUILTIN_XVSQRTDP:
34590 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34591 return NULL_TREE;
34592
34593 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34594
34595 case VSX_BUILTIN_XVSQRTSP:
34596 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34597 return NULL_TREE;
34598
34599 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34600
34601 default:
34602 return NULL_TREE;
34603 }
34604 }
34605
34606 /* Load up a constant. If the mode is a vector mode, splat the value across
34607 all of the vector elements. */
34608
34609 static rtx
34610 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34611 {
34612 rtx reg;
34613
34614 if (mode == SFmode || mode == DFmode)
34615 {
34616 rtx d = const_double_from_real_value (dconst, mode);
34617 reg = force_reg (mode, d);
34618 }
34619 else if (mode == V4SFmode)
34620 {
34621 rtx d = const_double_from_real_value (dconst, SFmode);
34622 rtvec v = gen_rtvec (4, d, d, d, d);
34623 reg = gen_reg_rtx (mode);
34624 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34625 }
34626 else if (mode == V2DFmode)
34627 {
34628 rtx d = const_double_from_real_value (dconst, DFmode);
34629 rtvec v = gen_rtvec (2, d, d);
34630 reg = gen_reg_rtx (mode);
34631 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34632 }
34633 else
34634 gcc_unreachable ();
34635
34636 return reg;
34637 }
34638
34639 /* Generate an FMA instruction. */
34640
34641 static void
34642 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34643 {
34644 machine_mode mode = GET_MODE (target);
34645 rtx dst;
34646
34647 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34648 gcc_assert (dst != NULL);
34649
34650 if (dst != target)
34651 emit_move_insn (target, dst);
34652 }
34653
34654 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34655
34656 static void
34657 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34658 {
34659 machine_mode mode = GET_MODE (dst);
34660 rtx r;
34661
34662 /* This is a tad more complicated, since the fnma_optab is for
34663 a different expression: fma(-m1, m2, a), which is the same
34664 thing except in the case of signed zeros.
34665
34666 Fortunately we know that if FMA is supported that FNMSUB is
34667 also supported in the ISA. Just expand it directly. */
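/* A minimal signed-zero example of the difference: with m1 = m2 = 1.0
   and a = 1.0, fma (-m1, m2, a) is -1.0 + 1.0 = +0.0, while
   -fma (m1, m2, -a) is -(1.0 - 1.0) = -0.0. */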
34668
34669 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34670
34671 r = gen_rtx_NEG (mode, a);
34672 r = gen_rtx_FMA (mode, m1, m2, r);
34673 r = gen_rtx_NEG (mode, r);
34674 emit_insn (gen_rtx_SET (dst, r));
34675 }
34676
34677 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34678 add a reg_note saying that this was a division. Support both scalar and
34679 vector divide. Assumes no trapping math and finite arguments. */
34680
34681 void
34682 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34683 {
34684 machine_mode mode = GET_MODE (dst);
34685 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34686 int i;
34687
34688 /* Low precision estimates guarantee 5 bits of accuracy. High
34689 precision estimates guarantee 14 bits of accuracy. SFmode
34690 requires 23 bits of accuracy. DFmode requires 52 bits of
34691 accuracy. Each pass at least doubles the accuracy, leading
34692 to the following. */
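/* Illustrative arithmetic: a 5-bit estimate doubles as
   5 -> 10 -> 20 -> 40, so 3 passes cover SFmode's 23 bits and a 4th
   covers DFmode's 52; a 14-bit estimate needs only 14 -> 28 (1 pass)
   for SFmode and 14 -> 28 -> 56 (2 passes) for DFmode. */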
34693 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34694 if (mode == DFmode || mode == V2DFmode)
34695 passes++;
34696
34697 enum insn_code code = optab_handler (smul_optab, mode);
34698 insn_gen_fn gen_mul = GEN_FCN (code);
34699
34700 gcc_assert (code != CODE_FOR_nothing);
34701
34702 one = rs6000_load_constant_and_splat (mode, dconst1);
34703
34704 /* x0 = 1./d estimate */
34705 x0 = gen_reg_rtx (mode);
34706 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34707 UNSPEC_FRES)));
34708
34709 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34710 if (passes > 1) {
34711
34712 /* e0 = 1. - d * x0 */
34713 e0 = gen_reg_rtx (mode);
34714 rs6000_emit_nmsub (e0, d, x0, one);
34715
34716 /* x1 = x0 + e0 * x0 */
34717 x1 = gen_reg_rtx (mode);
34718 rs6000_emit_madd (x1, e0, x0, x0);
34719
34720 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34721 ++i, xprev = xnext, eprev = enext) {
34722
34723 /* enext = eprev * eprev */
34724 enext = gen_reg_rtx (mode);
34725 emit_insn (gen_mul (enext, eprev, eprev));
34726
34727 /* xnext = xprev + enext * xprev */
34728 xnext = gen_reg_rtx (mode);
34729 rs6000_emit_madd (xnext, enext, xprev, xprev);
34730 }
34731
34732 } else
34733 xprev = x0;
34734
34735 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34736
34737 /* u = n * xprev */
34738 u = gen_reg_rtx (mode);
34739 emit_insn (gen_mul (u, n, xprev));
34740
34741 /* v = n - (d * u) */
34742 v = gen_reg_rtx (mode);
34743 rs6000_emit_nmsub (v, d, u, n);
34744
34745 /* dst = (v * xprev) + u */
34746 rs6000_emit_madd (dst, v, xprev, u);
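/* Expanding the last three steps: dst = (n - d*u) * xprev + u
   = n*xprev*(2 - d*xprev), since u = n*xprev. */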
34747
34748 if (note_p)
34749 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
34750 }
34751
34752 /* Goldschmidt's Algorithm for single/double-precision floating point
34753 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
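/* A sketch of the invariant used below: g approximates sqrt(src) and
   h approximates 1/(2*sqrt(src)). Each pass computes the residual
   t = 1/2 - g*h and refines g' = g + g*t, h' = h + h*t; the residual
   vanishes exactly when g*h == 1/2. The result is g for sqrt and
   2*h (emitted as h + h) for rsqrt. */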
34754
34755 void
34756 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34757 {
34758 machine_mode mode = GET_MODE (src);
34759 rtx e = gen_reg_rtx (mode);
34760 rtx g = gen_reg_rtx (mode);
34761 rtx h = gen_reg_rtx (mode);
34762
34763 /* Low precision estimates guarantee 5 bits of accuracy. High
34764 precision estimates guarantee 14 bits of accuracy. SFmode
34765 requires 23 bits of accuracy. DFmode requires 52 bits of
34766 accuracy. Each pass at least doubles the accuracy, leading
34767 to the following. */
34768 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34769 if (mode == DFmode || mode == V2DFmode)
34770 passes++;
34771
34772 int i;
34773 rtx mhalf;
34774 enum insn_code code = optab_handler (smul_optab, mode);
34775 insn_gen_fn gen_mul = GEN_FCN (code);
34776
34777 gcc_assert (code != CODE_FOR_nothing);
34778
34779 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34780
34781 /* e = rsqrt estimate */
34782 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34783 UNSPEC_RSQRT)));
34784
34785 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
34786 if (!recip)
34787 {
34788 rtx zero = force_reg (mode, CONST0_RTX (mode));
34789
34790 if (mode == SFmode)
34791 {
34792 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34793 e, zero, mode, 0);
34794 if (target != e)
34795 emit_move_insn (e, target);
34796 }
34797 else
34798 {
34799 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34800 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34801 }
34802 }
34803
34804 /* g = sqrt estimate. */
34805 emit_insn (gen_mul (g, e, src));
34806 /* h = 1/(2*sqrt) estimate. */
34807 emit_insn (gen_mul (h, e, mhalf));
34808
34809 if (recip)
34810 {
34811 if (passes == 1)
34812 {
34813 rtx t = gen_reg_rtx (mode);
34814 rs6000_emit_nmsub (t, g, h, mhalf);
34815 /* Apply correction directly to 1/rsqrt estimate. */
34816 rs6000_emit_madd (dst, e, t, e);
34817 }
34818 else
34819 {
34820 for (i = 0; i < passes; i++)
34821 {
34822 rtx t1 = gen_reg_rtx (mode);
34823 rtx g1 = gen_reg_rtx (mode);
34824 rtx h1 = gen_reg_rtx (mode);
34825
34826 rs6000_emit_nmsub (t1, g, h, mhalf);
34827 rs6000_emit_madd (g1, g, t1, g);
34828 rs6000_emit_madd (h1, h, t1, h);
34829
34830 g = g1;
34831 h = h1;
34832 }
34833 /* Multiply by 2 for 1/rsqrt. */
34834 emit_insn (gen_add3_insn (dst, h, h));
34835 }
34836 }
34837 else
34838 {
34839 rtx t = gen_reg_rtx (mode);
34840 rs6000_emit_nmsub (t, g, h, mhalf);
34841 rs6000_emit_madd (dst, g, t, g);
34842 }
34843
34844 return;
34845 }
34846
34847 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34848 (Power7) targets. DST is the target, and SRC is the argument operand. */
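/* A worked example of the popcntb fallback below: popcntb leaves a
   per-byte population count in each byte, multiplying by 0x01010101
   sums those counts into the most significant byte, and the shift
   right by 24 extracts the total. With the hypothetical input
   SRC = 0xF0F0F0F0: popcntb gives 0x04040404, and
   0x04040404 * 0x01010101 = 0x100C0804, whose top byte is 16. */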
34849
34850 void
34851 rs6000_emit_popcount (rtx dst, rtx src)
34852 {
34853 machine_mode mode = GET_MODE (dst);
34854 rtx tmp1, tmp2;
34855
34856 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
34857 if (TARGET_POPCNTD)
34858 {
34859 if (mode == SImode)
34860 emit_insn (gen_popcntdsi2 (dst, src));
34861 else
34862 emit_insn (gen_popcntddi2 (dst, src));
34863 return;
34864 }
34865
34866 tmp1 = gen_reg_rtx (mode);
34867
34868 if (mode == SImode)
34869 {
34870 emit_insn (gen_popcntbsi2 (tmp1, src));
34871 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
34872 NULL_RTX, 0);
34873 tmp2 = force_reg (SImode, tmp2);
34874 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
34875 }
34876 else
34877 {
34878 emit_insn (gen_popcntbdi2 (tmp1, src));
34879 tmp2 = expand_mult (DImode, tmp1,
34880 GEN_INT ((HOST_WIDE_INT)
34881 0x01010101 << 32 | 0x01010101),
34882 NULL_RTX, 0);
34883 tmp2 = force_reg (DImode, tmp2);
34884 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
34885 }
34886 }
34887
34888
34889 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
34890 target, and SRC is the argument operand. */
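/* Sketch of the shift/xor fallback below: XOR preserves parity, so
   folding the popcntb byte counts in halves (by 16 bits, then 8)
   leaves the parity of the total bit count in the low byte, and the
   final AND with 1 extracts it. E.g. hypothetical byte counts
   0x01020301 fold to a low byte of 0x01 ^ 0x02 ^ 0x03 ^ 0x01, whose
   low bit is 1 = the parity of 1+2+3+1 = 7. */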
34891
34892 void
34893 rs6000_emit_parity (rtx dst, rtx src)
34894 {
34895 machine_mode mode = GET_MODE (dst);
34896 rtx tmp;
34897
34898 tmp = gen_reg_rtx (mode);
34899
34900 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
34901 if (TARGET_CMPB)
34902 {
34903 if (mode == SImode)
34904 {
34905 emit_insn (gen_popcntbsi2 (tmp, src));
34906 emit_insn (gen_paritysi2_cmpb (dst, tmp));
34907 }
34908 else
34909 {
34910 emit_insn (gen_popcntbdi2 (tmp, src));
34911 emit_insn (gen_paritydi2_cmpb (dst, tmp));
34912 }
34913 return;
34914 }
34915
34916 if (mode == SImode)
34917 {
34918 /* Is mult+shift >= shift+xor+shift+xor? */
34919 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
34920 {
34921 rtx tmp1, tmp2, tmp3, tmp4;
34922
34923 tmp1 = gen_reg_rtx (SImode);
34924 emit_insn (gen_popcntbsi2 (tmp1, src));
34925
34926 tmp2 = gen_reg_rtx (SImode);
34927 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
34928 tmp3 = gen_reg_rtx (SImode);
34929 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
34930
34931 tmp4 = gen_reg_rtx (SImode);
34932 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
34933 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
34934 }
34935 else
34936 rs6000_emit_popcount (tmp, src);
34937 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
34938 }
34939 else
34940 {
34941 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
34942 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
34943 {
34944 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
34945
34946 tmp1 = gen_reg_rtx (DImode);
34947 emit_insn (gen_popcntbdi2 (tmp1, src));
34948
34949 tmp2 = gen_reg_rtx (DImode);
34950 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
34951 tmp3 = gen_reg_rtx (DImode);
34952 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
34953
34954 tmp4 = gen_reg_rtx (DImode);
34955 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
34956 tmp5 = gen_reg_rtx (DImode);
34957 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
34958
34959 tmp6 = gen_reg_rtx (DImode);
34960 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
34961 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
34962 }
34963 else
34964 rs6000_emit_popcount (tmp, src);
34965 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
34966 }
34967 }
34968
34969 /* Expand an Altivec constant permutation for little endian mode.
34970 OP0 and OP1 are the input vectors and TARGET is the output vector.
34971 SEL specifies the constant permutation vector.
34972
34973 There are two issues: First, the two input operands must be
34974 swapped so that together they form a double-wide array in LE
34975 order. Second, the vperm instruction has surprising behavior
34976 in LE mode: it interprets the elements of the source vectors
34977 in BE mode ("left to right") and interprets the elements of
34978 the destination vector in LE mode ("right to left"). To
34979 correct for this, we must subtract each element of the permute
34980 control vector from 31.
34981
34982 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
34983 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
34984 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
34985 serve as the permute control vector. Then, in BE mode,
34986
34987 vperm 9,10,11,12
34988
34989 places the desired result in vr9. However, in LE mode the
34990 vector contents will be
34991
34992 vr10 = 00000003 00000002 00000001 00000000
34993 vr11 = 00000007 00000006 00000005 00000004
34994
34995 The result of the vperm using the same permute control vector is
34996
34997 vr9 = 05000000 07000000 01000000 03000000
34998
34999 That is, the leftmost 4 bytes of vr10 are interpreted as the
35000 source for the rightmost 4 bytes of vr9, and so on.
35001
35002 If we change the permute control vector to
35003
35004 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35005
35006 and issue
35007
35008 vperm 9,11,10,12
35009
35010 we get the desired
35011
35012 vr9 = 00000006 00000004 00000002 00000000. */
35013
35014 static void
35015 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35016 const vec_perm_indices &sel)
35017 {
35018 unsigned int i;
35019 rtx perm[16];
35020 rtx constv, unspec;
35021
35022 /* Unpack and adjust the constant selector. */
35023 for (i = 0; i < 16; ++i)
35024 {
35025 unsigned int elt = 31 - (sel[i] & 31);
35026 perm[i] = GEN_INT (elt);
35027 }
35028
35029 /* Expand to a permute, swapping the inputs and using the
35030 adjusted selector. */
35031 if (!REG_P (op0))
35032 op0 = force_reg (V16QImode, op0);
35033 if (!REG_P (op1))
35034 op1 = force_reg (V16QImode, op1);
35035
35036 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35037 constv = force_reg (V16QImode, constv);
35038 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35039 UNSPEC_VPERM);
35040 if (!REG_P (target))
35041 {
35042 rtx tmp = gen_reg_rtx (V16QImode);
35043 emit_move_insn (tmp, unspec);
35044 unspec = tmp;
35045 }
35046
35047 emit_move_insn (target, unspec);
35048 }
35049
35050 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35051 permute control vector. But here it's not a constant, so we must
35052 generate a vector NAND or NOR to do the adjustment. */
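/* A bitwise NOT is equivalent to the "subtract from 31" adjustment of
   the constant case because vperm only looks at the low 5 bits of each
   selector byte, and 31 - x == ~x (mod 32). */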
35053
35054 void
35055 altivec_expand_vec_perm_le (rtx operands[4])
35056 {
35057 rtx notx, iorx, unspec;
35058 rtx target = operands[0];
35059 rtx op0 = operands[1];
35060 rtx op1 = operands[2];
35061 rtx sel = operands[3];
35062 rtx tmp = target;
35063 rtx norreg = gen_reg_rtx (V16QImode);
35064 machine_mode mode = GET_MODE (target);
35065
35066 /* Get everything in regs so the pattern matches. */
35067 if (!REG_P (op0))
35068 op0 = force_reg (mode, op0);
35069 if (!REG_P (op1))
35070 op1 = force_reg (mode, op1);
35071 if (!REG_P (sel))
35072 sel = force_reg (V16QImode, sel);
35073 if (!REG_P (target))
35074 tmp = gen_reg_rtx (mode);
35075
35076 if (TARGET_P9_VECTOR)
35077 {
35078 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35079 UNSPEC_VPERMR);
35080 }
35081 else
35082 {
35083 /* Invert the selector with a VNAND if available, else a VNOR.
35084 The VNAND is preferred for future fusion opportunities. */
35085 notx = gen_rtx_NOT (V16QImode, sel);
35086 iorx = (TARGET_P8_VECTOR
35087 ? gen_rtx_IOR (V16QImode, notx, notx)
35088 : gen_rtx_AND (V16QImode, notx, notx));
35089 emit_insn (gen_rtx_SET (norreg, iorx));
35090
35091 /* Permute with operands reversed and adjusted selector. */
35092 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35093 UNSPEC_VPERM);
35094 }
35095
35096 /* Copy into target, possibly by way of a register. */
35097 if (!REG_P (target))
35098 {
35099 emit_move_insn (tmp, unspec);
35100 unspec = tmp;
35101 }
35102
35103 emit_move_insn (target, unspec);
35104 }
35105
35106 /* Expand an Altivec constant permutation. Return true if we match
35107 an efficient implementation; false to fall back to VPERM.
35108
35109 OP0 and OP1 are the input vectors and TARGET is the output vector.
35110 SEL specifies the constant permutation vector. */
35111
35112 static bool
35113 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35114 const vec_perm_indices &sel)
35115 {
35116 struct altivec_perm_insn {
35117 HOST_WIDE_INT mask;
35118 enum insn_code impl;
35119 unsigned char perm[16];
35120 };
35121 static const struct altivec_perm_insn patterns[] = {
35122 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35123 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35124 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35125 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35126 { OPTION_MASK_ALTIVEC,
35127 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35128 : CODE_FOR_altivec_vmrglb_direct),
35129 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35130 { OPTION_MASK_ALTIVEC,
35131 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35132 : CODE_FOR_altivec_vmrglh_direct),
35133 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35134 { OPTION_MASK_ALTIVEC,
35135 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35136 : CODE_FOR_altivec_vmrglw_direct),
35137 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35138 { OPTION_MASK_ALTIVEC,
35139 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35140 : CODE_FOR_altivec_vmrghb_direct),
35141 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35142 { OPTION_MASK_ALTIVEC,
35143 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35144 : CODE_FOR_altivec_vmrghh_direct),
35145 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35146 { OPTION_MASK_ALTIVEC,
35147 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35148 : CODE_FOR_altivec_vmrghw_direct),
35149 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35150 { OPTION_MASK_P8_VECTOR,
35151 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35152 : CODE_FOR_p8_vmrgow_v4sf_direct),
35153 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35154 { OPTION_MASK_P8_VECTOR,
35155 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35156 : CODE_FOR_p8_vmrgew_v4sf_direct),
35157 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35158 };
35159
35160 unsigned int i, j, elt, which;
35161 unsigned char perm[16];
35162 rtx x;
35163 bool one_vec;
35164
35165 /* Unpack the constant selector. */
35166 for (i = which = 0; i < 16; ++i)
35167 {
35168 elt = sel[i] & 31;
35169 which |= (elt < 16 ? 1 : 2);
35170 perm[i] = elt;
35171 }
35172
35173 /* Simplify the constant selector based on operands. */
35174 switch (which)
35175 {
35176 default:
35177 gcc_unreachable ();
35178
35179 case 3:
35180 one_vec = false;
35181 if (!rtx_equal_p (op0, op1))
35182 break;
35183 /* FALLTHRU */
35184
35185 case 2:
35186 for (i = 0; i < 16; ++i)
35187 perm[i] &= 15;
35188 op0 = op1;
35189 one_vec = true;
35190 break;
35191
35192 case 1:
35193 op1 = op0;
35194 one_vec = true;
35195 break;
35196 }
35197
35198 /* Look for splat patterns. */
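/* E.g. a selector consisting of sixteen 3s splats byte 3 (BE
   numbering); for LE the element is renumbered to 15 - 3 = 12 before
   emitting vspltb. */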
35199 if (one_vec)
35200 {
35201 elt = perm[0];
35202
35203 for (i = 0; i < 16; ++i)
35204 if (perm[i] != elt)
35205 break;
35206 if (i == 16)
35207 {
35208 if (!BYTES_BIG_ENDIAN)
35209 elt = 15 - elt;
35210 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35211 return true;
35212 }
35213
35214 if (elt % 2 == 0)
35215 {
35216 for (i = 0; i < 16; i += 2)
35217 if (perm[i] != elt || perm[i + 1] != elt + 1)
35218 break;
35219 if (i == 16)
35220 {
35221 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35222 x = gen_reg_rtx (V8HImode);
35223 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35224 GEN_INT (field)));
35225 emit_move_insn (target, gen_lowpart (V16QImode, x));
35226 return true;
35227 }
35228 }
35229
35230 if (elt % 4 == 0)
35231 {
35232 for (i = 0; i < 16; i += 4)
35233 if (perm[i] != elt
35234 || perm[i + 1] != elt + 1
35235 || perm[i + 2] != elt + 2
35236 || perm[i + 3] != elt + 3)
35237 break;
35238 if (i == 16)
35239 {
35240 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35241 x = gen_reg_rtx (V4SImode);
35242 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35243 GEN_INT (field)));
35244 emit_move_insn (target, gen_lowpart (V16QImode, x));
35245 return true;
35246 }
35247 }
35248 }
35249
35250 /* Look for merge and pack patterns. */
35251 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35252 {
35253 bool swapped;
35254
35255 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35256 continue;
35257
35258 elt = patterns[j].perm[0];
35259 if (perm[0] == elt)
35260 swapped = false;
35261 else if (perm[0] == elt + 16)
35262 swapped = true;
35263 else
35264 continue;
35265 for (i = 1; i < 16; ++i)
35266 {
35267 elt = patterns[j].perm[i];
35268 if (swapped)
35269 elt = (elt >= 16 ? elt - 16 : elt + 16);
35270 else if (one_vec && elt >= 16)
35271 elt -= 16;
35272 if (perm[i] != elt)
35273 break;
35274 }
35275 if (i == 16)
35276 {
35277 enum insn_code icode = patterns[j].impl;
35278 machine_mode omode = insn_data[icode].operand[0].mode;
35279 machine_mode imode = insn_data[icode].operand[1].mode;
35280
35281 /* For little-endian, don't use vpkuwum and vpkuhum if the
35282 underlying vector type is not V4SI and V8HI, respectively.
35283 For example, using vpkuwum with a V8HI picks up the even
35284 halfwords (BE numbering) when the even halfwords (LE
35285 numbering) are what we need. */
35286 if (!BYTES_BIG_ENDIAN
35287 && icode == CODE_FOR_altivec_vpkuwum_direct
35288 && ((GET_CODE (op0) == REG
35289 && GET_MODE (op0) != V4SImode)
35290 || (GET_CODE (op0) == SUBREG
35291 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35292 continue;
35293 if (!BYTES_BIG_ENDIAN
35294 && icode == CODE_FOR_altivec_vpkuhum_direct
35295 && ((GET_CODE (op0) == REG
35296 && GET_MODE (op0) != V8HImode)
35297 || (GET_CODE (op0) == SUBREG
35298 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35299 continue;
35300
35301 /* For little-endian, the two input operands must be swapped
35302 (or swapped back) to ensure proper right-to-left numbering
35303 from 0 to 2N-1. */
35304 if (swapped ^ !BYTES_BIG_ENDIAN)
35305 std::swap (op0, op1);
35306 if (imode != V16QImode)
35307 {
35308 op0 = gen_lowpart (imode, op0);
35309 op1 = gen_lowpart (imode, op1);
35310 }
35311 if (omode == V16QImode)
35312 x = target;
35313 else
35314 x = gen_reg_rtx (omode);
35315 emit_insn (GEN_FCN (icode) (x, op0, op1));
35316 if (omode != V16QImode)
35317 emit_move_insn (target, gen_lowpart (V16QImode, x));
35318 return true;
35319 }
35320 }
35321
35322 if (!BYTES_BIG_ENDIAN)
35323 {
35324 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35325 return true;
35326 }
35327
35328 return false;
35329 }
35330
35331 /* Expand a VSX Permute Doubleword constant permutation.
35332 Return true if we match an efficient implementation. */
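/* PERM0 and PERM1 are 2-bit selectors into the concatenation of OP0
   and OP1: bit 1 picks the operand (0 = OP0, 1 = OP1) and bit 0 the
   doubleword within it. E.g. perm0 = 1, perm1 = 2 selects doubleword 1
   of OP0 followed by doubleword 0 of OP1. */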
35333
35334 static bool
35335 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35336 unsigned char perm0, unsigned char perm1)
35337 {
35338 rtx x;
35339
35340 /* If both selectors come from the same operand, fold to single op. */
35341 if ((perm0 & 2) == (perm1 & 2))
35342 {
35343 if (perm0 & 2)
35344 op0 = op1;
35345 else
35346 op1 = op0;
35347 }
35348 /* If both operands are equal, fold to simpler permutation. */
35349 if (rtx_equal_p (op0, op1))
35350 {
35351 perm0 = perm0 & 1;
35352 perm1 = (perm1 & 1) + 2;
35353 }
35354 /* If the first selector comes from the second operand, swap. */
35355 else if (perm0 & 2)
35356 {
35357 if (perm1 & 2)
35358 return false;
35359 perm0 -= 2;
35360 perm1 += 2;
35361 std::swap (op0, op1);
35362 }
35363 /* If the second selector does not come from the second operand, fail. */
35364 else if ((perm1 & 2) == 0)
35365 return false;
35366
35367 /* Success! */
35368 if (target != NULL)
35369 {
35370 machine_mode vmode, dmode;
35371 rtvec v;
35372
35373 vmode = GET_MODE (target);
35374 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35375 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35376 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35377 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35378 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35379 emit_insn (gen_rtx_SET (target, x));
35380 }
35381 return true;
35382 }
35383
35384 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35385
35386 static bool
35387 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35388 rtx op1, const vec_perm_indices &sel)
35389 {
35390 bool testing_p = !target;
35391
35392 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35393 if (TARGET_ALTIVEC && testing_p)
35394 return true;
35395
35396 /* Check for ps_merge* or xxpermdi insns. */
35397 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35398 {
35399 if (testing_p)
35400 {
35401 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35402 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35403 }
35404 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35405 return true;
35406 }
35407
35408 if (TARGET_ALTIVEC)
35409 {
35410 /* Force the target-independent code to lower to V16QImode. */
35411 if (vmode != V16QImode)
35412 return false;
35413 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35414 return true;
35415 }
35416
35417 return false;
35418 }
35419
35420 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35421 OP0 and OP1 are the input vectors and TARGET is the output vector.
35422 PERM specifies the constant permutation vector. */
35423
35424 static void
35425 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35426 machine_mode vmode, const vec_perm_builder &perm)
35427 {
35428 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35429 if (x != target)
35430 emit_move_insn (target, x);
35431 }
35432
35433 /* Expand an extract even operation. */
35434
35435 void
35436 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35437 {
35438 machine_mode vmode = GET_MODE (target);
35439 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35440 vec_perm_builder perm (nelt, nelt, 1);
35441
35442 for (i = 0; i < nelt; i++)
35443 perm.quick_push (i * 2);
35444
35445 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35446 }
35447
35448 /* Expand a vector interleave operation. */
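/* E.g. for V4SImode the permutation built below is { 0, 4, 1, 5 } when
   HIGHP (the high halves of both inputs, in BE element order) and
   { 2, 6, 3, 7 } otherwise. */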
35449
35450 void
35451 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35452 {
35453 machine_mode vmode = GET_MODE (target);
35454 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35455 vec_perm_builder perm (nelt, nelt, 1);
35456
35457 high = (highp ? 0 : nelt / 2);
35458 for (i = 0; i < nelt / 2; i++)
35459 {
35460 perm.quick_push (i + high);
35461 perm.quick_push (i + nelt + high);
35462 }
35463
35464 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35465 }
35466
35467 /* Scale a V2DF vector SRC by two raised to the power SCALE and place it in TGT. */
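/* E.g. a SCALE of 3 multiplies both lanes by 2^3 = 8.0. */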
35468 void
35469 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35470 {
35471 HOST_WIDE_INT hwi_scale (scale);
35472 REAL_VALUE_TYPE r_pow;
35473 rtvec v = rtvec_alloc (2);
35474 rtx elt;
35475 rtx scale_vec = gen_reg_rtx (V2DFmode);
35476 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35477 elt = const_double_from_real_value (r_pow, DFmode);
35478 RTVEC_ELT (v, 0) = elt;
35479 RTVEC_ELT (v, 1) = elt;
35480 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35481 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35482 }
35483
35484 /* Return an RTX representing where to find the function value of a
35485 function returning MODE. */
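/* E.g. with hard float an SCmode value is returned as a PARALLEL of
   two SFmode pieces: the real part in FP_ARG_RETURN at offset 0 and
   the imaginary part in the next FP register at offset 4. */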
35486 static rtx
35487 rs6000_complex_function_value (machine_mode mode)
35488 {
35489 unsigned int regno;
35490 rtx r1, r2;
35491 machine_mode inner = GET_MODE_INNER (mode);
35492 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35493
35494 if (TARGET_FLOAT128_TYPE
35495 && (mode == KCmode
35496 || (mode == TCmode && TARGET_IEEEQUAD)))
35497 regno = ALTIVEC_ARG_RETURN;
35498
35499 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35500 regno = FP_ARG_RETURN;
35501
35502 else
35503 {
35504 regno = GP_ARG_RETURN;
35505
35506 /* 32-bit is OK since it'll go in r3/r4. */
35507 if (TARGET_32BIT && inner_bytes >= 4)
35508 return gen_rtx_REG (mode, regno);
35509 }
35510
35511 if (inner_bytes >= 8)
35512 return gen_rtx_REG (mode, regno);
35513
35514 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35515 const0_rtx);
35516 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35517 GEN_INT (inner_bytes));
35518 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35519 }
35520
35521 /* Return an rtx describing a return value of MODE as a PARALLEL
35522 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35523 stride REG_STRIDE. */
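/* E.g. a DImode value returned as two SImode pieces with
   REGNO = GP_ARG_RETURN and stride 1 yields
   (parallel [(expr_list (reg:SI 3) (const_int 0))
              (expr_list (reg:SI 4) (const_int 4))]). */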
35524
35525 static rtx
35526 rs6000_parallel_return (machine_mode mode,
35527 int n_elts, machine_mode elt_mode,
35528 unsigned int regno, unsigned int reg_stride)
35529 {
35530 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35531
35532 int i;
35533 for (i = 0; i < n_elts; i++)
35534 {
35535 rtx r = gen_rtx_REG (elt_mode, regno);
35536 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35537 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35538 regno += reg_stride;
35539 }
35540
35541 return par;
35542 }
35543
35544 /* Target hook for TARGET_FUNCTION_VALUE.
35545
35546 An integer value is in r3 and a floating-point value is in fp1,
35547 unless -msoft-float. */
35548
35549 static rtx
35550 rs6000_function_value (const_tree valtype,
35551 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35552 bool outgoing ATTRIBUTE_UNUSED)
35553 {
35554 machine_mode mode;
35555 unsigned int regno;
35556 machine_mode elt_mode;
35557 int n_elts;
35558
35559 /* Special handling for structs in darwin64. */
35560 if (TARGET_MACHO
35561 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35562 {
35563 CUMULATIVE_ARGS valcum;
35564 rtx valret;
35565
35566 valcum.words = 0;
35567 valcum.fregno = FP_ARG_MIN_REG;
35568 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35569 /* Do a trial code generation as if this were going to be passed as
35570 an argument; if any part goes in memory, we return NULL. */
35571 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35572 if (valret)
35573 return valret;
35574 /* Otherwise fall through to standard ABI rules. */
35575 }
35576
35577 mode = TYPE_MODE (valtype);
35578
35579 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35580 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35581 {
35582 int first_reg, n_regs;
35583
35584 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35585 {
35586 /* _Decimal128 must use even/odd register pairs. */
35587 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35588 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35589 }
35590 else
35591 {
35592 first_reg = ALTIVEC_ARG_RETURN;
35593 n_regs = 1;
35594 }
35595
35596 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35597 }
35598
35599 /* Some return value types need to be split when using -mpowerpc64 with the 32-bit ABI. */
35600 if (TARGET_32BIT && TARGET_POWERPC64)
35601 switch (mode)
35602 {
35603 default:
35604 break;
35605 case E_DImode:
35606 case E_SCmode:
35607 case E_DCmode:
35608 case E_TCmode:
35609 int count = GET_MODE_SIZE (mode) / 4;
35610 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35611 }
35612
35613 if ((INTEGRAL_TYPE_P (valtype)
35614 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35615 || POINTER_TYPE_P (valtype))
35616 mode = TARGET_32BIT ? SImode : DImode;
35617
35618 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35619 /* _Decimal128 must use an even/odd register pair. */
35620 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35621 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35622 && !FLOAT128_VECTOR_P (mode))
35623 regno = FP_ARG_RETURN;
35624 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35625 && targetm.calls.split_complex_arg)
35626 return rs6000_complex_function_value (mode);
35627 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35628 return register is used in both cases, and we won't see V2DImode/V2DFmode
35629 for pure altivec, combine the two cases. */
35630 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35631 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35632 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35633 regno = ALTIVEC_ARG_RETURN;
35634 else
35635 regno = GP_ARG_RETURN;
35636
35637 return gen_rtx_REG (mode, regno);
35638 }
35639
35640 /* Define how to find the value returned by a library function
35641 assuming the value has mode MODE. */
35642 rtx
35643 rs6000_libcall_value (machine_mode mode)
35644 {
35645 unsigned int regno;
35646
35647 /* A long long return value needs to be split when using -mpowerpc64 with the 32-bit ABI. */
35648 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35649 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35650
35651 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35652 /* _Decimal128 must use an even/odd register pair. */
35653 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35654 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
35655 regno = FP_ARG_RETURN;
35656 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35657 return register is used in both cases, and we won't see V2DImode/V2DFmode
35658 for pure altivec, combine the two cases. */
35659 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35660 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35661 regno = ALTIVEC_ARG_RETURN;
35662 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35663 return rs6000_complex_function_value (mode);
35664 else
35665 regno = GP_ARG_RETURN;
35666
35667 return gen_rtx_REG (mode, regno);
35668 }
35669
35670 /* Compute register pressure classes. We implement the target hook to avoid
35671 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
35672 lead to incorrect estimates of the number of available registers and therefore
35673 increased register pressure/spill. */
35674 static int
35675 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35676 {
35677 int n;
35678
35679 n = 0;
35680 pressure_classes[n++] = GENERAL_REGS;
35681 if (TARGET_VSX)
35682 pressure_classes[n++] = VSX_REGS;
35683 else
35684 {
35685 if (TARGET_ALTIVEC)
35686 pressure_classes[n++] = ALTIVEC_REGS;
35687 if (TARGET_HARD_FLOAT)
35688 pressure_classes[n++] = FLOAT_REGS;
35689 }
35690 pressure_classes[n++] = CR_REGS;
35691 pressure_classes[n++] = SPECIAL_REGS;
35692
35693 return n;
35694 }
35695
35696 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35697 Frame pointer elimination is automatically handled.
35698
35699 For the RS/6000, if frame pointer elimination is being done, we would like
35700 to convert ap into fp, not sp.
35701
35702 We need r30 if -mminimal-toc was specified, and there are constant pool
35703 references. */
35704
35705 static bool
35706 rs6000_can_eliminate (const int from, const int to)
35707 {
35708 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35709 ? ! frame_pointer_needed
35710 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35711 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35712 || constant_pool_empty_p ()
35713 : true);
35714 }
35715
35716 /* Define the offset between two registers, FROM to be eliminated and its
35717 replacement TO, at the start of a routine. */
35718 HOST_WIDE_INT
35719 rs6000_initial_elimination_offset (int from, int to)
35720 {
35721 rs6000_stack_t *info = rs6000_stack_info ();
35722 HOST_WIDE_INT offset;
35723
35724 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35725 offset = info->push_p ? 0 : -info->total_size;
35726 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35727 {
35728 offset = info->push_p ? 0 : -info->total_size;
35729 if (FRAME_GROWS_DOWNWARD)
35730 offset += info->fixed_size + info->vars_size + info->parm_size;
35731 }
35732 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35733 offset = FRAME_GROWS_DOWNWARD
35734 ? info->fixed_size + info->vars_size + info->parm_size
35735 : 0;
35736 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35737 offset = info->total_size;
35738 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35739 offset = info->push_p ? info->total_size : 0;
35740 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35741 offset = 0;
35742 else
35743 gcc_unreachable ();
35744
35745 return offset;
35746 }
35747
35748 /* Fill in the sizes of the registers used by the unwinder. */
35749
35750 static void
35751 rs6000_init_dwarf_reg_sizes_extra (tree address)
35752 {
35753 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35754 {
35755 int i;
35756 machine_mode mode = TYPE_MODE (char_type_node);
35757 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35758 rtx mem = gen_rtx_MEM (BLKmode, addr);
35759 rtx value = gen_int_mode (16, mode);
35760
35761 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35762 The unwinder still needs to know the size of Altivec registers. */
35763
35764 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35765 {
35766 int column = DWARF_REG_TO_UNWIND_COLUMN
35767 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35768 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35769
35770 emit_move_insn (adjust_address (mem, mode, offset), value);
35771 }
35772 }
35773 }
35774
35775 /* Map internal gcc register numbers to debug format register numbers.
35776 FORMAT specifies the type of debug register number to use:
35777 0 -- debug information, except for frame-related sections
35778 1 -- DWARF .debug_frame section
35779 2 -- DWARF .eh_frame section */
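/* Example mappings under RS6000_USE_DWARF_NUMBERING: r3 -> 3,
   lr -> 108, ctr -> 109, cr2 -> 88 for .debug_info but 64 for
   .debug_frame, vr0 -> 1124. */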
35780
35781 unsigned int
35782 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35783 {
35784 /* Except for the above, we use the internal number for non-DWARF
35785 debug information, and also for .eh_frame. */
35786 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35787 return regno;
35788
35789 /* On some platforms, we use the standard DWARF register
35790 numbering for .debug_info and .debug_frame. */
35791 #ifdef RS6000_USE_DWARF_NUMBERING
35792 if (regno <= 63)
35793 return regno;
35794 if (regno == LR_REGNO)
35795 return 108;
35796 if (regno == CTR_REGNO)
35797 return 109;
35798 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35799 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35800 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35801 to the DWARF reg for CR. */
35802 if (format == 1 && regno == CR2_REGNO)
35803 return 64;
35804 if (CR_REGNO_P (regno))
35805 return regno - CR0_REGNO + 86;
35806 if (regno == CA_REGNO)
35807 return 101; /* XER */
35808 if (ALTIVEC_REGNO_P (regno))
35809 return regno - FIRST_ALTIVEC_REGNO + 1124;
35810 if (regno == VRSAVE_REGNO)
35811 return 356;
35812 if (regno == VSCR_REGNO)
35813 return 67;
35814 #endif
35815 return regno;
35816 }
35817
35818 /* Target hook for eh_return_filter_mode. */
35819 static scalar_int_mode
35820 rs6000_eh_return_filter_mode (void)
35821 {
35822 return TARGET_32BIT ? SImode : word_mode;
35823 }
35824
35825 /* Target hook for translate_mode_attribute. */
35826 static machine_mode
35827 rs6000_translate_mode_attribute (machine_mode mode)
35828 {
35829 if ((FLOAT128_IEEE_P (mode)
35830 && ieee128_float_type_node == long_double_type_node)
35831 || (FLOAT128_IBM_P (mode)
35832 && ibm128_float_type_node == long_double_type_node))
35833 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
35834 return mode;
35835 }
35836
35837 /* Target hook for scalar_mode_supported_p. */
35838 static bool
35839 rs6000_scalar_mode_supported_p (scalar_mode mode)
35840 {
35841 /* -m32 does not support TImode. This is the default, from
35842 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35843 same ABI as for -m32. But default_scalar_mode_supported_p allows
35844 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35845 for -mpowerpc64. */
35846 if (TARGET_32BIT && mode == TImode)
35847 return false;
35848
35849 if (DECIMAL_FLOAT_MODE_P (mode))
35850 return default_decimal_float_supported_p ();
35851 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
35852 return true;
35853 else
35854 return default_scalar_mode_supported_p (mode);
35855 }
35856
35857 /* Target hook for vector_mode_supported_p. */
35858 static bool
35859 rs6000_vector_mode_supported_p (machine_mode mode)
35860 {
35861 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35862 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35863 double-double. */
35864 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
35865 return true;
35866
35867 else
35868 return false;
35869 }
35870
35871 /* Target hook for floatn_mode. */
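/* The mapping implemented below: _Float32 -> SFmode, _Float64 and
   _Float32x -> DFmode, _Float128 and _Float64x -> the IEEE 128-bit
   mode (TFmode or KFmode) when the -mfloat128 types are available, and
   _Float128x unsupported. */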
35872 static opt_scalar_float_mode
35873 rs6000_floatn_mode (int n, bool extended)
35874 {
35875 if (extended)
35876 {
35877 switch (n)
35878 {
35879 case 32:
35880 return DFmode;
35881
35882 case 64:
35883 if (TARGET_FLOAT128_TYPE)
35884 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35885 else
35886 return opt_scalar_float_mode ();
35887
35888 case 128:
35889 return opt_scalar_float_mode ();
35890
35891 default:
35892 /* Those are the only valid _FloatNx types. */
35893 gcc_unreachable ();
35894 }
35895 }
35896 else
35897 {
35898 switch (n)
35899 {
35900 case 32:
35901 return SFmode;
35902
35903 case 64:
35904 return DFmode;
35905
35906 case 128:
35907 if (TARGET_FLOAT128_TYPE)
35908 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35909 else
35910 return opt_scalar_float_mode ();
35911
35912 default:
35913 return opt_scalar_float_mode ();
35914 }
35915 }
35917 }
35918
35919 /* Target hook for c_mode_for_suffix. */
35920 static machine_mode
35921 rs6000_c_mode_for_suffix (char suffix)
35922 {
35923 if (TARGET_FLOAT128_TYPE)
35924 {
35925 if (suffix == 'q' || suffix == 'Q')
35926 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35927
35928 /* At the moment, we are not defining a suffix for IBM extended double.
35929 If/when the default for -mabi=ieeelongdouble is changed, and we want
35930 to support __ibm128 constants in legacy library code, we may need to
35931 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
35932 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
35933 __float80 constants. */
35934 }
35935
35936 return VOIDmode;
35937 }
35938
35939 /* Target hook for invalid_arg_for_unprototyped_fn. */
35940 static const char *
35941 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
35942 {
35943 return (!rs6000_darwin64_abi
35944 && typelist == 0
35945 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
35946 && (funcdecl == NULL_TREE
35947 || (TREE_CODE (funcdecl) == FUNCTION_DECL
35948 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
35949 ? N_("AltiVec argument passed to unprototyped function")
35950 : NULL;
35951 }
35952
35953 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
35954 setup by using __stack_chk_fail_local hidden function instead of
35955 calling __stack_chk_fail directly. Otherwise it is better to call
35956 __stack_chk_fail directly. */
35957
35958 static tree ATTRIBUTE_UNUSED
35959 rs6000_stack_protect_fail (void)
35960 {
35961 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
35962 ? default_hidden_stack_protect_fail ()
35963 : default_external_stack_protect_fail ();
35964 }
35965
35966 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
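/* Under the usual ASan mapping shadow = (addr >> 3) + offset, this
   puts the shadow region at 1 << 41 for 64-bit and 1 << 29 for 32-bit
   ELF targets. */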
35967
35968 #if TARGET_ELF
35969 static unsigned HOST_WIDE_INT
35970 rs6000_asan_shadow_offset (void)
35971 {
35972 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
35973 }
35974 #endif
35975 \f
35976 /* Mask options that we want to support inside of attribute((target)) and
35977 #pragma GCC target operations. Note, we do not include things like
35978 64/32-bit, endianness, hard/soft floating point, etc. that would have
35979 different calling sequences. */
35980
35981 struct rs6000_opt_mask {
35982 const char *name; /* option name */
35983 HOST_WIDE_INT mask; /* mask to set */
35984 bool invert; /* invert sense of mask */
35985 bool valid_target; /* option is a target option */
35986 };
35987
35988 static struct rs6000_opt_mask const rs6000_opt_masks[] =
35989 {
35990 { "altivec", OPTION_MASK_ALTIVEC, false, true },
35991 { "cmpb", OPTION_MASK_CMPB, false, true },
35992 { "crypto", OPTION_MASK_CRYPTO, false, true },
35993 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
35994 { "dlmzb", OPTION_MASK_DLMZB, false, true },
35995 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
35996 false, true },
35997 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
35998 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
35999 { "fprnd", OPTION_MASK_FPRND, false, true },
36000 { "hard-dfp", OPTION_MASK_DFP, false, true },
36001 { "htm", OPTION_MASK_HTM, false, true },
36002 { "isel", OPTION_MASK_ISEL, false, true },
36003 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36004 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36005 { "modulo", OPTION_MASK_MODULO, false, true },
36006 { "mulhw", OPTION_MASK_MULHW, false, true },
36007 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36008 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36009 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36010 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36011 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36012 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36013 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36014 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36015 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36016 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36017 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36018 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36019 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36020 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36021 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36022 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36023 { "string", 0, false, true },
36024 { "update", OPTION_MASK_NO_UPDATE, true , true },
36025 { "vsx", OPTION_MASK_VSX, false, true },
36026 #ifdef OPTION_MASK_64BIT
36027 #if TARGET_AIX_OS
36028 { "aix64", OPTION_MASK_64BIT, false, false },
36029 { "aix32", OPTION_MASK_64BIT, true, false },
36030 #else
36031 { "64", OPTION_MASK_64BIT, false, false },
36032 { "32", OPTION_MASK_64BIT, true, false },
36033 #endif
36034 #endif
36035 #ifdef OPTION_MASK_EABI
36036 { "eabi", OPTION_MASK_EABI, false, false },
36037 #endif
36038 #ifdef OPTION_MASK_LITTLE_ENDIAN
36039 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36040 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36041 #endif
36042 #ifdef OPTION_MASK_RELOCATABLE
36043 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36044 #endif
36045 #ifdef OPTION_MASK_STRICT_ALIGN
36046 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36047 #endif
36048 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36049 { "string", 0, false, false },
36050 };
36051
36052 /* Builtin mask mapping for printing the flags. */
36053 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36054 {
36055 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36056 { "vsx", RS6000_BTM_VSX, false, false },
36057 { "fre", RS6000_BTM_FRE, false, false },
36058 { "fres", RS6000_BTM_FRES, false, false },
36059 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36060 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36061 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36062 { "cell", RS6000_BTM_CELL, false, false },
36063 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36064 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36065 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36066 { "crypto", RS6000_BTM_CRYPTO, false, false },
36067 { "htm", RS6000_BTM_HTM, false, false },
36068 { "hard-dfp", RS6000_BTM_DFP, false, false },
36069 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36070 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36071 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36072 { "float128", RS6000_BTM_FLOAT128, false, false },
36073 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36074 };
36075
36076 /* Option variables that we want to support inside attribute((target)) and
36077 #pragma GCC target operations. */
36078
36079 struct rs6000_opt_var {
36080 const char *name; /* option name */
36081 size_t global_offset; /* offset of the option in global_options. */
36082 size_t target_offset; /* offset of the option in target options. */
36083 };
36084
36085 static struct rs6000_opt_var const rs6000_opt_vars[] =
36086 {
36087 { "friz",
36088 offsetof (struct gcc_options, x_TARGET_FRIZ),
36089 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36090 { "avoid-indexed-addresses",
36091 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36092 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36093 { "longcall",
36094 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36095 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36096 { "optimize-swaps",
36097 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36098 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36099 { "allow-movmisalign",
36100 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36101 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36102 { "sched-groups",
36103 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36104 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36105 { "always-hint",
36106 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36107 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36108 { "align-branch-targets",
36109 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36110 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36111 { "tls-markers",
36112 offsetof (struct gcc_options, x_tls_markers),
36113 offsetof (struct cl_target_option, x_tls_markers), },
36114 { "sched-prolog",
36115 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36116 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36117 { "sched-epilog",
36118 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36119 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36120 { "speculate-indirect-jumps",
36121 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36122 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36123 };
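/* The names in rs6000_opt_masks and rs6000_opt_vars are what users
   write in target pragmas and attributes, e.g. (illustrative option
   choices only):

     #pragma GCC target ("cpu=power9,no-vsx")
     void f (void) __attribute__ ((__target__ ("popcntd,friz")));

   They are parsed by rs6000_inner_target_options below. */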
36124
36125 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36126 parsing. Return true if there were no errors. */
36127
36128 static bool
36129 rs6000_inner_target_options (tree args, bool attr_p)
36130 {
36131 bool ret = true;
36132
36133 if (args == NULL_TREE)
36134 ;
36135
36136 else if (TREE_CODE (args) == STRING_CST)
36137 {
36138 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36139 char *q;
36140
36141 while ((q = strtok (p, ",")) != NULL)
36142 {
36143 bool error_p = false;
36144 bool not_valid_p = false;
36145 const char *cpu_opt = NULL;
36146
36147 p = NULL;
36148 if (strncmp (q, "cpu=", 4) == 0)
36149 {
36150 int cpu_index = rs6000_cpu_name_lookup (q+4);
36151 if (cpu_index >= 0)
36152 rs6000_cpu_index = cpu_index;
36153 else
36154 {
36155 error_p = true;
36156 cpu_opt = q+4;
36157 }
36158 }
36159 else if (strncmp (q, "tune=", 5) == 0)
36160 {
36161 int tune_index = rs6000_cpu_name_lookup (q+5);
36162 if (tune_index >= 0)
36163 rs6000_tune_index = tune_index;
36164 else
36165 {
36166 error_p = true;
36167 cpu_opt = q+5;
36168 }
36169 }
36170 else
36171 {
36172 size_t i;
36173 bool invert = false;
36174 char *r = q;
36175
36176 error_p = true;
36177 if (strncmp (r, "no-", 3) == 0)
36178 {
36179 invert = true;
36180 r += 3;
36181 }
36182
36183 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36184 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36185 {
36186 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36187
36188 if (!rs6000_opt_masks[i].valid_target)
36189 not_valid_p = true;
36190 else
36191 {
36192 error_p = false;
36193 rs6000_isa_flags_explicit |= mask;
36194
36195 /* VSX needs altivec, so -mvsx automagically sets
36196 altivec and disables -mavoid-indexed-addresses. */
36197 if (!invert)
36198 {
36199 if (mask == OPTION_MASK_VSX)
36200 {
36201 mask |= OPTION_MASK_ALTIVEC;
36202 TARGET_AVOID_XFORM = 0;
36203 }
36204 }
36205
36206 if (rs6000_opt_masks[i].invert)
36207 invert = !invert;
36208
36209 if (invert)
36210 rs6000_isa_flags &= ~mask;
36211 else
36212 rs6000_isa_flags |= mask;
36213 }
36214 break;
36215 }
36216
36217 if (error_p && !not_valid_p)
36218 {
36219 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36220 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36221 {
36222 size_t j = rs6000_opt_vars[i].global_offset;
36223 *((int *) ((char *)&global_options + j)) = !invert;
36224 error_p = false;
36225 not_valid_p = false;
36226 break;
36227 }
36228 }
36229 }
36230
36231 if (error_p)
36232 {
36233 const char *eprefix, *esuffix;
36234
36235 ret = false;
36236 if (attr_p)
36237 {
36238 eprefix = "__attribute__((__target__(";
36239 esuffix = ")))";
36240 }
36241 else
36242 {
36243 eprefix = "#pragma GCC target ";
36244 esuffix = "";
36245 }
36246
36247 if (cpu_opt)
36248 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36249 q, esuffix);
36250 else if (not_valid_p)
36251 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36252 else
36253 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36254 }
36255 }
36256 }
36257
36258 else if (TREE_CODE (args) == TREE_LIST)
36259 {
36260 do
36261 {
36262 tree value = TREE_VALUE (args);
36263 if (value)
36264 {
36265 bool ret2 = rs6000_inner_target_options (value, attr_p);
36266 if (!ret2)
36267 ret = false;
36268 }
36269 args = TREE_CHAIN (args);
36270 }
36271 while (args != NULL_TREE);
36272 }
36273
36274 else
36275 {
36276 error ("attribute %<target%> argument not a string");
36277 return false;
36278 }
36279
36280 return ret;
36281 }
36282
36283 /* Print out the target options as a list for -mdebug=target. */
36284
36285 static void
36286 rs6000_debug_target_options (tree args, const char *prefix)
36287 {
36288 if (args == NULL_TREE)
36289 fprintf (stderr, "%s<NULL>", prefix);
36290
36291 else if (TREE_CODE (args) == STRING_CST)
36292 {
36293 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36294 char *q;
36295
36296 while ((q = strtok (p, ",")) != NULL)
36297 {
36298 p = NULL;
36299 fprintf (stderr, "%s\"%s\"", prefix, q);
36300 prefix = ", ";
36301 }
36302 }
36303
36304 else if (TREE_CODE (args) == TREE_LIST)
36305 {
36306 do
36307 {
36308 tree value = TREE_VALUE (args);
36309 if (value)
36310 {
36311 rs6000_debug_target_options (value, prefix);
36312 prefix = ", ";
36313 }
36314 args = TREE_CHAIN (args);
36315 }
36316 while (args != NULL_TREE);
36317 }
36318
36319 else
36320 gcc_unreachable ();
36321
36322 return;
36323 }
36324
36325 \f
36326 /* Hook to validate attribute((target("..."))). */
36327
36328 static bool
36329 rs6000_valid_attribute_p (tree fndecl,
36330 tree ARG_UNUSED (name),
36331 tree args,
36332 int flags)
36333 {
36334 struct cl_target_option cur_target;
36335 bool ret;
36336 tree old_optimize;
36337 tree new_target, new_optimize;
36338 tree func_optimize;
36339
36340 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36341
36342 if (TARGET_DEBUG_TARGET)
36343 {
36344 tree tname = DECL_NAME (fndecl);
36345 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36346 if (tname)
36347 fprintf (stderr, "function: %.*s\n",
36348 (int) IDENTIFIER_LENGTH (tname),
36349 IDENTIFIER_POINTER (tname));
36350 else
36351 fprintf (stderr, "function: unknown\n");
36352
36353 fprintf (stderr, "args:");
36354 rs6000_debug_target_options (args, " ");
36355 fprintf (stderr, "\n");
36356
36357 if (flags)
36358 fprintf (stderr, "flags: 0x%x\n", flags);
36359
36360 fprintf (stderr, "--------------------\n");
36361 }
36362
36363 /* attribute((target("default"))) does nothing, beyond
36364 affecting multi-versioning. */
36365 if (TREE_VALUE (args)
36366 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36367 && TREE_CHAIN (args) == NULL_TREE
36368 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36369 return true;
36370
36371 old_optimize = build_optimization_node (&global_options);
36372 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36373
36374 /* If the function changed the optimization levels as well as setting target
36375 options, start with the optimizations specified. */
36376 if (func_optimize && func_optimize != old_optimize)
36377 cl_optimization_restore (&global_options,
36378 TREE_OPTIMIZATION (func_optimize));
36379
36380 /* The target attributes may also change some optimization flags, so update
36381 the optimization options if necessary. */
36382 cl_target_option_save (&cur_target, &global_options);
36383 rs6000_cpu_index = rs6000_tune_index = -1;
36384 ret = rs6000_inner_target_options (args, true);
36385
36386 /* Set up any additional state. */
36387 if (ret)
36388 {
36389 ret = rs6000_option_override_internal (false);
36390 new_target = build_target_option_node (&global_options);
36391 }
36392 else
36393 new_target = NULL;
36394
36395 new_optimize = build_optimization_node (&global_options);
36396
36397 if (!new_target)
36398 ret = false;
36399
36400 else if (fndecl)
36401 {
36402 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36403
36404 if (old_optimize != new_optimize)
36405 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36406 }
36407
36408 cl_target_option_restore (&global_options, &cur_target);
36409
36410 if (old_optimize != new_optimize)
36411 cl_optimization_restore (&global_options,
36412 TREE_OPTIMIZATION (old_optimize));
36413
36414 return ret;
36415 }
36416
36417 \f
36418 /* Hook to validate the current #pragma GCC target and set the state, and
36419 update the macros based on what was changed. If ARGS is NULL, then
36420 POP_TARGET is used to reset the options. */
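/* Typical use from user code (illustrative):

     #pragma GCC push_options
     #pragma GCC target ("cpu=power9,vsx")
     ... code compiled with the power9/VSX options ...
     #pragma GCC pop_options

   The pop is handled by calling this function with ARGS == NULL and
   POP_TARGET set to the previously saved option node.  */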
36421
36422 bool
36423 rs6000_pragma_target_parse (tree args, tree pop_target)
36424 {
36425 tree prev_tree = build_target_option_node (&global_options);
36426 tree cur_tree;
36427 struct cl_target_option *prev_opt, *cur_opt;
36428 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36429 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36430
36431 if (TARGET_DEBUG_TARGET)
36432 {
36433 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36434 fprintf (stderr, "args:");
36435 rs6000_debug_target_options (args, " ");
36436 fprintf (stderr, "\n");
36437
36438 if (pop_target)
36439 {
36440 fprintf (stderr, "pop_target:\n");
36441 debug_tree (pop_target);
36442 }
36443 else
36444 fprintf (stderr, "pop_target: <NULL>\n");
36445
36446 fprintf (stderr, "--------------------\n");
36447 }
36448
36449 if (! args)
36450 {
36451 cur_tree = ((pop_target)
36452 ? pop_target
36453 : target_option_default_node);
36454 cl_target_option_restore (&global_options,
36455 TREE_TARGET_OPTION (cur_tree));
36456 }
36457 else
36458 {
36459 rs6000_cpu_index = rs6000_tune_index = -1;
36460 if (!rs6000_inner_target_options (args, false)
36461 || !rs6000_option_override_internal (false)
36462 || (cur_tree = build_target_option_node (&global_options))
36463 == NULL_TREE)
36464 {
36465 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36466 fprintf (stderr, "invalid pragma\n");
36467
36468 return false;
36469 }
36470 }
36471
36472 target_option_current_node = cur_tree;
36473 rs6000_activate_target_options (target_option_current_node);
36474
36475 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36476 change the macros that are defined. */
36477 if (rs6000_target_modify_macros_ptr)
36478 {
36479 prev_opt = TREE_TARGET_OPTION (prev_tree);
36480 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36481 prev_flags = prev_opt->x_rs6000_isa_flags;
36482
36483 cur_opt = TREE_TARGET_OPTION (cur_tree);
36484 cur_flags = cur_opt->x_rs6000_isa_flags;
36485 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36486
36487 diff_bumask = (prev_bumask ^ cur_bumask);
36488 diff_flags = (prev_flags ^ cur_flags);
36489
36490 if ((diff_flags != 0) || (diff_bumask != 0))
36491 {
36492 /* Delete old macros. */
36493 rs6000_target_modify_macros_ptr (false,
36494 prev_flags & diff_flags,
36495 prev_bumask & diff_bumask);
36496
36497 /* Define new macros. */
36498 rs6000_target_modify_macros_ptr (true,
36499 cur_flags & diff_flags,
36500 cur_bumask & diff_bumask);
36501 }
36502 }
36503
36504 return true;
36505 }
36506
36507 \f
36508 /* Remember the last target of rs6000_set_current_function. */
36509 static GTY(()) tree rs6000_previous_fndecl;
36510
36511 /* Restore target's globals from NEW_TREE and invalidate the
36512 rs6000_previous_fndecl cache. */
36513
36514 void
36515 rs6000_activate_target_options (tree new_tree)
36516 {
36517 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36518 if (TREE_TARGET_GLOBALS (new_tree))
36519 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36520 else if (new_tree == target_option_default_node)
36521 restore_target_globals (&default_target_globals);
36522 else
36523 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36524 rs6000_previous_fndecl = NULL_TREE;
36525 }
36526
36527 /* Establish appropriate back-end context for processing the function
36528 FNDECL. The argument might be NULL to indicate processing at top
36529 level, outside of any function scope. */
36530 static void
36531 rs6000_set_current_function (tree fndecl)
36532 {
36533 if (TARGET_DEBUG_TARGET)
36534 {
36535 fprintf (stderr, "\n==================== rs6000_set_current_function");
36536
36537 if (fndecl)
36538 fprintf (stderr, ", fndecl %s (%p)",
36539 (DECL_NAME (fndecl)
36540 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36541 : "<unknown>"), (void *)fndecl);
36542
36543 if (rs6000_previous_fndecl)
36544 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36545
36546 fprintf (stderr, "\n");
36547 }
36548
36549 /* Only change the context if the function changes. This hook is called
36550 several times in the course of compiling a function, and we don't want to
36551 slow things down too much or call target_reinit when it isn't safe. */
36552 if (fndecl == rs6000_previous_fndecl)
36553 return;
36554
36555 tree old_tree;
36556 if (rs6000_previous_fndecl == NULL_TREE)
36557 old_tree = target_option_current_node;
36558 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36559 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36560 else
36561 old_tree = target_option_default_node;
36562
36563 tree new_tree;
36564 if (fndecl == NULL_TREE)
36565 {
36566 if (old_tree != target_option_current_node)
36567 new_tree = target_option_current_node;
36568 else
36569 new_tree = NULL_TREE;
36570 }
36571 else
36572 {
36573 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36574 if (new_tree == NULL_TREE)
36575 new_tree = target_option_default_node;
36576 }
36577
36578 if (TARGET_DEBUG_TARGET)
36579 {
36580 if (new_tree)
36581 {
36582 fprintf (stderr, "\nnew fndecl target specific options:\n");
36583 debug_tree (new_tree);
36584 }
36585
36586 if (old_tree)
36587 {
36588 fprintf (stderr, "\nold fndecl target specific options:\n");
36589 debug_tree (old_tree);
36590 }
36591
36592 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36593 fprintf (stderr, "--------------------\n");
36594 }
36595
36596 if (new_tree && old_tree != new_tree)
36597 rs6000_activate_target_options (new_tree);
36598
36599 if (fndecl)
36600 rs6000_previous_fndecl = fndecl;
36601 }
36602
36603 \f
36604 /* Save the current options. */
36605
36606 static void
36607 rs6000_function_specific_save (struct cl_target_option *ptr,
36608 struct gcc_options *opts)
36609 {
36610 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36611 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36612 }
36613
36614 /* Restore the current options. */
36615
36616 static void
36617 rs6000_function_specific_restore (struct gcc_options *opts,
36618 struct cl_target_option *ptr)
36620 {
36621 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36622 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36623 (void) rs6000_option_override_internal (false);
36624 }
36625
36626 /* Print the current options. */
36627
36628 static void
36629 rs6000_function_specific_print (FILE *file, int indent,
36630 struct cl_target_option *ptr)
36631 {
36632 rs6000_print_isa_options (file, indent, "Isa options set",
36633 ptr->x_rs6000_isa_flags);
36634
36635 rs6000_print_isa_options (file, indent, "Isa options explicit",
36636 ptr->x_rs6000_isa_flags_explicit);
36637 }
36638
36639 /* Helper function to print the current isa or misc options on a line. */
36640
36641 static void
36642 rs6000_print_options_internal (FILE *file,
36643 int indent,
36644 const char *string,
36645 HOST_WIDE_INT flags,
36646 const char *prefix,
36647 const struct rs6000_opt_mask *opts,
36648 size_t num_elements)
36649 {
36650 size_t i;
36651 size_t start_column = 0;
36652 size_t cur_column;
36653 size_t max_column = 120;
36654 size_t prefix_len = strlen (prefix);
36655 size_t comma_len = 0;
36656 const char *comma = "";
36657
36658 if (indent)
36659 start_column += fprintf (file, "%*s", indent, "");
36660
36661 if (!flags)
36662 {
36663 fprintf (file, DEBUG_FMT_S, string, "<none>");
36664 return;
36665 }
36666
36667 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36668
36669 /* Print the various mask options. */
36670 cur_column = start_column;
36671 for (i = 0; i < num_elements; i++)
36672 {
36673 bool invert = opts[i].invert;
36674 const char *name = opts[i].name;
36675 const char *no_str = "";
36676 HOST_WIDE_INT mask = opts[i].mask;
36677 size_t len = comma_len + prefix_len + strlen (name);
36678
36679 if (!invert)
36680 {
36681 if ((flags & mask) == 0)
36682 {
36683 no_str = "no-";
36684 len += sizeof ("no-") - 1;
36685 }
36686
36687 flags &= ~mask;
36688 }
36689
36690 else
36691 {
36692 if ((flags & mask) != 0)
36693 {
36694 no_str = "no-";
36695 len += sizeof ("no-") - 1;
36696 }
36697
36698 flags |= mask;
36699 }
36700
36701 cur_column += len;
36702 if (cur_column > max_column)
36703 {
36704 fprintf (file, ", \\\n%*s", (int)start_column, "");
36705 cur_column = start_column + len;
36706 comma = "";
36707 }
36708
36709 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36710 comma = ", ";
36711 comma_len = sizeof (", ") - 1;
36712 }
36713
36714 fputs ("\n", file);
36715 }
36716
36717 /* Helper function to print the current isa options on a line. */
36718
36719 static void
36720 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36721 HOST_WIDE_INT flags)
36722 {
36723 rs6000_print_options_internal (file, indent, string, flags, "-m",
36724 &rs6000_opt_masks[0],
36725 ARRAY_SIZE (rs6000_opt_masks));
36726 }
36727
36728 static void
36729 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36730 HOST_WIDE_INT flags)
36731 {
36732 rs6000_print_options_internal (file, indent, string, flags, "",
36733 &rs6000_builtin_mask_names[0],
36734 ARRAY_SIZE (rs6000_builtin_mask_names));
36735 }
36736
36737 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
36738 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36739 -mupper-regs-df, etc.).
36740
36741 If the user used -mno-power8-vector, we need to turn off all of the implicit
36742 ISA 2.07 and 3.0 options that relate to the vector unit.
36743
36744 If the user used -mno-power9-vector, we need to turn off all of the implicit
36745 ISA 3.0 options that relate to the vector unit.
36746
36747 This function does not handle explicit options such as the user specifying
36748 -mdirect-move. These are handled in rs6000_option_override_internal, and
36749 the appropriate error is given if needed.
36750
36751 We return a mask of all of the implicit options that should not be enabled
36752 by default. */
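/* For example (illustrative), "-mcpu=power9 -mno-vsx" must also turn off
   the implicit -mpower8-vector and -mpower9-vector.  An error such as
   "-mno-vsx turns off -mpower8-vector" is only issued when the dependent
   flag was itself set explicitly on the command line.  */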
36753
36754 static HOST_WIDE_INT
36755 rs6000_disable_incompatible_switches (void)
36756 {
36757 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
36758 size_t i, j;
36759
36760 static const struct {
36761 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
36762 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
36763 const char *const name; /* name of the switch. */
36764 } flags[] = {
36765 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
36766 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
36767 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
36768 };
36769
36770 for (i = 0; i < ARRAY_SIZE (flags); i++)
36771 {
36772 HOST_WIDE_INT no_flag = flags[i].no_flag;
36773
36774 if ((rs6000_isa_flags & no_flag) == 0
36775 && (rs6000_isa_flags_explicit & no_flag) != 0)
36776 {
36777 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
36778 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
36779 & rs6000_isa_flags
36780 & dep_flags);
36781
36782 if (set_flags)
36783 {
36784 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
36785 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
36786 {
36787 set_flags &= ~rs6000_opt_masks[j].mask;
36788 error ("%<-mno-%s%> turns off %<-m%s%>",
36789 flags[i].name,
36790 rs6000_opt_masks[j].name);
36791 }
36792
36793 gcc_assert (!set_flags);
36794 }
36795
36796 rs6000_isa_flags &= ~dep_flags;
36797 ignore_masks |= no_flag | dep_flags;
36798 }
36799 }
36800
36801 return ignore_masks;
36802 }
36803
36804 \f
36805 /* Helper function for printing the function name when debugging. */
36806
36807 static const char *
36808 get_decl_name (tree fn)
36809 {
36810 tree name;
36811
36812 if (!fn)
36813 return "<null>";
36814
36815 name = DECL_NAME (fn);
36816 if (!name)
36817 return "<no-name>";
36818
36819 return IDENTIFIER_POINTER (name);
36820 }
36821
36822 /* Return the clone id of the target we are compiling code for in a target
36823 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
36824 the priority list for the target clones (ordered from lowest to
36825 highest). */
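/* Illustrative use of the clone machinery from user code:

     __attribute__((target_clones("cpu=power9,default")))
     long mod_func (long a, long b) { return a % b; }

   Each clone gets the priority of the highest ISA it requires, with
   "default" mapping to CLONE_DEFAULT (priority 0).  */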
36826
36827 static int
36828 rs6000_clone_priority (tree fndecl)
36829 {
36830 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36831 HOST_WIDE_INT isa_masks;
36832 int ret = CLONE_DEFAULT;
36833 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
36834 const char *attrs_str = NULL;
36835
36836 attrs = TREE_VALUE (TREE_VALUE (attrs));
36837 attrs_str = TREE_STRING_POINTER (attrs);
36838
36839 /* Return priority zero for default function. Return the ISA needed for the
36840 function if it is not the default. */
36841 if (strcmp (attrs_str, "default") != 0)
36842 {
36843 if (fn_opts == NULL_TREE)
36844 fn_opts = target_option_default_node;
36845
36846 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
36847 isa_masks = rs6000_isa_flags;
36848 else
36849 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
36850
36851 for (ret = CLONE_MAX - 1; ret != 0; ret--)
36852 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
36853 break;
36854 }
36855
36856 if (TARGET_DEBUG_TARGET)
36857 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
36858 get_decl_name (fndecl), ret);
36859
36860 return ret;
36861 }
36862
36863 /* This compares the priority of target features in function DECL1 and DECL2.
36864 It returns positive value if DECL1 is higher priority, negative value if
36865 DECL2 is higher priority and 0 if they are the same. Note, priorities are
36866 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
36867
36868 static int
36869 rs6000_compare_version_priority (tree decl1, tree decl2)
36870 {
36871 int priority1 = rs6000_clone_priority (decl1);
36872 int priority2 = rs6000_clone_priority (decl2);
36873 int ret = priority1 - priority2;
36874
36875 if (TARGET_DEBUG_TARGET)
36876 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
36877 get_decl_name (decl1), get_decl_name (decl2), ret);
36878
36879 return ret;
36880 }
36881
36882 /* Make a dispatcher declaration for the multi-versioned function DECL.
36883 Calls to DECL function will be replaced with calls to the dispatcher
36884 by the front-end. Returns the decl of the dispatcher function. */
36885
36886 static tree
36887 rs6000_get_function_versions_dispatcher (void *decl)
36888 {
36889 tree fn = (tree) decl;
36890 struct cgraph_node *node = NULL;
36891 struct cgraph_node *default_node = NULL;
36892 struct cgraph_function_version_info *node_v = NULL;
36893 struct cgraph_function_version_info *first_v = NULL;
36894
36895 tree dispatch_decl = NULL;
36896
36897 struct cgraph_function_version_info *default_version_info = NULL;
36898 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
36899
36900 if (TARGET_DEBUG_TARGET)
36901 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
36902 get_decl_name (fn));
36903
36904 node = cgraph_node::get (fn);
36905 gcc_assert (node != NULL);
36906
36907 node_v = node->function_version ();
36908 gcc_assert (node_v != NULL);
36909
36910 if (node_v->dispatcher_resolver != NULL)
36911 return node_v->dispatcher_resolver;
36912
36913 /* Find the default version and make it the first node. */
36914 first_v = node_v;
36915 /* Go to the beginning of the chain. */
36916 while (first_v->prev != NULL)
36917 first_v = first_v->prev;
36918
36919 default_version_info = first_v;
36920 while (default_version_info != NULL)
36921 {
36922 const tree decl2 = default_version_info->this_node->decl;
36923 if (is_function_default_version (decl2))
36924 break;
36925 default_version_info = default_version_info->next;
36926 }
36927
36928 /* If there is no default node, just return NULL. */
36929 if (default_version_info == NULL)
36930 return NULL;
36931
36932 /* Make default info the first node. */
36933 if (first_v != default_version_info)
36934 {
36935 default_version_info->prev->next = default_version_info->next;
36936 if (default_version_info->next)
36937 default_version_info->next->prev = default_version_info->prev;
36938 first_v->prev = default_version_info;
36939 default_version_info->next = first_v;
36940 default_version_info->prev = NULL;
36941 }
36942
36943 default_node = default_version_info->this_node;
36944
36945 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
36946 error_at (DECL_SOURCE_LOCATION (default_node->decl),
36947 "target_clones attribute needs GLIBC (2.23 and newer) that "
36948 "exports hardware capability bits");
36949 #else
36950
36951 if (targetm.has_ifunc_p ())
36952 {
36953 struct cgraph_function_version_info *it_v = NULL;
36954 struct cgraph_node *dispatcher_node = NULL;
36955 struct cgraph_function_version_info *dispatcher_version_info = NULL;
36956
36957 /* Right now, the dispatching is done via ifunc. */
36958 dispatch_decl = make_dispatcher_decl (default_node->decl);
36959
36960 dispatcher_node = cgraph_node::get_create (dispatch_decl);
36961 gcc_assert (dispatcher_node != NULL);
36962 dispatcher_node->dispatcher_function = 1;
36963 dispatcher_version_info
36964 = dispatcher_node->insert_new_function_version ();
36965 dispatcher_version_info->next = default_version_info;
36966 dispatcher_node->definition = 1;
36967
36968 /* Set the dispatcher for all the versions. */
36969 it_v = default_version_info;
36970 while (it_v != NULL)
36971 {
36972 it_v->dispatcher_resolver = dispatch_decl;
36973 it_v = it_v->next;
36974 }
36975 }
36976 else
36977 {
36978 error_at (DECL_SOURCE_LOCATION (default_node->decl),
36979 "multiversioning needs ifunc which is not supported "
36980 "on this target");
36981 }
36982 #endif
36983
36984 return dispatch_decl;
36985 }
36986
36987 /* Make the resolver function decl to dispatch the versions of a multi-
36988 versioned function, DEFAULT_DECL. Create an empty basic block in the
36989 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
36990 function. */
36991
36992 static tree
36993 make_resolver_func (const tree default_decl,
36994 const tree dispatch_decl,
36995 basic_block *empty_bb)
36996 {
36997 /* Make the resolver function static. The resolver function returns
36998 void *. */
36999 tree decl_name = clone_function_name_numbered (default_decl, "resolver");
37000 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37001 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37002 tree decl = build_fn_decl (resolver_name, type);
37003 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37004
37005 DECL_NAME (decl) = decl_name;
37006 TREE_USED (decl) = 1;
37007 DECL_ARTIFICIAL (decl) = 1;
37008 DECL_IGNORED_P (decl) = 0;
37009 TREE_PUBLIC (decl) = 0;
37010 DECL_UNINLINABLE (decl) = 1;
37011
37012 /* Resolver is not external, body is generated. */
37013 DECL_EXTERNAL (decl) = 0;
37014 DECL_EXTERNAL (dispatch_decl) = 0;
37015
37016 DECL_CONTEXT (decl) = NULL_TREE;
37017 DECL_INITIAL (decl) = make_node (BLOCK);
37018 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37019
37020 /* Build result decl and add to function_decl. */
37021 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37022 DECL_ARTIFICIAL (t) = 1;
37023 DECL_IGNORED_P (t) = 1;
37024 DECL_RESULT (decl) = t;
37025
37026 gimplify_function_tree (decl);
37027 push_cfun (DECL_STRUCT_FUNCTION (decl));
37028 *empty_bb = init_lowered_empty_function (decl, false,
37029 profile_count::uninitialized ());
37030
37031 cgraph_node::add_new_function (decl, true);
37032 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37033
37034 pop_cfun ();
37035
37036 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37037 DECL_ATTRIBUTES (dispatch_decl)
37038 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37039
37040 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37041
37042 return decl;
37043 }
37044
37045 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37046 return a pointer to VERSION_DECL if we are running on a machine that
37047 supports the index CLONE_ISA hardware architecture bits. This function will
37048 be called during version dispatch to decide which function version to
37049 execute. It returns the basic block at the end, to which more conditions
37050 can be added. */
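/* Schematically, each non-default clone contributes (illustrative):

     cond = __builtin_cpu_supports ("<isa name>");
     if (cond != 0)
       return <address of version_decl>;

   falling through to the test for the next clone, while the default
   clone returns its address unconditionally.  */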
37051
37052 static basic_block
37053 add_condition_to_bb (tree function_decl, tree version_decl,
37054 int clone_isa, basic_block new_bb)
37055 {
37056 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37057
37058 gcc_assert (new_bb != NULL);
37059 gimple_seq gseq = bb_seq (new_bb);
37060
37062 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37063 build_fold_addr_expr (version_decl));
37064 tree result_var = create_tmp_var (ptr_type_node);
37065 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37066 gimple *return_stmt = gimple_build_return (result_var);
37067
37068 if (clone_isa == CLONE_DEFAULT)
37069 {
37070 gimple_seq_add_stmt (&gseq, convert_stmt);
37071 gimple_seq_add_stmt (&gseq, return_stmt);
37072 set_bb_seq (new_bb, gseq);
37073 gimple_set_bb (convert_stmt, new_bb);
37074 gimple_set_bb (return_stmt, new_bb);
37075 pop_cfun ();
37076 return new_bb;
37077 }
37078
37079 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37080 tree cond_var = create_tmp_var (bool_int_type_node);
37081 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37082 const char *arg_str = rs6000_clone_map[clone_isa].name;
37083 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37084 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37085 gimple_call_set_lhs (call_cond_stmt, cond_var);
37086
37087 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37088 gimple_set_bb (call_cond_stmt, new_bb);
37089 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37090
37091 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37092 NULL_TREE, NULL_TREE);
37093 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37094 gimple_set_bb (if_else_stmt, new_bb);
37095 gimple_seq_add_stmt (&gseq, if_else_stmt);
37096
37097 gimple_seq_add_stmt (&gseq, convert_stmt);
37098 gimple_seq_add_stmt (&gseq, return_stmt);
37099 set_bb_seq (new_bb, gseq);
37100
37101 basic_block bb1 = new_bb;
37102 edge e12 = split_block (bb1, if_else_stmt);
37103 basic_block bb2 = e12->dest;
37104 e12->flags &= ~EDGE_FALLTHRU;
37105 e12->flags |= EDGE_TRUE_VALUE;
37106
37107 edge e23 = split_block (bb2, return_stmt);
37108 gimple_set_bb (convert_stmt, bb2);
37109 gimple_set_bb (return_stmt, bb2);
37110
37111 basic_block bb3 = e23->dest;
37112 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37113
37114 remove_edge (e23);
37115 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37116
37117 pop_cfun ();
37118 return bb3;
37119 }
37120
37121 /* This function generates the dispatch function for multi-versioned functions.
37122 DISPATCH_DECL is the function which will contain the dispatch logic.
37123 FNDECLS is a vector of the function choices for dispatch.
37124 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37125 code is generated. */
37126
37127 static int
37128 dispatch_function_versions (tree dispatch_decl,
37129 void *fndecls_p,
37130 basic_block *empty_bb)
37131 {
37132 int ix;
37133 tree ele;
37134 vec<tree> *fndecls;
37135 tree clones[CLONE_MAX];
37136
37137 if (TARGET_DEBUG_TARGET)
37138 fputs ("dispatch_function_versions, top\n", stderr);
37139
37140 gcc_assert (dispatch_decl != NULL
37141 && fndecls_p != NULL
37142 && empty_bb != NULL);
37143
37144 /* fndecls_p is actually a vector. */
37145 fndecls = static_cast<vec<tree> *> (fndecls_p);
37146
37147 /* At least one more version other than the default. */
37148 gcc_assert (fndecls->length () >= 2);
37149
37150 /* The first version in the vector is the default decl. */
37151 memset ((void *) clones, '\0', sizeof (clones));
37152 clones[CLONE_DEFAULT] = (*fndecls)[0];
37153
37154 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37155 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37156 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37157 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37158 to insert the code here to do the call. */
37159
37160 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37161 {
37162 int priority = rs6000_clone_priority (ele);
37163 if (!clones[priority])
37164 clones[priority] = ele;
37165 }
37166
37167 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37168 if (clones[ix])
37169 {
37170 if (TARGET_DEBUG_TARGET)
37171 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37172 ix, get_decl_name (clones[ix]));
37173
37174 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37175 *empty_bb);
37176 }
37177
37178 return 0;
37179 }
37180
37181 /* Generate the dispatching code body to dispatch multi-versioned function
37182 DECL. The target hook is called to process the "target" attributes and
37183 provide the code to dispatch the right function at run-time. NODE points
37184 to the dispatcher decl whose body will be created. */
37185
37186 static tree
37187 rs6000_generate_version_dispatcher_body (void *node_p)
37188 {
37189 tree resolver;
37190 basic_block empty_bb;
37191 struct cgraph_node *node = (cgraph_node *) node_p;
37192 struct cgraph_function_version_info *ninfo = node->function_version ();
37193
37194 if (ninfo->dispatcher_resolver)
37195 return ninfo->dispatcher_resolver;
37196
37197 /* node is going to be an alias, so remove the finalized bit. */
37198 node->definition = false;
37199
37200 /* The first version in the chain corresponds to the default version. */
37201 ninfo->dispatcher_resolver = resolver
37202 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37203
37204 if (TARGET_DEBUG_TARGET)
37205 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37206 get_decl_name (resolver));
37207
37208 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37209 auto_vec<tree, 2> fn_ver_vec;
37210
37211 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37212 vinfo;
37213 vinfo = vinfo->next)
37214 {
37215 struct cgraph_node *version = vinfo->this_node;
37216 /* Check for virtual functions here again, as by this time it should
37217 have been determined if this function needs a vtable index or
37218 not. This happens for methods in derived classes that override
37219 virtual methods in base classes but are not explicitly marked as
37220 virtual. */
37221 if (DECL_VINDEX (version->decl))
37222 sorry ("Virtual function multiversioning not supported");
37223
37224 fn_ver_vec.safe_push (version->decl);
37225 }
37226
37227 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37228 cgraph_edge::rebuild_edges ();
37229 pop_cfun ();
37230 return resolver;
37231 }
37232
37233 \f
37234 /* Hook to determine if one function can safely inline another. */
37235
37236 static bool
37237 rs6000_can_inline_p (tree caller, tree callee)
37238 {
37239 bool ret = false;
37240 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37241 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37242
37243 /* If callee has no option attributes, then it is ok to inline. */
37244 if (!callee_tree)
37245 ret = true;
37246
37247 /* If caller has no option attributes, but callee does then it is not ok to
37248 inline. */
37249 else if (!caller_tree)
37250 ret = false;
37251
37252 else
37253 {
37254 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37255 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37256
37257 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37258 can inline an altivec function but a non-vsx function can't inline a
37259 vsx function. */
37260 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37261 == callee_opts->x_rs6000_isa_flags)
37262 ret = true;
37263 }
37264
37265 if (TARGET_DEBUG_TARGET)
37266 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37267 get_decl_name (caller), get_decl_name (callee),
37268 (ret ? "can" : "cannot"));
37269
37270 return ret;
37271 }
37272 \f
37273 /* Allocate a stack temp and fixup the address so it meets the particular
37274 memory requirements (either offsettable or REG+REG addressing). */
37275
37276 rtx
37277 rs6000_allocate_stack_temp (machine_mode mode,
37278 bool offsettable_p,
37279 bool reg_reg_p)
37280 {
37281 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37282 rtx addr = XEXP (stack, 0);
37283 int strict_p = reload_completed;
37284
37285 if (!legitimate_indirect_address_p (addr, strict_p))
37286 {
37287 if (offsettable_p
37288 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37289 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37290
37291 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37292 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37293 }
37294
37295 return stack;
37296 }
37297
37298 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37299 convert to such a form to deal with memory reference instructions
37300 like STFIWX and LDBRX that only take reg+reg addressing. */
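/* For example, stfiwx and lwbrx/ldbrx are X-form only, so a displacement
   address such as (mem (plus base const)) is rewritten here as
   (mem (reg tmp)) with the whole address forced into a new pseudo
   (illustrative description of the transformation below).  */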
37301
37302 rtx
37303 rs6000_force_indexed_or_indirect_mem (rtx x)
37304 {
37305 machine_mode mode = GET_MODE (x);
37306
37307 gcc_assert (MEM_P (x));
37308 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37309 {
37310 rtx addr = XEXP (x, 0);
37311 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37312 {
37313 rtx reg = XEXP (addr, 0);
37314 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37315 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37316 gcc_assert (REG_P (reg));
37317 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37318 addr = reg;
37319 }
37320 else if (GET_CODE (addr) == PRE_MODIFY)
37321 {
37322 rtx reg = XEXP (addr, 0);
37323 rtx expr = XEXP (addr, 1);
37324 gcc_assert (REG_P (reg));
37325 gcc_assert (GET_CODE (expr) == PLUS);
37326 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37327 addr = reg;
37328 }
37329
37330 x = replace_equiv_address (x, force_reg (Pmode, addr));
37331 }
37332
37333 return x;
37334 }
37335
37336 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37337
37338 On the RS/6000, all integer constants are acceptable, most won't be valid
37339 for particular insns, though. Only easy FP constants are acceptable. */
37340
37341 static bool
37342 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37343 {
37344 if (TARGET_ELF && tls_referenced_p (x))
37345 return false;
37346
37347 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37348 || GET_MODE (x) == VOIDmode
37349 || (TARGET_POWERPC64 && mode == DImode)
37350 || easy_fp_constant (x, mode)
37351 || easy_vector_constant (x, mode));
37352 }
37353
37354 \f
37355 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37356
37357 static bool
37358 chain_already_loaded (rtx_insn *last)
37359 {
37360 for (; last != NULL; last = PREV_INSN (last))
37361 {
37362 if (NONJUMP_INSN_P (last))
37363 {
37364 rtx patt = PATTERN (last);
37365
37366 if (GET_CODE (patt) == SET)
37367 {
37368 rtx lhs = XEXP (patt, 0);
37369
37370 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37371 return true;
37372 }
37373 }
37374 }
37375 return false;
37376 }
37377
37378 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
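/* For an indirect call under ELFv2 the expansion corresponds roughly to
   the following assembly (illustrative; assumes the TOC save was not
   hoisted into the prologue):

     std   r2,24(r1)       # save TOC in its reserved stack slot
     mr    r12,<funcptr>   # the ABI wants the target address in r12
     mtctr r12
     bctrl
     ld    r2,24(r1)       # restore TOC after the call  */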
37379
37380 void
37381 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37382 {
37383 const bool direct_call_p
37384 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37385 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37386 rtx toc_load = NULL_RTX;
37387 rtx toc_restore = NULL_RTX;
37388 rtx func_addr;
37389 rtx abi_reg = NULL_RTX;
37390 rtx call[4];
37391 int n_call;
37392 rtx insn;
37393
37394 /* Handle longcall attributes. */
37395 if (INTVAL (cookie) & CALL_LONG)
37396 func_desc = rs6000_longcall_ref (func_desc);
37397
37398 /* Handle indirect calls. */
37399 if (GET_CODE (func_desc) != SYMBOL_REF
37400 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
37401 {
37402 /* Save the TOC into its reserved slot before the call,
37403 and prepare to restore it after the call. */
37404 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37405 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37406 rtx stack_toc_mem = gen_frame_mem (Pmode,
37407 gen_rtx_PLUS (Pmode, stack_ptr,
37408 stack_toc_offset));
37409 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37410 gen_rtvec (1, stack_toc_offset),
37411 UNSPEC_TOCSLOT);
37412 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37413
37414 /* Can we optimize saving the TOC in the prologue or
37415 do we need to do it at every call? */
37416 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37417 cfun->machine->save_toc_in_prologue = true;
37418 else
37419 {
37420 MEM_VOLATILE_P (stack_toc_mem) = 1;
37421 emit_move_insn (stack_toc_mem, toc_reg);
37422 }
37423
37424 if (DEFAULT_ABI == ABI_ELFv2)
37425 {
37426 /* A function pointer in the ELFv2 ABI is just a plain address, but
37427 the ABI requires it to be loaded into r12 before the call. */
37428 func_addr = gen_rtx_REG (Pmode, 12);
37429 emit_move_insn (func_addr, func_desc);
37430 abi_reg = func_addr;
37431 }
37432 else
37433 {
37434 /* A function pointer under AIX is a pointer to a data area whose
37435 first word contains the actual address of the function, whose
37436 second word contains a pointer to its TOC, and whose third word
37437 contains a value to place in the static chain register (r11).
37438 Note that if we load the static chain, our "trampoline" need
37439 not have any executable code. */
37440
37441 /* Load up address of the actual function. */
37442 func_desc = force_reg (Pmode, func_desc);
37443 func_addr = gen_reg_rtx (Pmode);
37444 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
37445
37446 /* Prepare to load the TOC of the called function. Note that the
37447 TOC load must happen immediately before the actual call so
37448 that unwinding the TOC registers works correctly. See the
37449 comment in frob_update_context. */
37450 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37451 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37452 gen_rtx_PLUS (Pmode, func_desc,
37453 func_toc_offset));
37454 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37455
37456 /* If we have a static chain, load it up. But, if the call was
37457 originally direct, the 3rd word has not been written since no
37458 trampoline has been built, so we ought not to load it, lest we
37459 override a static chain value. */
37460 if (!direct_call_p
37461 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37462 && !chain_already_loaded (get_current_sequence ()->next->last))
37463 {
37464 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37465 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37466 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37467 gen_rtx_PLUS (Pmode, func_desc,
37468 func_sc_offset));
37469 emit_move_insn (sc_reg, func_sc_mem);
37470 abi_reg = sc_reg;
37471 }
37472 }
37473 }
37474 else
37475 {
37476 /* Direct calls use the TOC: for local calls, the callee will
37477 assume the TOC register is set; for non-local calls, the
37478 PLT stub needs the TOC register. */
37479 abi_reg = toc_reg;
37480 func_addr = func_desc;
37481 }
37482
37483 /* Create the call. */
37484 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
37485 if (value != NULL_RTX)
37486 call[0] = gen_rtx_SET (value, call[0]);
37487 n_call = 1;
37488
37489 if (toc_load)
37490 call[n_call++] = toc_load;
37491 if (toc_restore)
37492 call[n_call++] = toc_restore;
37493
37494 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37495
37496 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37497 insn = emit_call_insn (insn);
37498
37499 /* Mention all registers defined by the ABI to hold information
37500 as uses in CALL_INSN_FUNCTION_USAGE. */
37501 if (abi_reg)
37502 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37503 }
37504
37505 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37506
37507 void
37508 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37509 {
37510 rtx call[2];
37511 rtx insn;
37512
37513 gcc_assert (INTVAL (cookie) == 0);
37514
37515 /* Create the call. */
37516 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
37517 if (value != NULL_RTX)
37518 call[0] = gen_rtx_SET (value, call[0]);
37519
37520 call[1] = simple_return_rtx;
37521
37522 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37523 insn = emit_call_insn (insn);
37524
37525 /* Note use of the TOC register. */
37526 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37527 }
37528
37529 /* Return whether we need to always update the saved TOC pointer when we update
37530 the stack pointer. */
37531
37532 static bool
37533 rs6000_save_toc_in_prologue_p (void)
37534 {
37535 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37536 }
37537
37538 #ifdef HAVE_GAS_HIDDEN
37539 # define USE_HIDDEN_LINKONCE 1
37540 #else
37541 # define USE_HIDDEN_LINKONCE 0
37542 #endif
37543
37544 /* Fills in the label name that should be used for a 476 link stack thunk. */
37545
37546 void
37547 get_ppc476_thunk_name (char name[32])
37548 {
37549 gcc_assert (TARGET_LINK_STACK);
37550
37551 if (USE_HIDDEN_LINKONCE)
37552 sprintf (name, "__ppc476.get_thunk");
37553 else
37554 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37555 }
37556
37557 /* This function emits the simple thunk routine that is used to preserve
37558 the link stack on the 476 cpu. */
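/* The thunk body is a single blr.  A caller materializes its PIC base
   with a balanced call (illustrative):

     bl __ppc476.get_thunk
     mflr r30

   so the bl/blr pair keeps the 476 link stack predictor consistent.  */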
37559
37560 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37561 static void
37562 rs6000_code_end (void)
37563 {
37564 char name[32];
37565 tree decl;
37566
37567 if (!TARGET_LINK_STACK)
37568 return;
37569
37570 get_ppc476_thunk_name (name);
37571
37572 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37573 build_function_type_list (void_type_node, NULL_TREE));
37574 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37575 NULL_TREE, void_type_node);
37576 TREE_PUBLIC (decl) = 1;
37577 TREE_STATIC (decl) = 1;
37578
37579 #if RS6000_WEAK
37580 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
37581 {
37582 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37583 targetm.asm_out.unique_section (decl, 0);
37584 switch_to_section (get_named_section (decl, NULL, 0));
37585 DECL_WEAK (decl) = 1;
37586 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37587 targetm.asm_out.globalize_label (asm_out_file, name);
37588 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37589 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37590 }
37591 else
37592 #endif
37593 {
37594 switch_to_section (text_section);
37595 ASM_OUTPUT_LABEL (asm_out_file, name);
37596 }
37597
37598 DECL_INITIAL (decl) = make_node (BLOCK);
37599 current_function_decl = decl;
37600 allocate_struct_function (decl, false);
37601 init_function_start (decl);
37602 first_function_block_is_cold = false;
37603 /* Make sure unwind info is emitted for the thunk if needed. */
37604 final_start_function (emit_barrier (), asm_out_file, 1);
37605
37606 fputs ("\tblr\n", asm_out_file);
37607
37608 final_end_function ();
37609 init_insn_lengths ();
37610 free_after_compilation (cfun);
37611 set_cfun (NULL);
37612 current_function_decl = NULL;
37613 }
37614
37615 /* Add r30 to hard reg set if the prologue sets it up and it is not
37616 pic_offset_table_rtx. */
37617
37618 static void
37619 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37620 {
37621 if (!TARGET_SINGLE_PIC_BASE
37622 && TARGET_TOC
37623 && TARGET_MINIMAL_TOC
37624 && !constant_pool_empty_p ())
37625 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37626 if (cfun->machine->split_stack_argp_used)
37627 add_to_hard_reg_set (&set->set, Pmode, 12);
37628
37629 /* Make sure the hard reg set doesn't include r2, which was possibly added
37630 via PIC_OFFSET_TABLE_REGNUM. */
37631 if (TARGET_TOC)
37632 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
37633 }
37634
37635 \f
37636 /* Helper function for rs6000_split_logical to emit a logical instruction after
37637 splitting the operation to single GPR registers.
37638
37639 DEST is the destination register.
37640 OP1 and OP2 are the input source registers.
37641 CODE is the base operation (AND, IOR, XOR, NOT).
37642 MODE is the machine mode.
37643 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37644 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37645 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37646
37647 static void
37648 rs6000_split_logical_inner (rtx dest,
37649 rtx op1,
37650 rtx op2,
37651 enum rtx_code code,
37652 machine_mode mode,
37653 bool complement_final_p,
37654 bool complement_op1_p,
37655 bool complement_op2_p)
37656 {
37657 rtx bool_rtx;
37658
37659 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37660 if (op2 && GET_CODE (op2) == CONST_INT
37661 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37662 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37663 {
37664 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37665 HOST_WIDE_INT value = INTVAL (op2) & mask;
37666
37667 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37668 if (code == AND)
37669 {
37670 if (value == 0)
37671 {
37672 emit_insn (gen_rtx_SET (dest, const0_rtx));
37673 return;
37674 }
37675
37676 else if (value == mask)
37677 {
37678 if (!rtx_equal_p (dest, op1))
37679 emit_insn (gen_rtx_SET (dest, op1));
37680 return;
37681 }
37682 }
37683
37684 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37685 into separate ORI/ORIS or XORI/XORIS instructions. */
37686 else if (code == IOR || code == XOR)
37687 {
37688 if (value == 0)
37689 {
37690 if (!rtx_equal_p (dest, op1))
37691 emit_insn (gen_rtx_SET (dest, op1));
37692 return;
37693 }
37694 }
37695 }
37696
37697 if (code == AND && mode == SImode
37698 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37699 {
37700 emit_insn (gen_andsi3 (dest, op1, op2));
37701 return;
37702 }
37703
37704 if (complement_op1_p)
37705 op1 = gen_rtx_NOT (mode, op1);
37706
37707 if (complement_op2_p)
37708 op2 = gen_rtx_NOT (mode, op2);
37709
37710 /* For canonical RTL, if only one arm is inverted it is the first. */
37711 if (!complement_op1_p && complement_op2_p)
37712 std::swap (op1, op2);
37713
37714 bool_rtx = ((code == NOT)
37715 ? gen_rtx_NOT (mode, op1)
37716 : gen_rtx_fmt_ee (code, mode, op1, op2));
37717
37718 if (complement_final_p)
37719 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37720
37721 emit_insn (gen_rtx_SET (dest, bool_rtx));
37722 }
37723
37724 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37725 operations are split immediately during RTL generation to allow for more
37726 optimizations of the AND/IOR/XOR.
37727
37728 OPERANDS is an array containing the destination and two input operands.
37729 CODE is the base operation (AND, IOR, XOR, NOT).
37731 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37732 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37733 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
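/* For example (illustrative), on a 32-bit target an IOR whose low-word
   constant 0x12345678 is not a single logical constant is emitted as two
   instructions:

     oris tmp,src,0x1234     # high 16 bits
     ori  dest,tmp,0x5678    # low 16 bits

   while the other word typically reduces to a move or a single op.  */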
37736
37737 static void
37738 rs6000_split_logical_di (rtx operands[3],
37739 enum rtx_code code,
37740 bool complement_final_p,
37741 bool complement_op1_p,
37742 bool complement_op2_p)
37743 {
37744 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37745 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37746 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37747 enum hi_lo { hi = 0, lo = 1 };
37748 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37749 size_t i;
37750
37751 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37752 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37753 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37754 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37755
37756 if (code == NOT)
37757 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37758 else
37759 {
37760 if (GET_CODE (operands[2]) != CONST_INT)
37761 {
37762 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37763 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37764 }
37765 else
37766 {
37767 HOST_WIDE_INT value = INTVAL (operands[2]);
37768 HOST_WIDE_INT value_hi_lo[2];
37769
37770 gcc_assert (!complement_final_p);
37771 gcc_assert (!complement_op1_p);
37772 gcc_assert (!complement_op2_p);
37773
37774 value_hi_lo[hi] = value >> 32;
37775 value_hi_lo[lo] = value & lower_32bits;
37776
37777 for (i = 0; i < 2; i++)
37778 {
37779 HOST_WIDE_INT sub_value = value_hi_lo[i];
37780
37781 if (sub_value & sign_bit)
37782 sub_value |= upper_32bits;
37783
37784 op2_hi_lo[i] = GEN_INT (sub_value);
37785
37786 /* If this is an AND instruction, check to see if we need to load
37787 the value in a register. */
37788 if (code == AND && sub_value != -1 && sub_value != 0
37789 && !and_operand (op2_hi_lo[i], SImode))
37790 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37791 }
37792 }
37793 }
37794
37795 for (i = 0; i < 2; i++)
37796 {
37797 /* Split large IOR/XOR operations. */
37798 if ((code == IOR || code == XOR)
37799 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37800 && !complement_final_p
37801 && !complement_op1_p
37802 && !complement_op2_p
37803 && !logical_const_operand (op2_hi_lo[i], SImode))
37804 {
37805 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37806 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37807 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37808 rtx tmp = gen_reg_rtx (SImode);
37809
37810 /* Make sure the constant is sign extended. */
37811 if ((hi_16bits & sign_bit) != 0)
37812 hi_16bits |= upper_32bits;
37813
37814 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37815 code, SImode, false, false, false);
37816
37817 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37818 code, SImode, false, false, false);
37819 }
37820 else
37821 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
37822 code, SImode, complement_final_p,
37823 complement_op1_p, complement_op2_p);
37824 }
37825
37826 return;
37827 }
37828
37829 /* Split the insns that make up boolean operations operating on multiple GPR
37830 registers. The boolean MD patterns ensure that the inputs either are
37831 exactly the same as the output registers, or there is no overlap.
37832
37833 OPERANDS is an array containing the destination and two input operands.
37834 CODE is the base operation (AND, IOR, XOR, NOT).
37835 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37836 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37837 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37838
37839 void
37840 rs6000_split_logical (rtx operands[3],
37841 enum rtx_code code,
37842 bool complement_final_p,
37843 bool complement_op1_p,
37844 bool complement_op2_p)
37845 {
37846 machine_mode mode = GET_MODE (operands[0]);
37847 machine_mode sub_mode;
37848 rtx op0, op1, op2;
37849 int sub_size, regno0, regno1, nregs, i;
37850
37851 /* If this is DImode, use the specialized version that can run before
37852 register allocation. */
37853 if (mode == DImode && !TARGET_POWERPC64)
37854 {
37855 rs6000_split_logical_di (operands, code, complement_final_p,
37856 complement_op1_p, complement_op2_p);
37857 return;
37858 }
37859
37860 op0 = operands[0];
37861 op1 = operands[1];
37862 op2 = (code == NOT) ? NULL_RTX : operands[2];
37863 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
37864 sub_size = GET_MODE_SIZE (sub_mode);
37865 regno0 = REGNO (op0);
37866 regno1 = REGNO (op1);
37867
37868 gcc_assert (reload_completed);
37869 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37870 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37871
37872 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
37873 gcc_assert (nregs > 1);
37874
37875 if (op2 && REG_P (op2))
37876 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
37877
37878 for (i = 0; i < nregs; i++)
37879 {
37880 int offset = i * sub_size;
37881 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
37882 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
37883 rtx sub_op2 = ((code == NOT)
37884 ? NULL_RTX
37885 : simplify_subreg (sub_mode, op2, mode, offset));
37886
37887 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
37888 complement_final_p, complement_op1_p,
37889 complement_op2_p);
37890 }
37891
37892 return;
37893 }
37894
37895 \f
37896 /* Return true if the peephole2 can combine a load involving a combination of
37897 an addis instruction and a load with an offset that can be fused together on
37898 a power8. */
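/* For example, the peephole can fuse (illustrative):

     addis r10,r2,sym@toc@ha
     lwz   r3,sym@toc@l(r10)

   provided r10 is dead afterwards and r3 is not used in the address;
   the pair is then rewritten to build the address in r3 itself.  */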
37899
37900 bool
37901 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
37902 rtx addis_value, /* addis value. */
37903 rtx target, /* target register that is loaded. */
37904 rtx mem) /* bottom part of the memory addr. */
37905 {
37906 rtx addr;
37907 rtx base_reg;
37908
37909 /* Validate arguments. */
37910 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
37911 return false;
37912
37913 if (!base_reg_operand (target, GET_MODE (target)))
37914 return false;
37915
37916 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
37917 return false;
37918
37919 /* Allow sign/zero extension. */
37920 if (GET_CODE (mem) == ZERO_EXTEND
37921 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
37922 mem = XEXP (mem, 0);
37923
37924 if (!MEM_P (mem))
37925 return false;
37926
37927 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
37928 return false;
37929
37930 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
37931 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
37932 return false;
37933
37934 /* Validate that the register used to load the high value is either the
37935 register being loaded, or we can safely replace its use.
37936
37937 This function is only called from the peephole2 pass and we assume that
37938 there are 2 instructions in the peephole (addis and load), so we want to
37939 check that the target register is not used in the memory address and that
37940 the register holding the addis result is dead after the peephole. */
37941 if (REGNO (addis_reg) != REGNO (target))
37942 {
37943 if (reg_mentioned_p (target, mem))
37944 return false;
37945
37946 if (!peep2_reg_dead_p (2, addis_reg))
37947 return false;
37948
37949 /* If the target register being loaded is the stack pointer, we must
37950 avoid loading any other value into it, even temporarily. */
37951 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
37952 return false;
37953 }
37954
37955 base_reg = XEXP (addr, 0);
37956 return REGNO (addis_reg) == REGNO (base_reg);
37957 }
37958
37959 /* During the peephole2 pass, adjust and expand the insns for a load fusion
37960 sequence. We adjust the addis register to use the target register. If the
37961 load sign extends, we emit a zero-extending load followed by an explicit
37962 sign extension afterwards, since the fusion only covers zero-extending
37963 loads.
37964
37965 The operands are:
37966 operands[0] register set with addis (to be replaced with target)
37967 operands[1] value set via addis
37968 operands[2] target register being loaded
37969 operands[3] D-form memory reference using operands[0]. */
37970
37971 void
37972 expand_fusion_gpr_load (rtx *operands)
37973 {
37974 rtx addis_value = operands[1];
37975 rtx target = operands[2];
37976 rtx orig_mem = operands[3];
37977 rtx new_addr, new_mem, orig_addr, offset;
37978 enum rtx_code plus_or_lo_sum;
37979 machine_mode target_mode = GET_MODE (target);
37980 machine_mode extend_mode = target_mode;
37981 machine_mode ptr_mode = Pmode;
37982 enum rtx_code extend = UNKNOWN;
37983
37984 if (GET_CODE (orig_mem) == ZERO_EXTEND
37985 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
37986 {
37987 extend = GET_CODE (orig_mem);
37988 orig_mem = XEXP (orig_mem, 0);
37989 target_mode = GET_MODE (orig_mem);
37990 }
37991
37992 gcc_assert (MEM_P (orig_mem));
37993
37994 orig_addr = XEXP (orig_mem, 0);
37995 plus_or_lo_sum = GET_CODE (orig_addr);
37996 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37997
37998 offset = XEXP (orig_addr, 1);
37999 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38000 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38001
38002 if (extend != UNKNOWN)
38003 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38004
38005 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38006 UNSPEC_FUSION_GPR);
38007 emit_insn (gen_rtx_SET (target, new_mem));
38008
38009 if (extend == SIGN_EXTEND)
38010 {
38011 int sub_off = ((BYTES_BIG_ENDIAN)
38012 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38013 : 0);
38014 rtx sign_reg
38015 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38016
38017 emit_insn (gen_rtx_SET (target,
38018 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38019 }
38020
38021 return;
38022 }
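/* For example (a sketch; the RTL is abbreviated), a sign-extending fusion
   candidate

       (set (reg:DI 9) (sign_extend:DI (mem:SI ...)))

   is rewritten above as a zero-extending fused load wrapped in
   UNSPEC_FUSION_GPR, followed by an explicit sign_extend of the low-part
   subreg, since power8 fusion only handles the zero-extending form. */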
38023
38024 /* Emit the addis instruction that will be part of a fused instruction
38025 sequence. */
38026
38027 void
38028 emit_fusion_addis (rtx target, rtx addis_value)
38029 {
38030 rtx fuse_ops[10];
38031 const char *addis_str = NULL;
38032
38033 /* Emit the addis instruction. */
38034 fuse_ops[0] = target;
38035 if (satisfies_constraint_L (addis_value))
38036 {
38037 fuse_ops[1] = addis_value;
38038 addis_str = "lis %0,%v1";
38039 }
38040
38041 else if (GET_CODE (addis_value) == PLUS)
38042 {
38043 rtx op0 = XEXP (addis_value, 0);
38044 rtx op1 = XEXP (addis_value, 1);
38045
38046 if (REG_P (op0) && CONST_INT_P (op1)
38047 && satisfies_constraint_L (op1))
38048 {
38049 fuse_ops[1] = op0;
38050 fuse_ops[2] = op1;
38051 addis_str = "addis %0,%1,%v2";
38052 }
38053 }
38054
38055 else if (GET_CODE (addis_value) == HIGH)
38056 {
38057 rtx value = XEXP (addis_value, 0);
38058 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38059 {
38060 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38061 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38062 if (TARGET_ELF)
38063 addis_str = "addis %0,%2,%1@toc@ha";
38064
38065 else if (TARGET_XCOFF)
38066 addis_str = "addis %0,%1@u(%2)";
38067
38068 else
38069 gcc_unreachable ();
38070 }
38071
38072 else if (GET_CODE (value) == PLUS)
38073 {
38074 rtx op0 = XEXP (value, 0);
38075 rtx op1 = XEXP (value, 1);
38076
38077 if (GET_CODE (op0) == UNSPEC
38078 && XINT (op0, 1) == UNSPEC_TOCREL
38079 && CONST_INT_P (op1))
38080 {
38081 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38082 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38083 fuse_ops[3] = op1;
38084 if (TARGET_ELF)
38085 addis_str = "addis %0,%2,%1+%3@toc@ha";
38086
38087 else if (TARGET_XCOFF)
38088 addis_str = "addis %0,%1+%3@u(%2)";
38089
38090 else
38091 gcc_unreachable ();
38092 }
38093 }
38094
38095 else if (satisfies_constraint_L (value))
38096 {
38097 fuse_ops[1] = value;
38098 addis_str = "lis %0,%v1";
38099 }
38100
38101 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38102 {
38103 fuse_ops[1] = value;
38104 addis_str = "lis %0,%1@ha";
38105 }
38106 }
38107
38108 if (!addis_str)
38109 fatal_insn ("Could not generate addis value for fusion", addis_value);
38110
38111 output_asm_insn (addis_str, fuse_ops);
38112 }
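/* Illustrative forms of the addis emitted above (operands hypothetical):

       lis 9,0x1234                 16-bit shifted constant (L constraint)
       addis 9,2,sym@toc@ha         TOC-relative symbol on ELF
       addis 9,sym@u(2)             TOC-relative symbol on XCOFF  */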
38113
38114 /* Emit a D-form load or store instruction that is the second instruction
38115 of a fusion sequence. */
38116
38117 void
38118 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38119 const char *insn_str)
38120 {
38121 rtx fuse_ops[10];
38122 char insn_template[80];
38123
38124 fuse_ops[0] = load_store_reg;
38125 fuse_ops[1] = addis_reg;
38126
38127 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38128 {
38129 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38130 fuse_ops[2] = offset;
38131 output_asm_insn (insn_template, fuse_ops);
38132 }
38133
38134 else if (GET_CODE (offset) == UNSPEC
38135 && XINT (offset, 1) == UNSPEC_TOCREL)
38136 {
38137 if (TARGET_ELF)
38138 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38139
38140 else if (TARGET_XCOFF)
38141 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38142
38143 else
38144 gcc_unreachable ();
38145
38146 fuse_ops[2] = XVECEXP (offset, 0, 0);
38147 output_asm_insn (insn_template, fuse_ops);
38148 }
38149
38150 else if (GET_CODE (offset) == PLUS
38151 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38152 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38153 && CONST_INT_P (XEXP (offset, 1)))
38154 {
38155 rtx tocrel_unspec = XEXP (offset, 0);
38156 if (TARGET_ELF)
38157 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38158
38159 else if (TARGET_XCOFF)
38160 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38161
38162 else
38163 gcc_unreachable ();
38164
38165 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38166 fuse_ops[3] = XEXP (offset, 1);
38167 output_asm_insn (insn_template, fuse_ops);
38168 }
38169
38170 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38171 {
38172 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38173
38174 fuse_ops[2] = offset;
38175 output_asm_insn (insn_template, fuse_ops);
38176 }
38177
38178 else
38179 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38180
38181 return;
38182 }
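/* For instance, with insn_str "lwz" the cases above produce templates such
   as "lwz %0,%2(%1)" for a constant offset, or "lwz %0,%2@toc@l(%1)" for an
   ELF TOC-relative offset, which pair with the preceding addis. */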
38183
38184 /* Given an address, convert it into the addis and load offset parts. Addresses
38185 created during the peephole2 process look like:
38186 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38187 (unspec [(...)] UNSPEC_TOCREL)) */
38188
38189 static void
38190 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38191 {
38192 rtx hi, lo;
38193
38194 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38195 {
38196 hi = XEXP (addr, 0);
38197 lo = XEXP (addr, 1);
38198 }
38199 else
38200 gcc_unreachable ();
38201
38202 *p_hi = hi;
38203 *p_lo = lo;
38204 }
38205
38206 /* Return a string to fuse an addis instruction with a gpr load into the same
38207 register that the addis instruction set. The address that is used is the
38208 logical address that was formed during peephole2:
38209 (lo_sum (high) (low-part))
38210
38211 The code is complicated, so we call output_asm_insn directly, and just
38212 return "". */
38213
38214 const char *
38215 emit_fusion_gpr_load (rtx target, rtx mem)
38216 {
38217 rtx addis_value;
38218 rtx addr;
38219 rtx load_offset;
38220 const char *load_str = NULL;
38221 machine_mode mode;
38222
38223 if (GET_CODE (mem) == ZERO_EXTEND)
38224 mem = XEXP (mem, 0);
38225
38226 gcc_assert (REG_P (target) && MEM_P (mem));
38227
38228 addr = XEXP (mem, 0);
38229 fusion_split_address (addr, &addis_value, &load_offset);
38230
38231 /* Now emit the load instruction to the same register. */
38232 mode = GET_MODE (mem);
38233 switch (mode)
38234 {
38235 case E_QImode:
38236 load_str = "lbz";
38237 break;
38238
38239 case E_HImode:
38240 load_str = "lhz";
38241 break;
38242
38243 case E_SImode:
38244 case E_SFmode:
38245 load_str = "lwz";
38246 break;
38247
38248 case E_DImode:
38249 case E_DFmode:
38250 gcc_assert (TARGET_POWERPC64);
38251 load_str = "ld";
38252 break;
38253
38254 default:
38255 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38256 }
38257
38258 /* Emit the addis instruction. */
38259 emit_fusion_addis (target, addis_value);
38260
38261 /* Emit the D-form load instruction. */
38262 emit_fusion_load_store (target, target, load_offset, load_str);
38263
38264 return "";
38265 }
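/* Putting the two helpers together, a DImode GPR fused load might be output
   as (an illustrative sketch, operands hypothetical):

       addis 10,2,var@toc@ha
       ld 10,var@toc@l(10)  */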
38266 \f
38267
38268 /* Return true if the peephole2 pass can fuse an addis instruction with a
38269 memory load or store operation. This form of fusion was added in the ISA
38270 3.0 (power9) hardware. */
38271
38272 bool
38273 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38274 rtx addis_value, /* addis value. */
38275 rtx dest, /* destination (memory or register). */
38276 rtx src) /* source (register or memory). */
38277 {
38278 rtx addr, mem, offset;
38279 machine_mode mode = GET_MODE (src);
38280
38281 /* Validate arguments. */
38282 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38283 return false;
38284
38285 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38286 return false;
38287
38288 /* Ignore extend operations that are part of the load. */
38289 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38290 src = XEXP (src, 0);
38291
38292 /* Test for memory<-register or register<-memory. */
38293 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38294 {
38295 if (!MEM_P (dest))
38296 return false;
38297
38298 mem = dest;
38299 }
38300
38301 else if (MEM_P (src))
38302 {
38303 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38304 return false;
38305
38306 mem = src;
38307 }
38308
38309 else
38310 return false;
38311
38312 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38313 if (GET_CODE (addr) == PLUS)
38314 {
38315 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38316 return false;
38317
38318 return satisfies_constraint_I (XEXP (addr, 1));
38319 }
38320
38321 else if (GET_CODE (addr) == LO_SUM)
38322 {
38323 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38324 return false;
38325
38326 offset = XEXP (addr, 1);
38327 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38328 return small_toc_ref (offset, GET_MODE (offset));
38329
38330 else if (TARGET_ELF && !TARGET_POWERPC64)
38331 return CONSTANT_P (offset);
38332 }
38333
38334 return false;
38335 }
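/* For example (registers and symbol hypothetical), a power9 candidate may
   fuse an addis with a store, and the destination of the addis need not
   match the register being stored:

       addis 9,2,sym@toc@ha
       stw 10,sym@toc@l(9)  */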
38336
38337 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38338 load sequence.
38339
38340 The operands are:
38341 operands[0] register set with addis
38342 operands[1] value set via addis
38343 operands[2] target register being loaded
38344 operands[3] D-form memory reference using operands[0].
38345
38346 This is similar to the fusion introduced with power8, except it scales to
38347 both loads and stores and does not require the result register to be the
38348 same as the base register. At the moment, we only do this if the register
38349 set by the addis is dead. */
38350
38351 void
38352 expand_fusion_p9_load (rtx *operands)
38353 {
38354 rtx tmp_reg = operands[0];
38355 rtx addis_value = operands[1];
38356 rtx target = operands[2];
38357 rtx orig_mem = operands[3];
38358 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38359 enum rtx_code plus_or_lo_sum;
38360 machine_mode target_mode = GET_MODE (target);
38361 machine_mode extend_mode = target_mode;
38362 machine_mode ptr_mode = Pmode;
38363 enum rtx_code extend = UNKNOWN;
38364
38365 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38366 {
38367 extend = GET_CODE (orig_mem);
38368 orig_mem = XEXP (orig_mem, 0);
38369 target_mode = GET_MODE (orig_mem);
38370 }
38371
38372 gcc_assert (MEM_P (orig_mem));
38373
38374 orig_addr = XEXP (orig_mem, 0);
38375 plus_or_lo_sum = GET_CODE (orig_addr);
38376 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38377
38378 offset = XEXP (orig_addr, 1);
38379 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38380 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38381
38382 if (extend != UNKNOWN)
38383 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
38384
38385 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38386 UNSPEC_FUSION_P9);
38387
38388 set = gen_rtx_SET (target, new_mem);
38389 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38390 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38391 emit_insn (insn);
38392
38393 return;
38394 }
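/* The insn emitted above is a PARALLEL of the fused load and a clobber of
   the addis scratch register, roughly (a sketch):

       (parallel [(set (reg 10) (unspec [(mem ...)] UNSPEC_FUSION_P9))
                  (clobber (reg 9))])  */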
38395
38396 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38397 store sequence.
38398
38399 The operands are:
38400 operands[0] register set with addis
38401 operands[1] value set via addis
38402 operands[2] target D-form memory being stored to
38403 operands[3] register being stored
38404
38405 This is similar to the fusion introduced with power8, except it scales to
38406 both loads and stores and does not require the result register to be the
38407 same as the base register. At the moment, we only do this if the register
38408 set by the addis is dead. */
38409
38410 void
38411 expand_fusion_p9_store (rtx *operands)
38412 {
38413 rtx tmp_reg = operands[0];
38414 rtx addis_value = operands[1];
38415 rtx orig_mem = operands[2];
38416 rtx src = operands[3];
38417 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
38418 enum rtx_code plus_or_lo_sum;
38419 machine_mode target_mode = GET_MODE (orig_mem);
38420 machine_mode ptr_mode = Pmode;
38421
38422 gcc_assert (MEM_P (orig_mem));
38423
38424 orig_addr = XEXP (orig_mem, 0);
38425 plus_or_lo_sum = GET_CODE (orig_addr);
38426 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38427
38428 offset = XEXP (orig_addr, 1);
38429 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38430 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38431
38432 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
38433 UNSPEC_FUSION_P9);
38434
38435 set = gen_rtx_SET (new_mem, new_src);
38436 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38437 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38438 emit_insn (insn);
38439
38440 return;
38441 }
38442
38443 /* Return a string to fuse an addis instruction with a load using extended
38444 fusion. The address that is used is the logical address that was formed
38445 during peephole2: (lo_sum (high) (low-part))
38446
38447 The code is complicated, so we call output_asm_insn directly, and just
38448 return "". */
38449
38450 const char *
38451 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
38452 {
38453 machine_mode mode = GET_MODE (reg);
38454 rtx hi;
38455 rtx lo;
38456 rtx addr;
38457 const char *load_string;
38458 int r;
38459
38460 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
38461 {
38462 mem = XEXP (mem, 0);
38463 mode = GET_MODE (mem);
38464 }
38465
38466 if (GET_CODE (reg) == SUBREG)
38467 {
38468 gcc_assert (SUBREG_BYTE (reg) == 0);
38469 reg = SUBREG_REG (reg);
38470 }
38471
38472 if (!REG_P (reg))
38473 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
38474
38475 r = REGNO (reg);
38476 if (FP_REGNO_P (r))
38477 {
38478 if (mode == SFmode)
38479 load_string = "lfs";
38480 else if (mode == DFmode || mode == DImode)
38481 load_string = "lfd";
38482 else
38483 gcc_unreachable ();
38484 }
38485 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38486 {
38487 if (mode == SFmode)
38488 load_string = "lxssp";
38489 else if (mode == DFmode || mode == DImode)
38490 load_string = "lxsd";
38491 else
38492 gcc_unreachable ();
38493 }
38494 else if (INT_REGNO_P (r))
38495 {
38496 switch (mode)
38497 {
38498 case E_QImode:
38499 load_string = "lbz";
38500 break;
38501 case E_HImode:
38502 load_string = "lhz";
38503 break;
38504 case E_SImode:
38505 case E_SFmode:
38506 load_string = "lwz";
38507 break;
38508 case E_DImode:
38509 case E_DFmode:
38510 if (!TARGET_POWERPC64)
38511 gcc_unreachable ();
38512 load_string = "ld";
38513 break;
38514 default:
38515 gcc_unreachable ();
38516 }
38517 }
38518 else
38519 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
38520
38521 if (!MEM_P (mem))
38522 fatal_insn ("emit_fusion_p9_load not MEM", mem);
38523
38524 addr = XEXP (mem, 0);
38525 fusion_split_address (addr, &hi, &lo);
38526
38527 /* Emit the addis instruction. */
38528 emit_fusion_addis (tmp_reg, hi);
38529
38530 /* Emit the D-form load instruction. */
38531 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
38532
38533 return "";
38534 }
38535
38536 /* Return a string to fuse an addis instruction with a store using extended
38537 fusion. The address that is used is the logical address that was formed
38538 during peephole2: (lo_sum (high) (low-part))
38539
38540 The code is complicated, so we call output_asm_insn directly, and just
38541 return "". */
38542
38543 const char *
38544 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
38545 {
38546 machine_mode mode = GET_MODE (reg);
38547 rtx hi;
38548 rtx lo;
38549 rtx addr;
38550 const char *store_string;
38551 int r;
38552
38553 if (GET_CODE (reg) == SUBREG)
38554 {
38555 gcc_assert (SUBREG_BYTE (reg) == 0);
38556 reg = SUBREG_REG (reg);
38557 }
38558
38559 if (!REG_P (reg))
38560 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
38561
38562 r = REGNO (reg);
38563 if (FP_REGNO_P (r))
38564 {
38565 if (mode == SFmode)
38566 store_string = "stfs";
38567 else if (mode == DFmode)
38568 store_string = "stfd";
38569 else
38570 gcc_unreachable ();
38571 }
38572 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38573 {
38574 if (mode == SFmode)
38575 store_string = "stxssp";
38576 else if (mode == DFmode || mode == DImode)
38577 store_string = "stxsd";
38578 else
38579 gcc_unreachable ();
38580 }
38581 else if (INT_REGNO_P (r))
38582 {
38583 switch (mode)
38584 {
38585 case E_QImode:
38586 store_string = "stb";
38587 break;
38588 case E_HImode:
38589 store_string = "sth";
38590 break;
38591 case E_SImode:
38592 case E_SFmode:
38593 store_string = "stw";
38594 break;
38595 case E_DImode:
38596 case E_DFmode:
38597 if (!TARGET_POWERPC64)
38598 gcc_unreachable ();
38599 store_string = "std";
38600 break;
38601 default:
38602 gcc_unreachable ();
38603 }
38604 }
38605 else
38606 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
38607
38608 if (!MEM_P (mem))
38609 fatal_insn ("emit_fusion_p9_store not MEM", mem);
38610
38611 addr = XEXP (mem, 0);
38612 fusion_split_address (addr, &hi, &lo);
38613
38614 /* Emit the addis instruction. */
38615 emit_fusion_addis (tmp_reg, hi);
38616
38617 /* Emit the D-form store instruction. */
38618 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
38619
38620 return "";
38621 }
38622
38623 #ifdef RS6000_GLIBC_ATOMIC_FENV
38624 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38625 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38626 #endif
38627
38628 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38629
38630 static void
38631 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38632 {
38633 if (!TARGET_HARD_FLOAT)
38634 {
38635 #ifdef RS6000_GLIBC_ATOMIC_FENV
38636 if (atomic_hold_decl == NULL_TREE)
38637 {
38638 atomic_hold_decl
38639 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38640 get_identifier ("__atomic_feholdexcept"),
38641 build_function_type_list (void_type_node,
38642 double_ptr_type_node,
38643 NULL_TREE));
38644 TREE_PUBLIC (atomic_hold_decl) = 1;
38645 DECL_EXTERNAL (atomic_hold_decl) = 1;
38646 }
38647
38648 if (atomic_clear_decl == NULL_TREE)
38649 {
38650 atomic_clear_decl
38651 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38652 get_identifier ("__atomic_feclearexcept"),
38653 build_function_type_list (void_type_node,
38654 NULL_TREE));
38655 TREE_PUBLIC (atomic_clear_decl) = 1;
38656 DECL_EXTERNAL (atomic_clear_decl) = 1;
38657 }
38658
38659 tree const_double = build_qualified_type (double_type_node,
38660 TYPE_QUAL_CONST);
38661 tree const_double_ptr = build_pointer_type (const_double);
38662 if (atomic_update_decl == NULL_TREE)
38663 {
38664 atomic_update_decl
38665 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38666 get_identifier ("__atomic_feupdateenv"),
38667 build_function_type_list (void_type_node,
38668 const_double_ptr,
38669 NULL_TREE));
38670 TREE_PUBLIC (atomic_update_decl) = 1;
38671 DECL_EXTERNAL (atomic_update_decl) = 1;
38672 }
38673
38674 tree fenv_var = create_tmp_var_raw (double_type_node);
38675 TREE_ADDRESSABLE (fenv_var) = 1;
38676 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38677
38678 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38679 *clear = build_call_expr (atomic_clear_decl, 0);
38680 *update = build_call_expr (atomic_update_decl, 1,
38681 fold_convert (const_double_ptr, fenv_addr));
38682 #endif
38683 return;
38684 }
38685
38686 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38687 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38688 tree call_mffs = build_call_expr (mffs, 0);
38689
38690 /* Generates the equivalent of feholdexcept (&fenv_var)
38691
38692 *fenv_var = __builtin_mffs ();
38693 double fenv_hold;
38694 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
38695 __builtin_mtfsf (0xff, fenv_hold); */
38696
38697 /* Mask to clear everything except for the rounding modes and non-IEEE
38698 arithmetic flag. */
38699 const unsigned HOST_WIDE_INT hold_exception_mask =
38700 HOST_WIDE_INT_C (0xffffffff00000007);
38701
38702 tree fenv_var = create_tmp_var_raw (double_type_node);
38703
38704 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38705
38706 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38707 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38708 build_int_cst (uint64_type_node,
38709 hold_exception_mask));
38710
38711 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38712 fenv_llu_and);
38713
38714 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38715 build_int_cst (unsigned_type_node, 0xff),
38716 fenv_hold_mtfsf);
38717
38718 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38719
38720 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38721
38722 double fenv_clear = __builtin_mffs ();
38723 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38724 __builtin_mtfsf (0xff, fenv_clear); */
38725
38726 /* Mask to clear the entire lower word of the FPSCR, including the exception
38727 bits, the rounding modes, and the non-IEEE arithmetic flag. */
38728 const unsigned HOST_WIDE_INT clear_exception_mask =
38729 HOST_WIDE_INT_C (0xffffffff00000000);
38730
38731 tree fenv_clear = create_tmp_var_raw (double_type_node);
38732
38733 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
38734
38735 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
38736 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
38737 fenv_clean_llu,
38738 build_int_cst (uint64_type_node,
38739 clear_exception_mask));
38740
38741 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38742 fenv_clear_llu_and);
38743
38744 tree clear_mtfsf = build_call_expr (mtfsf, 2,
38745 build_int_cst (unsigned_type_node, 0xff),
38746 fenv_clear_mtfsf);
38747
38748 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
38749
38750 /* Generates the equivalent of feupdateenv (&fenv_var)
38751
38752 double old_fenv = __builtin_mffs ();
38753 double fenv_update;
38754 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
38755 (*(uint64_t*)fenv_var & 0x1ff80fff);
38756 __builtin_mtfsf (0xff, fenv_update); */
38757
38758 const unsigned HOST_WIDE_INT update_exception_mask =
38759 HOST_WIDE_INT_C (0xffffffff1fffff00);
38760 const unsigned HOST_WIDE_INT new_exception_mask =
38761 HOST_WIDE_INT_C (0x1ff80fff);
38762
38763 tree old_fenv = create_tmp_var_raw (double_type_node);
38764 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
38765
38766 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
38767 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
38768 build_int_cst (uint64_type_node,
38769 update_exception_mask));
38770
38771 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38772 build_int_cst (uint64_type_node,
38773 new_exception_mask));
38774
38775 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
38776 old_llu_and, new_llu_and);
38777
38778 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38779 new_llu_mask);
38780
38781 tree update_mtfsf = build_call_expr (mtfsf, 2,
38782 build_int_cst (unsigned_type_node, 0xff),
38783 fenv_update_mtfsf);
38784
38785 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
38786 }
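/* This hook supports C11 atomic compound assignment on floating-point
   types, e.g. (an illustrative usage):

       _Atomic double d;
       d += 1.0;

   The *hold code saves and masks the FP environment before the
   compare-and-swap loop, *clear discards exceptions raised by a failed
   iteration, and *update merges the final exceptions back afterwards. */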
38787
38788 void
38789 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
38790 {
38791 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38792
38793 rtx_tmp0 = gen_reg_rtx (V2DFmode);
38794 rtx_tmp1 = gen_reg_rtx (V2DFmode);
38795
38796 /* The destination of the vmrgew instruction layout is:
38797 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
38798 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
38799 vmrgew instruction will be correct. */
38800 if (BYTES_BIG_ENDIAN)
38801 {
38802 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
38803 GEN_INT (0)));
38804 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
38805 GEN_INT (3)));
38806 }
38807 else
38808 {
38809 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
38810 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
38811 }
38812
38813 rtx_tmp2 = gen_reg_rtx (V4SFmode);
38814 rtx_tmp3 = gen_reg_rtx (V4SFmode);
38815
38816 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
38817 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
38818
38819 if (BYTES_BIG_ENDIAN)
38820 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
38821 else
38822 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
38823 }
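/* Worked example (a sketch): with src1 = { a0, a1 } and src2 = { b0, b1 },
   the xxpermdi/xvcvdpsp/vmrgew sequence above produces
   dst = { (float) a0, (float) a1, (float) b0, (float) b1 }. */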
38824
38825 void
38826 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
38827 {
38828 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38829
38830 rtx_tmp0 = gen_reg_rtx (V2DImode);
38831 rtx_tmp1 = gen_reg_rtx (V2DImode);
38832
38833 /* The destination of the vmrgew instruction layout is:
38834 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
38835 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
38836 vmrgew instruction will be correct. */
38837 if (BYTES_BIG_ENDIAN)
38838 {
38839 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
38840 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
38841 }
38842 else
38843 {
38844 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
38845 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
38846 }
38847
38848 rtx_tmp2 = gen_reg_rtx (V4SFmode);
38849 rtx_tmp3 = gen_reg_rtx (V4SFmode);
38850
38851 if (signed_convert)
38852 {
38853 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
38854 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
38855 }
38856 else
38857 {
38858 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
38859 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
38860 }
38861
38862 if (BYTES_BIG_ENDIAN)
38863 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
38864 else
38865 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
38866 }
38867
38868 void
38869 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
38870 rtx src2)
38871 {
38872 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38873
38874 rtx_tmp0 = gen_reg_rtx (V2DFmode);
38875 rtx_tmp1 = gen_reg_rtx (V2DFmode);
38876
38877 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
38878 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
38879
38880 rtx_tmp2 = gen_reg_rtx (V4SImode);
38881 rtx_tmp3 = gen_reg_rtx (V4SImode);
38882
38883 if (signed_convert)
38884 {
38885 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
38886 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
38887 }
38888 else
38889 {
38890 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
38891 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
38892 }
38893
38894 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
38895 }
38896
38897 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
38898
38899 static bool
38900 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
38901 optimization_type opt_type)
38902 {
38903 switch (op)
38904 {
38905 case rsqrt_optab:
38906 return (opt_type == OPTIMIZE_FOR_SPEED
38907 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
38908
38909 default:
38910 return true;
38911 }
38912 }
38913
38914 /* Implement TARGET_CONSTANT_ALIGNMENT. */
38915
38916 static HOST_WIDE_INT
38917 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
38918 {
38919 if (TREE_CODE (exp) == STRING_CST
38920 && (STRICT_ALIGNMENT || !optimize_size))
38921 return MAX (align, BITS_PER_WORD);
38922 return align;
38923 }
38924
38925 /* Implement TARGET_STARTING_FRAME_OFFSET. */
38926
38927 static HOST_WIDE_INT
38928 rs6000_starting_frame_offset (void)
38929 {
38930 if (FRAME_GROWS_DOWNWARD)
38931 return 0;
38932 return RS6000_STARTING_FRAME_OFFSET;
38933 }
38934 \f
38935
38936 /* Create an alias for a mangled name where we have changed the mangling (in
38937 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
38938 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
38939
38940 #if TARGET_ELF && RS6000_WEAK
38941 static void
38942 rs6000_globalize_decl_name (FILE * stream, tree decl)
38943 {
38944 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
38945
38946 targetm.asm_out.globalize_label (stream, name);
38947
38948 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
38949 {
38950 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
38951 const char *old_name;
38952
38953 ieee128_mangling_gcc_8_1 = true;
38954 lang_hooks.set_decl_assembler_name (decl);
38955 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
38956 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
38957 ieee128_mangling_gcc_8_1 = false;
38958
38959 if (strcmp (name, old_name) != 0)
38960 {
38961 fprintf (stream, "\t.weak %s\n", old_name);
38962 fprintf (stream, "\t.set %s,%s\n", old_name, name);
38963 }
38964 }
38965 }
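/* For a C++ function mangled under the GCC 8.1 __float128 scheme, the code
   above would emit an alias such as (an illustrative sketch; the mangled
   names are hypothetical):

       .weak _Z3fooU10__float128
       .set _Z3fooU10__float128,_Z3foou9__ieee128  */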
38966 #endif
38967
38968 \f
38969 /* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
38970 function names from <foo>l to <foo>f128 if the default long double type is
38971 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
38972 include file switches the names on systems that support long double as IEEE
38973 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
38974 In the future, glibc will export names like __ieee128_sinf128 and we can
38975 switch to using those instead of using sinf128, which pollutes the user's
38976 namespace.
38977
38978 This will switch the names for the Fortran math functions as well, since
38979 Fortran does not use math.h. However, Fortran needs other changes to the compiler and
38980 library before you can switch the real*16 type at compile time.
38981
38982 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
38983 only do this if the default is that long double is IBM extended double, and
38984 the user asked for IEEE 128-bit. */
38985
38986 static tree
38987 rs6000_mangle_decl_assembler_name (tree decl, tree id)
38988 {
38989 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
38990 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
38991 {
38992 size_t len = IDENTIFIER_LENGTH (id);
38993 const char *name = IDENTIFIER_POINTER (id);
38994
38995 if (name[len - 1] == 'l')
38996 {
38997 bool uses_ieee128_p = false;
38998 tree type = TREE_TYPE (decl);
38999 machine_mode ret_mode = TYPE_MODE (type);
39000
39001 /* See if the function returns an IEEE 128-bit floating point type or
39002 complex type. */
39003 if (ret_mode == TFmode || ret_mode == TCmode)
39004 uses_ieee128_p = true;
39005 else
39006 {
39007 function_args_iterator args_iter;
39008 tree arg;
39009
39010 /* See if the function passes an IEEE 128-bit floating point type
39011 or complex type. */
39012 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39013 {
39014 machine_mode arg_mode = TYPE_MODE (arg);
39015 if (arg_mode == TFmode || arg_mode == TCmode)
39016 {
39017 uses_ieee128_p = true;
39018 break;
39019 }
39020 }
39021 }
39022
39023 /* If we passed or returned an IEEE 128-bit floating point type,
39024 change the name. */
39025 if (uses_ieee128_p)
39026 {
39027 char *name2 = (char *) alloca (len + 4);
39028 memcpy (name2, name, len - 1);
39029 strcpy (name2 + len - 1, "f128");
39030 id = get_identifier (name2);
39031 }
39032 }
39033 }
39034
39035 return id;
39036 }
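/* For example (an illustrative case): when the default long double is IBM
   extended double and the user compiles with -mabi=ieeelongdouble, a call
   to __builtin_sinl is renamed here from "sinl" to "sinf128". */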
39037
39038 \f
39039 struct gcc_target targetm = TARGET_INITIALIZER;
39040
39041 #include "gt-rs6000.h"