/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3. If not see
   <http://www.gnu.org/licenses/>. */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h" /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h" /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last. */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets. In the future, power server
   systems will also set long double to be IEEE 128-bit. AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default. This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined. */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;       /* stack info won't change from here on */
  int first_gp_reg_save;      /* first callee saved GP register used */
  int first_fp_reg_save;      /* first callee saved FP register used */
  int first_altivec_reg_save; /* first callee saved AltiVec register used */
  int lr_save_p;              /* true if the link reg needs to be saved */
  int cr_save_p;              /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;   /* mask of vec registers to save */
  int push_p;                 /* true if we need to allocate stack space */
  int calls_p;                /* true if the function makes any calls */
  int world_save_p;           /* true if we're saving *everything*:
                                 r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;        /* which ABI to use */
  int gp_save_offset;         /* offset to save GP regs from initial SP */
  int fp_save_offset;         /* offset to save FP regs from initial SP */
  int altivec_save_offset;    /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;         /* offset to save LR from initial SP */
  int cr_save_offset;         /* offset to save CR from initial SP */
  int vrsave_save_offset;     /* offset to save VRSAVE from initial SP */
  int varargs_save_offset;    /* offset to save the varargs registers */
  int ehrd_offset;            /* offset to EH return data */
  int ehcr_offset;            /* offset to EH CR field data */
  int reg_size;               /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;    /* variable save area size */
  int parm_size;              /* outgoing parameter size */
  int save_size;              /* save area size */
  int fixed_size;             /* fixed size of stack frame */
  int gp_size;                /* size of saved GP registers */
  int fp_size;                /* size of saved FP registers */
  int altivec_size;           /* size of saved AltiVec registers */
  int cr_size;                /* size to hold CR if not in fixed area */
  int vrsave_size;            /* size to hold VRSAVE */
  int altivec_padding_size;   /* size of altivec alignment padding */
  HOST_WIDE_INT total_size;   /* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure. */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used. */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used. */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return. */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue. */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area. */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack. */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI. */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping. */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue. */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load. */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined. */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable; we call to it so we
   can get the address of the GOT section. */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup. */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux. */
int dot_symbols;

/* Specify the machine mode that pointers have. After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode. */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point. We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128). */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128). */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer. */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers. */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned. */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned. */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable. */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode. */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode. */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class. */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types. */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue. This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2. */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes. */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches. */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector. */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins. */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for. */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically. */
enum rs6000_recip_mask {
  RECIP_SF_DIV = 0x001, /* Use divide estimate */
  RECIP_DF_DIV = 0x002,
  RECIP_V4SF_DIV = 0x004,
  RECIP_V2DF_DIV = 0x008,

  RECIP_SF_RSQRT = 0x010, /* Use reciprocal sqrt estimate. */
  RECIP_DF_RSQRT = 0x020,
  RECIP_V4SF_RSQRT = 0x040,
  RECIP_V2DF_RSQRT = 0x080,

  /* Various combinations of flags for -mrecip=xxx. */
  RECIP_NONE = 0,
  RECIP_ALL = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
               | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
               | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough. */
  RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options. */
static struct
{
  const char *string;   /* option name */
  unsigned int mask;    /* mask bits to set */
} recip_options[] = {
  { "all", RECIP_ALL },
  { "none", RECIP_NONE },
  { "div", (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
            | RECIP_V2DF_DIV) },
  { "divf", (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd", (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt", (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
              | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values. */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9", PPC_PLATFORM_POWER9 },
  { "power8", PPC_PLATFORM_POWER8 },
  { "power7", PPC_PLATFORM_POWER7 },
  { "power6x", PPC_PLATFORM_POWER6X },
  { "power6", PPC_PLATFORM_POWER6 },
  { "power5+", PPC_PLATFORM_POWER5_PLUS },
  { "power5", PPC_PLATFORM_POWER5 },
  { "ppc970", PPC_PLATFORM_PPC970 },
  { "power4", PPC_PLATFORM_POWER4 },
  { "ppca2", PPC_PLATFORM_PPCA2 },
  { "ppc476", PPC_PLATFORM_PPC476 },
  { "ppc464", PPC_PLATFORM_PPC464 },
  { "ppc440", PPC_PLATFORM_PPC440 },
  { "ppc405", PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
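
/* Illustrative note (an addition for exposition, not from the original
   file): user code such as

     if (__builtin_cpu_is ("power9"))
       ...

   is expanded by the CPU-builtin machinery to compare the AT_PLATFORM
   value that the C library caches in the TCB against PPC_PLATFORM_POWER9
   from the table above. */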

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks. */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks. */
  { "4xxmac", PPC_FEATURE_HAS_4xxMAC, 0 },
  { "altivec", PPC_FEATURE_HAS_ALTIVEC, 0 },
  { "arch_2_05", PPC_FEATURE_ARCH_2_05, 0 },
  { "arch_2_06", PPC_FEATURE_ARCH_2_06, 0 },
  { "archpmu", PPC_FEATURE_PERFMON_COMPAT, 0 },
  { "booke", PPC_FEATURE_BOOKE, 0 },
  { "cellbe", PPC_FEATURE_CELL_BE, 0 },
  { "dfp", PPC_FEATURE_HAS_DFP, 0 },
  { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE, 0 },
  { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE, 0 },
  { "fpu", PPC_FEATURE_HAS_FPU, 0 },
  { "ic_snoop", PPC_FEATURE_ICACHE_SNOOP, 0 },
  { "mmu", PPC_FEATURE_HAS_MMU, 0 },
  { "notb", PPC_FEATURE_NO_TB, 0 },
  { "pa6t", PPC_FEATURE_PA6T, 0 },
  { "power4", PPC_FEATURE_POWER4, 0 },
  { "power5", PPC_FEATURE_POWER5, 0 },
  { "power5+", PPC_FEATURE_POWER5_PLUS, 0 },
  { "power6x", PPC_FEATURE_POWER6_EXT, 0 },
  { "ppc32", PPC_FEATURE_32, 0 },
  { "ppc601", PPC_FEATURE_601_INSTR, 0 },
  { "ppc64", PPC_FEATURE_64, 0 },
  { "ppcle", PPC_FEATURE_PPC_LE, 0 },
  { "smt", PPC_FEATURE_SMT, 0 },
  { "spe", PPC_FEATURE_HAS_SPE, 0 },
  { "true_le", PPC_FEATURE_TRUE_LE, 0 },
  { "ucache", PPC_FEATURE_UNIFIED_CACHE, 0 },
  { "vsx", PPC_FEATURE_HAS_VSX, 0 },

  /* AT_HWCAP2 masks. */
  { "arch_2_07", PPC_FEATURE2_ARCH_2_07, 1 },
  { "dscr", PPC_FEATURE2_HAS_DSCR, 1 },
  { "ebb", PPC_FEATURE2_HAS_EBB, 1 },
  { "htm", PPC_FEATURE2_HAS_HTM, 1 },
  { "htm-nosc", PPC_FEATURE2_HTM_NOSC, 1 },
  { "htm-no-suspend", PPC_FEATURE2_HTM_NO_SUSPEND, 1 },
  { "isel", PPC_FEATURE2_HAS_ISEL, 1 },
  { "tar", PPC_FEATURE2_HAS_TAR, 1 },
  { "vcrypto", PPC_FEATURE2_HAS_VEC_CRYPTO, 1 },
  { "arch_3_00", PPC_FEATURE2_ARCH_3_00, 1 },
  { "ieee128", PPC_FEATURE2_HAS_IEEE128, 1 },
  { "darn", PPC_FEATURE2_DARN, 1 },
  { "scv", PPC_FEATURE2_SCV, 1 }
};
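
/* Illustrative note (an addition for exposition, not from the original
   file): entries with id == 0 are tested against the AT_HWCAP word and
   entries with id == 1 against AT_HWCAP2, both read from the TCB fields
   provided by newer LIBCs (see tcb_verification_symbol below). So user
   code can select an implementation at run time, e.g.

     if (__builtin_cpu_supports ("arch_3_00"))
       do_power9_version ();   /+ hypothetical user function +/
     else
       do_generic_version ();  /+ hypothetical user function +/

   where /+ +/ stands for a nested comment. */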

/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation. Order the
   clones from the default to the highest ISA. */
enum {
  CLONE_DEFAULT = 0,  /* default clone. */
  CLONE_ISA_2_05,     /* ISA 2.05 (power6). */
  CLONE_ISA_2_06,     /* ISA 2.06 (power7). */
  CLONE_ISA_2_07,     /* ISA 2.07 (power8). */
  CLONE_ISA_3_00,     /* ISA 3.00 (power9). */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names. */
struct clone_map {
  HOST_WIDE_INT isa_mask; /* rs6000_isa mask */
  const char *name;       /* name to use in __builtin_cpu_supports. */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },                            /* Default options. */
  { OPTION_MASK_CMPB, "arch_2_05" },    /* ISA 2.05 (power6). */
  { OPTION_MASK_POPCNTD, "arch_2_06" }, /* ISA 2.06 (power7). */
  { OPTION_MASK_P8_VECTOR, "arch_2_07" }, /* ISA 2.07 (power8). */
  { OPTION_MASK_P9_VECTOR, "arch_3_00" }, /* ISA 3.00 (power9). */
};


/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC. */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin. */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed. Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly. */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications. We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx). */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type. */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers). */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or when checking for a
   legitimate address. We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes. */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR, /* General purpose registers. */
  RELOAD_REG_FPR, /* Traditional floating point regs. */
  RELOAD_REG_VMX, /* Altivec (VMX) registers. */
  RELOAD_REG_ANY, /* OR of GPR, FPR, Altivec masks. */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits. */
#define FIRST_RELOAD_REG_CLASS RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS RELOAD_REG_VMX

/* Map reload register type to a register in the register class. */
struct reload_reg_map_type {
  const char *name; /* Register class name. */
  int reg;          /* Register in the register class. */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr", FIRST_GPR_REGNO },     /* RELOAD_REG_GPR. */
  { "Fpr", FIRST_FPR_REGNO },     /* RELOAD_REG_FPR. */
  { "VMX", FIRST_ALTIVEC_REGNO }, /* RELOAD_REG_VMX. */
  { "Any", -1 },                  /* RELOAD_REG_ANY. */
};

/* Mask bits for each register class, indexed per mode. Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two. */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID       0x01 /* Mode valid in register. */
#define RELOAD_REG_MULTIPLE    0x02 /* Mode takes multiple registers. */
#define RELOAD_REG_INDEXED     0x04 /* Reg+reg addressing. */
#define RELOAD_REG_OFFSET      0x08 /* Reg+offset addressing. */
#define RELOAD_REG_PRE_INCDEC  0x10 /* PRE_INC/PRE_DEC valid. */
#define RELOAD_REG_PRE_MODIFY  0x20 /* PRE_MODIFY valid. */
#define RELOAD_REG_AND_M16     0x40 /* AND -16 addressing. */
#define RELOAD_REG_QUAD_OFFSET 0x80 /* quad offset is limited. */
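
/* Worked example (an addition for exposition, with assumed values): an
   addr_mask of
   (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET) == 0x0d
   says the mode is valid in that register class and supports both
   reg+offset (d-form) and reg+reg (x-form) addressing, but none of the
   auto-update (PRE_INC/PRE_DEC/PRE_MODIFY) forms. */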

/* Per-mode reload insns, plus masks of the valid addressing modes, based on
   the register type. */
struct rs6000_reg_addr {
  enum insn_code reload_load;    /* INSN to reload for loading. */
  enum insn_code reload_store;   /* INSN to reload for storing. */
  enum insn_code reload_fpr_gpr; /* INSN to move from FPR to GPR. */
  enum insn_code reload_gpr_vsx; /* INSN to move from GPR to VSX. */
  enum insn_code reload_vsx_gpr; /* INSN to move from VSX to GPR. */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks. */
  bool scalar_in_vmx_p;          /* Scalar value can go in VMX. */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC. */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
          != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY. */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
          != 0);
}

/* Return true if we have D-form addressing in altivec registers. */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers. This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary. */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
          != 0);
}
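
/* Illustrative example (an addition for exposition; an assumption about how
   these predicates are used, not taken from this file): the ISA 3.0
   lxv/stxv instructions are DQ-form, so a vector load such as

     lxv 0,32(9)

   is valid only because the offset (32) is a multiple of 16. For a mode
   where mode_supports_dq_form returns true, an offset like 8 must instead
   be formed in a register and used with x-form (reg+reg) addressing. */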

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation. If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected. */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
        {
          out_set = single_set (out_insn);
          if (!out_set)
            {
              out_pat = PATTERN (out_insn);
              if (GET_CODE (out_pat) == PARALLEL)
                {
                  for (i = 0; i < XVECLEN (out_pat, 0); i++)
                    {
                      out_exp = XVECEXP (out_pat, 0, i);
                      if ((GET_CODE (out_exp) == CLOBBER)
                          || (GET_CODE (out_exp) == USE))
                        continue;
                      else if (GET_CODE (out_exp) != SET)
                        return false;
                    }
                }
            }
        }
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
        return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
        {
          in_exp = XVECEXP (in_pat, 0, i);
          if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
            continue;
          else if (GET_CODE (in_exp) != SET)
            return false;

          if (MEM_P (SET_DEST (in_exp)))
            {
              out_set = single_set (out_insn);
              if (!out_set)
                {
                  out_pat = PATTERN (out_insn);
                  if (GET_CODE (out_pat) != PARALLEL)
                    return false;
                  for (j = 0; j < XVECLEN (out_pat, 0); j++)
                    {
                      out_exp = XVECEXP (out_pat, 0, j);
                      if ((GET_CODE (out_exp) == CLOBBER)
                          || (GET_CODE (out_exp) == USE))
                        continue;
                      else if (GET_CODE (out_exp) != SET)
                        return false;
                    }
                }
            }
        }
    }
  return store_data_bypass_p (out_insn, in_insn);
}

\f
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32bit processors. */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1), /* mulsi */
  COSTS_N_INSNS (1), /* mulsi_const */
  COSTS_N_INSNS (1), /* mulsi_const9 */
  COSTS_N_INSNS (1), /* muldi */
  COSTS_N_INSNS (1), /* divsi */
  COSTS_N_INSNS (1), /* divdi */
  COSTS_N_INSNS (1), /* fp */
  COSTS_N_INSNS (1), /* dmul */
  COSTS_N_INSNS (1), /* sdiv */
  COSTS_N_INSNS (1), /* ddiv */
  32,  /* cache line size */
  0,   /* l1 cache */
  0,   /* l2 cache */
  0,   /* streams */
  0,   /* SF->DF convert */
};

/* Instruction size costs on 64bit processors. */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1), /* mulsi */
  COSTS_N_INSNS (1), /* mulsi_const */
  COSTS_N_INSNS (1), /* mulsi_const9 */
  COSTS_N_INSNS (1), /* muldi */
  COSTS_N_INSNS (1), /* divsi */
  COSTS_N_INSNS (1), /* divdi */
  COSTS_N_INSNS (1), /* fp */
  COSTS_N_INSNS (1), /* dmul */
  COSTS_N_INSNS (1), /* sdiv */
  COSTS_N_INSNS (1), /* ddiv */
  128, /* cache line size */
  0,   /* l1 cache */
  0,   /* l2 cache */
  0,   /* streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on RS64A processors. */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20), /* mulsi */
  COSTS_N_INSNS (12), /* mulsi_const */
  COSTS_N_INSNS (8),  /* mulsi_const9 */
  COSTS_N_INSNS (34), /* muldi */
  COSTS_N_INSNS (65), /* divsi */
  COSTS_N_INSNS (67), /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (31), /* sdiv */
  COSTS_N_INSNS (31), /* ddiv */
  128,  /* cache line size */
  128,  /* l1 cache */
  2048, /* l2 cache */
  1,    /* streams */
  0,    /* SF->DF convert */
};

/* Instruction costs on MPCCORE processors. */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (2),  /* muldi */
  COSTS_N_INSNS (6),  /* divsi */
  COSTS_N_INSNS (6),  /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (10), /* sdiv */
  COSTS_N_INSNS (17), /* ddiv */
  32, /* cache line size */
  4,  /* l1 cache */
  16, /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC403 processors. */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (33), /* divsi */
  COSTS_N_INSNS (33), /* divdi */
  COSTS_N_INSNS (11), /* fp */
  COSTS_N_INSNS (11), /* dmul */
  COSTS_N_INSNS (11), /* sdiv */
  COSTS_N_INSNS (11), /* ddiv */
  32, /* cache line size */
  4,  /* l1 cache */
  16, /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC405 processors. */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (35), /* divsi */
  COSTS_N_INSNS (35), /* divdi */
  COSTS_N_INSNS (11), /* fp */
  COSTS_N_INSNS (11), /* dmul */
  COSTS_N_INSNS (11), /* sdiv */
  COSTS_N_INSNS (11), /* ddiv */
  32,  /* cache line size */
  16,  /* l1 cache */
  128, /* l2 cache */
  1,   /* streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on PPC440 processors. */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (3),  /* muldi */
  COSTS_N_INSNS (34), /* divsi */
  COSTS_N_INSNS (34), /* divdi */
  COSTS_N_INSNS (5),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (19), /* sdiv */
  COSTS_N_INSNS (33), /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  256, /* l2 cache */
  1,   /* streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on PPC476 processors. */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (11), /* divsi */
  COSTS_N_INSNS (11), /* divdi */
  COSTS_N_INSNS (6),  /* fp */
  COSTS_N_INSNS (6),  /* dmul */
  COSTS_N_INSNS (19), /* sdiv */
  COSTS_N_INSNS (33), /* ddiv */
  32,  /* l1 cache line size */
  32,  /* l1 cache */
  512, /* l2 cache */
  1,   /* streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on PPC601 processors. */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (5),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (36), /* divsi */
  COSTS_N_INSNS (36), /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (17), /* sdiv */
  COSTS_N_INSNS (31), /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  256, /* l2 cache */
  1,   /* streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on PPC603 processors. */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (37), /* divsi */
  COSTS_N_INSNS (37), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (18), /* sdiv */
  COSTS_N_INSNS (33), /* ddiv */
  32, /* cache line size */
  8,  /* l1 cache */
  64, /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC604 processors. */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (20), /* divsi */
  COSTS_N_INSNS (20), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (18), /* sdiv */
  COSTS_N_INSNS (32), /* ddiv */
  32,  /* cache line size */
  16,  /* l1 cache */
  512, /* l2 cache */
  1,   /* streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on PPC604e processors. */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (2),  /* muldi */
  COSTS_N_INSNS (20), /* divsi */
  COSTS_N_INSNS (20), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (18), /* sdiv */
  COSTS_N_INSNS (32), /* ddiv */
  32,   /* cache line size */
  32,   /* l1 cache */
  1024, /* l2 cache */
  1,    /* streams */
  0,    /* SF->DF convert */
};

/* Instruction costs on PPC620 processors. */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (7),  /* muldi */
  COSTS_N_INSNS (21), /* divsi */
  COSTS_N_INSNS (37), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (18), /* sdiv */
  COSTS_N_INSNS (32), /* ddiv */
  128,  /* cache line size */
  32,   /* l1 cache */
  1024, /* l2 cache */
  1,    /* streams */
  0,    /* SF->DF convert */
};

/* Instruction costs on PPC630 processors. */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (7),  /* muldi */
  COSTS_N_INSNS (21), /* divsi */
  COSTS_N_INSNS (37), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (17), /* sdiv */
  COSTS_N_INSNS (21), /* ddiv */
  128,  /* cache line size */
  64,   /* l1 cache */
  1024, /* l2 cache */
  1,    /* streams */
  0,    /* SF->DF convert */
};

/* Instruction costs on Cell processor. */
/* COSTS_N_INSNS (1) ~ one add. */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,  /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2, /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128, /* cache line size */
  32,  /* l1 cache */
  512, /* l2 cache */
  6,   /* streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors. */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (17), /* divsi */
  COSTS_N_INSNS (17), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (17), /* sdiv */
  COSTS_N_INSNS (31), /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  512, /* l2 cache */
  1,   /* streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on PPC7450 processors. */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (23), /* divsi */
  COSTS_N_INSNS (23), /* divdi */
  COSTS_N_INSNS (5),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (21), /* sdiv */
  COSTS_N_INSNS (35), /* ddiv */
  32,   /* cache line size */
  32,   /* l1 cache */
  1024, /* l2 cache */
  1,    /* streams */
  0,    /* SF->DF convert */
};

/* Instruction costs on PPC8540 processors. */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (19), /* divsi */
  COSTS_N_INSNS (19), /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (29), /* sdiv */
  COSTS_N_INSNS (29), /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  256, /* l2 cache */
  1,   /* prefetch streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores. */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (19), /* divsi */
  COSTS_N_INSNS (19), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (18), /* sdiv */
  COSTS_N_INSNS (33), /* ddiv */
  32, /* cache line size */
  16, /* l1 cache */
  16, /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors. */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (14), /* divsi */
  COSTS_N_INSNS (14), /* divdi */
  COSTS_N_INSNS (8),  /* fp */
  COSTS_N_INSNS (10), /* dmul */
  COSTS_N_INSNS (36), /* sdiv */
  COSTS_N_INSNS (66), /* ddiv */
  64,  /* cache line size */
  32,  /* l1 cache */
  128, /* l2 cache */
  1,   /* prefetch streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors. */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (14), /* divsi */
  COSTS_N_INSNS (14), /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (10), /* dmul */
  COSTS_N_INSNS (36), /* sdiv */
  COSTS_N_INSNS (66), /* ddiv */
  64,  /* cache line size */
  32,  /* l1 cache */
  128, /* l2 cache */
  1,   /* prefetch streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors. */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (14), /* divsi */
  COSTS_N_INSNS (14), /* divdi */
  COSTS_N_INSNS (7),  /* fp */
  COSTS_N_INSNS (10), /* dmul */
  COSTS_N_INSNS (36), /* sdiv */
  COSTS_N_INSNS (66), /* ddiv */
  64,  /* cache line size */
  32,  /* l1 cache */
  128, /* l2 cache */
  1,   /* prefetch streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors. */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (14), /* divsi */
  COSTS_N_INSNS (14), /* divdi */
  COSTS_N_INSNS (7),  /* fp */
  COSTS_N_INSNS (10), /* dmul */
  COSTS_N_INSNS (36), /* sdiv */
  COSTS_N_INSNS (66), /* ddiv */
  64,  /* cache line size */
  32,  /* l1 cache */
  128, /* l2 cache */
  1,   /* prefetch streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors. */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (5),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (18), /* divsi */
  COSTS_N_INSNS (18), /* divdi */
  COSTS_N_INSNS (10), /* fp */
  COSTS_N_INSNS (10), /* dmul */
  COSTS_N_INSNS (46), /* sdiv */
  COSTS_N_INSNS (72), /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  512, /* l2 cache */
  1,   /* prefetch streams */
  0,   /* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors. */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (18), /* divsi */
  COSTS_N_INSNS (34), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (17), /* sdiv */
  COSTS_N_INSNS (17), /* ddiv */
  128,  /* cache line size */
  32,   /* l1 cache */
  1024, /* l2 cache */
  8,    /* prefetch streams */
  0,    /* SF->DF convert */
};

/* Instruction costs on POWER6 processors. */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),  /* mulsi */
  COSTS_N_INSNS (8),  /* mulsi_const */
  COSTS_N_INSNS (8),  /* mulsi_const9 */
  COSTS_N_INSNS (8),  /* muldi */
  COSTS_N_INSNS (22), /* divsi */
  COSTS_N_INSNS (28), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (13), /* sdiv */
  COSTS_N_INSNS (16), /* ddiv */
  128,  /* cache line size */
  64,   /* l1 cache */
  2048, /* l2 cache */
  16,   /* prefetch streams */
  0,    /* SF->DF convert */
};

/* Instruction costs on POWER7 processors. */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (2),  /* muldi */
  COSTS_N_INSNS (18), /* divsi */
  COSTS_N_INSNS (34), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (13), /* sdiv */
  COSTS_N_INSNS (16), /* ddiv */
  128, /* cache line size */
  32,  /* l1 cache */
  256, /* l2 cache */
  12,  /* prefetch streams */
  COSTS_N_INSNS (3), /* SF->DF convert */
};

/* Instruction costs on POWER8 processors. */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (3),  /* muldi */
  COSTS_N_INSNS (19), /* divsi */
  COSTS_N_INSNS (35), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (14), /* sdiv */
  COSTS_N_INSNS (17), /* ddiv */
  128, /* cache line size */
  32,  /* l1 cache */
  256, /* l2 cache */
  12,  /* prefetch streams */
  COSTS_N_INSNS (3), /* SF->DF convert */
};

/* Instruction costs on POWER9 processors. */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (3),  /* muldi */
  COSTS_N_INSNS (8),  /* divsi */
  COSTS_N_INSNS (12), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (13), /* sdiv */
  COSTS_N_INSNS (18), /* ddiv */
  128, /* cache line size */
  32,  /* l1 cache */
  512, /* l2 cache */
  8,   /* prefetch streams */
  COSTS_N_INSNS (3), /* SF->DF convert */
};

/* Instruction costs on POWER A2 processors. */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16), /* mulsi */
  COSTS_N_INSNS (16), /* mulsi_const */
  COSTS_N_INSNS (16), /* mulsi_const9 */
  COSTS_N_INSNS (16), /* muldi */
  COSTS_N_INSNS (22), /* divsi */
  COSTS_N_INSNS (28), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (59), /* sdiv */
  COSTS_N_INSNS (72), /* ddiv */
  64,   /* cache line size */
  16,   /* l1 cache */
  2048, /* l2 cache */
  16,   /* prefetch streams */
  0,    /* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.). */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use. */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
                                      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
                                     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *,rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
                                   machine_mode, machine_mode,
                                   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
                                       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
                                             int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
                                                   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
                                                     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
                                                           machine_mode,
                                                           rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
                                                           enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
                                                  reg_class_t,
                                                  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
                                                machine_mode,
                                                reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
                                             int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
                                                     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
                                      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
                                          HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
                                          enum rs6000_reg_type,
                                          machine_mode,
                                          secondary_reload_info *,
                                          bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries. */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P. */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions. */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];   /* return value + 3 arguments. */
  unsigned char uns_p[4]; /* and whether the types are unsigned. */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names. */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers. */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer. */
  "sfp",
  /* HTM SPR registers. */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers. */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer. */
  "sfp",
  /* HTM SPR registers. */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes. */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec", 1, 1, false, true, false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall", 0, 0, false, true, true, false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true, true, false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit. */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
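/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0 in
   the most significant bit), and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31)
   is 0x00000001 (%v31 in the least significant bit). */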
\f
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF. Find the ops needed
   for non-ELF systems. */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets. */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin. */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code. */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

1672 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1673 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1674
1675 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1676 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1677 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1678 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1679 rs6000_builtin_support_vector_misalignment
1680 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1681 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1682 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1683 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1684 rs6000_builtin_vectorization_cost
1685 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1686 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1687 rs6000_preferred_simd_mode
1688 #undef TARGET_VECTORIZE_INIT_COST
1689 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1690 #undef TARGET_VECTORIZE_ADD_STMT_COST
1691 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1692 #undef TARGET_VECTORIZE_FINISH_COST
1693 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1694 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1695 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1696
1697 #undef TARGET_INIT_BUILTINS
1698 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1699 #undef TARGET_BUILTIN_DECL
1700 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1701
1702 #undef TARGET_FOLD_BUILTIN
1703 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1704 #undef TARGET_GIMPLE_FOLD_BUILTIN
1705 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1706
1707 #undef TARGET_EXPAND_BUILTIN
1708 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1709
1710 #undef TARGET_MANGLE_TYPE
1711 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1712
1713 #undef TARGET_INIT_LIBFUNCS
1714 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1715
1716 #if TARGET_MACHO
1717 #undef TARGET_BINDS_LOCAL_P
1718 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1719 #endif
1720
1721 #undef TARGET_MS_BITFIELD_LAYOUT_P
1722 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1723
1724 #undef TARGET_ASM_OUTPUT_MI_THUNK
1725 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1726
1727 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1728 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1729
1730 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1731 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1732
1733 #undef TARGET_REGISTER_MOVE_COST
1734 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1735 #undef TARGET_MEMORY_MOVE_COST
1736 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1737 #undef TARGET_CANNOT_COPY_INSN_P
1738 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1739 #undef TARGET_RTX_COSTS
1740 #define TARGET_RTX_COSTS rs6000_rtx_costs
1741 #undef TARGET_ADDRESS_COST
1742 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1743 #undef TARGET_INSN_COST
1744 #define TARGET_INSN_COST rs6000_insn_cost
1745
1746 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1747 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1748
1749 #undef TARGET_PROMOTE_FUNCTION_MODE
1750 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1751
1752 #undef TARGET_RETURN_IN_MEMORY
1753 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1754
1755 #undef TARGET_RETURN_IN_MSB
1756 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1757
1758 #undef TARGET_SETUP_INCOMING_VARARGS
1759 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1760
1761 /* Always strict argument naming on rs6000. */
1762 #undef TARGET_STRICT_ARGUMENT_NAMING
1763 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1764 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1765 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1766 #undef TARGET_SPLIT_COMPLEX_ARG
1767 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1768 #undef TARGET_MUST_PASS_IN_STACK
1769 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1770 #undef TARGET_PASS_BY_REFERENCE
1771 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1772 #undef TARGET_ARG_PARTIAL_BYTES
1773 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1774 #undef TARGET_FUNCTION_ARG_ADVANCE
1775 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1776 #undef TARGET_FUNCTION_ARG
1777 #define TARGET_FUNCTION_ARG rs6000_function_arg
1778 #undef TARGET_FUNCTION_ARG_PADDING
1779 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1780 #undef TARGET_FUNCTION_ARG_BOUNDARY
1781 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1782
1783 #undef TARGET_BUILD_BUILTIN_VA_LIST
1784 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1785
1786 #undef TARGET_EXPAND_BUILTIN_VA_START
1787 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1788
1789 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1790 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1791
1792 #undef TARGET_EH_RETURN_FILTER_MODE
1793 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1794
1795 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1796 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1797
1798 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1799 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1800
1801 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1802 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1803
1804 #undef TARGET_FLOATN_MODE
1805 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1806
1807 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1808 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1809
1810 #undef TARGET_MD_ASM_ADJUST
1811 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1812
1813 #undef TARGET_OPTION_OVERRIDE
1814 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1815
1816 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1817 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1818 rs6000_builtin_vectorized_function
1819
1820 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1821 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1822 rs6000_builtin_md_vectorized_function
1823
1824 #undef TARGET_STACK_PROTECT_GUARD
1825 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1826
1827 #if !TARGET_MACHO
1828 #undef TARGET_STACK_PROTECT_FAIL
1829 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1830 #endif
1831
1832 #ifdef HAVE_AS_TLS
1833 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1834 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1835 #endif
1836
1837 /* Use a 32-bit anchor range. This leads to sequences like:
1838
1839 addis tmp,anchor,high
1840 add dest,tmp,low
1841
1842 where tmp itself acts as an anchor, and can be shared between
1843 accesses to the same 64k page. */
1844 #undef TARGET_MIN_ANCHOR_OFFSET
1845 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1846 #undef TARGET_MAX_ANCHOR_OFFSET
1847 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
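/* As a worked example of the addis/add sequence above, assuming the usual
   hi/lo split in which the low 16 bits are sign-extended by the add:
     offset 0x12345678  ->  addis tmp,anchor,0x1234 ; add dest,tmp,0x5678
     offset 0x0001c000  ->  addis tmp,anchor,0x0002 ; add dest,tmp,-0x4000
   In both cases (high << 16) + (signed 16-bit) low recovers the offset.  */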
1848 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1849 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1850 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1851 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1852
1853 #undef TARGET_BUILTIN_RECIPROCAL
1854 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1855
1856 #undef TARGET_SECONDARY_RELOAD
1857 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1858 #undef TARGET_SECONDARY_MEMORY_NEEDED
1859 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1860 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1861 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1862
1863 #undef TARGET_LEGITIMATE_ADDRESS_P
1864 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1865
1866 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1867 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1868
1869 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1870 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1871
1872 #undef TARGET_CAN_ELIMINATE
1873 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1874
1875 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1876 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1877
1878 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1879 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1880
1881 #undef TARGET_TRAMPOLINE_INIT
1882 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1883
1884 #undef TARGET_FUNCTION_VALUE
1885 #define TARGET_FUNCTION_VALUE rs6000_function_value
1886
1887 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1888 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1889
1890 #undef TARGET_OPTION_SAVE
1891 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1892
1893 #undef TARGET_OPTION_RESTORE
1894 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1895
1896 #undef TARGET_OPTION_PRINT
1897 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1898
1899 #undef TARGET_CAN_INLINE_P
1900 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1901
1902 #undef TARGET_SET_CURRENT_FUNCTION
1903 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1904
1905 #undef TARGET_LEGITIMATE_CONSTANT_P
1906 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1907
1908 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1909 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1910
1911 #undef TARGET_CAN_USE_DOLOOP_P
1912 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1913
1914 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1915 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1916
1917 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1918 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1919 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1920 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1921 #undef TARGET_UNWIND_WORD_MODE
1922 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1923
1924 #undef TARGET_OFFLOAD_OPTIONS
1925 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1926
1927 #undef TARGET_C_MODE_FOR_SUFFIX
1928 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1929
1930 #undef TARGET_INVALID_BINARY_OP
1931 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1932
1933 #undef TARGET_OPTAB_SUPPORTED_P
1934 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1935
1936 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1937 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1938
1939 #undef TARGET_COMPARE_VERSION_PRIORITY
1940 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1941
1942 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1943 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1944 rs6000_generate_version_dispatcher_body
1945
1946 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1947 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1948 rs6000_get_function_versions_dispatcher
1949
1950 #undef TARGET_OPTION_FUNCTION_VERSIONS
1951 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1952
1953 #undef TARGET_HARD_REGNO_NREGS
1954 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1955 #undef TARGET_HARD_REGNO_MODE_OK
1956 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1957
1958 #undef TARGET_MODES_TIEABLE_P
1959 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1960
1961 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1962 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1963 rs6000_hard_regno_call_part_clobbered
1964
1965 #undef TARGET_SLOW_UNALIGNED_ACCESS
1966 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1967
1968 #undef TARGET_CAN_CHANGE_MODE_CLASS
1969 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1970
1971 #undef TARGET_CONSTANT_ALIGNMENT
1972 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1973
1974 #undef TARGET_STARTING_FRAME_OFFSET
1975 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1976
1977 #if TARGET_ELF && RS6000_WEAK
1978 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1979 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1980 #endif
1981
1982 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1983 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1984
1985 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1986 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1987 \f
1988
1989 /* Processor table. */
1990 struct rs6000_ptt
1991 {
1992 const char *const name; /* Canonical processor name. */
1993 const enum processor_type processor; /* Processor type enum value. */
1994 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1995 };
1996
1997 static struct rs6000_ptt const processor_target_table[] =
1998 {
1999 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
2000 #include "rs6000-cpus.def"
2001 #undef RS6000_CPU
2002 };
2003
2004 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2005 name is invalid. */
2006
2007 static int
2008 rs6000_cpu_name_lookup (const char *name)
2009 {
2010 size_t i;
2011
2012 if (name != NULL)
2013 {
2014 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2015 if (! strcmp (name, processor_target_table[i].name))
2016 return (int)i;
2017 }
2018
2019 return -1;
2020 }
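/* A minimal usage sketch (hypothetical caller; the actual index values
   depend on the order of entries in rs6000-cpus.def):

     int idx = rs6000_cpu_name_lookup ("power9");
     if (idx >= 0)
       ... use processor_target_table[idx].processor / .target_enable ...
     else
       ... diagnose the invalid -mcpu=/-mtune= name ...

   Both a NULL pointer and an unrecognized name yield -1.  */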
2021
2022 \f
2023 /* Return number of consecutive hard regs needed starting at reg REGNO
2024 to hold something of mode MODE.
2025 This is ordinarily the length in words of a value of mode MODE
2026 but can be less for certain modes in special long registers.
2027
2028 POWER and PowerPC GPRs hold 32 bits worth;
2029 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2030
2031 static int
2032 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2033 {
2034 unsigned HOST_WIDE_INT reg_size;
2035
2036 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2037 128-bit floating point that can go in vector registers, which has VSX
2038 memory addressing. */
2039 if (FP_REGNO_P (regno))
2040 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2041 ? UNITS_PER_VSX_WORD
2042 : UNITS_PER_FP_WORD);
2043
2044 else if (ALTIVEC_REGNO_P (regno))
2045 reg_size = UNITS_PER_ALTIVEC_WORD;
2046
2047 else
2048 reg_size = UNITS_PER_WORD;
2049
2050 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2051 }
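/* The division above is plain ceiling division.  For example, assuming
   UNITS_PER_WORD is 4 on a 32-bit target and 8 on a 64-bit one:
     DFmode (8 bytes) in a GPR:  (8 + 4 - 1) / 4 == 2 regs on 32-bit,
                                 (8 + 8 - 1) / 8 == 1 reg on 64-bit;
     V4SImode (16 bytes) in an Altivec register
     (UNITS_PER_ALTIVEC_WORD == 16):  (16 + 16 - 1) / 16 == 1 register.  */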
2052
2053 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2054 MODE. */
2055 static int
2056 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2057 {
2058 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2059
2060 if (COMPLEX_MODE_P (mode))
2061 mode = GET_MODE_INNER (mode);
2062
2063 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2064 register pairs, and PTImode is used wherever we need to deal with quad
2065 word memory operations. Don't allow quad words in the argument or frame
2066 pointer registers, just registers 0..31. */
2067 if (mode == PTImode)
2068 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2069 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2070 && ((regno & 1) == 0));
2071
2072 /* The VSX registers that overlap the FPRs are wider than the FPRs of
2073 non-VSX implementations. Don't allow an item to be split between a FP
2074 register and an Altivec register. Allow TImode in all VSX registers if
2075 the user asked for it. */
2076 if (TARGET_VSX && VSX_REGNO_P (regno)
2077 && (VECTOR_MEM_VSX_P (mode)
2078 || FLOAT128_VECTOR_P (mode)
2079 || reg_addr[mode].scalar_in_vmx_p
2080 || mode == TImode
2081 || (TARGET_VADDUQM && mode == V1TImode)))
2082 {
2083 if (FP_REGNO_P (regno))
2084 return FP_REGNO_P (last_regno);
2085
2086 if (ALTIVEC_REGNO_P (regno))
2087 {
2088 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2089 return 0;
2090
2091 return ALTIVEC_REGNO_P (last_regno);
2092 }
2093 }
2094
2095 /* The GPRs can hold any mode, but values bigger than one register
2096 cannot go past R31. */
2097 if (INT_REGNO_P (regno))
2098 return INT_REGNO_P (last_regno);
2099
2100 /* The float registers (except for VSX vector modes) can only hold floating
2101 modes and DImode, plus the small integer modes enabled by ISA 2.07/3.0. */
2102 if (FP_REGNO_P (regno))
2103 {
2104 if (FLOAT128_VECTOR_P (mode))
2105 return false;
2106
2107 if (SCALAR_FLOAT_MODE_P (mode)
2108 && (mode != TDmode || (regno % 2) == 0)
2109 && FP_REGNO_P (last_regno))
2110 return 1;
2111
2112 if (GET_MODE_CLASS (mode) == MODE_INT)
2113 {
2114 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2115 return 1;
2116
2117 if (TARGET_P8_VECTOR && (mode == SImode))
2118 return 1;
2119
2120 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2121 return 1;
2122 }
2123
2124 return 0;
2125 }
2126
2127 /* The CR register can only hold CC modes. */
2128 if (CR_REGNO_P (regno))
2129 return GET_MODE_CLASS (mode) == MODE_CC;
2130
2131 if (CA_REGNO_P (regno))
2132 return mode == Pmode || mode == SImode;
2133
2134 /* AltiVec modes can go only in AltiVec registers. */
2135 if (ALTIVEC_REGNO_P (regno))
2136 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2137 || mode == V1TImode);
2138
2139 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2140 registers, and the value must fit within the register set. */
2141
2142 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2143 }
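/* For instance, under the PTImode rule above, a quad word value can use the
   even/odd pair r10/r11 (regno 10 passes the (regno & 1) == 0 check), while
   r11 is rejected, as are the argument and frame pointer pseudo-registers
   since they lie outside FIRST_GPR_REGNO..LAST_GPR_REGNO.  */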
2144
2145 /* Implement TARGET_HARD_REGNO_NREGS. */
2146
2147 static unsigned int
2148 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2149 {
2150 return rs6000_hard_regno_nregs[mode][regno];
2151 }
2152
2153 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2154
2155 static bool
2156 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2157 {
2158 return rs6000_hard_regno_mode_ok_p[mode][regno];
2159 }
2160
2161 /* Implement TARGET_MODES_TIEABLE_P.
2162
2163 PTImode cannot tie with other modes because PTImode is restricted to even
2164 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2165 57744).
2166
2167 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2168 128-bit floating point on VSX systems ties with other vectors. */
2169
2170 static bool
2171 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2172 {
2173 if (mode1 == PTImode)
2174 return mode2 == PTImode;
2175 if (mode2 == PTImode)
2176 return false;
2177
2178 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2179 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2180 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2181 return false;
2182
2183 if (SCALAR_FLOAT_MODE_P (mode1))
2184 return SCALAR_FLOAT_MODE_P (mode2);
2185 if (SCALAR_FLOAT_MODE_P (mode2))
2186 return false;
2187
2188 if (GET_MODE_CLASS (mode1) == MODE_CC)
2189 return GET_MODE_CLASS (mode2) == MODE_CC;
2190 if (GET_MODE_CLASS (mode2) == MODE_CC)
2191 return false;
2192
2193 return true;
2194 }
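/* Some examples of the resulting relation:
     PTImode ties only with PTImode (never with TImode);
     V4SImode ties with V2DFmode (both are Altivec/VSX vector modes);
     SFmode ties with DFmode (both are scalar float);
     CCmode ties with CCUNSmode (both are MODE_CC);
     SImode ties with DImode (neither falls in a restricted class above).  */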
2195
2196 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2197
2198 static bool
2199 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2200 {
2201 if (TARGET_32BIT
2202 && TARGET_POWERPC64
2203 && GET_MODE_SIZE (mode) > 4
2204 && INT_REGNO_P (regno))
2205 return true;
2206
2207 if (TARGET_VSX
2208 && FP_REGNO_P (regno)
2209 && GET_MODE_SIZE (mode) > 8
2210 && !FLOAT128_2REG_P (mode))
2211 return true;
2212
2213 return false;
2214 }
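/* For example, on a 32-bit ABI with -mpowerpc64, a DImode value in a GPR
   occupies a full 64-bit register, but the 32-bit ABI only preserves the
   low 32 bits across calls, so the value is partially clobbered.  Likewise,
   under VSX an IEEE 128-bit value in a traditional FPR spans the full
   128-bit VSX register while only the FPR half is call-saved.  */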
2215
2216 /* Print interesting facts about registers. */
2217 static void
2218 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2219 {
2220 int r, m;
2221
2222 for (r = first_regno; r <= last_regno; ++r)
2223 {
2224 const char *comma = "";
2225 int len;
2226
2227 if (first_regno == last_regno)
2228 fprintf (stderr, "%s:\t", reg_name);
2229 else
2230 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2231
2232 len = 8;
2233 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2234 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2235 {
2236 if (len > 70)
2237 {
2238 fprintf (stderr, ",\n\t");
2239 len = 8;
2240 comma = "";
2241 }
2242
2243 if (rs6000_hard_regno_nregs[m][r] > 1)
2244 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2245 rs6000_hard_regno_nregs[m][r]);
2246 else
2247 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2248
2249 comma = ", ";
2250 }
2251
2252 if (call_used_regs[r])
2253 {
2254 if (len > 70)
2255 {
2256 fprintf (stderr, ",\n\t");
2257 len = 8;
2258 comma = "";
2259 }
2260
2261 len += fprintf (stderr, "%s%s", comma, "call-used");
2262 comma = ", ";
2263 }
2264
2265 if (fixed_regs[r])
2266 {
2267 if (len > 70)
2268 {
2269 fprintf (stderr, ",\n\t");
2270 len = 8;
2271 comma = "";
2272 }
2273
2274 len += fprintf (stderr, "%s%s", comma, "fixed");
2275 comma = ", ";
2276 }
2277
2278 if (len > 70)
2279 {
2280 fprintf (stderr, ",\n\t");
2281 comma = "";
2282 }
2283
2284 len += fprintf (stderr, "%sreg-class = %s", comma,
2285 reg_class_names[(int)rs6000_regno_regclass[r]]);
2286 comma = ", ";
2287
2288 if (len > 70)
2289 {
2290 fprintf (stderr, ",\n\t");
2291 comma = "";
2292 }
2293
2294 fprintf (stderr, "%sregno = %d\n", comma, r);
2295 }
2296 }
2297
2298 static const char *
2299 rs6000_debug_vector_unit (enum rs6000_vector v)
2300 {
2301 const char *ret;
2302
2303 switch (v)
2304 {
2305 case VECTOR_NONE: ret = "none"; break;
2306 case VECTOR_ALTIVEC: ret = "altivec"; break;
2307 case VECTOR_VSX: ret = "vsx"; break;
2308 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2309 default: ret = "unknown"; break;
2310 }
2311
2312 return ret;
2313 }
2314
2315 /* Inner function printing just the address mask for a particular reload
2316 register class. */
2317 DEBUG_FUNCTION char *
2318 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2319 {
2320 static char ret[8];
2321 char *p = ret;
2322
2323 if ((mask & RELOAD_REG_VALID) != 0)
2324 *p++ = 'v';
2325 else if (keep_spaces)
2326 *p++ = ' ';
2327
2328 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2329 *p++ = 'm';
2330 else if (keep_spaces)
2331 *p++ = ' ';
2332
2333 if ((mask & RELOAD_REG_INDEXED) != 0)
2334 *p++ = 'i';
2335 else if (keep_spaces)
2336 *p++ = ' ';
2337
2338 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2339 *p++ = 'O';
2340 else if ((mask & RELOAD_REG_OFFSET) != 0)
2341 *p++ = 'o';
2342 else if (keep_spaces)
2343 *p++ = ' ';
2344
2345 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2346 *p++ = '+';
2347 else if (keep_spaces)
2348 *p++ = ' ';
2349
2350 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2351 *p++ = '+';
2352 else if (keep_spaces)
2353 *p++ = ' ';
2354
2355 if ((mask & RELOAD_REG_AND_M16) != 0)
2356 *p++ = '&';
2357 else if (keep_spaces)
2358 *p++ = ' ';
2359
2360 *p = '\0';
2361
2362 return ret;
2363 }
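/* Decoding the string above: the characters appear in the fixed order
   v (valid), m (multiple regs), i (indexed), O/o (quad/normal offset),
   + (pre-increment/decrement), + (pre-modify), & (AND -16).  So a mask of
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET
   | RELOAD_REG_PRE_INCDEC | RELOAD_REG_PRE_MODIFY prints as "vio++";
   with keep_spaces, the missing flags become blanks instead.  */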
2364
2365 /* Print the address masks in a human readable fashion. */
2366 DEBUG_FUNCTION void
2367 rs6000_debug_print_mode (ssize_t m)
2368 {
2369 ssize_t rc;
2370 int spaces = 0;
2371
2372 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2373 for (rc = 0; rc < N_RELOAD_REG; rc++)
2374 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2375 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2376
2377 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2378 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2379 {
2380 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2381 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2382 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2383 spaces = 0;
2384 }
2385 else
2386 spaces += sizeof (" Reload=sl") - 1;
2387
2388 if (reg_addr[m].scalar_in_vmx_p)
2389 {
2390 fprintf (stderr, "%*s Upper=y", spaces, "");
2391 spaces = 0;
2392 }
2393 else
2394 spaces += sizeof (" Upper=y") - 1;
2395
2396 if (rs6000_vector_unit[m] != VECTOR_NONE
2397 || rs6000_vector_mem[m] != VECTOR_NONE)
2398 {
2399 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2400 spaces, "",
2401 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2402 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2403 }
2404
2405 fputs ("\n", stderr);
2406 }
2407
2408 #define DEBUG_FMT_ID "%-32s= "
2409 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2410 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2411 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
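/* For example, DEBUG_FMT_S expands to "%-32s= %s\n", so
     fprintf (stderr, DEBUG_FMT_S, "cmodel", "small");
   prints the name left-justified in a 32-column field followed by
   "= small".  */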
2412
2413 /* Print various interesting information with -mdebug=reg. */
2414 static void
2415 rs6000_debug_reg_global (void)
2416 {
2417 static const char *const tf[2] = { "false", "true" };
2418 const char *nl = (const char *)0;
2419 int m;
2420 size_t m1, m2, v;
2421 char costly_num[20];
2422 char nop_num[20];
2423 char flags_buffer[40];
2424 const char *costly_str;
2425 const char *nop_str;
2426 const char *trace_str;
2427 const char *abi_str;
2428 const char *cmodel_str;
2429 struct cl_target_option cl_opts;
2430
2431 /* Modes we want tieable information on. */
2432 static const machine_mode print_tieable_modes[] = {
2433 QImode,
2434 HImode,
2435 SImode,
2436 DImode,
2437 TImode,
2438 PTImode,
2439 SFmode,
2440 DFmode,
2441 TFmode,
2442 IFmode,
2443 KFmode,
2444 SDmode,
2445 DDmode,
2446 TDmode,
2447 V16QImode,
2448 V8HImode,
2449 V4SImode,
2450 V2DImode,
2451 V1TImode,
2452 V32QImode,
2453 V16HImode,
2454 V8SImode,
2455 V4DImode,
2456 V2TImode,
2457 V4SFmode,
2458 V2DFmode,
2459 V8SFmode,
2460 V4DFmode,
2461 CCmode,
2462 CCUNSmode,
2463 CCEQmode,
2464 };
2465
2466 /* Virtual regs we are interested in. */
2467 static const struct {
2468 int regno; /* register number. */
2469 const char *name; /* register name. */
2470 } virtual_regs[] = {
2471 { STACK_POINTER_REGNUM, "stack pointer:" },
2472 { TOC_REGNUM, "toc: " },
2473 { STATIC_CHAIN_REGNUM, "static chain: " },
2474 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2475 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2476 { ARG_POINTER_REGNUM, "arg pointer: " },
2477 { FRAME_POINTER_REGNUM, "frame pointer:" },
2478 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2479 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2480 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2481 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2482 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2483 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2484 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2485 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2486 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2487 };
2488
2489 fputs ("\nHard register information:\n", stderr);
2490 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2491 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2492 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2493 LAST_ALTIVEC_REGNO,
2494 "vs");
2495 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2496 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2497 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2498 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2499 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2500 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2501
2502 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2503 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2504 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2505
2506 fprintf (stderr,
2507 "\n"
2508 "d reg_class = %s\n"
2509 "f reg_class = %s\n"
2510 "v reg_class = %s\n"
2511 "wa reg_class = %s\n"
2512 "wb reg_class = %s\n"
2513 "wd reg_class = %s\n"
2514 "we reg_class = %s\n"
2515 "wf reg_class = %s\n"
2516 "wg reg_class = %s\n"
2517 "wh reg_class = %s\n"
2518 "wi reg_class = %s\n"
2519 "wj reg_class = %s\n"
2520 "wk reg_class = %s\n"
2521 "wl reg_class = %s\n"
2522 "wm reg_class = %s\n"
2523 "wo reg_class = %s\n"
2524 "wp reg_class = %s\n"
2525 "wq reg_class = %s\n"
2526 "wr reg_class = %s\n"
2527 "ws reg_class = %s\n"
2528 "wt reg_class = %s\n"
2529 "wu reg_class = %s\n"
2530 "wv reg_class = %s\n"
2531 "ww reg_class = %s\n"
2532 "wx reg_class = %s\n"
2533 "wy reg_class = %s\n"
2534 "wz reg_class = %s\n"
2535 "wA reg_class = %s\n"
2536 "wH reg_class = %s\n"
2537 "wI reg_class = %s\n"
2538 "wJ reg_class = %s\n"
2539 "wK reg_class = %s\n"
2540 "\n",
2541 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2544 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2573
2574 nl = "\n";
2575 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2576 rs6000_debug_print_mode (m);
2577
2578 fputs ("\n", stderr);
2579
2580 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2581 {
2582 machine_mode mode1 = print_tieable_modes[m1];
2583 bool first_time = true;
2584
2585 nl = (const char *)0;
2586 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2587 {
2588 machine_mode mode2 = print_tieable_modes[m2];
2589 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2590 {
2591 if (first_time)
2592 {
2593 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2594 nl = "\n";
2595 first_time = false;
2596 }
2597
2598 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2599 }
2600 }
2601
2602 if (!first_time)
2603 fputs ("\n", stderr);
2604 }
2605
2606 if (nl)
2607 fputs (nl, stderr);
2608
2609 if (rs6000_recip_control)
2610 {
2611 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2612
2613 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2614 if (rs6000_recip_bits[m])
2615 {
2616 fprintf (stderr,
2617 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2618 GET_MODE_NAME (m),
2619 (RS6000_RECIP_AUTO_RE_P (m)
2620 ? "auto"
2621 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2622 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2623 ? "auto"
2624 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2625 }
2626
2627 fputs ("\n", stderr);
2628 }
2629
2630 if (rs6000_cpu_index >= 0)
2631 {
2632 const char *name = processor_target_table[rs6000_cpu_index].name;
2633 HOST_WIDE_INT flags
2634 = processor_target_table[rs6000_cpu_index].target_enable;
2635
2636 sprintf (flags_buffer, "-mcpu=%s flags", name);
2637 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2638 }
2639 else
2640 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2641
2642 if (rs6000_tune_index >= 0)
2643 {
2644 const char *name = processor_target_table[rs6000_tune_index].name;
2645 HOST_WIDE_INT flags
2646 = processor_target_table[rs6000_tune_index].target_enable;
2647
2648 sprintf (flags_buffer, "-mtune=%s flags", name);
2649 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2650 }
2651 else
2652 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2653
2654 cl_target_option_save (&cl_opts, &global_options);
2655 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2656 rs6000_isa_flags);
2657
2658 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2659 rs6000_isa_flags_explicit);
2660
2661 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2662 rs6000_builtin_mask);
2663
2664 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2665
2666 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2667 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2668
2669 switch (rs6000_sched_costly_dep)
2670 {
2671 case max_dep_latency:
2672 costly_str = "max_dep_latency";
2673 break;
2674
2675 case no_dep_costly:
2676 costly_str = "no_dep_costly";
2677 break;
2678
2679 case all_deps_costly:
2680 costly_str = "all_deps_costly";
2681 break;
2682
2683 case true_store_to_load_dep_costly:
2684 costly_str = "true_store_to_load_dep_costly";
2685 break;
2686
2687 case store_to_load_dep_costly:
2688 costly_str = "store_to_load_dep_costly";
2689 break;
2690
2691 default:
2692 costly_str = costly_num;
2693 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2694 break;
2695 }
2696
2697 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2698
2699 switch (rs6000_sched_insert_nops)
2700 {
2701 case sched_finish_regroup_exact:
2702 nop_str = "sched_finish_regroup_exact";
2703 break;
2704
2705 case sched_finish_pad_groups:
2706 nop_str = "sched_finish_pad_groups";
2707 break;
2708
2709 case sched_finish_none:
2710 nop_str = "sched_finish_none";
2711 break;
2712
2713 default:
2714 nop_str = nop_num;
2715 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2716 break;
2717 }
2718
2719 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2720
2721 switch (rs6000_sdata)
2722 {
2723 default:
2724 case SDATA_NONE:
2725 break;
2726
2727 case SDATA_DATA:
2728 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2729 break;
2730
2731 case SDATA_SYSV:
2732 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2733 break;
2734
2735 case SDATA_EABI:
2736 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2737 break;
2738
2739 }
2740
2741 switch (rs6000_traceback)
2742 {
2743 case traceback_default: trace_str = "default"; break;
2744 case traceback_none: trace_str = "none"; break;
2745 case traceback_part: trace_str = "part"; break;
2746 case traceback_full: trace_str = "full"; break;
2747 default: trace_str = "unknown"; break;
2748 }
2749
2750 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2751
2752 switch (rs6000_current_cmodel)
2753 {
2754 case CMODEL_SMALL: cmodel_str = "small"; break;
2755 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2756 case CMODEL_LARGE: cmodel_str = "large"; break;
2757 default: cmodel_str = "unknown"; break;
2758 }
2759
2760 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2761
2762 switch (rs6000_current_abi)
2763 {
2764 case ABI_NONE: abi_str = "none"; break;
2765 case ABI_AIX: abi_str = "aix"; break;
2766 case ABI_ELFv2: abi_str = "ELFv2"; break;
2767 case ABI_V4: abi_str = "V4"; break;
2768 case ABI_DARWIN: abi_str = "darwin"; break;
2769 default: abi_str = "unknown"; break;
2770 }
2771
2772 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2773
2774 if (rs6000_altivec_abi)
2775 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2776
2777 if (rs6000_darwin64_abi)
2778 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2779
2780 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2781 (TARGET_SOFT_FLOAT ? "true" : "false"));
2782
2783 if (TARGET_LINK_STACK)
2784 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2785
2786 if (TARGET_P8_FUSION)
2787 {
2788 char options[80];
2789
2790 strcpy (options, "power8");
2791 if (TARGET_P8_FUSION_SIGN)
2792 strcat (options, ", sign");
2793
2794 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2795 }
2796
2797 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2798 TARGET_SECURE_PLT ? "secure" : "bss");
2799 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2800 aix_struct_return ? "aix" : "sysv");
2801 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2802 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2803 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2804 tf[!!rs6000_align_branch_targets]);
2805 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2806 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2807 rs6000_long_double_type_size);
2808 if (rs6000_long_double_type_size > 64)
2809 {
2810 fprintf (stderr, DEBUG_FMT_S, "long double type",
2811 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2812 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2813 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2814 }
2815 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2816 (int)rs6000_sched_restricted_insns_priority);
2817 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2818 (int)END_BUILTINS);
2819 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2820 (int)RS6000_BUILTIN_COUNT);
2821
2822 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2823 (int)TARGET_FLOAT128_ENABLE_TYPE);
2824
2825 if (TARGET_VSX)
2826 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2827 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2828
2829 if (TARGET_DIRECT_MOVE_128)
2830 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2831 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2832 }
2833
2834 \f
2835 /* Update the addr mask bits in reg_addr to help secondary reload and the
2836 legitimate address support figure out the appropriate addressing to
2837 use. */
2838
2839 static void
2840 rs6000_setup_reg_addr_masks (void)
2841 {
2842 ssize_t rc, reg, m, nregs;
2843 addr_mask_type any_addr_mask, addr_mask;
2844
2845 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2846 {
2847 machine_mode m2 = (machine_mode) m;
2848 bool complex_p = false;
2849 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2850 size_t msize;
2851
2852 if (COMPLEX_MODE_P (m2))
2853 {
2854 complex_p = true;
2855 m2 = GET_MODE_INNER (m2);
2856 }
2857
2858 msize = GET_MODE_SIZE (m2);
2859
2860 /* SDmode is special in that we want to access it only via REG+REG
2861 addressing on power7 and above, so that we can use the LFIWZX and
2862 STFIWX instructions to load and store it. */
2863 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2864
2865 any_addr_mask = 0;
2866 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2867 {
2868 addr_mask = 0;
2869 reg = reload_reg_map[rc].reg;
2870
2871 /* Can mode values go in the GPR/FPR/Altivec registers? */
2872 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2873 {
2874 bool small_int_vsx_p = (small_int_p
2875 && (rc == RELOAD_REG_FPR
2876 || rc == RELOAD_REG_VMX));
2877
2878 nregs = rs6000_hard_regno_nregs[m][reg];
2879 addr_mask |= RELOAD_REG_VALID;
2880
2881 /* Indicate if the mode takes more than 1 physical register. If
2882 it takes a single register, indicate it can do REG+REG
2883 addressing. Small integers in VSX registers can only do
2884 REG+REG addressing. */
2885 if (small_int_vsx_p)
2886 addr_mask |= RELOAD_REG_INDEXED;
2887 else if (nregs > 1 || m == BLKmode || complex_p)
2888 addr_mask |= RELOAD_REG_MULTIPLE;
2889 else
2890 addr_mask |= RELOAD_REG_INDEXED;
2891
2892 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2893 addressing. If we allow scalars into Altivec registers,
2894 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2895
2896 For VSX systems, we don't allow update addressing for
2897 DFmode/SFmode if those registers can go in both the
2898 traditional floating point registers and Altivec registers.
2899 The load/store instructions for the Altivec registers do not
2900 have update forms. If we allowed update addressing, it seems
2901 to break IV-OPT code using floating point if the index type is
2902 int instead of long (PR target/81550 and target/84042). */
2903
2904 if (TARGET_UPDATE
2905 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2906 && msize <= 8
2907 && !VECTOR_MODE_P (m2)
2908 && !FLOAT128_VECTOR_P (m2)
2909 && !complex_p
2910 && (m != E_DFmode || !TARGET_VSX)
2911 && (m != E_SFmode || !TARGET_P8_VECTOR)
2912 && !small_int_vsx_p)
2913 {
2914 addr_mask |= RELOAD_REG_PRE_INCDEC;
2915
2916 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2917 we don't allow PRE_MODIFY for some multi-register
2918 operations. */
2919 switch (m)
2920 {
2921 default:
2922 addr_mask |= RELOAD_REG_PRE_MODIFY;
2923 break;
2924
2925 case E_DImode:
2926 if (TARGET_POWERPC64)
2927 addr_mask |= RELOAD_REG_PRE_MODIFY;
2928 break;
2929
2930 case E_DFmode:
2931 case E_DDmode:
2932 if (TARGET_HARD_FLOAT)
2933 addr_mask |= RELOAD_REG_PRE_MODIFY;
2934 break;
2935 }
2936 }
2937 }
2938
2939 /* GPR and FPR registers can do REG+OFFSET addressing, except
2940 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2941 for 64-bit scalars and 32-bit SFmode to Altivec registers. */
2942 if ((addr_mask != 0) && !indexed_only_p
2943 && msize <= 8
2944 && (rc == RELOAD_REG_GPR
2945 || ((msize == 8 || m2 == SFmode)
2946 && (rc == RELOAD_REG_FPR
2947 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2948 addr_mask |= RELOAD_REG_OFFSET;
2949
2950 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2951 instructions are enabled. The offset for 128-bit VSX registers is
2952 only 12 bits. While GPRs can handle the full offset range, VSX
2953 registers can only handle the restricted range. */
2954 else if ((addr_mask != 0) && !indexed_only_p
2955 && msize == 16 && TARGET_P9_VECTOR
2956 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2957 || (m2 == TImode && TARGET_VSX)))
2958 {
2959 addr_mask |= RELOAD_REG_OFFSET;
2960 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2961 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2962 }
2963
2964 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2965 addressing on 128-bit types. */
2966 if (rc == RELOAD_REG_VMX && msize == 16
2967 && (addr_mask & RELOAD_REG_VALID) != 0)
2968 addr_mask |= RELOAD_REG_AND_M16;
2969
2970 reg_addr[m].addr_mask[rc] = addr_mask;
2971 any_addr_mask |= addr_mask;
2972 }
2973
2974 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2975 }
2976 }
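/* As a sketch of the result (assuming TARGET_UPDATE and a 64-bit hard-float
   target without VSX): DFmode fits in a single register, so both the GPR
   and FPR entries get RELOAD_REG_VALID | RELOAD_REG_INDEXED
   | RELOAD_REG_OFFSET plus the RELOAD_REG_PRE_INCDEC and
   RELOAD_REG_PRE_MODIFY update bits, i.e. the -mdebug=reg output "vio++"
   for those register classes.  */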
2977
2978 \f
2979 /* Initialize the various global tables that are based on register size. */
2980 static void
2981 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2982 {
2983 ssize_t r, m, c;
2984 int align64;
2985 int align32;
2986
2987 /* Precalculate REGNO_REG_CLASS. */
2988 rs6000_regno_regclass[0] = GENERAL_REGS;
2989 for (r = 1; r < 32; ++r)
2990 rs6000_regno_regclass[r] = BASE_REGS;
2991
2992 for (r = 32; r < 64; ++r)
2993 rs6000_regno_regclass[r] = FLOAT_REGS;
2994
2995 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2996 rs6000_regno_regclass[r] = NO_REGS;
2997
2998 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2999 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3000
3001 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3002 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3003 rs6000_regno_regclass[r] = CR_REGS;
3004
3005 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3006 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3007 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3008 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3009 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3010 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3011 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3012 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3013 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3014 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3015
3016 /* Precalculate the mapping from register class to the simpler reload
3017 register type. We don't need all of the register classes that are
3018 combinations of different classes, just the simple ones that have constraint letters. */
3019 for (c = 0; c < N_REG_CLASSES; c++)
3020 reg_class_to_reg_type[c] = NO_REG_TYPE;
3021
3022 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3023 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3024 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3025 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3026 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3027 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3028 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3029 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3030 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3031 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3032
3033 if (TARGET_VSX)
3034 {
3035 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3036 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3037 }
3038 else
3039 {
3040 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3041 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3042 }
3043
3044 /* Precalculate the valid memory formats as well as the vector information;
3045 this must be set up before the rs6000_hard_regno_nregs_internal calls
3046 below. */
3047 gcc_assert ((int)VECTOR_NONE == 0);
3048 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3049 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3050
3051 gcc_assert ((int)CODE_FOR_nothing == 0);
3052 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3053
3054 gcc_assert ((int)NO_REGS == 0);
3055 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3056
3057 /* The VSX hardware allows native alignment for vectors, but we control
3058 whether the compiler believes it can use native alignment or must still use 128-bit alignment. */
3059 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3060 {
3061 align64 = 64;
3062 align32 = 32;
3063 }
3064 else
3065 {
3066 align64 = 128;
3067 align32 = 128;
3068 }
3069
3070 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3071 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3072 if (TARGET_FLOAT128_TYPE)
3073 {
3074 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3075 rs6000_vector_align[KFmode] = 128;
3076
3077 if (FLOAT128_IEEE_P (TFmode))
3078 {
3079 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3080 rs6000_vector_align[TFmode] = 128;
3081 }
3082 }
3083
3084 /* V2DF mode, VSX only. */
3085 if (TARGET_VSX)
3086 {
3087 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3088 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3089 rs6000_vector_align[V2DFmode] = align64;
3090 }
3091
3092 /* V4SF mode, either VSX or Altivec. */
3093 if (TARGET_VSX)
3094 {
3095 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3096 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3097 rs6000_vector_align[V4SFmode] = align32;
3098 }
3099 else if (TARGET_ALTIVEC)
3100 {
3101 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3102 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3103 rs6000_vector_align[V4SFmode] = align32;
3104 }
3105
3106 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3107 and stores. */
3108 if (TARGET_ALTIVEC)
3109 {
3110 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3111 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3112 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3113 rs6000_vector_align[V4SImode] = align32;
3114 rs6000_vector_align[V8HImode] = align32;
3115 rs6000_vector_align[V16QImode] = align32;
3116
3117 if (TARGET_VSX)
3118 {
3119 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3120 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3121 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3122 }
3123 else
3124 {
3125 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3126 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3127 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3128 }
3129 }
3130
3131 /* V2DImode: full arithmetic support depends on the ISA 2.07 vector
3132 instructions. Allow it under VSX for insert/splat/extract, since Altivec doesn't have 64-bit integer support. */
3133 if (TARGET_VSX)
3134 {
3135 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3136 rs6000_vector_unit[V2DImode]
3137 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3138 rs6000_vector_align[V2DImode] = align64;
3139
3140 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3141 rs6000_vector_unit[V1TImode]
3142 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3143 rs6000_vector_align[V1TImode] = 128;
3144 }
3145
3146 /* DFmode, see if we want to use the VSX unit. Memory is handled
3147 differently, so don't set rs6000_vector_mem. */
3148 if (TARGET_VSX)
3149 {
3150 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3151 rs6000_vector_align[DFmode] = 64;
3152 }
3153
3154 /* SFmode, see if we want to use the VSX unit. */
3155 if (TARGET_P8_VECTOR)
3156 {
3157 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3158 rs6000_vector_align[SFmode] = 32;
3159 }
3160
3161 /* Allow TImode in VSX registers and set the VSX memory macros. */
3162 if (TARGET_VSX)
3163 {
3164 rs6000_vector_mem[TImode] = VECTOR_VSX;
3165 rs6000_vector_align[TImode] = align64;
3166 }
3167
3168 /* Register class constraints for the constraints that depend on compile
3169 switches. When the VSX code was added, different constraints were added
3170 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3171 of the VSX registers are used. The register classes for scalar floating
3172 point types are set based on whether we allow that type into the upper
3173 (Altivec) registers. GCC has register classes to target the Altivec
3174 registers for load/store operations, to select using a VSX memory
3175 operation instead of the traditional floating point operation. The
3176 constraints are:
3177
3178 d - Register class to use with traditional DFmode instructions.
3179 f - Register class to use with traditional SFmode instructions.
3180 v - Altivec register.
3181 wa - Any VSX register.
3182 wc - Reserved to represent individual CR bits (used in LLVM).
3183 wd - Preferred register class for V2DFmode.
3184 wf - Preferred register class for V4SFmode.
3185 wg - Float register for power6x move insns.
3186 wh - FP register for direct move instructions.
3187 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3188 wj - FP or VSX register to hold 64-bit integers for direct moves.
3189 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3190 wl - Float register if we can do 32-bit signed int loads.
3191 wm - VSX register for ISA 2.07 direct move operations.
3192 wn - always NO_REGS.
3193 wr - GPR if 64-bit mode is permitted.
3194 ws - Register class to do ISA 2.06 DF operations.
3195 wt - VSX register for TImode in VSX registers.
3196 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3197 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3198 ww - Register class to do SF conversions in with VSX operations.
3199 wx - Float register if we can do 32-bit int stores.
3200 wy - Register class to do ISA 2.07 SF operations.
3201 wz - Float register if we can do 32-bit unsigned int loads.
3202 wH - Altivec register if SImode is allowed in VSX registers.
3203 wI - VSX register if SImode is allowed in VSX registers.
3204 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3205 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3206
3207 if (TARGET_HARD_FLOAT)
3208 {
3209 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3210 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3211 }
3212
3213 if (TARGET_VSX)
3214 {
3215 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3216 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3217 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3218 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3219 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3220 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3221 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3222 }
3223
3224 /* Add conditional constraints based on various options, to allow us to
3225 collapse multiple insn patterns. */
3226 if (TARGET_ALTIVEC)
3227 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3228
3229 if (TARGET_MFPGPR) /* DFmode */
3230 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3231
3232 if (TARGET_LFIWAX)
3233 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3234
3235 if (TARGET_DIRECT_MOVE)
3236 {
3237 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3238 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3239 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3240 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3241 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3242 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3243 }
3244
3245 if (TARGET_POWERPC64)
3246 {
3247 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3248 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3249 }
3250
3251 if (TARGET_P8_VECTOR) /* SFmode */
3252 {
3253 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3254 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3255 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3256 }
3257 else if (TARGET_VSX)
3258 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3259
3260 if (TARGET_STFIWX)
3261 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3262
3263 if (TARGET_LFIWZX)
3264 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3265
3266 if (TARGET_FLOAT128_TYPE)
3267 {
3268 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3269 if (FLOAT128_IEEE_P (TFmode))
3270 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3271 }
3272
3273 if (TARGET_P9_VECTOR)
3274 {
3275 /* Support for new D-form instructions. */
3276 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3277
3278 /* Support for ISA 3.0 (power9) vectors. */
3279 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3280 }
3281
3282 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3283 if (TARGET_DIRECT_MOVE_128)
3284 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3285
3286 /* Support small integers in VSX registers. */
3287 if (TARGET_P8_VECTOR)
3288 {
3289 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3290 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3291 if (TARGET_P9_VECTOR)
3292 {
3293 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3294 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3295 }
3296 }
3297
3298 /* Set up the reload helper and direct move functions. */
3299 if (TARGET_VSX || TARGET_ALTIVEC)
3300 {
3301 if (TARGET_64BIT)
3302 {
3303 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3304 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3305 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3306 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3307 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3308 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3309 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3310 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3311 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3312 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3313 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3314 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3315 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3316 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3317 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3318 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3319 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3320 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3321 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3322 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3323
3324 if (FLOAT128_VECTOR_P (KFmode))
3325 {
3326 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3327 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3328 }
3329
3330 if (FLOAT128_VECTOR_P (TFmode))
3331 {
3332 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3333 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3334 }
3335
3336 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3337 available. */
3338 if (TARGET_NO_SDMODE_STACK)
3339 {
3340 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3341 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3342 }
3343
3344 if (TARGET_VSX)
3345 {
3346 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3347 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3348 }
3349
3350 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3351 {
3352 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3353 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3354 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3355 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3356 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3357 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3358 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3359 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3360 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3361
3362 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3363 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3364 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3365 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3366 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3367 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3368 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3369 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3370 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3371
3372 if (FLOAT128_VECTOR_P (KFmode))
3373 {
3374 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3375 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3376 }
3377
3378 if (FLOAT128_VECTOR_P (TFmode))
3379 {
3380 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3381 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3382 }
3383 }
3384 }
3385 else
3386 {
3387 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3388 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3389 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3390 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3391 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3392 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3393 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3394 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3395 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3396 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3397 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3398 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3399 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3400 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3401 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3402 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3403 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3404 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3405 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3406 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3407
3408 if (FLOAT128_VECTOR_P (KFmode))
3409 {
3410 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3411 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3412 }
3413
3414 if (FLOAT128_IEEE_P (TFmode))
3415 {
3416 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3417 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3418 }
3419
3420 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3421 available. */
3422 if (TARGET_NO_SDMODE_STACK)
3423 {
3424 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3425 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3426 }
3427
3428 if (TARGET_VSX)
3429 {
3430 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3431 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3432 }
3433
3434 if (TARGET_DIRECT_MOVE)
3435 {
3436 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3437 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3438 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3439 }
3440 }
3441
3442 reg_addr[DFmode].scalar_in_vmx_p = true;
3443 reg_addr[DImode].scalar_in_vmx_p = true;
3444
3445 if (TARGET_P8_VECTOR)
3446 {
3447 reg_addr[SFmode].scalar_in_vmx_p = true;
3448 reg_addr[SImode].scalar_in_vmx_p = true;
3449
3450 if (TARGET_P9_VECTOR)
3451 {
3452 reg_addr[HImode].scalar_in_vmx_p = true;
3453 reg_addr[QImode].scalar_in_vmx_p = true;
3454 }
3455 }
3456 }
3457
3458 /* Precalculate HARD_REGNO_NREGS. */
3459 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3460 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3461 rs6000_hard_regno_nregs[m][r]
3462 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3463
3464 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3465 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3466 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3467 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3468 rs6000_hard_regno_mode_ok_p[m][r] = true;
3469
3470 /* Precalculate CLASS_MAX_NREGS sizes. */
3471 for (c = 0; c < LIM_REG_CLASSES; ++c)
3472 {
3473 int reg_size;
3474
3475 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3476 reg_size = UNITS_PER_VSX_WORD;
3477
3478 else if (c == ALTIVEC_REGS)
3479 reg_size = UNITS_PER_ALTIVEC_WORD;
3480
3481 else if (c == FLOAT_REGS)
3482 reg_size = UNITS_PER_FP_WORD;
3483
3484 else
3485 reg_size = UNITS_PER_WORD;
3486
3487 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3488 {
3489 machine_mode m2 = (machine_mode)m;
3490 int reg_size2 = reg_size;
3491
3492 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3493 in VSX. */
3494 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3495 reg_size2 = UNITS_PER_FP_WORD;
3496
3497 rs6000_class_max_nregs[m][c]
3498 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3499 }
3500 }
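
/* A worked example of the computation above (assuming UNITS_PER_FP_WORD is 8
and UNITS_PER_VSX_WORD is 16): V2DFmode is 16 bytes, so in VSX_REGS it
needs (16 + 16 - 1) / 16 = 1 register. IFmode (the IBM double-double
format) is also 16 bytes, but FLOAT128_2REG_P forces reg_size2 back to 8,
giving (16 + 8 - 1) / 8 = 2 registers even within a VSX class. */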
3501
3502 /* Calculate the modes for which to automatically generate code using the
3503 reciprocal divide and square root instructions. In the future, we could
3504 possibly generate these instructions automatically even if the user did not
3505 specify -mrecip. On the older machines, the double precision reciprocal
3506 square root estimate is not accurate enough. */
3507 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3508 if (TARGET_FRES)
3509 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3510 if (TARGET_FRE)
3511 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3512 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3513 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3514 if (VECTOR_UNIT_VSX_P (V2DFmode))
3515 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3516
3517 if (TARGET_FRSQRTES)
3518 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3519 if (TARGET_FRSQRTE)
3520 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3521 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3522 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3523 if (VECTOR_UNIT_VSX_P (V2DFmode))
3524 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
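
/* For reference, the estimate instructions flagged above are only starting
points; the generated code refines them with Newton-Raphson steps,
conceptually:

reciprocal: x1 = x0 * (2.0 - d * x0) (refines 1/d)
reciprocal sqrt: x1 = x0 * (1.5 - 0.5 * d * x0 * x0) (refines 1/sqrt(d))

(a sketch of the standard iterations; the actual expansion lives in the
rs6000 software divide/sqrt expanders). */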
3525
3526 if (rs6000_recip_control)
3527 {
3528 if (!flag_finite_math_only)
3529 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3530 "-ffast-math");
3531 if (flag_trapping_math)
3532 warning (0, "%qs requires %qs or %qs", "-mrecip",
3533 "-fno-trapping-math", "-ffast-math");
3534 if (!flag_reciprocal_math)
3535 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3536 "-ffast-math");
3537 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3538 {
3539 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3540 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3541 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3542
3543 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3544 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3545 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3546
3547 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3548 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3549 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3550
3551 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3552 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3553 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3554
3555 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3556 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3557 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3558
3559 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3560 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3561 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3562
3563 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3564 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3565 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3566
3567 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3568 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3569 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3570 }
3571 }
3572
3573 /* Update the addr mask bits in reg_addr to help the secondary reload and
3574 legitimate address support figure out the appropriate addressing to
3575 use. */
3576 rs6000_setup_reg_addr_masks ();
3577
3578 if (global_init_p || TARGET_DEBUG_TARGET)
3579 {
3580 if (TARGET_DEBUG_REG)
3581 rs6000_debug_reg_global ();
3582
3583 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3584 fprintf (stderr,
3585 "SImode variable mult cost = %d\n"
3586 "SImode constant mult cost = %d\n"
3587 "SImode short constant mult cost = %d\n"
3588 "DImode multipliciation cost = %d\n"
3589 "SImode division cost = %d\n"
3590 "DImode division cost = %d\n"
3591 "Simple fp operation cost = %d\n"
3592 "DFmode multiplication cost = %d\n"
3593 "SFmode division cost = %d\n"
3594 "DFmode division cost = %d\n"
3595 "cache line size = %d\n"
3596 "l1 cache size = %d\n"
3597 "l2 cache size = %d\n"
3598 "simultaneous prefetches = %d\n"
3599 "\n",
3600 rs6000_cost->mulsi,
3601 rs6000_cost->mulsi_const,
3602 rs6000_cost->mulsi_const9,
3603 rs6000_cost->muldi,
3604 rs6000_cost->divsi,
3605 rs6000_cost->divdi,
3606 rs6000_cost->fp,
3607 rs6000_cost->dmul,
3608 rs6000_cost->sdiv,
3609 rs6000_cost->ddiv,
3610 rs6000_cost->cache_line_size,
3611 rs6000_cost->l1_cache_size,
3612 rs6000_cost->l2_cache_size,
3613 rs6000_cost->simultaneous_prefetches);
3614 }
3615 }
3616
3617 #if TARGET_MACHO
3618 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3619
3620 static void
3621 darwin_rs6000_override_options (void)
3622 {
3623 /* The Darwin ABI always includes AltiVec; it can't (validly) be turned
3624 off. */
3625 rs6000_altivec_abi = 1;
3626 TARGET_ALTIVEC_VRSAVE = 1;
3627 rs6000_current_abi = ABI_DARWIN;
3628
3629 if (DEFAULT_ABI == ABI_DARWIN
3630 && TARGET_64BIT)
3631 darwin_one_byte_bool = 1;
3632
3633 if (TARGET_64BIT && ! TARGET_POWERPC64)
3634 {
3635 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3636 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3637 }
3638 if (flag_mkernel)
3639 {
3640 rs6000_default_long_calls = 1;
3641 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3642 }
3643
3644 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3645 AltiVec. */
3646 if (!flag_mkernel && !flag_apple_kext
3647 && TARGET_64BIT
3648 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3649 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3650
3651 /* Unless the user (not the configurer) has explicitly overridden
3652 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to the
3653 G4 unless targeting the kernel. */
3654 if (!flag_mkernel
3655 && !flag_apple_kext
3656 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3657 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3658 && ! global_options_set.x_rs6000_cpu_index)
3659 {
3660 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3661 }
3662 }
3663 #endif
3664
3665 /* If not otherwise specified by a target, make 'long double' equivalent to
3666 'double'. */
3667
3668 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3669 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3670 #endif
3671
3672 /* Return the builtin mask of the various options that could affect which
3673 builtins are available. In the past we used target_flags, but we've run
3674 out of bits, and some options are no longer in target_flags. */
3675
3676 HOST_WIDE_INT
3677 rs6000_builtin_mask_calculate (void)
3678 {
3679 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3680 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3681 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3682 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3683 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3684 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3685 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3686 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3687 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3688 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3689 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3690 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3691 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3692 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3693 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3694 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3695 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3696 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3697 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3698 | ((TARGET_LONG_DOUBLE_128
3699 && TARGET_HARD_FLOAT
3700 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3701 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3702 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3703 }
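
/* A minimal sketch of how a consumer might test this mask (illustrative
only; the real consumers are the builtin expansion routines):

HOST_WIDE_INT bu_mask = rs6000_builtin_mask_calculate ();
if ((bu_mask & RS6000_BTM_ALTIVEC) == 0)
reject the AltiVec builtins for this configuration. */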
3704
3705 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3706 to clobber the XER[CA] bit because clobbering that bit without telling
3707 the compiler worked just fine with versions of GCC before GCC 5, and
3708 breaking a lot of older code in ways that are hard to track down is
3709 not such a great idea. */
3710
3711 static rtx_insn *
3712 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3713 vec<const char *> &/*constraints*/,
3714 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3715 {
3716 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3717 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3718 return NULL;
3719 }
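
/* For example (a sketch), an asm like

asm ("addic %0,%1,1" : "=r" (sum) : "r" (val));

silently alters XER[CA] via the carrying add yet declares no clobber;
adding the implicit clobber above keeps such pre-GCC-5 code working. */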
3720
3721 /* Override command line options.
3722
3723 Combine build-specific configuration information with options
3724 specified on the command line to set various state variables which
3725 influence code generation, optimization, and expansion of built-in
3726 functions. Assure that command-line configuration preferences are
3727 compatible with each other and with the build configuration; issue
3728 warnings while adjusting configuration or error messages while
3729 rejecting configuration.
3730
3731 Upon entry to this function:
3732
3733 This function is called once at the beginning of
3734 compilation, and then again at the start and end of compiling
3735 each section of code that has a different configuration, as
3736 indicated, for example, by adding the
3737
3738 __attribute__((__target__("cpu=power9")))
3739
3740 qualifier to a function definition or, for example, by bracketing
3741 code between
3742
3743 #pragma GCC target("altivec")
3744
3745 and
3746
3747 #pragma GCC reset_options
3748
3749 directives. Parameter global_init_p is true for the initial
3750 invocation, which initializes global variables, and false for all
3751 subsequent invocations.
3752
3753
3754 Various global state information is assumed to be valid. This
3755 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3756 default CPU specified at build configure time, TARGET_DEFAULT,
3757 representing the default set of option flags for the default
3758 target, and global_options_set.x_rs6000_isa_flags, representing
3759 which options were requested on the command line.
3760
3761 Upon return from this function:
3762
3763 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3764 was set by name on the command line. Additionally, if certain
3765 attributes are automatically enabled or disabled by this function
3766 in order to assure compatibility between options and
3767 configuration, the flags associated with those attributes are
3768 also set. By setting these "explicit bits", we avoid the risk
3769 that other code might accidentally overwrite these particular
3770 attributes with "default values".
3771
3772 The various bits of rs6000_isa_flags are set to indicate the
3773 target options that have been selected for the most current
3774 compilation efforts. This has the effect of also turning on the
3775 associated TARGET_XXX values since these are macros which are
3776 generally defined to test the corresponding bit of the
3777 rs6000_isa_flags variable.
3778
3779 The variable rs6000_builtin_mask is set to represent the target
3780 options for the most current compilation efforts, consistent with
3781 the current contents of rs6000_isa_flags. This variable controls
3782 expansion of built-in functions.
3783
3784 Various other global variables and fields of global structures
3785 (over 50 in all) are initialized to reflect the desired options
3786 for the most current compilation efforts. */
3787
3788 static bool
3789 rs6000_option_override_internal (bool global_init_p)
3790 {
3791 bool ret = true;
3792
3793 HOST_WIDE_INT set_masks;
3794 HOST_WIDE_INT ignore_masks;
3795 int cpu_index = -1;
3796 int tune_index;
3797 struct cl_target_option *main_target_opt
3798 = ((global_init_p || target_option_default_node == NULL)
3799 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3800
3801 /* Print defaults. */
3802 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3803 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3804
3805 /* Remember the explicit arguments. */
3806 if (global_init_p)
3807 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3808
3809 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3810 library functions, so warn about it. The flag may be useful for
3811 performance studies from time to time though, so don't disable it
3812 entirely. */
3813 if (global_options_set.x_rs6000_alignment_flags
3814 && rs6000_alignment_flags == MASK_ALIGN_POWER
3815 && DEFAULT_ABI == ABI_DARWIN
3816 && TARGET_64BIT)
3817 warning (0, "%qs is not supported for 64-bit Darwin;"
3818 " it is incompatible with the installed C and C++ libraries",
3819 "-malign-power");
3820
3821 /* Numerous experiments show that IRA-based loop pressure
3822 calculation works better for RTL loop invariant motion on targets
3823 with enough (>= 32) registers. It is an expensive optimization,
3824 so it is enabled only when optimizing for peak performance. */
3825 if (optimize >= 3 && global_init_p
3826 && !global_options_set.x_flag_ira_loop_pressure)
3827 flag_ira_loop_pressure = 1;
3828
3829 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3830 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3831 options were already specified. */
3832 if (flag_sanitize & SANITIZE_USER_ADDRESS
3833 && !global_options_set.x_flag_asynchronous_unwind_tables)
3834 flag_asynchronous_unwind_tables = 1;
3835
3836 /* Set the pointer size. */
3837 if (TARGET_64BIT)
3838 {
3839 rs6000_pmode = DImode;
3840 rs6000_pointer_size = 64;
3841 }
3842 else
3843 {
3844 rs6000_pmode = SImode;
3845 rs6000_pointer_size = 32;
3846 }
3847
3848 /* Some OSs don't support saving the high part of 64-bit registers on context
3849 switch. Other OSs don't support saving Altivec registers. On those OSs,
3850 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3851 if the user wants either, the user must explicitly specify them and we
3852 won't interfere with the user's specification. */
3853
3854 set_masks = POWERPC_MASKS;
3855 #ifdef OS_MISSING_POWERPC64
3856 if (OS_MISSING_POWERPC64)
3857 set_masks &= ~OPTION_MASK_POWERPC64;
3858 #endif
3859 #ifdef OS_MISSING_ALTIVEC
3860 if (OS_MISSING_ALTIVEC)
3861 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3862 | OTHER_VSX_VECTOR_MASKS);
3863 #endif
3864
3865 /* Don't let the processor default override flags that were given explicitly. */
3866 set_masks &= ~rs6000_isa_flags_explicit;
3867
3868 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3869 the cpu in a target attribute or pragma, but did not specify a tuning
3870 option, use the cpu for the tuning option rather than the option specified
3871 with -mtune on the command line. Process a '--with-cpu' configuration
3872 request as an implicit -mcpu. */
3873 if (rs6000_cpu_index >= 0)
3874 cpu_index = rs6000_cpu_index;
3875 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3876 cpu_index = main_target_opt->x_rs6000_cpu_index;
3877 else if (OPTION_TARGET_CPU_DEFAULT)
3878 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3879
3880 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3881 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3882 with those from the cpu, except for options that were explicitly set. If
3883 we don't have a cpu, do not override the target bits set in
3884 TARGET_DEFAULT. */
3885 if (cpu_index >= 0)
3886 {
3887 rs6000_cpu_index = cpu_index;
3888 rs6000_isa_flags &= ~set_masks;
3889 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3890 & set_masks);
3891 }
3892 else
3893 {
3894 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3895 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3896 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
3897 to using rs6000_isa_flags, we need to do the initialization here.
3898
3899 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3900 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3901 HOST_WIDE_INT flags;
3902 if (TARGET_DEFAULT)
3903 flags = TARGET_DEFAULT;
3904 else
3905 {
3906 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3907 const char *default_cpu = (!TARGET_POWERPC64
3908 ? "powerpc"
3909 : (BYTES_BIG_ENDIAN
3910 ? "powerpc64"
3911 : "powerpc64le"));
3912 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3913 flags = processor_target_table[default_cpu_index].target_enable;
3914 }
3915 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3916 }
3917
3918 if (rs6000_tune_index >= 0)
3919 tune_index = rs6000_tune_index;
3920 else if (cpu_index >= 0)
3921 rs6000_tune_index = tune_index = cpu_index;
3922 else
3923 {
3924 size_t i;
3925 enum processor_type tune_proc
3926 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3927
3928 tune_index = -1;
3929 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3930 if (processor_target_table[i].processor == tune_proc)
3931 {
3932 tune_index = i;
3933 break;
3934 }
3935 }
3936
3937 if (cpu_index >= 0)
3938 rs6000_cpu = processor_target_table[cpu_index].processor;
3939 else
3940 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3941
3942 gcc_assert (tune_index >= 0);
3943 rs6000_tune = processor_target_table[tune_index].processor;
3944
3945 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3946 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3947 || rs6000_cpu == PROCESSOR_PPCE5500)
3948 {
3949 if (TARGET_ALTIVEC)
3950 error ("AltiVec not supported in this target");
3951 }
3952
3953 /* If we are optimizing big endian systems for space, use the load/store
3954 multiple instructions. */
3955 if (BYTES_BIG_ENDIAN && optimize_size)
3956 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3957
3958 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3959 because the hardware doesn't support the instructions used in little
3960 endian mode, and they cause an alignment trap. The 750 does not cause an
3961 alignment trap (except when the memory target is unaligned). */
3962
3963 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3964 {
3965 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3966 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3967 warning (0, "%qs is not supported on little endian systems",
3968 "-mmultiple");
3969 }
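
/* For reference, OPTION_MASK_MULTIPLE covers the load/store multiple
instructions; e.g. "lmw r29,0(r4)" loads r29..r31 from consecutive
words, and stmw is the matching store. Their word ordering is only
well defined for big endian (a sketch; see the load_multiple and
store_multiple patterns for the real uses). */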
3970
3971 /* If little-endian, default to -mstrict-align on older processors.
3972 Testing for htm matches power8 and later. */
3973 if (!BYTES_BIG_ENDIAN
3974 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3975 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3976
3977 if (!rs6000_fold_gimple)
3978 fprintf (stderr,
3979 "gimple folding of rs6000 builtins has been disabled.\n");
3980
3981 /* Add some warnings for VSX. */
3982 if (TARGET_VSX)
3983 {
3984 const char *msg = NULL;
3985 if (!TARGET_HARD_FLOAT)
3986 {
3987 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3988 msg = N_("-mvsx requires hardware floating point");
3989 else
3990 {
3991 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3992 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3993 }
3994 }
3995 else if (TARGET_AVOID_XFORM > 0)
3996 msg = N_("-mvsx needs indexed addressing");
3997 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3998 & OPTION_MASK_ALTIVEC))
3999 {
4000 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4001 msg = N_("-mvsx and -mno-altivec are incompatible");
4002 else
4003 msg = N_("-mno-altivec disables vsx");
4004 }
4005
4006 if (msg)
4007 {
4008 warning (0, msg);
4009 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4010 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4011 }
4012 }
4013
4014 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4015 the -mcpu setting to enable options that conflict. */
4016 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4017 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4018 | OPTION_MASK_ALTIVEC
4019 | OPTION_MASK_VSX)) != 0)
4020 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4021 | OPTION_MASK_DIRECT_MOVE)
4022 & ~rs6000_isa_flags_explicit);
4023
4024 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4025 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4026
4027 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4028 off all of the options that depend on those flags. */
4029 ignore_masks = rs6000_disable_incompatible_switches ();
4030
4031 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4032 unless the user explicitly used the -mno-<option> to disable the code. */
4033 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4034 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4035 else if (TARGET_P9_MINMAX)
4036 {
4037 if (cpu_index >= 0)
4038 {
4039 if (cpu_index == PROCESSOR_POWER9)
4040 {
4041 /* Legacy behavior: allow -mcpu=power9 with certain
4042 capabilities explicitly disabled. */
4043 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4044 }
4045 else
4046 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4047 "for <xxx> less than power9", "-mcpu");
4048 }
4049 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4050 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4051 & rs6000_isa_flags_explicit))
4052 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4053 were explicitly cleared. */
4054 error ("%qs incompatible with explicitly disabled options",
4055 "-mpower9-minmax");
4056 else
4057 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4058 }
4059 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4060 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4061 else if (TARGET_VSX)
4062 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4063 else if (TARGET_POPCNTD)
4064 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4065 else if (TARGET_DFP)
4066 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4067 else if (TARGET_CMPB)
4068 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4069 else if (TARGET_FPRND)
4070 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4071 else if (TARGET_POPCNTB)
4072 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4073 else if (TARGET_ALTIVEC)
4074 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4075
4076 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4077 {
4078 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4079 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4080 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4081 }
4082
4083 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4084 {
4085 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4086 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4087 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4088 }
4089
4090 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4091 {
4092 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4093 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4094 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4095 }
4096
4097 if (TARGET_P8_VECTOR && !TARGET_VSX)
4098 {
4099 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4100 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4101 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4102 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4103 {
4104 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4105 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4106 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4107 }
4108 else
4109 {
4110 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4111 not explicit. */
4112 rs6000_isa_flags |= OPTION_MASK_VSX;
4113 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4114 }
4115 }
4116
4117 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4118 {
4119 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4120 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4121 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4122 }
4123
4124 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4125 silently turn off quad memory mode. */
4126 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4127 {
4128 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4129 warning (0, N_("-mquad-memory requires 64-bit mode"));
4130
4131 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4132 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4133
4134 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4135 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4136 }
4137
4138 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4139 the words are reversed, but atomic operations can still be done by
4140 swapping the words. */
4141 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4142 {
4143 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4144 warning (0, N_("-mquad-memory is not available in little endian "
4145 "mode"));
4146
4147 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4148 }
4149
4150 /* Assume that if the user asked for normal quad memory instructions, they
4151 want the atomic versions as well, unless they explicitly told us not to
4152 use quad word atomic instructions. */
4153 if (TARGET_QUAD_MEMORY
4154 && !TARGET_QUAD_MEMORY_ATOMIC
4155 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4156 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4157
4158 /* If we can shrink-wrap the TOC register save separately, then use
4159 -msave-toc-indirect unless explicitly disabled. */
4160 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4161 && flag_shrink_wrap_separate
4162 && optimize_function_for_speed_p (cfun))
4163 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4164
4165 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4166 generating power8 instructions. Power9 does not optimize power8 fusion
4167 cases. */
4168 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4169 {
4170 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4171 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4172 else
4173 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4174 }
4175
4176 /* Setting additional fusion flags turns on base fusion. */
4177 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4178 {
4179 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4180 {
4181 if (TARGET_P8_FUSION_SIGN)
4182 error ("%qs requires %qs", "-mpower8-fusion-sign",
4183 "-mpower8-fusion");
4184
4185 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4186 }
4187 else
4188 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4189 }
4190
4191 /* Power8 does not fuse sign-extended loads with the addis. If we are
4192 optimizing at high levels for speed, convert a sign-extended load into a
4193 zero-extending load and an explicit sign extension. */
4194 if (TARGET_P8_FUSION
4195 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4196 && optimize_function_for_speed_p (cfun)
4197 && optimize >= 3)
4198 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
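
/* Roughly, this turns a sign-extending load sequence such as

addis r9,r3,0x1234
lwa r9,0x5678(r9) <- lwa does not fuse with the addis

into

addis r9,r3,0x1234
lwz r9,0x5678(r9) <- lwz fuses with the addis on power8
extsw r9,r9

(an illustrative sketch; the actual rewriting happens in the power8
fusion patterns in rs6000.md). */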
4199
4200 /* ISA 3.0 vector instructions include ISA 2.07. */
4201 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4202 {
4203 /* We prefer to not mention undocumented options in
4204 error messages. However, if users have managed to select
4205 power9-vector without selecting power8-vector, they
4206 already know about undocumented flags. */
4207 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4208 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4209 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4210 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4211 {
4212 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4213 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4214 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4215 }
4216 else
4217 {
4218 /* OPTION_MASK_P9_VECTOR is explicit and
4219 OPTION_MASK_P8_VECTOR is not explicit. */
4220 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4221 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4222 }
4223 }
4224
4225 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4226 support. If we only have ISA 2.06 support, and the user did not specify
4227 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4228 but we don't enable the full vectorization support. */
4229 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4230 TARGET_ALLOW_MOVMISALIGN = 1;
4231
4232 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4233 {
4234 if (TARGET_ALLOW_MOVMISALIGN > 0
4235 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4236 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4237
4238 TARGET_ALLOW_MOVMISALIGN = 0;
4239 }
4240
4241 /* Determine when unaligned vector accesses are permitted, and when
4242 they are preferred over masked Altivec loads. Note that if
4243 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4244 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4245 not true. */
4246 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4247 {
4248 if (!TARGET_VSX)
4249 {
4250 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4251 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4252
4253 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4254 }
4255
4256 else if (!TARGET_ALLOW_MOVMISALIGN)
4257 {
4258 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4259 error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4260 "-mallow-movmisalign");
4261
4262 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4263 }
4264 }
4265
4266 /* Use long double size to select the appropriate long double. We use
4267 TYPE_PRECISION to differentiate the 3 different long double types. We map
4268 128 into the precision used for TFmode. */
4269 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4270 ? 64
4271 : FLOAT_PRECISION_TFmode);
4272
4273 /* Set long double size before the IEEE 128-bit tests. */
4274 if (!global_options_set.x_rs6000_long_double_type_size)
4275 {
4276 if (main_target_opt != NULL
4277 && (main_target_opt->x_rs6000_long_double_type_size
4278 != default_long_double_size))
4279 error ("target attribute or pragma changes long double size");
4280 else
4281 rs6000_long_double_type_size = default_long_double_size;
4282 }
4283 else if (rs6000_long_double_type_size == 128)
4284 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
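
/* For reference, the three possible long double layouts are:

64-bit same representation as double
128-bit IBM extended double-double, a sum of two doubles
128-bit IEEE binary128 quad precision

Both 128-bit layouts occupy 16 bytes, so a user-visible size of 128 is
canonicalized to FLOAT_PRECISION_TFmode above; the distinct
TYPE_PRECISION values are how the three types stay distinguishable. */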
4285
4286 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4287 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4288 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4289 those systems will not pick up this default. Warn if the user changes the
4290 default unless -Wno-psabi. */
4291 if (!global_options_set.x_rs6000_ieeequad)
4292 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4293
4294 else if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4295 {
4296 static bool warned_change_long_double;
4297 if (!warned_change_long_double)
4298 {
4299 warned_change_long_double = true;
4300 if (TARGET_IEEEQUAD)
4301 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4302 else
4303 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4304 }
4305 }
4306
4307 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4308 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4309 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4310 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4311 the keyword and the type. */
4312 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4313
4314 /* IEEE 128-bit floating point requires VSX support. */
4315 if (TARGET_FLOAT128_KEYWORD)
4316 {
4317 if (!TARGET_VSX)
4318 {
4319 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4320 error ("%qs requires VSX support", "-mfloat128");
4321
4322 TARGET_FLOAT128_TYPE = 0;
4323 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4324 | OPTION_MASK_FLOAT128_HW);
4325 }
4326 else if (!TARGET_FLOAT128_TYPE)
4327 {
4328 TARGET_FLOAT128_TYPE = 1;
4329 warning (0, "The -mfloat128 option may not be fully supported");
4330 }
4331 }
4332
4333 /* Enable the __float128 keyword under Linux by default. */
4334 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4335 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4336 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4337
4338 /* If we are supporting the float128 type and have full ISA 3.0 support,
4339 enable -mfloat128-hardware by default. However, don't enable the
4340 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4341 because sometimes the compiler wants to put things in an integer
4342 container, and if we don't have __int128 support, it is impossible. */
4343 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4344 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4345 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4346 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
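
/* Illustrative of the "integer container" point above: without -m64 there
is no __int128, so shuttling an IEEE 128-bit value through a GPR pair,
conceptually

union { __float128 f; __int128 i; } u;

cannot be expressed (a sketch, not code from this port). */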
4347
4348 if (TARGET_FLOAT128_HW
4349 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4350 {
4351 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4352 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4353
4354 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4355 }
4356
4357 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4358 {
4359 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4360 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4361
4362 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4363 }
4364
4365 /* Print the options after updating the defaults. */
4366 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4367 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4368
4369 /* E500mc does "better" if we inline more aggressively. Respect the
4370 user's opinion, though. */
4371 if (rs6000_block_move_inline_limit == 0
4372 && (rs6000_tune == PROCESSOR_PPCE500MC
4373 || rs6000_tune == PROCESSOR_PPCE500MC64
4374 || rs6000_tune == PROCESSOR_PPCE5500
4375 || rs6000_tune == PROCESSOR_PPCE6500))
4376 rs6000_block_move_inline_limit = 128;
4377
4378 /* store_one_arg depends on expand_block_move to handle at least the
4379 size of reg_parm_stack_space. */
4380 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4381 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4382
4383 if (global_init_p)
4384 {
4385 /* If the appropriate debug option is enabled, replace the target hooks
4386 with debug versions that call the real version and then print
4387 debugging information. */
4388 if (TARGET_DEBUG_COST)
4389 {
4390 targetm.rtx_costs = rs6000_debug_rtx_costs;
4391 targetm.address_cost = rs6000_debug_address_cost;
4392 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4393 }
4394
4395 if (TARGET_DEBUG_ADDR)
4396 {
4397 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4398 targetm.legitimize_address = rs6000_debug_legitimize_address;
4399 rs6000_secondary_reload_class_ptr
4400 = rs6000_debug_secondary_reload_class;
4401 targetm.secondary_memory_needed
4402 = rs6000_debug_secondary_memory_needed;
4403 targetm.can_change_mode_class
4404 = rs6000_debug_can_change_mode_class;
4405 rs6000_preferred_reload_class_ptr
4406 = rs6000_debug_preferred_reload_class;
4407 rs6000_legitimize_reload_address_ptr
4408 = rs6000_debug_legitimize_reload_address;
4409 rs6000_mode_dependent_address_ptr
4410 = rs6000_debug_mode_dependent_address;
4411 }
4412
4413 if (rs6000_veclibabi_name)
4414 {
4415 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4416 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4417 else
4418 {
4419 error ("unknown vectorization library ABI type (%qs) for "
4420 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4421 ret = false;
4422 }
4423 }
4424 }
4425
4426 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4427 target attribute or pragma which automatically enables both options,
4428 unless the altivec ABI was set. This is set by default for 64-bit, but
4429 not for 32-bit. */
4430 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4431 {
4432 TARGET_FLOAT128_TYPE = 0;
4433 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4434 | OPTION_MASK_FLOAT128_KEYWORD)
4435 & ~rs6000_isa_flags_explicit);
4436 }
4437
4438 /* Enable Altivec ABI for AIX -maltivec. */
4439 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4440 {
4441 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4442 error ("target attribute or pragma changes AltiVec ABI");
4443 else
4444 rs6000_altivec_abi = 1;
4445 }
4446
4447 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4448 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4449 be explicitly overridden in either case. */
4450 if (TARGET_ELF)
4451 {
4452 if (!global_options_set.x_rs6000_altivec_abi
4453 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4454 {
4455 if (main_target_opt != NULL &&
4456 !main_target_opt->x_rs6000_altivec_abi)
4457 error ("target attribute or pragma changes AltiVec ABI");
4458 else
4459 rs6000_altivec_abi = 1;
4460 }
4461 }
4462
4463 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4464 So far, the only darwin64 targets are also Mach-O. */
4465 if (TARGET_MACHO
4466 && DEFAULT_ABI == ABI_DARWIN
4467 && TARGET_64BIT)
4468 {
4469 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4470 error ("target attribute or pragma changes darwin64 ABI");
4471 else
4472 {
4473 rs6000_darwin64_abi = 1;
4474 /* Default to natural alignment, for better performance. */
4475 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4476 }
4477 }
4478
4479 /* Place FP constants in the constant pool instead of TOC
4480 if section anchors enabled. */
4481 if (flag_section_anchors
4482 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4483 TARGET_NO_FP_IN_TOC = 1;
4484
4485 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4486 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4487
4488 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4489 SUBTARGET_OVERRIDE_OPTIONS;
4490 #endif
4491 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4492 SUBSUBTARGET_OVERRIDE_OPTIONS;
4493 #endif
4494 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4495 SUB3TARGET_OVERRIDE_OPTIONS;
4496 #endif
4497
4498 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4499 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4500
4501 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4502 && rs6000_tune != PROCESSOR_POWER5
4503 && rs6000_tune != PROCESSOR_POWER6
4504 && rs6000_tune != PROCESSOR_POWER7
4505 && rs6000_tune != PROCESSOR_POWER8
4506 && rs6000_tune != PROCESSOR_POWER9
4507 && rs6000_tune != PROCESSOR_PPCA2
4508 && rs6000_tune != PROCESSOR_CELL
4509 && rs6000_tune != PROCESSOR_PPC476);
4510 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4511 || rs6000_tune == PROCESSOR_POWER5
4512 || rs6000_tune == PROCESSOR_POWER7
4513 || rs6000_tune == PROCESSOR_POWER8);
4514 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4515 || rs6000_tune == PROCESSOR_POWER5
4516 || rs6000_tune == PROCESSOR_POWER6
4517 || rs6000_tune == PROCESSOR_POWER7
4518 || rs6000_tune == PROCESSOR_POWER8
4519 || rs6000_tune == PROCESSOR_POWER9
4520 || rs6000_tune == PROCESSOR_PPCE500MC
4521 || rs6000_tune == PROCESSOR_PPCE500MC64
4522 || rs6000_tune == PROCESSOR_PPCE5500
4523 || rs6000_tune == PROCESSOR_PPCE6500);
4524
4525 /* Allow debug switches to override the above settings. These are set to -1
4526 in rs6000.opt to indicate the user hasn't directly set the switch. */
4527 if (TARGET_ALWAYS_HINT >= 0)
4528 rs6000_always_hint = TARGET_ALWAYS_HINT;
4529
4530 if (TARGET_SCHED_GROUPS >= 0)
4531 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4532
4533 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4534 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4535
4536 rs6000_sched_restricted_insns_priority
4537 = (rs6000_sched_groups ? 1 : 0);
4538
4539 /* Handle -msched-costly-dep option. */
4540 rs6000_sched_costly_dep
4541 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4542
4543 if (rs6000_sched_costly_dep_str)
4544 {
4545 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4546 rs6000_sched_costly_dep = no_dep_costly;
4547 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4548 rs6000_sched_costly_dep = all_deps_costly;
4549 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4550 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4551 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4552 rs6000_sched_costly_dep = store_to_load_dep_costly;
4553 else
4554 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4555 atoi (rs6000_sched_costly_dep_str));
4556 }
4557
4558 /* Handle -minsert-sched-nops option. */
4559 rs6000_sched_insert_nops
4560 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4561
4562 if (rs6000_sched_insert_nops_str)
4563 {
4564 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4565 rs6000_sched_insert_nops = sched_finish_none;
4566 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4567 rs6000_sched_insert_nops = sched_finish_pad_groups;
4568 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4569 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4570 else
4571 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4572 atoi (rs6000_sched_insert_nops_str));
4573 }
4574
4575 /* Handle stack protector */
4576 if (!global_options_set.x_rs6000_stack_protector_guard)
4577 #ifdef TARGET_THREAD_SSP_OFFSET
4578 rs6000_stack_protector_guard = SSP_TLS;
4579 #else
4580 rs6000_stack_protector_guard = SSP_GLOBAL;
4581 #endif
4582
4583 #ifdef TARGET_THREAD_SSP_OFFSET
4584 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4585 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4586 #endif
4587
4588 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4589 {
4590 char *endp;
4591 const char *str = rs6000_stack_protector_guard_offset_str;
4592
4593 errno = 0;
4594 long offset = strtol (str, &endp, 0);
4595 if (!*str || *endp || errno)
4596 error ("%qs is not a valid number in %qs", str,
4597 "-mstack-protector-guard-offset=");
4598
4599 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4600 || (TARGET_64BIT && (offset & 3)))
4601 error ("%qs is not a valid offset in %qs", str,
4602 "-mstack-protector-guard-offset=");
4603
4604 rs6000_stack_protector_guard_offset = offset;
4605 }
4606
4607 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4608 {
4609 const char *str = rs6000_stack_protector_guard_reg_str;
4610 int reg = decode_reg_name (str);
4611
4612 if (!IN_RANGE (reg, 1, 31))
4613 error ("%qs is not a valid base register in %qs", str,
4614 "-mstack-protector-guard-reg=");
4615
4616 rs6000_stack_protector_guard_reg = reg;
4617 }
4618
4619 if (rs6000_stack_protector_guard == SSP_TLS
4620 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4621 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4622
4623 if (global_init_p)
4624 {
4625 #ifdef TARGET_REGNAMES
4626 /* If the user desires alternate register names, copy in the
4627 alternate names now. */
4628 if (TARGET_REGNAMES)
4629 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4630 #endif
4631
4632 /* Set aix_struct_return last, after the ABI is determined.
4633 If -maix-struct-return or -msvr4-struct-return was explicitly
4634 used, don't override with the ABI default. */
4635 if (!global_options_set.x_aix_struct_return)
4636 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4637
4638 #if 0
4639 /* IBM XL compiler defaults to unsigned bitfields. */
4640 if (TARGET_XL_COMPAT)
4641 flag_signed_bitfields = 0;
4642 #endif
4643
4644 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4645 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4646
4647 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4648
4649 /* We can only guarantee the availability of DI pseudo-ops when
4650 assembling for 64-bit targets. */
4651 if (!TARGET_64BIT)
4652 {
4653 targetm.asm_out.aligned_op.di = NULL;
4654 targetm.asm_out.unaligned_op.di = NULL;
4655 }
4656
4657
4658 /* Set branch target alignment, if not optimizing for size. */
4659 if (!optimize_size)
4660 {
4661 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
4662 8-byte aligned to avoid misprediction by the branch predictor. */
4663 if (rs6000_tune == PROCESSOR_TITAN
4664 || rs6000_tune == PROCESSOR_CELL)
4665 {
4666 if (flag_align_functions && !str_align_functions)
4667 str_align_functions = "8";
4668 if (flag_align_jumps && !str_align_jumps)
4669 str_align_jumps = "8";
4670 if (flag_align_loops && !str_align_loops)
4671 str_align_loops = "8";
4672 }
4673 if (rs6000_align_branch_targets)
4674 {
4675 if (flag_align_functions && !str_align_functions)
4676 str_align_functions = "16";
4677 if (flag_align_jumps && !str_align_jumps)
4678 str_align_jumps = "16";
4679 if (flag_align_loops && !str_align_loops)
4680 {
4681 can_override_loop_align = 1;
4682 str_align_loops = "16";
4683 }
4684 }
4685
4686 if (flag_align_jumps && !str_align_jumps)
4687 str_align_jumps = "16";
4688 if (flag_align_loops && !str_align_loops)
4689 str_align_loops = "16";
4690 }
4691
4692 /* Arrange to save and restore machine status around nested functions. */
4693 init_machine_status = rs6000_init_machine_status;
4694
4695 /* We should always be splitting complex arguments, but we can't break
4696 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4697 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4698 targetm.calls.split_complex_arg = NULL;
4699
4700 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4701 if (DEFAULT_ABI == ABI_AIX)
4702 targetm.calls.custom_function_descriptors = 0;
4703 }
4704
4705 /* Initialize rs6000_cost with the appropriate target costs. */
4706 if (optimize_size)
4707 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4708 else
4709 switch (rs6000_tune)
4710 {
4711 case PROCESSOR_RS64A:
4712 rs6000_cost = &rs64a_cost;
4713 break;
4714
4715 case PROCESSOR_MPCCORE:
4716 rs6000_cost = &mpccore_cost;
4717 break;
4718
4719 case PROCESSOR_PPC403:
4720 rs6000_cost = &ppc403_cost;
4721 break;
4722
4723 case PROCESSOR_PPC405:
4724 rs6000_cost = &ppc405_cost;
4725 break;
4726
4727 case PROCESSOR_PPC440:
4728 rs6000_cost = &ppc440_cost;
4729 break;
4730
4731 case PROCESSOR_PPC476:
4732 rs6000_cost = &ppc476_cost;
4733 break;
4734
4735 case PROCESSOR_PPC601:
4736 rs6000_cost = &ppc601_cost;
4737 break;
4738
4739 case PROCESSOR_PPC603:
4740 rs6000_cost = &ppc603_cost;
4741 break;
4742
4743 case PROCESSOR_PPC604:
4744 rs6000_cost = &ppc604_cost;
4745 break;
4746
4747 case PROCESSOR_PPC604e:
4748 rs6000_cost = &ppc604e_cost;
4749 break;
4750
4751 case PROCESSOR_PPC620:
4752 rs6000_cost = &ppc620_cost;
4753 break;
4754
4755 case PROCESSOR_PPC630:
4756 rs6000_cost = &ppc630_cost;
4757 break;
4758
4759 case PROCESSOR_CELL:
4760 rs6000_cost = &ppccell_cost;
4761 break;
4762
4763 case PROCESSOR_PPC750:
4764 case PROCESSOR_PPC7400:
4765 rs6000_cost = &ppc750_cost;
4766 break;
4767
4768 case PROCESSOR_PPC7450:
4769 rs6000_cost = &ppc7450_cost;
4770 break;
4771
4772 case PROCESSOR_PPC8540:
4773 case PROCESSOR_PPC8548:
4774 rs6000_cost = &ppc8540_cost;
4775 break;
4776
4777 case PROCESSOR_PPCE300C2:
4778 case PROCESSOR_PPCE300C3:
4779 rs6000_cost = &ppce300c2c3_cost;
4780 break;
4781
4782 case PROCESSOR_PPCE500MC:
4783 rs6000_cost = &ppce500mc_cost;
4784 break;
4785
4786 case PROCESSOR_PPCE500MC64:
4787 rs6000_cost = &ppce500mc64_cost;
4788 break;
4789
4790 case PROCESSOR_PPCE5500:
4791 rs6000_cost = &ppce5500_cost;
4792 break;
4793
4794 case PROCESSOR_PPCE6500:
4795 rs6000_cost = &ppce6500_cost;
4796 break;
4797
4798 case PROCESSOR_TITAN:
4799 rs6000_cost = &titan_cost;
4800 break;
4801
4802 case PROCESSOR_POWER4:
4803 case PROCESSOR_POWER5:
4804 rs6000_cost = &power4_cost;
4805 break;
4806
4807 case PROCESSOR_POWER6:
4808 rs6000_cost = &power6_cost;
4809 break;
4810
4811 case PROCESSOR_POWER7:
4812 rs6000_cost = &power7_cost;
4813 break;
4814
4815 case PROCESSOR_POWER8:
4816 rs6000_cost = &power8_cost;
4817 break;
4818
4819 case PROCESSOR_POWER9:
4820 rs6000_cost = &power9_cost;
4821 break;
4822
4823 case PROCESSOR_PPCA2:
4824 rs6000_cost = &ppca2_cost;
4825 break;
4826
4827 default:
4828 gcc_unreachable ();
4829 }
4830
4831 if (global_init_p)
4832 {
4833 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4834 rs6000_cost->simultaneous_prefetches,
4835 global_options.x_param_values,
4836 global_options_set.x_param_values);
4837 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4838 global_options.x_param_values,
4839 global_options_set.x_param_values);
4840 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4841 rs6000_cost->cache_line_size,
4842 global_options.x_param_values,
4843 global_options_set.x_param_values);
4844 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4845 global_options.x_param_values,
4846 global_options_set.x_param_values);
4847
4848 /* Increase loop peeling limits based on performance analysis. */
4849 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4850 global_options.x_param_values,
4851 global_options_set.x_param_values);
4852 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4853 global_options.x_param_values,
4854 global_options_set.x_param_values);
4855
4856 /* Use the 'model' -fsched-pressure algorithm by default. */
4857 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4858 SCHED_PRESSURE_MODEL,
4859 global_options.x_param_values,
4860 global_options_set.x_param_values);
4861
4862 /* If using typedef char *va_list, signal that
4863 __builtin_va_start (&ap, 0) can be optimized to
4864 ap = __builtin_next_arg (0). */
4865 if (DEFAULT_ABI != ABI_V4)
4866 targetm.expand_builtin_va_start = NULL;
4867 }
4868
4869 /* If not explicitly specified via option, decide whether to generate indexed
4870 load/store instructions. A value of -1 indicates that the
4871 initial value of this variable has not been overwritten. During
4872 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4873 if (TARGET_AVOID_XFORM == -1)
4874 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4875 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4876 need indexed accesses and the type used is the scalar type of the element
4877 being loaded or stored. */
4878 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4879 && !TARGET_ALTIVEC);
4880
4881 /* Set the -mrecip options. */
4882 if (rs6000_recip_name)
4883 {
4884 char *p = ASTRDUP (rs6000_recip_name);
4885 char *q;
4886 unsigned int mask, i;
4887 bool invert;
4888
4889 while ((q = strtok (p, ",")) != NULL)
4890 {
4891 p = NULL;
4892 if (*q == '!')
4893 {
4894 invert = true;
4895 q++;
4896 }
4897 else
4898 invert = false;
4899
4900 if (!strcmp (q, "default"))
4901 mask = ((TARGET_RECIP_PRECISION)
4902 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4903 else
4904 {
4905 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4906 if (!strcmp (q, recip_options[i].string))
4907 {
4908 mask = recip_options[i].mask;
4909 break;
4910 }
4911
4912 if (i == ARRAY_SIZE (recip_options))
4913 {
4914 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4915 invert = false;
4916 mask = 0;
4917 ret = false;
4918 }
4919 }
4920
4921 if (invert)
4922 rs6000_recip_control &= ~mask;
4923 else
4924 rs6000_recip_control |= mask;
4925 }
4926 }
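
/* Illustrative example of the parsing above (not in the original source;
   assumes the recip_options[] table defined earlier in this file contains
   an entry named "rsqrt"):

     -mrecip=default,!rsqrt

   first ORs RECIP_HIGH_PRECISION (or RECIP_LOW_PRECISION without
   -mrecip-precision) into rs6000_recip_control for "default", then the
   leading '!' on "rsqrt" clears that entry's mask bits again.  */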
4927
4928 /* Set the builtin mask of the various options used that could affect which
4929 builtins were used. In the past we used target_flags, but we've run out
4930 of bits, and some options are no longer in target_flags. */
4931 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4932 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4933 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4934 rs6000_builtin_mask);
4935
4936 /* Initialize all of the registers. */
4937 rs6000_init_hard_regno_mode_ok (global_init_p);
4938
4939 /* Save the initial options in case the user uses function-specific options. */
4940 if (global_init_p)
4941 target_option_default_node = target_option_current_node
4942 = build_target_option_node (&global_options);
4943
4944 /* If not explicitly specified via option, decide whether to generate the
4945 extra blr's required to preserve the link stack on some cpus (eg, 476). */
4946 if (TARGET_LINK_STACK == -1)
4947 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4948
4949 /* Deprecate use of -mno-speculate-indirect-jumps. */
4950 if (!rs6000_speculate_indirect_jumps)
4951 warning (0, "%qs is deprecated and not recommended in any circumstances",
4952 "-mno-speculate-indirect-jumps");
4953
4954 return ret;
4955 }
4956
4957 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4958 define the target cpu type. */
4959
4960 static void
4961 rs6000_option_override (void)
4962 {
4963 (void) rs6000_option_override_internal (true);
4964 }
4965
4966 \f
4967 /* Implement targetm.vectorize.builtin_mask_for_load. */
4968 static tree
4969 rs6000_builtin_mask_for_load (void)
4970 {
4971 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4972 if ((TARGET_ALTIVEC && !TARGET_VSX)
4973 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4974 return altivec_builtin_mask_for_load;
4975 else
4976 return 0;
4977 }
4978
4979 /* Implement LOOP_ALIGN. */
4980 align_flags
4981 rs6000_loop_align (rtx label)
4982 {
4983 basic_block bb;
4984 int ninsns;
4985
4986 /* Don't override loop alignment if -falign-loops was specified. */
4987 if (!can_override_loop_align)
4988 return align_loops;
4989
4990 bb = BLOCK_FOR_INSN (label);
4991 ninsns = num_loop_insns (bb->loop_father);
4992
4993 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
4994 if (ninsns > 4 && ninsns <= 8
4995 && (rs6000_tune == PROCESSOR_POWER4
4996 || rs6000_tune == PROCESSOR_POWER5
4997 || rs6000_tune == PROCESSOR_POWER6
4998 || rs6000_tune == PROCESSOR_POWER7
4999 || rs6000_tune == PROCESSOR_POWER8))
5000 return align_flags (5);
5001 else
5002 return align_loops;
5003 }
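
/* Worked example (illustrative, not from the original source): provided
   -falign-loops was not given explicitly, a 6-insn inner loop compiled
   with -mtune=power8 satisfies 4 < ninsns <= 8, so rs6000_loop_align
   returns align_flags (5), i.e. a 2**5 = 32 byte boundary; larger or
   smaller loops keep the default loop alignment.  */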
5004
5005 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5006 after applying N iterations. This routine does not determine how many
5007 iterations are required to reach the desired alignment. */
5008
5009 static bool
5010 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5011 {
5012 if (is_packed)
5013 return false;
5014
5015 if (TARGET_32BIT)
5016 {
5017 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5018 return true;
5019
5020 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5021 return true;
5022
5023 return false;
5024 }
5025 else
5026 {
5027 if (TARGET_MACHO)
5028 return false;
5029
5030 /* Assume that all other types are naturally aligned. CHECKME! */
5031 return true;
5032 }
5033 }
5034
5035 /* Return true if the vector misalignment factor is supported by the
5036 target. */
5037 static bool
5038 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5039 const_tree type,
5040 int misalignment,
5041 bool is_packed)
5042 {
5043 if (TARGET_VSX)
5044 {
5045 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5046 return true;
5047
5048 /* Return false if the movmisalign pattern is not supported for this mode. */
5049 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5050 return false;
5051
5052 if (misalignment == -1)
5053 {
5054 /* Misalignment factor is unknown at compile time but we know
5055 it's word aligned. */
5056 if (rs6000_vector_alignment_reachable (type, is_packed))
5057 {
5058 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5059
5060 if (element_size == 64 || element_size == 32)
5061 return true;
5062 }
5063
5064 return false;
5065 }
5066
5067 /* VSX supports word-aligned vectors. */
5068 if (misalignment % 4 == 0)
5069 return true;
5070 }
5071 return false;
5072 }
5073
5074 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5075 static int
5076 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5077 tree vectype, int misalign)
5078 {
5079 unsigned elements;
5080 tree elem_type;
5081
5082 switch (type_of_cost)
5083 {
5084 case scalar_stmt:
5085 case scalar_load:
5086 case scalar_store:
5087 case vector_stmt:
5088 case vector_load:
5089 case vector_store:
5090 case vec_to_scalar:
5091 case scalar_to_vec:
5092 case cond_branch_not_taken:
5093 return 1;
5094
5095 case vec_perm:
5096 if (TARGET_VSX)
5097 return 3;
5098 else
5099 return 1;
5100
5101 case vec_promote_demote:
5102 if (TARGET_VSX)
5103 return 4;
5104 else
5105 return 1;
5106
5107 case cond_branch_taken:
5108 return 3;
5109
5110 case unaligned_load:
5111 case vector_gather_load:
5112 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5113 return 1;
5114
5115 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5116 {
5117 elements = TYPE_VECTOR_SUBPARTS (vectype);
5118 if (elements == 2)
5119 /* Double word aligned. */
5120 return 2;
5121
5122 if (elements == 4)
5123 {
5124 switch (misalign)
5125 {
5126 case 8:
5127 /* Double word aligned. */
5128 return 2;
5129
5130 case -1:
5131 /* Unknown misalignment. */
5132 case 4:
5133 case 12:
5134 /* Word aligned. */
5135 return 22;
5136
5137 default:
5138 gcc_unreachable ();
5139 }
5140 }
5141 }
5142
5143 if (TARGET_ALTIVEC)
5144 /* Misaligned loads are not supported. */
5145 gcc_unreachable ();
5146
5147 return 2;
5148
5149 case unaligned_store:
5150 case vector_scatter_store:
5151 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5152 return 1;
5153
5154 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5155 {
5156 elements = TYPE_VECTOR_SUBPARTS (vectype);
5157 if (elements == 2)
5158 /* Double word aligned. */
5159 return 2;
5160
5161 if (elements == 4)
5162 {
5163 switch (misalign)
5164 {
5165 case 8:
5166 /* Double word aligned. */
5167 return 2;
5168
5169 case -1:
5170 /* Unknown misalignment. */
5171 case 4:
5172 case 12:
5173 /* Word aligned. */
5174 return 23;
5175
5176 default:
5177 gcc_unreachable ();
5178 }
5179 }
5180 }
5181
5182 if (TARGET_ALTIVEC)
5183 /* Misaligned stores are not supported. */
5184 gcc_unreachable ();
5185
5186 return 2;
5187
5188 case vec_construct:
5189 /* This is a rough approximation assuming non-constant elements
5190 constructed into a vector via element insertion. FIXME:
5191 vec_construct is not granular enough for uniformly good
5192 decisions. If the initialization is a splat, this is
5193 cheaper than we estimate. Improve this someday. */
5194 elem_type = TREE_TYPE (vectype);
5195 /* 32-bit floats loaded into registers are stored as double
5196 precision, so we need 2 permutes, 2 converts, and 1 merge
5197 to construct a vector of short floats from them. */
5198 if (SCALAR_FLOAT_TYPE_P (elem_type)
5199 && TYPE_PRECISION (elem_type) == 32)
5200 return 5;
5201 /* On POWER9, integer vector types are built up in GPRs and then
5202 use a direct move (2 cycles). For POWER8 this is even worse,
5203 as we need two direct moves and a merge, and the direct moves
5204 are five cycles. */
5205 else if (INTEGRAL_TYPE_P (elem_type))
5206 {
5207 if (TARGET_P9_VECTOR)
5208 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5209 else
5210 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5211 }
5212 else
5213 /* V2DFmode doesn't need a direct move. */
5214 return 2;
5215
5216 default:
5217 gcc_unreachable ();
5218 }
5219 }
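
/* Worked example for the vec_construct arithmetic above (illustrative,
   not from the original source): building a V4SI vector from 4 distinct
   values costs TYPE_VECTOR_SUBPARTS - 1 + 2 = 5 on POWER9, where one
   2-cycle direct move suffices, but 4 - 1 + 5 = 8 on POWER8, where two
   5-cycle direct moves and a merge are needed.  */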
5220
5221 /* Implement targetm.vectorize.preferred_simd_mode. */
5222
5223 static machine_mode
5224 rs6000_preferred_simd_mode (scalar_mode mode)
5225 {
5226 if (TARGET_VSX)
5227 switch (mode)
5228 {
5229 case E_DFmode:
5230 return V2DFmode;
5231 default:;
5232 }
5233 if (TARGET_ALTIVEC || TARGET_VSX)
5234 switch (mode)
5235 {
5236 case E_SFmode:
5237 return V4SFmode;
5238 case E_TImode:
5239 return V1TImode;
5240 case E_DImode:
5241 return V2DImode;
5242 case E_SImode:
5243 return V4SImode;
5244 case E_HImode:
5245 return V8HImode;
5246 case E_QImode:
5247 return V16QImode;
5248 default:;
5249 }
5250 return word_mode;
5251 }
5252
5253 typedef struct _rs6000_cost_data
5254 {
5255 struct loop *loop_info;
5256 unsigned cost[3];
5257 } rs6000_cost_data;
5258
5259 /* Test for likely overcommitment of vector hardware resources. If a
5260 loop iteration is relatively large, and too large a percentage of
5261 instructions in the loop are vectorized, the cost model may not
5262 adequately reflect delays from unavailable vector resources.
5263 Penalize the loop body cost for this case. */
5264
5265 static void
5266 rs6000_density_test (rs6000_cost_data *data)
5267 {
5268 const int DENSITY_PCT_THRESHOLD = 85;
5269 const int DENSITY_SIZE_THRESHOLD = 70;
5270 const int DENSITY_PENALTY = 10;
5271 struct loop *loop = data->loop_info;
5272 basic_block *bbs = get_loop_body (loop);
5273 int nbbs = loop->num_nodes;
5274 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5275 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5276 int i, density_pct;
5277
5278 for (i = 0; i < nbbs; i++)
5279 {
5280 basic_block bb = bbs[i];
5281 gimple_stmt_iterator gsi;
5282
5283 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5284 {
5285 gimple *stmt = gsi_stmt (gsi);
5286 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5287
5288 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5289 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5290 not_vec_cost++;
5291 }
5292 }
5293
5294 free (bbs);
5295 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5296
5297 if (density_pct > DENSITY_PCT_THRESHOLD
5298 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5299 {
5300 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5301 if (dump_enabled_p ())
5302 dump_printf_loc (MSG_NOTE, vect_location,
5303 "density %d%%, cost %d exceeds threshold, penalizing "
5304 "loop body cost by %d%%", density_pct,
5305 vec_cost + not_vec_cost, DENSITY_PENALTY);
5306 }
5307 }
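
/* Worked example (illustrative, not from the original source): with
   vec_cost = 90 and not_vec_cost = 10, density_pct = 9000 / 100 = 90,
   which exceeds DENSITY_PCT_THRESHOLD (85), and the total size 100
   exceeds DENSITY_SIZE_THRESHOLD (70), so the body cost is raised to
   90 * (100 + 10) / 100 = 99.  */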
5308
5309 /* Implement targetm.vectorize.init_cost. */
5310
5311 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5312 instruction is needed by the vectorization. */
5313 static bool rs6000_vect_nonmem;
5314
5315 static void *
5316 rs6000_init_cost (struct loop *loop_info)
5317 {
5318 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5319 data->loop_info = loop_info;
5320 data->cost[vect_prologue] = 0;
5321 data->cost[vect_body] = 0;
5322 data->cost[vect_epilogue] = 0;
5323 rs6000_vect_nonmem = false;
5324 return data;
5325 }
5326
5327 /* Implement targetm.vectorize.add_stmt_cost. */
5328
5329 static unsigned
5330 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5331 struct _stmt_vec_info *stmt_info, int misalign,
5332 enum vect_cost_model_location where)
5333 {
5334 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5335 unsigned retval = 0;
5336
5337 if (flag_vect_cost_model)
5338 {
5339 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5340 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5341 misalign);
5342 /* Statements in an inner loop relative to the loop being
5343 vectorized are weighted more heavily. The value here is
5344 arbitrary and could potentially be improved with analysis. */
5345 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5346 count *= 50; /* FIXME. */
5347
5348 retval = (unsigned) (count * stmt_cost);
5349 cost_data->cost[where] += retval;
5350
5351 /* Check whether we're doing something other than just a copy loop.
5352 Not all such loops may be profitably vectorized; see
5353 rs6000_finish_cost. */
5354 if ((kind == vec_to_scalar || kind == vec_perm
5355 || kind == vec_promote_demote || kind == vec_construct
5356 || kind == scalar_to_vec)
5357 || (where == vect_body && kind == vector_stmt))
5358 rs6000_vect_nonmem = true;
5359 }
5360
5361 return retval;
5362 }
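
/* Worked example (illustrative, not from the original source): a single
   vector_stmt in the body of a loop nested inside the loop being
   vectorized has stmt_cost 1, but COUNT is scaled by 50, so 50 is added
   to cost_data->cost[vect_body] and rs6000_vect_nonmem becomes true.  */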
5363
5364 /* Implement targetm.vectorize.finish_cost. */
5365
5366 static void
5367 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5368 unsigned *body_cost, unsigned *epilogue_cost)
5369 {
5370 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5371
5372 if (cost_data->loop_info)
5373 rs6000_density_test (cost_data);
5374
5375 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5376 that require versioning for any reason. The vectorization is at
5377 best a wash inside the loop, and the versioning checks make
5378 profitability highly unlikely and potentially quite harmful. */
5379 if (cost_data->loop_info)
5380 {
5381 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5382 if (!rs6000_vect_nonmem
5383 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5384 && LOOP_REQUIRES_VERSIONING (vec_info))
5385 cost_data->cost[vect_body] += 10000;
5386 }
5387
5388 *prologue_cost = cost_data->cost[vect_prologue];
5389 *body_cost = cost_data->cost[vect_body];
5390 *epilogue_cost = cost_data->cost[vect_epilogue];
5391 }
5392
5393 /* Implement targetm.vectorize.destroy_cost_data. */
5394
5395 static void
5396 rs6000_destroy_cost_data (void *data)
5397 {
5398 free (data);
5399 }
5400
5401 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5402 library with vectorized intrinsics. */
5403
5404 static tree
5405 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5406 tree type_in)
5407 {
5408 char name[32];
5409 const char *suffix = NULL;
5410 tree fntype, new_fndecl, bdecl = NULL_TREE;
5411 int n_args = 1;
5412 const char *bname;
5413 machine_mode el_mode, in_mode;
5414 int n, in_n;
5415
5416 /* Libmass is suitable for unsafe math only, as it does not correctly support
5417 parts of IEEE with the required precision such as denormals. Only support
5418 it if we have VSX to use the simd d2 or f4 functions.
5419 XXX: Add variable length support. */
5420 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5421 return NULL_TREE;
5422
5423 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5424 n = TYPE_VECTOR_SUBPARTS (type_out);
5425 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5426 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5427 if (el_mode != in_mode
5428 || n != in_n)
5429 return NULL_TREE;
5430
5431 switch (fn)
5432 {
5433 CASE_CFN_ATAN2:
5434 CASE_CFN_HYPOT:
5435 CASE_CFN_POW:
5436 n_args = 2;
5437 gcc_fallthrough ();
5438
5439 CASE_CFN_ACOS:
5440 CASE_CFN_ACOSH:
5441 CASE_CFN_ASIN:
5442 CASE_CFN_ASINH:
5443 CASE_CFN_ATAN:
5444 CASE_CFN_ATANH:
5445 CASE_CFN_CBRT:
5446 CASE_CFN_COS:
5447 CASE_CFN_COSH:
5448 CASE_CFN_ERF:
5449 CASE_CFN_ERFC:
5450 CASE_CFN_EXP2:
5451 CASE_CFN_EXP:
5452 CASE_CFN_EXPM1:
5453 CASE_CFN_LGAMMA:
5454 CASE_CFN_LOG10:
5455 CASE_CFN_LOG1P:
5456 CASE_CFN_LOG2:
5457 CASE_CFN_LOG:
5458 CASE_CFN_SIN:
5459 CASE_CFN_SINH:
5460 CASE_CFN_SQRT:
5461 CASE_CFN_TAN:
5462 CASE_CFN_TANH:
5463 if (el_mode == DFmode && n == 2)
5464 {
5465 bdecl = mathfn_built_in (double_type_node, fn);
5466 suffix = "d2"; /* pow -> powd2 */
5467 }
5468 else if (el_mode == SFmode && n == 4)
5469 {
5470 bdecl = mathfn_built_in (float_type_node, fn);
5471 suffix = "4"; /* powf -> powf4 */
5472 }
5473 else
5474 return NULL_TREE;
5475 if (!bdecl)
5476 return NULL_TREE;
5477 break;
5478
5479 default:
5480 return NULL_TREE;
5481 }
5482
5483 gcc_assert (suffix != NULL);
5484 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5485 if (!bname)
5486 return NULL_TREE;
5487
5488 strcpy (name, bname + sizeof ("__builtin_") - 1);
5489 strcat (name, suffix);
5490
5491 if (n_args == 1)
5492 fntype = build_function_type_list (type_out, type_in, NULL);
5493 else if (n_args == 2)
5494 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5495 else
5496 gcc_unreachable ();
5497
5498 /* Build a function declaration for the vectorized function. */
5499 new_fndecl = build_decl (BUILTINS_LOCATION,
5500 FUNCTION_DECL, get_identifier (name), fntype);
5501 TREE_PUBLIC (new_fndecl) = 1;
5502 DECL_EXTERNAL (new_fndecl) = 1;
5503 DECL_IS_NOVOPS (new_fndecl) = 1;
5504 TREE_READONLY (new_fndecl) = 1;
5505
5506 return new_fndecl;
5507 }
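
/* Name-mangling example (illustrative, not from the original source):
   for CFN_POW with V2DFmode in and out, bdecl is __builtin_pow, so
   stripping "__builtin_" and appending the "d2" suffix yields a
   2-argument "powd2" declaration; the V4SFmode variant similarly maps
   powf to "powf4".  */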
5508
5509 /* Returns a function decl for a vectorized version of the builtin function
5510 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5511 if it is not available. */
5512
5513 static tree
5514 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5515 tree type_in)
5516 {
5517 machine_mode in_mode, out_mode;
5518 int in_n, out_n;
5519
5520 if (TARGET_DEBUG_BUILTIN)
5521 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5522 combined_fn_name (combined_fn (fn)),
5523 GET_MODE_NAME (TYPE_MODE (type_out)),
5524 GET_MODE_NAME (TYPE_MODE (type_in)));
5525
5526 if (TREE_CODE (type_out) != VECTOR_TYPE
5527 || TREE_CODE (type_in) != VECTOR_TYPE)
5528 return NULL_TREE;
5529
5530 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5531 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5532 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5533 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5534
5535 switch (fn)
5536 {
5537 CASE_CFN_COPYSIGN:
5538 if (VECTOR_UNIT_VSX_P (V2DFmode)
5539 && out_mode == DFmode && out_n == 2
5540 && in_mode == DFmode && in_n == 2)
5541 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5542 if (VECTOR_UNIT_VSX_P (V4SFmode)
5543 && out_mode == SFmode && out_n == 4
5544 && in_mode == SFmode && in_n == 4)
5545 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5546 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5547 && out_mode == SFmode && out_n == 4
5548 && in_mode == SFmode && in_n == 4)
5549 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5550 break;
5551 CASE_CFN_CEIL:
5552 if (VECTOR_UNIT_VSX_P (V2DFmode)
5553 && out_mode == DFmode && out_n == 2
5554 && in_mode == DFmode && in_n == 2)
5555 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5556 if (VECTOR_UNIT_VSX_P (V4SFmode)
5557 && out_mode == SFmode && out_n == 4
5558 && in_mode == SFmode && in_n == 4)
5559 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5560 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5561 && out_mode == SFmode && out_n == 4
5562 && in_mode == SFmode && in_n == 4)
5563 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5564 break;
5565 CASE_CFN_FLOOR:
5566 if (VECTOR_UNIT_VSX_P (V2DFmode)
5567 && out_mode == DFmode && out_n == 2
5568 && in_mode == DFmode && in_n == 2)
5569 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5570 if (VECTOR_UNIT_VSX_P (V4SFmode)
5571 && out_mode == SFmode && out_n == 4
5572 && in_mode == SFmode && in_n == 4)
5573 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5574 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5575 && out_mode == SFmode && out_n == 4
5576 && in_mode == SFmode && in_n == 4)
5577 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5578 break;
5579 CASE_CFN_FMA:
5580 if (VECTOR_UNIT_VSX_P (V2DFmode)
5581 && out_mode == DFmode && out_n == 2
5582 && in_mode == DFmode && in_n == 2)
5583 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5584 if (VECTOR_UNIT_VSX_P (V4SFmode)
5585 && out_mode == SFmode && out_n == 4
5586 && in_mode == SFmode && in_n == 4)
5587 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5588 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5589 && out_mode == SFmode && out_n == 4
5590 && in_mode == SFmode && in_n == 4)
5591 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5592 break;
5593 CASE_CFN_TRUNC:
5594 if (VECTOR_UNIT_VSX_P (V2DFmode)
5595 && out_mode == DFmode && out_n == 2
5596 && in_mode == DFmode && in_n == 2)
5597 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5598 if (VECTOR_UNIT_VSX_P (V4SFmode)
5599 && out_mode == SFmode && out_n == 4
5600 && in_mode == SFmode && in_n == 4)
5601 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5602 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5603 && out_mode == SFmode && out_n == 4
5604 && in_mode == SFmode && in_n == 4)
5605 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5606 break;
5607 CASE_CFN_NEARBYINT:
5608 if (VECTOR_UNIT_VSX_P (V2DFmode)
5609 && flag_unsafe_math_optimizations
5610 && out_mode == DFmode && out_n == 2
5611 && in_mode == DFmode && in_n == 2)
5612 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5613 if (VECTOR_UNIT_VSX_P (V4SFmode)
5614 && flag_unsafe_math_optimizations
5615 && out_mode == SFmode && out_n == 4
5616 && in_mode == SFmode && in_n == 4)
5617 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5618 break;
5619 CASE_CFN_RINT:
5620 if (VECTOR_UNIT_VSX_P (V2DFmode)
5621 && !flag_trapping_math
5622 && out_mode == DFmode && out_n == 2
5623 && in_mode == DFmode && in_n == 2)
5624 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5625 if (VECTOR_UNIT_VSX_P (V4SFmode)
5626 && !flag_trapping_math
5627 && out_mode == SFmode && out_n == 4
5628 && in_mode == SFmode && in_n == 4)
5629 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5630 break;
5631 default:
5632 break;
5633 }
5634
5635 /* Generate calls to libmass if appropriate. */
5636 if (rs6000_veclib_handler)
5637 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5638
5639 return NULL_TREE;
5640 }
5641
5642 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5643
5644 static tree
5645 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5646 tree type_in)
5647 {
5648 machine_mode in_mode, out_mode;
5649 int in_n, out_n;
5650
5651 if (TARGET_DEBUG_BUILTIN)
5652 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5653 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5654 GET_MODE_NAME (TYPE_MODE (type_out)),
5655 GET_MODE_NAME (TYPE_MODE (type_in)));
5656
5657 if (TREE_CODE (type_out) != VECTOR_TYPE
5658 || TREE_CODE (type_in) != VECTOR_TYPE)
5659 return NULL_TREE;
5660
5661 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5662 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5663 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5664 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5665
5666 enum rs6000_builtins fn
5667 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5668 switch (fn)
5669 {
5670 case RS6000_BUILTIN_RSQRTF:
5671 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5672 && out_mode == SFmode && out_n == 4
5673 && in_mode == SFmode && in_n == 4)
5674 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5675 break;
5676 case RS6000_BUILTIN_RSQRT:
5677 if (VECTOR_UNIT_VSX_P (V2DFmode)
5678 && out_mode == DFmode && out_n == 2
5679 && in_mode == DFmode && in_n == 2)
5680 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5681 break;
5682 case RS6000_BUILTIN_RECIPF:
5683 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5684 && out_mode == SFmode && out_n == 4
5685 && in_mode == SFmode && in_n == 4)
5686 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5687 break;
5688 case RS6000_BUILTIN_RECIP:
5689 if (VECTOR_UNIT_VSX_P (V2DFmode)
5690 && out_mode == DFmode && out_n == 2
5691 && in_mode == DFmode && in_n == 2)
5692 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5693 break;
5694 default:
5695 break;
5696 }
5697 return NULL_TREE;
5698 }
5699 \f
5700 /* Default CPU string for rs6000*_file_start functions. */
5701 static const char *rs6000_default_cpu;
5702
5703 /* Do anything needed at the start of the asm file. */
5704
5705 static void
5706 rs6000_file_start (void)
5707 {
5708 char buffer[80];
5709 const char *start = buffer;
5710 FILE *file = asm_out_file;
5711
5712 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5713
5714 default_file_start ();
5715
5716 if (flag_verbose_asm)
5717 {
5718 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5719
5720 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5721 {
5722 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5723 start = "";
5724 }
5725
5726 if (global_options_set.x_rs6000_cpu_index)
5727 {
5728 fprintf (file, "%s -mcpu=%s", start,
5729 processor_target_table[rs6000_cpu_index].name);
5730 start = "";
5731 }
5732
5733 if (global_options_set.x_rs6000_tune_index)
5734 {
5735 fprintf (file, "%s -mtune=%s", start,
5736 processor_target_table[rs6000_tune_index].name);
5737 start = "";
5738 }
5739
5740 if (PPC405_ERRATUM77)
5741 {
5742 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5743 start = "";
5744 }
5745
5746 #ifdef USING_ELFOS_H
5747 switch (rs6000_sdata)
5748 {
5749 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5750 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5751 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5752 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5753 }
5754
5755 if (rs6000_sdata && g_switch_value)
5756 {
5757 fprintf (file, "%s -G %d", start,
5758 g_switch_value);
5759 start = "";
5760 }
5761 #endif
5762
5763 if (*start == '\0')
5764 putc ('\n', file);
5765 }
5766
5767 #ifdef USING_ELFOS_H
5768 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5769 && !global_options_set.x_rs6000_cpu_index)
5770 {
5771 fputs ("\t.machine ", asm_out_file);
5772 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5773 fputs ("power9\n", asm_out_file);
5774 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5775 fputs ("power8\n", asm_out_file);
5776 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5777 fputs ("power7\n", asm_out_file);
5778 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5779 fputs ("power6\n", asm_out_file);
5780 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5781 fputs ("power5\n", asm_out_file);
5782 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5783 fputs ("power4\n", asm_out_file);
5784 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5785 fputs ("ppc64\n", asm_out_file);
5786 else
5787 fputs ("ppc\n", asm_out_file);
5788 }
5789 #endif
5790
5791 if (DEFAULT_ABI == ABI_ELFv2)
5792 fprintf (file, "\t.abiversion 2\n");
5793 }
5794
5795 \f
5796 /* Return nonzero if this function is known to have a null epilogue. */
5797
5798 int
5799 direct_return (void)
5800 {
5801 if (reload_completed)
5802 {
5803 rs6000_stack_t *info = rs6000_stack_info ();
5804
5805 if (info->first_gp_reg_save == 32
5806 && info->first_fp_reg_save == 64
5807 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5808 && ! info->lr_save_p
5809 && ! info->cr_save_p
5810 && info->vrsave_size == 0
5811 && ! info->push_p)
5812 return 1;
5813 }
5814
5815 return 0;
5816 }
5817
5818 /* Return the number of instructions it takes to form a constant in an
5819 integer register. */
5820
5821 int
5822 num_insns_constant_wide (HOST_WIDE_INT value)
5823 {
5824 /* Signed constant loadable with addi. */
5825 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5826 return 1;
5827
5828 /* Constant loadable with addis. */
5829 else if ((value & 0xffff) == 0
5830 && (value >> 31 == -1 || value >> 31 == 0))
5831 return 1;
5832
5833 else if (TARGET_POWERPC64)
5834 {
5835 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5836 HOST_WIDE_INT high = value >> 31;
5837
5838 if (high == 0 || high == -1)
5839 return 2;
5840
5841 high >>= 1;
5842
5843 if (low == 0)
5844 return num_insns_constant_wide (high) + 1;
5845 else if (high == 0)
5846 return num_insns_constant_wide (low) + 1;
5847 else
5848 return (num_insns_constant_wide (high)
5849 + num_insns_constant_wide (low) + 1);
5850 }
5851
5852 else
5853 return 2;
5854 }
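
/* Worked example (illustrative, not from the original source): for
   VALUE = 0x12345, (VALUE + 0x8000) >= 0x10000 rules out a single addi,
   and the nonzero low 16 bits rule out a single addis, so the function
   returns 2 (e.g. lis of 1 followed by ori of 0x2345).  */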
5855
5856 int
5857 num_insns_constant (rtx op, machine_mode mode)
5858 {
5859 HOST_WIDE_INT low, high;
5860
5861 switch (GET_CODE (op))
5862 {
5863 case CONST_INT:
5864 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
5865 && rs6000_is_valid_and_mask (op, mode))
5866 return 2;
5867 else
5868 return num_insns_constant_wide (INTVAL (op));
5869
5870 case CONST_WIDE_INT:
5871 {
5872 int i;
5873 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
5874 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5875 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
5876 return ins;
5877 }
5878
5879 case CONST_DOUBLE:
5880 if (mode == SFmode || mode == SDmode)
5881 {
5882 long l;
5883
5884 if (DECIMAL_FLOAT_MODE_P (mode))
5885 REAL_VALUE_TO_TARGET_DECIMAL32
5886 (*CONST_DOUBLE_REAL_VALUE (op), l);
5887 else
5888 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5889 return num_insns_constant_wide ((HOST_WIDE_INT) l);
5890 }
5891
5892 long l[2];
5893 if (DECIMAL_FLOAT_MODE_P (mode))
5894 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
5895 else
5896 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5897 high = l[WORDS_BIG_ENDIAN == 0];
5898 low = l[WORDS_BIG_ENDIAN != 0];
5899
5900 if (TARGET_32BIT)
5901 return (num_insns_constant_wide (low)
5902 + num_insns_constant_wide (high));
5903 else
5904 {
5905 if ((high == 0 && low >= 0)
5906 || (high == -1 && low < 0))
5907 return num_insns_constant_wide (low);
5908
5909 else if (rs6000_is_valid_and_mask (op, mode))
5910 return 2;
5911
5912 else if (low == 0)
5913 return num_insns_constant_wide (high) + 1;
5914
5915 else
5916 return (num_insns_constant_wide (high)
5917 + num_insns_constant_wide (low) + 1);
5918 }
5919
5920 default:
5921 gcc_unreachable ();
5922 }
5923 }
5924
5925 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5926 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5927 corresponding element of the vector, but for V4SFmode, the
5928 corresponding "float" is interpreted as an SImode integer. */
5929
5930 HOST_WIDE_INT
5931 const_vector_elt_as_int (rtx op, unsigned int elt)
5932 {
5933 rtx tmp;
5934
5935 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5936 gcc_assert (GET_MODE (op) != V2DImode
5937 && GET_MODE (op) != V2DFmode);
5938
5939 tmp = CONST_VECTOR_ELT (op, elt);
5940 if (GET_MODE (op) == V4SFmode)
5941 tmp = gen_lowpart (SImode, tmp);
5942 return INTVAL (tmp);
5943 }
5944
5945 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5946 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5947 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5948 all items are set to the same value and contain COPIES replicas of the
5949 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5950 operand and the others are set to the value of the operand's msb. */
5951
5952 static bool
5953 vspltis_constant (rtx op, unsigned step, unsigned copies)
5954 {
5955 machine_mode mode = GET_MODE (op);
5956 machine_mode inner = GET_MODE_INNER (mode);
5957
5958 unsigned i;
5959 unsigned nunits;
5960 unsigned bitsize;
5961 unsigned mask;
5962
5963 HOST_WIDE_INT val;
5964 HOST_WIDE_INT splat_val;
5965 HOST_WIDE_INT msb_val;
5966
5967 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5968 return false;
5969
5970 nunits = GET_MODE_NUNITS (mode);
5971 bitsize = GET_MODE_BITSIZE (inner);
5972 mask = GET_MODE_MASK (inner);
5973
5974 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5975 splat_val = val;
5976 msb_val = val >= 0 ? 0 : -1;
5977
5978 /* Construct the value to be splatted, if possible. If not, return false. */
5979 for (i = 2; i <= copies; i *= 2)
5980 {
5981 HOST_WIDE_INT small_val;
5982 bitsize /= 2;
5983 small_val = splat_val >> bitsize;
5984 mask >>= bitsize;
5985 if (splat_val != ((HOST_WIDE_INT)
5986 ((unsigned HOST_WIDE_INT) small_val << bitsize)
5987 | (small_val & mask)))
5988 return false;
5989 splat_val = small_val;
5990 }
5991
5992 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5993 if (EASY_VECTOR_15 (splat_val))
5994 ;
5995
5996 /* Also check if we can splat, and then add the result to itself. Do so if
5997 the value is positive, or if the splat instruction is using OP's mode;
5998 for splat_val < 0, the splat and the add should use the same mode. */
5999 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6000 && (splat_val >= 0 || (step == 1 && copies == 1)))
6001 ;
6002
6003 /* Also check if we are loading up the most significant bit, which can be
6004 done by loading up -1 and shifting the value left by -1. */
6005 else if (EASY_VECTOR_MSB (splat_val, inner))
6006 ;
6007
6008 else
6009 return false;
6010
6011 /* Check if VAL is present in every STEP-th element, and the
6012 other elements are filled with its most significant bit. */
6013 for (i = 1; i < nunits; ++i)
6014 {
6015 HOST_WIDE_INT desired_val;
6016 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6017 if ((i & (step - 1)) == 0)
6018 desired_val = val;
6019 else
6020 desired_val = msb_val;
6021
6022 if (desired_val != const_vector_elt_as_int (op, elt))
6023 return false;
6024 }
6025
6026 return true;
6027 }
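
/* Worked example (illustrative, not from the original source): the
   V4SImode constant { 0x50005, 0x50005, 0x50005, 0x50005 } is accepted
   with STEP = 1, COPIES = 2: each 32-bit element is two copies of the
   halfword 5, so the whole vector can be generated by vspltish of 5.  */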
6028
6029 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6030 instruction, filling in the bottom elements with 0 or -1.
6031
6032 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6033 for the number of zeroes to shift in, or negative for the number of 0xff
6034 bytes to shift in.
6035
6036 OP is a CONST_VECTOR. */
6037
6038 int
6039 vspltis_shifted (rtx op)
6040 {
6041 machine_mode mode = GET_MODE (op);
6042 machine_mode inner = GET_MODE_INNER (mode);
6043
6044 unsigned i, j;
6045 unsigned nunits;
6046 unsigned mask;
6047
6048 HOST_WIDE_INT val;
6049
6050 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6051 return 0;
6052
6053 /* We need to create pseudo registers to do the shift, so don't recognize
6054 shift vector constants after reload. */
6055 if (!can_create_pseudo_p ())
6056 return 0;
6057
6058 nunits = GET_MODE_NUNITS (mode);
6059 mask = GET_MODE_MASK (inner);
6060
6061 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6062
6063 /* Check if the value can really be the operand of a vspltis[bhw]. */
6064 if (EASY_VECTOR_15 (val))
6065 ;
6066
6067 /* Also check if we are loading up the most significant bit which can be done
6068 by loading up -1 and shifting the value left by -1. */
6069 else if (EASY_VECTOR_MSB (val, inner))
6070 ;
6071
6072 else
6073 return 0;
6074
6075 /* Check if VAL is present in every STEP-th element until we find elements
6076 that are 0 or all 1 bits. */
6077 for (i = 1; i < nunits; ++i)
6078 {
6079 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6080 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6081
6082 /* If the value isn't the splat value, check for the remaining elements
6083 being 0/-1. */
6084 if (val != elt_val)
6085 {
6086 if (elt_val == 0)
6087 {
6088 for (j = i+1; j < nunits; ++j)
6089 {
6090 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6091 if (const_vector_elt_as_int (op, elt2) != 0)
6092 return 0;
6093 }
6094
6095 return (nunits - i) * GET_MODE_SIZE (inner);
6096 }
6097
6098 else if ((elt_val & mask) == mask)
6099 {
6100 for (j = i+1; j < nunits; ++j)
6101 {
6102 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6103 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6104 return 0;
6105 }
6106
6107 return -((nunits - i) * GET_MODE_SIZE (inner));
6108 }
6109
6110 else
6111 return 0;
6112 }
6113 }
6114
6115 /* If all elements are equal, we don't need to do VSLDOI. */
6116 return 0;
6117 }
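
/* Worked example (illustrative, not from the original source): the
   big-endian V4SImode constant { 5, 0, 0, 0 } splats 5 and finds three
   trailing zero words, so the function returns (4 - 1) * 4 = 12, i.e.
   vspltisw of 5 followed by a VSLDOI shifting in 12 zero bytes.  */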
6118
6119
6120 /* Return true if OP is of the given MODE and can be synthesized
6121 with a vspltisb, vspltish or vspltisw. */
6122
6123 bool
6124 easy_altivec_constant (rtx op, machine_mode mode)
6125 {
6126 unsigned step, copies;
6127
6128 if (mode == VOIDmode)
6129 mode = GET_MODE (op);
6130 else if (mode != GET_MODE (op))
6131 return false;
6132
6133 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6134 constants. */
6135 if (mode == V2DFmode)
6136 return zero_constant (op, mode);
6137
6138 else if (mode == V2DImode)
6139 {
6140 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6141 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6142 return false;
6143
6144 if (zero_constant (op, mode))
6145 return true;
6146
6147 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6148 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6149 return true;
6150
6151 return false;
6152 }
6153
6154 /* V1TImode is a special container for TImode. Ignore for now. */
6155 else if (mode == V1TImode)
6156 return false;
6157
6158 /* Start with a vspltisw. */
6159 step = GET_MODE_NUNITS (mode) / 4;
6160 copies = 1;
6161
6162 if (vspltis_constant (op, step, copies))
6163 return true;
6164
6165 /* Then try with a vspltish. */
6166 if (step == 1)
6167 copies <<= 1;
6168 else
6169 step >>= 1;
6170
6171 if (vspltis_constant (op, step, copies))
6172 return true;
6173
6174 /* And finally a vspltisb. */
6175 if (step == 1)
6176 copies <<= 1;
6177 else
6178 step >>= 1;
6179
6180 if (vspltis_constant (op, step, copies))
6181 return true;
6182
6183 if (vspltis_shifted (op) != 0)
6184 return true;
6185
6186 return false;
6187 }
6188
6189 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6190 result is OP. Abort if it is not possible. */
6191
6192 rtx
6193 gen_easy_altivec_constant (rtx op)
6194 {
6195 machine_mode mode = GET_MODE (op);
6196 int nunits = GET_MODE_NUNITS (mode);
6197 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6198 unsigned step = nunits / 4;
6199 unsigned copies = 1;
6200
6201 /* Start with a vspltisw. */
6202 if (vspltis_constant (op, step, copies))
6203 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6204
6205 /* Then try with a vspltish. */
6206 if (step == 1)
6207 copies <<= 1;
6208 else
6209 step >>= 1;
6210
6211 if (vspltis_constant (op, step, copies))
6212 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6213
6214 /* And finally a vspltisb. */
6215 if (step == 1)
6216 copies <<= 1;
6217 else
6218 step >>= 1;
6219
6220 if (vspltis_constant (op, step, copies))
6221 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6222
6223 gcc_unreachable ();
6224 }
6225
6226 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6227 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6228
6229 Return the number of instructions needed (1 or 2) via the address pointed
6230 to by NUM_INSNS_PTR.
6231
6232 Return the constant that is being splatted via CONSTANT_PTR. */
6233
6234 bool
6235 xxspltib_constant_p (rtx op,
6236 machine_mode mode,
6237 int *num_insns_ptr,
6238 int *constant_ptr)
6239 {
6240 size_t nunits = GET_MODE_NUNITS (mode);
6241 size_t i;
6242 HOST_WIDE_INT value;
6243 rtx element;
6244
6245 /* Set the returned values to out of bound values. */
6246 *num_insns_ptr = -1;
6247 *constant_ptr = 256;
6248
6249 if (!TARGET_P9_VECTOR)
6250 return false;
6251
6252 if (mode == VOIDmode)
6253 mode = GET_MODE (op);
6254
6255 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6256 return false;
6257
6258 /* Handle (vec_duplicate <constant>). */
6259 if (GET_CODE (op) == VEC_DUPLICATE)
6260 {
6261 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6262 && mode != V2DImode)
6263 return false;
6264
6265 element = XEXP (op, 0);
6266 if (!CONST_INT_P (element))
6267 return false;
6268
6269 value = INTVAL (element);
6270 if (!IN_RANGE (value, -128, 127))
6271 return false;
6272 }
6273
6274 /* Handle (const_vector [...]). */
6275 else if (GET_CODE (op) == CONST_VECTOR)
6276 {
6277 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6278 && mode != V2DImode)
6279 return false;
6280
6281 element = CONST_VECTOR_ELT (op, 0);
6282 if (!CONST_INT_P (element))
6283 return false;
6284
6285 value = INTVAL (element);
6286 if (!IN_RANGE (value, -128, 127))
6287 return false;
6288
6289 for (i = 1; i < nunits; i++)
6290 {
6291 element = CONST_VECTOR_ELT (op, i);
6292 if (!CONST_INT_P (element))
6293 return false;
6294
6295 if (value != INTVAL (element))
6296 return false;
6297 }
6298 }
6299
6300 /* Handle integer constants being loaded into the upper part of the VSX
6301 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6302 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6303 else if (CONST_INT_P (op))
6304 {
6305 if (!SCALAR_INT_MODE_P (mode))
6306 return false;
6307
6308 value = INTVAL (op);
6309 if (!IN_RANGE (value, -128, 127))
6310 return false;
6311
6312 if (!IN_RANGE (value, -1, 0))
6313 {
6314 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6315 return false;
6316
6317 if (EASY_VECTOR_15 (value))
6318 return false;
6319 }
6320 }
6321
6322 else
6323 return false;
6324
6325 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6326 sign extend. Special case 0/-1 to allow getting any VSX register instead
6327 of an Altivec register. */
6328 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6329 && EASY_VECTOR_15 (value))
6330 return false;
6331
6332 /* Return # of instructions and the constant byte for XXSPLTIB. */
6333 if (mode == V16QImode)
6334 *num_insns_ptr = 1;
6335
6336 else if (IN_RANGE (value, -1, 0))
6337 *num_insns_ptr = 1;
6338
6339 else
6340 *num_insns_ptr = 2;
6341
6342 *constant_ptr = (int) value;
6343 return true;
6344 }
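
/* Worked example (illustrative, not from the original source): a
   V8HImode vector with every element equal to 100 is out of vspltish
   range (EASY_VECTOR_15 covers -16..15), so with ISA 3.0 the function
   returns true with *NUM_INSNS_PTR = 2 and *CONSTANT_PTR = 100: an
   xxspltib of 100 followed by a sign extension.  For V16QImode the
   xxspltib alone suffices and *NUM_INSNS_PTR is 1.  */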
6345
6346 const char *
6347 output_vec_const_move (rtx *operands)
6348 {
6349 int shift;
6350 machine_mode mode;
6351 rtx dest, vec;
6352
6353 dest = operands[0];
6354 vec = operands[1];
6355 mode = GET_MODE (dest);
6356
6357 if (TARGET_VSX)
6358 {
6359 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6360 int xxspltib_value = 256;
6361 int num_insns = -1;
6362
6363 if (zero_constant (vec, mode))
6364 {
6365 if (TARGET_P9_VECTOR)
6366 return "xxspltib %x0,0";
6367
6368 else if (dest_vmx_p)
6369 return "vspltisw %0,0";
6370
6371 else
6372 return "xxlxor %x0,%x0,%x0";
6373 }
6374
6375 if (all_ones_constant (vec, mode))
6376 {
6377 if (TARGET_P9_VECTOR)
6378 return "xxspltib %x0,255";
6379
6380 else if (dest_vmx_p)
6381 return "vspltisw %0,-1";
6382
6383 else if (TARGET_P8_VECTOR)
6384 return "xxlorc %x0,%x0,%x0";
6385
6386 else
6387 gcc_unreachable ();
6388 }
6389
6390 if (TARGET_P9_VECTOR
6391 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6392 {
6393 if (num_insns == 1)
6394 {
6395 operands[2] = GEN_INT (xxspltib_value & 0xff);
6396 return "xxspltib %x0,%2";
6397 }
6398
6399 return "#";
6400 }
6401 }
6402
6403 if (TARGET_ALTIVEC)
6404 {
6405 rtx splat_vec;
6406
6407 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6408 if (zero_constant (vec, mode))
6409 return "vspltisw %0,0";
6410
6411 if (all_ones_constant (vec, mode))
6412 return "vspltisw %0,-1";
6413
6414 /* Do we need to construct a value using VSLDOI? */
6415 shift = vspltis_shifted (vec);
6416 if (shift != 0)
6417 return "#";
6418
6419 splat_vec = gen_easy_altivec_constant (vec);
6420 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6421 operands[1] = XEXP (splat_vec, 0);
6422 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6423 return "#";
6424
6425 switch (GET_MODE (splat_vec))
6426 {
6427 case E_V4SImode:
6428 return "vspltisw %0,%1";
6429
6430 case E_V8HImode:
6431 return "vspltish %0,%1";
6432
6433 case E_V16QImode:
6434 return "vspltisb %0,%1";
6435
6436 default:
6437 gcc_unreachable ();
6438 }
6439 }
6440
6441 gcc_unreachable ();
6442 }
6443
6444 /* Initialize vector TARGET to VALS. */
6445
6446 void
6447 rs6000_expand_vector_init (rtx target, rtx vals)
6448 {
6449 machine_mode mode = GET_MODE (target);
6450 machine_mode inner_mode = GET_MODE_INNER (mode);
6451 int n_elts = GET_MODE_NUNITS (mode);
6452 int n_var = 0, one_var = -1;
6453 bool all_same = true, all_const_zero = true;
6454 rtx x, mem;
6455 int i;
6456
6457 for (i = 0; i < n_elts; ++i)
6458 {
6459 x = XVECEXP (vals, 0, i);
6460 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6461 ++n_var, one_var = i;
6462 else if (x != CONST0_RTX (inner_mode))
6463 all_const_zero = false;
6464
6465 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6466 all_same = false;
6467 }
6468
6469 if (n_var == 0)
6470 {
6471 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6472 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6473 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6474 {
6475 /* Zero register. */
6476 emit_move_insn (target, CONST0_RTX (mode));
6477 return;
6478 }
6479 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6480 {
6481 /* Splat immediate. */
6482 emit_insn (gen_rtx_SET (target, const_vec));
6483 return;
6484 }
6485 else
6486 {
6487 /* Load from constant pool. */
6488 emit_move_insn (target, const_vec);
6489 return;
6490 }
6491 }
6492
6493 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6494 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6495 {
6496 rtx op[2];
6497 size_t i;
6498 size_t num_elements = all_same ? 1 : 2;
6499 for (i = 0; i < num_elements; i++)
6500 {
6501 op[i] = XVECEXP (vals, 0, i);
6502 /* Just in case there is a SUBREG with a smaller mode, do a
6503 conversion. */
6504 if (GET_MODE (op[i]) != inner_mode)
6505 {
6506 rtx tmp = gen_reg_rtx (inner_mode);
6507 convert_move (tmp, op[i], 0);
6508 op[i] = tmp;
6509 }
6510 /* Allow load with splat double word. */
6511 else if (MEM_P (op[i]))
6512 {
6513 if (!all_same)
6514 op[i] = force_reg (inner_mode, op[i]);
6515 }
6516 else if (!REG_P (op[i]))
6517 op[i] = force_reg (inner_mode, op[i]);
6518 }
6519
6520 if (all_same)
6521 {
6522 if (mode == V2DFmode)
6523 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6524 else
6525 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6526 }
6527 else
6528 {
6529 if (mode == V2DFmode)
6530 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6531 else
6532 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6533 }
6534 return;
6535 }
6536
6537 /* Special case initializing vector int if we are on 64-bit systems with
6538 direct move or we have the ISA 3.0 instructions. */
6539 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6540 && TARGET_DIRECT_MOVE_64BIT)
6541 {
6542 if (all_same)
6543 {
6544 rtx element0 = XVECEXP (vals, 0, 0);
6545 if (MEM_P (element0))
6546 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6547 else
6548 element0 = force_reg (SImode, element0);
6549
6550 if (TARGET_P9_VECTOR)
6551 emit_insn (gen_vsx_splat_v4si (target, element0));
6552 else
6553 {
6554 rtx tmp = gen_reg_rtx (DImode);
6555 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6556 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6557 }
6558 return;
6559 }
6560 else
6561 {
6562 rtx elements[4];
6563 size_t i;
6564
6565 for (i = 0; i < 4; i++)
6566 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6567
6568 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6569 elements[2], elements[3]));
6570 return;
6571 }
6572 }
6573
6574 /* With single precision floating point on VSX, we know that internally
6575 single precision is actually represented as a double; either make 2 V2DF
6576 vectors and convert these vectors to single precision, or do one
6577 conversion and splat the result to the other elements. */
6578 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6579 {
6580 if (all_same)
6581 {
6582 rtx element0 = XVECEXP (vals, 0, 0);
6583
6584 if (TARGET_P9_VECTOR)
6585 {
6586 if (MEM_P (element0))
6587 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6588
6589 emit_insn (gen_vsx_splat_v4sf (target, element0));
6590 }
6591
6592 else
6593 {
6594 rtx freg = gen_reg_rtx (V4SFmode);
6595 rtx sreg = force_reg (SFmode, element0);
6596 rtx cvt = (TARGET_XSCVDPSPN
6597 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6598 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6599
6600 emit_insn (cvt);
6601 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6602 const0_rtx));
6603 }
6604 }
6605 else
6606 {
6607 rtx dbl_even = gen_reg_rtx (V2DFmode);
6608 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6609 rtx flt_even = gen_reg_rtx (V4SFmode);
6610 rtx flt_odd = gen_reg_rtx (V4SFmode);
6611 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6612 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6613 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6614 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6615
6616 /* Use VMRGEW if we can instead of doing a permute. */
6617 if (TARGET_P8_VECTOR)
6618 {
6619 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6620 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6621 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6622 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6623 if (BYTES_BIG_ENDIAN)
6624 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6625 else
6626 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6627 }
6628 else
6629 {
6630 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6631 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6632 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6633 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6634 rs6000_expand_extract_even (target, flt_even, flt_odd);
6635 }
6636 }
6637 return;
6638 }
6639
6640 /* Special case initializing vector short/char that are splats if we are on
6641 64-bit systems with direct move. */
6642 if (all_same && TARGET_DIRECT_MOVE_64BIT
6643 && (mode == V16QImode || mode == V8HImode))
6644 {
6645 rtx op0 = XVECEXP (vals, 0, 0);
6646 rtx di_tmp = gen_reg_rtx (DImode);
6647
6648 if (!REG_P (op0))
6649 op0 = force_reg (GET_MODE_INNER (mode), op0);
6650
6651 if (mode == V16QImode)
6652 {
6653 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6654 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6655 return;
6656 }
6657
6658 if (mode == V8HImode)
6659 {
6660 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6661 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6662 return;
6663 }
6664 }
6665
6666 /* Store value to stack temp. Load vector element. Splat. However, splat
6667 of 64-bit items is not supported on Altivec. */
6668 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6669 {
6670 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6671 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6672 XVECEXP (vals, 0, 0));
6673 x = gen_rtx_UNSPEC (VOIDmode,
6674 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6675 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6676 gen_rtvec (2,
6677 gen_rtx_SET (target, mem),
6678 x)));
6679 x = gen_rtx_VEC_SELECT (inner_mode, target,
6680 gen_rtx_PARALLEL (VOIDmode,
6681 gen_rtvec (1, const0_rtx)));
6682 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6683 return;
6684 }
6685
6686 /* One field is non-constant. Load constant then overwrite
6687 varying field. */
6688 if (n_var == 1)
6689 {
6690 rtx copy = copy_rtx (vals);
6691
6692 /* Load constant part of vector, substitute neighboring value for
6693 varying element. */
6694 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6695 rs6000_expand_vector_init (target, copy);
6696
6697 /* Insert variable. */
6698 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6699 return;
6700 }
6701
6702 /* Construct the vector in memory one field at a time
6703 and load the whole vector. */
6704 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6705 for (i = 0; i < n_elts; i++)
6706 emit_move_insn (adjust_address_nv (mem, inner_mode,
6707 i * GET_MODE_SIZE (inner_mode)),
6708 XVECEXP (vals, 0, i));
6709 emit_move_insn (target, mem);
6710 }
6711
6712 /* Set field ELT of TARGET to VAL. */
6713
6714 void
6715 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6716 {
6717 machine_mode mode = GET_MODE (target);
6718 machine_mode inner_mode = GET_MODE_INNER (mode);
6719 rtx reg = gen_reg_rtx (mode);
6720 rtx mask, mem, x;
6721 int width = GET_MODE_SIZE (inner_mode);
6722 int i;
6723
6724 val = force_reg (GET_MODE (val), val);
6725
6726 if (VECTOR_MEM_VSX_P (mode))
6727 {
6728 rtx insn = NULL_RTX;
6729 rtx elt_rtx = GEN_INT (elt);
6730
6731 if (mode == V2DFmode)
6732 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6733
6734 else if (mode == V2DImode)
6735 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6736
6737 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6738 {
6739 if (mode == V4SImode)
6740 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6741 else if (mode == V8HImode)
6742 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6743 else if (mode == V16QImode)
6744 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6745 else if (mode == V4SFmode)
6746 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6747 }
6748
6749 if (insn)
6750 {
6751 emit_insn (insn);
6752 return;
6753 }
6754 }
6755
6756 /* Simplify setting single element vectors like V1TImode. */
6757 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6758 {
6759 emit_move_insn (target, gen_lowpart (mode, val));
6760 return;
6761 }
6762
6763 /* Load single variable value. */
6764 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6765 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6766 x = gen_rtx_UNSPEC (VOIDmode,
6767 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6768 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6769 gen_rtvec (2,
6770 gen_rtx_SET (reg, mem),
6771 x)));
6772
6773 /* Linear sequence. */
6774 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6775 for (i = 0; i < 16; ++i)
6776 XVECEXP (mask, 0, i) = GEN_INT (i);
6777
6778 /* Set permute mask to insert element into target. */
6779 for (i = 0; i < width; ++i)
6780 XVECEXP (mask, 0, elt*width + i)
6781 = GEN_INT (i + 0x10);
6782 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6783
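/* In a vperm selector (big-endian byte numbering), byte values 0-15
select bytes of the first source operand and 0x10-0x1f select bytes of
the second, so this identity map with ELT's byte positions redirected
to REG copies TARGET unchanged except for the inserted element. The
little endian paths below adjust the selector accordingly. */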
6784 if (BYTES_BIG_ENDIAN)
6785 x = gen_rtx_UNSPEC (mode,
6786 gen_rtvec (3, target, reg,
6787 force_reg (V16QImode, x)),
6788 UNSPEC_VPERM);
6789 else
6790 {
6791 if (TARGET_P9_VECTOR)
6792 x = gen_rtx_UNSPEC (mode,
6793 gen_rtvec (3, reg, target,
6794 force_reg (V16QImode, x)),
6795 UNSPEC_VPERMR);
6796 else
6797 {
6798 /* Invert selector. We prefer to generate VNAND on P8 so
6799 that future fusion opportunities can kick in, but must
6800 generate VNOR elsewhere. */
6801 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6802 rtx iorx = (TARGET_P8_VECTOR
6803 ? gen_rtx_IOR (V16QImode, notx, notx)
6804 : gen_rtx_AND (V16QImode, notx, notx));
6805 rtx tmp = gen_reg_rtx (V16QImode);
6806 emit_insn (gen_rtx_SET (tmp, iorx));
6807
6808 /* Permute with operands reversed and adjusted selector. */
6809 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6810 UNSPEC_VPERM);
6811 }
6812 }
6813
6814 emit_insn (gen_rtx_SET (target, x));
6815 }
6816
6817 /* Extract field ELT from VEC into TARGET. */
6818
6819 void
6820 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6821 {
6822 machine_mode mode = GET_MODE (vec);
6823 machine_mode inner_mode = GET_MODE_INNER (mode);
6824 rtx mem;
6825
6826 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6827 {
6828 switch (mode)
6829 {
6830 default:
6831 break;
6832 case E_V1TImode:
6833 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
6834 emit_move_insn (target, gen_lowpart (TImode, vec));
6835 return;
6836 case E_V2DFmode:
6837 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6838 return;
6839 case E_V2DImode:
6840 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6841 return;
6842 case E_V4SFmode:
6843 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6844 return;
6845 case E_V16QImode:
6846 if (TARGET_DIRECT_MOVE_64BIT)
6847 {
6848 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6849 return;
6850 }
6851 else
6852 break;
6853 case E_V8HImode:
6854 if (TARGET_DIRECT_MOVE_64BIT)
6855 {
6856 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6857 return;
6858 }
6859 else
6860 break;
6861 case E_V4SImode:
6862 if (TARGET_DIRECT_MOVE_64BIT)
6863 {
6864 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6865 return;
6866 }
6867 break;
6868 }
6869 }
6870 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6871 && TARGET_DIRECT_MOVE_64BIT)
6872 {
6873 if (GET_MODE (elt) != DImode)
6874 {
6875 rtx tmp = gen_reg_rtx (DImode);
6876 convert_move (tmp, elt, 0);
6877 elt = tmp;
6878 }
6879 else if (!REG_P (elt))
6880 elt = force_reg (DImode, elt);
6881
6882 switch (mode)
6883 {
6884 case E_V2DFmode:
6885 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6886 return;
6887
6888 case E_V2DImode:
6889 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6890 return;
6891
6892 case E_V4SFmode:
6893 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6894 return;
6895
6896 case E_V4SImode:
6897 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6898 return;
6899
6900 case E_V8HImode:
6901 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6902 return;
6903
6904 case E_V16QImode:
6905 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6906 return;
6907
6908 default:
6909 gcc_unreachable ();
6910 }
6911 }
6912
6913 gcc_assert (CONST_INT_P (elt));
6914
6915 /* Allocate mode-sized buffer. */
6916 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6917
6918 emit_move_insn (mem, vec);
6919
6920 /* Add offset to field within buffer matching vector element. */
6921 mem = adjust_address_nv (mem, inner_mode,
6922 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
6923
6924 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6925 }
6926
6927 /* Helper function to return the register number of an RTX. */
6928 static inline int
6929 regno_or_subregno (rtx op)
6930 {
6931 if (REG_P (op))
6932 return REGNO (op);
6933 else if (SUBREG_P (op))
6934 return subreg_regno (op);
6935 else
6936 gcc_unreachable ();
6937 }
6938
6939 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6940 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6941 temporary (BASE_TMP) to fix up the address. Return the new memory address
6942 that is valid for reads or writes to a given register (SCALAR_REG). */
6943
6944 rtx
6945 rs6000_adjust_vec_address (rtx scalar_reg,
6946 rtx mem,
6947 rtx element,
6948 rtx base_tmp,
6949 machine_mode scalar_mode)
6950 {
6951 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
6952 rtx addr = XEXP (mem, 0);
6953 rtx element_offset;
6954 rtx new_addr;
6955 bool valid_addr_p;
6956
6957 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
6958 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
6959
6960 /* Calculate what we need to add to the address to get the element
6961 address. */
6962 if (CONST_INT_P (element))
6963 element_offset = GEN_INT (INTVAL (element) * scalar_size);
6964 else
6965 {
6966 int byte_shift = exact_log2 (scalar_size);
6967 gcc_assert (byte_shift >= 0);
6968
6969 if (byte_shift == 0)
6970 element_offset = element;
6971
6972 else
6973 {
6974 if (TARGET_POWERPC64)
6975 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
6976 else
6977 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
6978
6979 element_offset = base_tmp;
6980 }
6981 }
6982
6983 /* Create the new address pointing to the element within the vector. If we
6984 are adding 0, we don't have to change the address. */
6985 if (element_offset == const0_rtx)
6986 new_addr = addr;
6987
6988 /* A simple indirect address can be converted into a reg + offset
6989 address. */
6990 else if (REG_P (addr) || SUBREG_P (addr))
6991 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
6992
6993 /* Optimize D-FORM addresses with a constant offset and a constant element
6994 number, folding the element offset into the address directly. */
6995 else if (GET_CODE (addr) == PLUS)
6996 {
6997 rtx op0 = XEXP (addr, 0);
6998 rtx op1 = XEXP (addr, 1);
6999 rtx insn;
7000
7001 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7002 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7003 {
7004 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7005 rtx offset_rtx = GEN_INT (offset);
7006
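/* A D-form displacement is a signed 16-bit immediate; 8-byte scalars
may be accessed with DS-form instructions such as ld/std, whose
displacement must also be a multiple of 4. */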
7007 if (IN_RANGE (offset, -32768, 32767)
7008 && (scalar_size < 8 || (offset & 0x3) == 0))
7009 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7010 else
7011 {
7012 emit_move_insn (base_tmp, offset_rtx);
7013 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7014 }
7015 }
7016 else
7017 {
7018 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7019 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7020
7021 /* Note, ADDI requires the register being added to be a base
7022 register. If the register was R0, load it up into the temporary
7023 and do the add. */
7024 if (op1_reg_p
7025 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7026 {
7027 insn = gen_add3_insn (base_tmp, op1, element_offset);
7028 gcc_assert (insn != NULL_RTX);
7029 emit_insn (insn);
7030 }
7031
7032 else if (ele_reg_p
7033 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7034 {
7035 insn = gen_add3_insn (base_tmp, element_offset, op1);
7036 gcc_assert (insn != NULL_RTX);
7037 emit_insn (insn);
7038 }
7039
7040 else
7041 {
7042 emit_move_insn (base_tmp, op1);
7043 emit_insn (gen_add2_insn (base_tmp, element_offset));
7044 }
7045
7046 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7047 }
7048 }
7049
7050 else
7051 {
7052 emit_move_insn (base_tmp, addr);
7053 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7054 }
7055
7056 /* If we have a PLUS, we need to see whether the particular register class
7057 allows for D-FORM or X-FORM addressing. */
7058 if (GET_CODE (new_addr) == PLUS)
7059 {
7060 rtx op1 = XEXP (new_addr, 1);
7061 addr_mask_type addr_mask;
7062 int scalar_regno = regno_or_subregno (scalar_reg);
7063
7064 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7065 if (INT_REGNO_P (scalar_regno))
7066 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7067
7068 else if (FP_REGNO_P (scalar_regno))
7069 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7070
7071 else if (ALTIVEC_REGNO_P (scalar_regno))
7072 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7073
7074 else
7075 gcc_unreachable ();
7076
7077 if (REG_P (op1) || SUBREG_P (op1))
7078 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7079 else
7080 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7081 }
7082
7083 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7084 valid_addr_p = true;
7085
7086 else
7087 valid_addr_p = false;
7088
7089 if (!valid_addr_p)
7090 {
7091 emit_move_insn (base_tmp, new_addr);
7092 new_addr = base_tmp;
7093 }
7094
7095 return change_address (mem, scalar_mode, new_addr);
7096 }
7097
7098 /* Split a variable vec_extract operation into the component instructions. */
7099
7100 void
7101 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7102 rtx tmp_altivec)
7103 {
7104 machine_mode mode = GET_MODE (src);
7105 machine_mode scalar_mode = GET_MODE (dest);
7106 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7107 int byte_shift = exact_log2 (scalar_size);
7108
7109 gcc_assert (byte_shift >= 0);
7110
7111 /* If we are given a memory address, optimize to load just the element. We
7112 don't have to adjust the vector element number on little endian
7113 systems. */
7114 if (MEM_P (src))
7115 {
7116 gcc_assert (REG_P (tmp_gpr));
7117 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7118 tmp_gpr, scalar_mode));
7119 return;
7120 }
7121
7122 else if (REG_P (src) || SUBREG_P (src))
7123 {
7124 int bit_shift = byte_shift + 3;
7125 rtx element2;
7126 int dest_regno = regno_or_subregno (dest);
7127 int src_regno = regno_or_subregno (src);
7128 int element_regno = regno_or_subregno (element);
7129
7130 gcc_assert (REG_P (tmp_gpr));
7131
7132 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7133 a general purpose register. */
7134 if (TARGET_P9_VECTOR
7135 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7136 && INT_REGNO_P (dest_regno)
7137 && ALTIVEC_REGNO_P (src_regno)
7138 && INT_REGNO_P (element_regno))
7139 {
7140 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7141 rtx element_si = gen_rtx_REG (SImode, element_regno);
7142
7143 if (mode == V16QImode)
7144 emit_insn (BYTES_BIG_ENDIAN
7145 ? gen_vextublx (dest_si, element_si, src)
7146 : gen_vextubrx (dest_si, element_si, src));
7147
7148 else if (mode == V8HImode)
7149 {
7150 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7151 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7152 emit_insn (BYTES_BIG_ENDIAN
7153 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7154 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7155 }
7158 else
7159 {
7160 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7161 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7162 emit_insn (BYTES_BIG_ENDIAN
7163 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7164 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7165 }
7166
7167 return;
7168 }
7169
7171 gcc_assert (REG_P (tmp_altivec));
7172
7173 /* For little endian, adjust the element ordering. For V2DI/V2DF we can
7174 use an XOR; otherwise we need to subtract. The shift amount is set up
7175 so that VSLO will shift the element into the upper position (adding 3
7176 converts a byte shift into a bit shift). */
7177 if (scalar_size == 8)
7178 {
7179 if (!BYTES_BIG_ENDIAN)
7180 {
7181 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7182 element2 = tmp_gpr;
7183 }
7184 else
7185 element2 = element;
7186
7187 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7188 bit. */
7189 emit_insn (gen_rtx_SET (tmp_gpr,
7190 gen_rtx_AND (DImode,
7191 gen_rtx_ASHIFT (DImode,
7192 element2,
7193 GEN_INT (6)),
7194 GEN_INT (64))));
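/* E.g., element2 == 1 yields 1 << 6 == 64, an 8-byte shift expressed
as a bit count, which is the encoding VSLO expects (it reads its
shift count from bits 121:124 and ignores the low 3 bits). Since
element2 is 0 or 1, the AND with 64 is a no-op that lets the whole
expression match a single rldic. */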
7195 }
7196 else
7197 {
7198 if (!BYTES_BIG_ENDIAN)
7199 {
7200 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7201
7202 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7203 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7204 element2 = tmp_gpr;
7205 }
7206 else
7207 element2 = element;
7208
7209 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7210 }
7211
7212 /* Get the value into the lower byte of the Altivec register where VSLO
7213 expects it. */
7214 if (TARGET_P9_VECTOR)
7215 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7216 else if (can_create_pseudo_p ())
7217 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7218 else
7219 {
7220 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7221 emit_move_insn (tmp_di, tmp_gpr);
7222 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7223 }
7224
7225 /* Do the VSLO to get the value into the final location. */
7226 switch (mode)
7227 {
7228 case E_V2DFmode:
7229 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7230 return;
7231
7232 case E_V2DImode:
7233 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7234 return;
7235
7236 case E_V4SFmode:
7237 {
7238 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7239 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7240 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7241 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7242 tmp_altivec));
7243
7244 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7245 return;
7246 }
7247
7248 case E_V4SImode:
7249 case E_V8HImode:
7250 case E_V16QImode:
7251 {
7252 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7253 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7254 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7255 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7256 tmp_altivec));
7257 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7258 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7259 GEN_INT (64 - (8 * scalar_size))));
7260 return;
7261 }
7262
7263 default:
7264 gcc_unreachable ();
7265 }
7266
7267 return;
7268 }
7269 else
7270 gcc_unreachable ();
7271 }
7272
7273 /* Return the alignment of TYPE. Existing alignment is ALIGN. HOW
7274 selects whether the returned alignment is ABI-mandated alignment,
7275 optional alignment, or both. */
7276
7277 unsigned int
7278 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7279 {
7280 if (how != align_opt)
7281 {
7282 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7283 align = 128;
7284 }
7285
7286 if (how != align_abi)
7287 {
7288 if (TREE_CODE (type) == ARRAY_TYPE
7289 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7290 {
7291 if (align < BITS_PER_WORD)
7292 align = BITS_PER_WORD;
7293 }
7294 }
7295
7296 return align;
7297 }
7298
7299 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7300 instructions simply ignore the low bits; VSX memory instructions
7301 are aligned to 4 or 8 bytes. */
7302
7303 static bool
7304 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7305 {
7306 return (STRICT_ALIGNMENT
7307 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7308 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7309 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7310 && (int) align < VECTOR_ALIGN (mode)))));
7311 }
7312
7313 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7314
7315 bool
7316 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7317 {
7318 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7319 {
7320 if (computed != 128)
7321 {
7322 static bool warned;
7323 if (!warned && warn_psabi)
7324 {
7325 warned = true;
7326 inform (input_location,
7327 "the layout of aggregates containing vectors with"
7328 " %d-byte alignment has changed in GCC 5",
7329 computed / BITS_PER_UNIT);
7330 }
7331 }
7332 /* In current GCC there is no special case. */
7333 return false;
7334 }
7335
7336 return false;
7337 }
7338
7339 /* AIX increases natural record alignment to doubleword if the first
7340 field is an FP double while the FP fields remain word aligned. */
7341
7342 unsigned int
7343 rs6000_special_round_type_align (tree type, unsigned int computed,
7344 unsigned int specified)
7345 {
7346 unsigned int align = MAX (computed, specified);
7347 tree field = TYPE_FIELDS (type);
7348
7349 /* Skip all non-field decls. */
7350 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7351 field = DECL_CHAIN (field);
7352
7353 if (field != NULL && field != type)
7354 {
7355 type = TREE_TYPE (field);
7356 while (TREE_CODE (type) == ARRAY_TYPE)
7357 type = TREE_TYPE (type);
7358
7359 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7360 align = MAX (align, 64);
7361 }
7362
7363 return align;
7364 }
7365
7366 /* Darwin increases record alignment to the natural alignment of
7367 the first field. */
7368
7369 unsigned int
7370 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7371 unsigned int specified)
7372 {
7373 unsigned int align = MAX (computed, specified);
7374
7375 if (TYPE_PACKED (type))
7376 return align;
7377
7378 /* Find the first field, looking down into aggregates. */
7379 do {
7380 tree field = TYPE_FIELDS (type);
7381 /* Skip all non-field decls. */
7382 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7383 field = DECL_CHAIN (field);
7384 if (! field)
7385 break;
7386 /* A packed field does not contribute any extra alignment. */
7387 if (DECL_PACKED (field))
7388 return align;
7389 type = TREE_TYPE (field);
7390 while (TREE_CODE (type) == ARRAY_TYPE)
7391 type = TREE_TYPE (type);
7392 } while (AGGREGATE_TYPE_P (type));
7393
7394 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7395 align = MAX (align, TYPE_ALIGN (type));
7396
7397 return align;
7398 }
7399
7400 /* Return 1 for an operand in small memory on V.4/eabi. */
7401
7402 int
7403 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7404 machine_mode mode ATTRIBUTE_UNUSED)
7405 {
7406 #if TARGET_ELF
7407 rtx sym_ref;
7408
7409 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7410 return 0;
7411
7412 if (DEFAULT_ABI != ABI_V4)
7413 return 0;
7414
7415 if (GET_CODE (op) == SYMBOL_REF)
7416 sym_ref = op;
7417
7418 else if (GET_CODE (op) != CONST
7419 || GET_CODE (XEXP (op, 0)) != PLUS
7420 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7421 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7422 return 0;
7423
7424 else
7425 {
7426 rtx sum = XEXP (op, 0);
7427 HOST_WIDE_INT summand;
7428
7429 /* We have to be careful here, because it is the referenced address
7430 that must be 32k from _SDA_BASE_, not just the symbol. */
7431 summand = INTVAL (XEXP (sum, 1));
7432 if (summand < 0 || summand > g_switch_value)
7433 return 0;
7434
7435 sym_ref = XEXP (sum, 0);
7436 }
7437
7438 return SYMBOL_REF_SMALL_P (sym_ref);
7439 #else
7440 return 0;
7441 #endif
7442 }
7443
7444 /* Return true if either operand is a general purpose register. */
7445
7446 bool
7447 gpr_or_gpr_p (rtx op0, rtx op1)
7448 {
7449 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7450 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7451 }
7452
7453 /* Return true if this is a move direct operation between GPR registers and
7454 floating point/VSX registers. */
7455
7456 bool
7457 direct_move_p (rtx op0, rtx op1)
7458 {
7459 int regno0, regno1;
7460
7461 if (!REG_P (op0) || !REG_P (op1))
7462 return false;
7463
7464 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7465 return false;
7466
7467 regno0 = REGNO (op0);
7468 regno1 = REGNO (op1);
7469 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7470 return false;
7471
7472 if (INT_REGNO_P (regno0))
7473 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7474
7475 else if (INT_REGNO_P (regno1))
7476 {
7477 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7478 return true;
7479
7480 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7481 return true;
7482 }
7483
7484 return false;
7485 }
7486
7487 /* Return true if the OFFSET is valid for the quad address instructions that
7488 use d-form (register + offset) addressing. */
7489
7490 static inline bool
7491 quad_address_offset_p (HOST_WIDE_INT offset)
7492 {
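/* The DQ field of lq/stq and lxv/stxv supplies a signed 16-bit
displacement whose low four bits must be zero. */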
7493 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7494 }
7495
7496 /* Return true if ADDR is an acceptable address for a quad memory
7497 operation of mode MODE (either LQ/STQ for general purpose registers,
7498 or LXV/STXV for vector registers under ISA 3.0). STRICT says
7499 whether the registers in the address must satisfy the strict
7500 (post-reload) validity checks. */
7501
7502 bool
7503 quad_address_p (rtx addr, machine_mode mode, bool strict)
7504 {
7505 rtx op0, op1;
7506
7507 if (GET_MODE_SIZE (mode) != 16)
7508 return false;
7509
7510 if (legitimate_indirect_address_p (addr, strict))
7511 return true;
7512
7513 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7514 return false;
7515
7516 if (GET_CODE (addr) != PLUS)
7517 return false;
7518
7519 op0 = XEXP (addr, 0);
7520 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7521 return false;
7522
7523 op1 = XEXP (addr, 1);
7524 if (!CONST_INT_P (op1))
7525 return false;
7526
7527 return quad_address_offset_p (INTVAL (op1));
7528 }
7529
7530 /* Return true if this is a load or store quad operation. This function does
7531 not handle the atomic quad memory instructions. */
7532
7533 bool
7534 quad_load_store_p (rtx op0, rtx op1)
7535 {
7536 bool ret;
7537
7538 if (!TARGET_QUAD_MEMORY)
7539 ret = false;
7540
7541 else if (REG_P (op0) && MEM_P (op1))
7542 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7543 && quad_memory_operand (op1, GET_MODE (op1))
7544 && !reg_overlap_mentioned_p (op0, op1));
7545
7546 else if (MEM_P (op0) && REG_P (op1))
7547 ret = (quad_memory_operand (op0, GET_MODE (op0))
7548 && quad_int_reg_operand (op1, GET_MODE (op1)));
7549
7550 else
7551 ret = false;
7552
7553 if (TARGET_DEBUG_ADDR)
7554 {
7555 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7556 ret ? "true" : "false");
7557 debug_rtx (gen_rtx_SET (op0, op1));
7558 }
7559
7560 return ret;
7561 }
7562
7563 /* Given an address, return a constant offset term if one exists. */
7564
7565 static rtx
7566 address_offset (rtx op)
7567 {
7568 if (GET_CODE (op) == PRE_INC
7569 || GET_CODE (op) == PRE_DEC)
7570 op = XEXP (op, 0);
7571 else if (GET_CODE (op) == PRE_MODIFY
7572 || GET_CODE (op) == LO_SUM)
7573 op = XEXP (op, 1);
7574
7575 if (GET_CODE (op) == CONST)
7576 op = XEXP (op, 0);
7577
7578 if (GET_CODE (op) == PLUS)
7579 op = XEXP (op, 1);
7580
7581 if (CONST_INT_P (op))
7582 return op;
7583
7584 return NULL_RTX;
7585 }
7586
7587 /* Return true if the MEM operand is a memory operand suitable for use
7588 with a (full width, possibly multiple) gpr load/store. On
7589 powerpc64 this means the offset must be divisible by 4.
7590 Implements 'Y' constraint.
7591
7592 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7593 a constraint function we know the operand has satisfied a suitable
7594 memory predicate. Also accept some odd rtl generated by reload
7595 (see rs6000_legitimize_reload_address for various forms). It is
7596 important that reload rtl be accepted by appropriate constraints
7597 but not by the operand predicate.
7598
7599 Offsetting a lo_sum should not be allowed, except where we know by
7600 alignment that a 32k boundary is not crossed, but see the ???
7601 comment in rs6000_legitimize_reload_address. Note that by
7602 "offsetting" here we mean a further offset to access parts of the
7603 MEM. It's fine to have a lo_sum where the inner address is offset
7604 from a sym, since the same sym+offset will appear in the high part
7605 of the address calculation. */
7606
7607 bool
7608 mem_operand_gpr (rtx op, machine_mode mode)
7609 {
7610 unsigned HOST_WIDE_INT offset;
7611 int extra;
7612 rtx addr = XEXP (op, 0);
7613
7614 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7615 if (TARGET_UPDATE
7616 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7617 && mode_supports_pre_incdec_p (mode)
7618 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7619 return true;
7620
7621 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7622 if (!rs6000_offsettable_memref_p (op, mode, false))
7623 return false;
7624
7625 op = address_offset (addr);
7626 if (op == NULL_RTX)
7627 return true;
7628
7629 offset = INTVAL (op);
7630 if (TARGET_POWERPC64 && (offset & 3) != 0)
7631 return false;
7632
7633 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7634 if (extra < 0)
7635 extra = 0;
7636
7637 if (GET_CODE (addr) == LO_SUM)
7638 /* For lo_sum addresses, we must allow any offset except one that
7639 causes a wrap, so test only the low 16 bits. */
7640 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7641
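/* Adding 0x8000 maps the valid signed displacements [-0x8000, 0x7fff]
onto [0, 0xffff]; subtracting EXTRA ensures that the displacement of
the last word of a multi-word access also fits. The XOR/subtract
idiom above sign-extends the low 16 bits, e.g. 0x1fffc becomes -4. */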
7642 return offset + 0x8000 < 0x10000u - extra;
7643 }
7644
7645 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7646 enforce an offset divisible by 4 even for 32-bit. */
7647
7648 bool
7649 mem_operand_ds_form (rtx op, machine_mode mode)
7650 {
7651 unsigned HOST_WIDE_INT offset;
7652 int extra;
7653 rtx addr = XEXP (op, 0);
7654
7655 if (!offsettable_address_p (false, mode, addr))
7656 return false;
7657
7658 op = address_offset (addr);
7659 if (op == NULL_RTX)
7660 return true;
7661
7662 offset = INTVAL (op);
7663 if ((offset & 3) != 0)
7664 return false;
7665
7666 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7667 if (extra < 0)
7668 extra = 0;
7669
7670 if (GET_CODE (addr) == LO_SUM)
7671 /* For lo_sum addresses, we must allow any offset except one that
7672 causes a wrap, so test only the low 16 bits. */
7673 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7674
7675 return offset + 0x8000 < 0x10000u - extra;
7676 }
7677 \f
7678 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7679
7680 static bool
7681 reg_offset_addressing_ok_p (machine_mode mode)
7682 {
7683 switch (mode)
7684 {
7685 case E_V16QImode:
7686 case E_V8HImode:
7687 case E_V4SFmode:
7688 case E_V4SImode:
7689 case E_V2DFmode:
7690 case E_V2DImode:
7691 case E_V1TImode:
7692 case E_TImode:
7693 case E_TFmode:
7694 case E_KFmode:
7695 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7696 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7697 a vector mode, if we want to use the VSX registers to move it around,
7698 we need to restrict ourselves to reg+reg addressing. Similarly for
7699 IEEE 128-bit floating point that is passed in a single vector
7700 register. */
7701 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7702 return mode_supports_dq_form (mode);
7703 break;
7704
7705 case E_SDmode:
7706 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7707 addressing for the LFIWZX and STFIWX instructions. */
7708 if (TARGET_NO_SDMODE_STACK)
7709 return false;
7710 break;
7711
7712 default:
7713 break;
7714 }
7715
7716 return true;
7717 }
7718
7719 static bool
7720 virtual_stack_registers_memory_p (rtx op)
7721 {
7722 int regnum;
7723
7724 if (GET_CODE (op) == REG)
7725 regnum = REGNO (op);
7726
7727 else if (GET_CODE (op) == PLUS
7728 && GET_CODE (XEXP (op, 0)) == REG
7729 && GET_CODE (XEXP (op, 1)) == CONST_INT)
7730 regnum = REGNO (XEXP (op, 0));
7731
7732 else
7733 return false;
7734
7735 return (regnum >= FIRST_VIRTUAL_REGISTER
7736 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7737 }
7738
7739 /* Return true if a MODE-sized memory access to OP plus OFFSET
7740 is known to not straddle a 32k boundary. This function is used
7741 to determine whether -mcmodel=medium code can use TOC pointer
7742 relative addressing for OP. This means the alignment of the TOC
7743 pointer must also be taken into account, and unfortunately that is
7744 only 8 bytes. */
7745
7746 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7747 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7748 #endif
7749
7750 static bool
7751 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7752 machine_mode mode)
7753 {
7754 tree decl;
7755 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7756
7757 if (GET_CODE (op) != SYMBOL_REF)
7758 return false;
7759
7760 /* ISA 3.0 vector d-form addressing is restricted; don't allow
7761 SYMBOL_REF. */
7762 if (mode_supports_dq_form (mode))
7763 return false;
7764
7765 dsize = GET_MODE_SIZE (mode);
7766 decl = SYMBOL_REF_DECL (op);
7767 if (!decl)
7768 {
7769 if (dsize == 0)
7770 return false;
7771
7772 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7773 replacing memory addresses with an anchor plus offset. We
7774 could find the decl by rummaging around in the block->objects
7775 VEC for the given offset but that seems like too much work. */
7776 dalign = BITS_PER_UNIT;
7777 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7778 && SYMBOL_REF_ANCHOR_P (op)
7779 && SYMBOL_REF_BLOCK (op) != NULL)
7780 {
7781 struct object_block *block = SYMBOL_REF_BLOCK (op);
7782
7783 dalign = block->alignment;
7784 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7785 }
7786 else if (CONSTANT_POOL_ADDRESS_P (op))
7787 {
7788 /* It would be nice to have get_pool_align().. */
7789 machine_mode cmode = get_pool_mode (op);
7790
7791 dalign = GET_MODE_ALIGNMENT (cmode);
7792 }
7793 }
7794 else if (DECL_P (decl))
7795 {
7796 dalign = DECL_ALIGN (decl);
7797
7798 if (dsize == 0)
7799 {
7800 /* Allow BLKmode when the entire object is known to not
7801 cross a 32k boundary. */
7802 if (!DECL_SIZE_UNIT (decl))
7803 return false;
7804
7805 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7806 return false;
7807
7808 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7809 if (dsize > 32768)
7810 return false;
7811
7812 dalign /= BITS_PER_UNIT;
7813 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7814 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7815 return dalign >= dsize;
7816 }
7817 }
7818 else
7819 gcc_unreachable ();
7820
7821 /* Find how many bits of the alignment we know for this access. */
7822 dalign /= BITS_PER_UNIT;
7823 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7824 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7825 mask = dalign - 1;
7826 lsb = offset & -offset;
7827 mask &= lsb - 1;
7828 dalign = mask + 1;
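/* E.g., for an 8-byte-aligned decl accessed at offset 4, lsb is 4, so
only 4-byte alignment can be guaranteed for the access. */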
7829
7830 return dalign >= dsize;
7831 }
7832
7833 static bool
7834 constant_pool_expr_p (rtx op)
7835 {
7836 rtx base, offset;
7837
7838 split_const (op, &base, &offset);
7839 return (GET_CODE (base) == SYMBOL_REF
7840 && CONSTANT_POOL_ADDRESS_P (base)
7841 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7842 }
7843
7844 /* These are only used to pass through from print_operand/print_operand_address
7845 to rs6000_output_addr_const_extra over the intervening function
7846 output_addr_const which is not target code. */
7847 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7848
7849 /* Return true if OP is a toc pointer relative address (the output
7850 of create_TOC_reference). If STRICT, do not match non-split
7851 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7852 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7853 TOCREL_OFFSET_RET respectively. */
7854
7855 bool
7856 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7857 const_rtx *tocrel_offset_ret)
7858 {
7859 if (!TARGET_TOC)
7860 return false;
7861
7862 if (TARGET_CMODEL != CMODEL_SMALL)
7863 {
7864 /* When strict ensure we have everything tidy. */
7865 if (strict
7866 && !(GET_CODE (op) == LO_SUM
7867 && REG_P (XEXP (op, 0))
7868 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7869 return false;
7870
7871 /* When not strict, allow non-split TOC addresses and also allow
7872 (lo_sum (high ..)) TOC addresses created during reload. */
7873 if (GET_CODE (op) == LO_SUM)
7874 op = XEXP (op, 1);
7875 }
7876
7877 const_rtx tocrel_base = op;
7878 const_rtx tocrel_offset = const0_rtx;
7879
7880 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7881 {
7882 tocrel_base = XEXP (op, 0);
7883 tocrel_offset = XEXP (op, 1);
7884 }
7885
7886 if (tocrel_base_ret)
7887 *tocrel_base_ret = tocrel_base;
7888 if (tocrel_offset_ret)
7889 *tocrel_offset_ret = tocrel_offset;
7890
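/* A tocrel UNSPEC has the symbol being referenced as element 0 and the
TOC register as element 1. */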
7891 return (GET_CODE (tocrel_base) == UNSPEC
7892 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7893 && REG_P (XVECEXP (tocrel_base, 0, 1))
7894 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7895 }
7896
7897 /* Return true if X is a constant pool address, and also for cmodel=medium
7898 if X is a toc-relative address known to be offsettable within MODE. */
7899
7900 bool
7901 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7902 bool strict)
7903 {
7904 const_rtx tocrel_base, tocrel_offset;
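/* A QImode access is a single byte, so it can never straddle the 32k
boundary that offsettable_ok_by_alignment guards against. */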
7905 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7906 && (TARGET_CMODEL != CMODEL_MEDIUM
7907 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7908 || mode == QImode
7909 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7910 INTVAL (tocrel_offset), mode)));
7911 }
7912
7913 static bool
7914 legitimate_small_data_p (machine_mode mode, rtx x)
7915 {
7916 return (DEFAULT_ABI == ABI_V4
7917 && !flag_pic && !TARGET_TOC
7918 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
7919 && small_data_operand (x, mode));
7920 }
7921
7922 bool
7923 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7924 bool strict, bool worst_case)
7925 {
7926 unsigned HOST_WIDE_INT offset;
7927 unsigned int extra;
7928
7929 if (GET_CODE (x) != PLUS)
7930 return false;
7931 if (!REG_P (XEXP (x, 0)))
7932 return false;
7933 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7934 return false;
7935 if (mode_supports_dq_form (mode))
7936 return quad_address_p (x, mode, strict);
7937 if (!reg_offset_addressing_ok_p (mode))
7938 return virtual_stack_registers_memory_p (x);
7939 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7940 return true;
7941 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
7942 return false;
7943
7944 offset = INTVAL (XEXP (x, 1));
7945 extra = 0;
7946 switch (mode)
7947 {
7948 case E_DFmode:
7949 case E_DDmode:
7950 case E_DImode:
7951 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7952 addressing. */
7953 if (VECTOR_MEM_VSX_P (mode))
7954 return false;
7955
7956 if (!worst_case)
7957 break;
7958 if (!TARGET_POWERPC64)
7959 extra = 4;
7960 else if (offset & 3)
7961 return false;
7962 break;
7963
7964 case E_TFmode:
7965 case E_IFmode:
7966 case E_KFmode:
7967 case E_TDmode:
7968 case E_TImode:
7969 case E_PTImode:
7970 extra = 8;
7971 if (!worst_case)
7972 break;
7973 if (!TARGET_POWERPC64)
7974 extra = 12;
7975 else if (offset & 3)
7976 return false;
7977 break;
7978
7979 default:
7980 break;
7981 }
7982
7983 offset += 0x8000;
7984 return offset < 0x10000 - extra;
7985 }
7986
7987 bool
7988 legitimate_indexed_address_p (rtx x, int strict)
7989 {
7990 rtx op0, op1;
7991
7992 if (GET_CODE (x) != PLUS)
7993 return false;
7994
7995 op0 = XEXP (x, 0);
7996 op1 = XEXP (x, 1);
7997
7998 return (REG_P (op0) && REG_P (op1)
7999 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8000 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8001 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8002 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8003 }
8004
8005 bool
8006 avoiding_indexed_address_p (machine_mode mode)
8007 {
8008 /* Avoid indexed addressing for modes that have non-indexed
8009 load/store instruction forms. */
8010 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8011 }
8012
8013 bool
8014 legitimate_indirect_address_p (rtx x, int strict)
8015 {
8016 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8017 }
8018
8019 bool
8020 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8021 {
8022 if (!TARGET_MACHO || !flag_pic
8023 || mode != SImode || GET_CODE (x) != MEM)
8024 return false;
8025 x = XEXP (x, 0);
8026
8027 if (GET_CODE (x) != LO_SUM)
8028 return false;
8029 if (GET_CODE (XEXP (x, 0)) != REG)
8030 return false;
8031 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8032 return false;
8033 x = XEXP (x, 1);
8034
8035 return CONSTANT_P (x);
8036 }
8037
8038 static bool
8039 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8040 {
8041 if (GET_CODE (x) != LO_SUM)
8042 return false;
8043 if (GET_CODE (XEXP (x, 0)) != REG)
8044 return false;
8045 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8046 return false;
8047 /* Quad-word addresses are restricted and can't use LO_SUM. */
8048 if (mode_supports_dq_form (mode))
8049 return false;
8050 x = XEXP (x, 1);
8051
8052 if (TARGET_ELF || TARGET_MACHO)
8053 {
8054 bool large_toc_ok;
8055
8056 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8057 return false;
8058 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS; that hook is normally
8059 called via push_reload from the old reload pass.
8060 LEGITIMIZE_RELOAD_ADDRESS recognizes some LO_SUM addresses as
8061 valid although this function says the opposite. In most cases
8062 LRA can generate correct code for address reloads through other
8063 transformations; only some LO_SUM cases cannot be handled that way.
8064 So we need code here, analogous to the LO_SUM handling in
8065 rs6000_legitimize_reload_address, saying those addresses are valid. */
8066 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8067 && small_toc_ref (x, VOIDmode));
8068 if (TARGET_TOC && ! large_toc_ok)
8069 return false;
8070 if (GET_MODE_NUNITS (mode) != 1)
8071 return false;
8072 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8073 && !(/* ??? Assume floating point reg based on mode? */
8074 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8075 return false;
8076
8077 return CONSTANT_P (x) || large_toc_ok;
8078 }
8079
8080 return false;
8081 }
8082
8084 /* Try machine-dependent ways of modifying an illegitimate address
8085 to be legitimate. If we find one, return the new, valid address.
8086 This is used from only one place: `memory_address' in explow.c.
8087
8088 OLDX is the address as it was before break_out_memory_refs was
8089 called. In some cases it is useful to look at this to decide what
8090 needs to be done.
8091
8092 It is always safe for this function to do nothing. It exists to
8093 recognize opportunities to optimize the output.
8094
8095 On RS/6000, first check for the sum of a register with a constant
8096 integer that is out of range. If so, generate code to add the
8097 constant with the low-order 16 bits masked to the register and force
8098 this result into another register (this can be done with `cau').
8099 Then generate an address of REG+(CONST&0xffff), allowing for the
8100 possibility of bit 16 being a one.
8101
8102 Then check for the sum of a register and something not constant, try to
8103 load the other things into a register and return the sum. */
8104
8105 static rtx
8106 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8107 machine_mode mode)
8108 {
8109 unsigned int extra;
8110
8111 if (!reg_offset_addressing_ok_p (mode)
8112 || mode_supports_dq_form (mode))
8113 {
8114 if (virtual_stack_registers_memory_p (x))
8115 return x;
8116
8117 /* In theory we should not be seeing addresses of the form reg+0,
8118 but just in case it is generated, optimize it away. */
8119 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8120 return force_reg (Pmode, XEXP (x, 0));
8121
8122 /* For TImode with load/store quad, restrict addresses to just a single
8123 pointer, so it works with both GPRs and VSX registers. */
8124 /* Make sure both operands are registers. */
8125 else if (GET_CODE (x) == PLUS
8126 && (mode != TImode || !TARGET_VSX))
8127 return gen_rtx_PLUS (Pmode,
8128 force_reg (Pmode, XEXP (x, 0)),
8129 force_reg (Pmode, XEXP (x, 1)));
8130 else
8131 return force_reg (Pmode, x);
8132 }
8133 if (GET_CODE (x) == SYMBOL_REF)
8134 {
8135 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8136 if (model != 0)
8137 return rs6000_legitimize_tls_address (x, model);
8138 }
8139
8140 extra = 0;
8141 switch (mode)
8142 {
8143 case E_TFmode:
8144 case E_TDmode:
8145 case E_TImode:
8146 case E_PTImode:
8147 case E_IFmode:
8148 case E_KFmode:
8149 /* As in legitimate_offset_address_p we do not assume
8150 worst-case. The mode here is just a hint as to the registers
8151 used. A TImode is usually in gprs, but may actually be in
8152 fprs. Leave worst-case scenario for reload to handle via
8153 insn constraints. PTImode is only GPRs. */
8154 extra = 8;
8155 break;
8156 default:
8157 break;
8158 }
8159
8160 if (GET_CODE (x) == PLUS
8161 && GET_CODE (XEXP (x, 0)) == REG
8162 && GET_CODE (XEXP (x, 1)) == CONST_INT
8163 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8164 >= 0x10000 - extra))
8165 {
8166 HOST_WIDE_INT high_int, low_int;
8167 rtx sum;
8168 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8169 if (low_int >= 0x8000 - extra)
8170 low_int = 0;
8171 high_int = INTVAL (XEXP (x, 1)) - low_int;
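/* E.g., an offset of 0x18000 splits into high_int 0x20000, which can
be added with a single addis, and low_int -0x8000, which fits the
signed 16-bit displacement of a d-form access. */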
8172 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8173 GEN_INT (high_int)), 0);
8174 return plus_constant (Pmode, sum, low_int);
8175 }
8176 else if (GET_CODE (x) == PLUS
8177 && GET_CODE (XEXP (x, 0)) == REG
8178 && GET_CODE (XEXP (x, 1)) != CONST_INT
8179 && GET_MODE_NUNITS (mode) == 1
8180 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8181 || (/* ??? Assume floating point reg based on mode? */
8182 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8183 && !avoiding_indexed_address_p (mode))
8184 {
8185 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8186 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8187 }
8188 else if ((TARGET_ELF
8189 #if TARGET_MACHO
8190 || !MACHO_DYNAMIC_NO_PIC_P
8191 #endif
8192 )
8193 && TARGET_32BIT
8194 && TARGET_NO_TOC
8195 && ! flag_pic
8196 && GET_CODE (x) != CONST_INT
8197 && GET_CODE (x) != CONST_WIDE_INT
8198 && GET_CODE (x) != CONST_DOUBLE
8199 && CONSTANT_P (x)
8200 && GET_MODE_NUNITS (mode) == 1
8201 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8202 || (/* ??? Assume floating point reg based on mode? */
8203 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8204 {
8205 rtx reg = gen_reg_rtx (Pmode);
8206 if (TARGET_ELF)
8207 emit_insn (gen_elf_high (reg, x));
8208 else
8209 emit_insn (gen_macho_high (reg, x));
8210 return gen_rtx_LO_SUM (Pmode, reg, x);
8211 }
8212 else if (TARGET_TOC
8213 && GET_CODE (x) == SYMBOL_REF
8214 && constant_pool_expr_p (x)
8215 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8216 return create_TOC_reference (x, NULL_RTX);
8217 else
8218 return x;
8219 }
8220
8221 /* Debug version of rs6000_legitimize_address. */
8222 static rtx
8223 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8224 {
8225 rtx ret;
8226 rtx_insn *insns;
8227
8228 start_sequence ();
8229 ret = rs6000_legitimize_address (x, oldx, mode);
8230 insns = get_insns ();
8231 end_sequence ();
8232
8233 if (ret != x)
8234 {
8235 fprintf (stderr,
8236 "\nrs6000_legitimize_address: mode %s, old code %s, "
8237 "new code %s, modified\n",
8238 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8239 GET_RTX_NAME (GET_CODE (ret)));
8240
8241 fprintf (stderr, "Original address:\n");
8242 debug_rtx (x);
8243
8244 fprintf (stderr, "oldx:\n");
8245 debug_rtx (oldx);
8246
8247 fprintf (stderr, "New address:\n");
8248 debug_rtx (ret);
8249
8250 if (insns)
8251 {
8252 fprintf (stderr, "Insns added:\n");
8253 debug_rtx_list (insns, 20);
8254 }
8255 }
8256 else
8257 {
8258 fprintf (stderr,
8259 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8260 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8261
8262 debug_rtx (x);
8263 }
8264
8265 if (insns)
8266 emit_insn (insns);
8267
8268 return ret;
8269 }
8270
8271 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8272 We need to emit DTP-relative relocations. */
8273
8274 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8275 static void
8276 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8277 {
8278 switch (size)
8279 {
8280 case 4:
8281 fputs ("\t.long\t", file);
8282 break;
8283 case 8:
8284 fputs (DOUBLE_INT_ASM_OP, file);
8285 break;
8286 default:
8287 gcc_unreachable ();
8288 }
8289 output_addr_const (file, x);
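/* The @dtprel operator yields the DTP-relative offset biased by
-0x8000 (the PowerPC TLS ABI biases offsets so that signed 16-bit
fields can span a 64K TLS block); adding 0x8000 back gives the
plain block offset that DWARF expects. */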
8290 if (TARGET_ELF)
8291 fputs ("@dtprel+0x8000", file);
8292 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8293 {
8294 switch (SYMBOL_REF_TLS_MODEL (x))
8295 {
8296 case 0:
8297 break;
8298 case TLS_MODEL_LOCAL_EXEC:
8299 fputs ("@le", file);
8300 break;
8301 case TLS_MODEL_INITIAL_EXEC:
8302 fputs ("@ie", file);
8303 break;
8304 case TLS_MODEL_GLOBAL_DYNAMIC:
8305 case TLS_MODEL_LOCAL_DYNAMIC:
8306 fputs ("@m", file);
8307 break;
8308 default:
8309 gcc_unreachable ();
8310 }
8311 }
8312 }
8313
8314 /* Return true if X is a symbol that refers to real (rather than emulated)
8315 TLS. */
8316
8317 static bool
8318 rs6000_real_tls_symbol_ref_p (rtx x)
8319 {
8320 return (GET_CODE (x) == SYMBOL_REF
8321 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8322 }
8323
8324 /* In the name of slightly smaller debug output, and to cater to
8325 general assembler lossage, recognize various UNSPEC sequences
8326 and turn them back into a direct symbol reference. */
8327
8328 static rtx
8329 rs6000_delegitimize_address (rtx orig_x)
8330 {
8331 rtx x, y, offset;
8332
8333 orig_x = delegitimize_mem_from_attrs (orig_x);
8334 x = orig_x;
8335 if (MEM_P (x))
8336 x = XEXP (x, 0);
8337
8338 y = x;
8339 if (TARGET_CMODEL != CMODEL_SMALL
8340 && GET_CODE (y) == LO_SUM)
8341 y = XEXP (y, 1);
8342
8343 offset = NULL_RTX;
8344 if (GET_CODE (y) == PLUS
8345 && GET_MODE (y) == Pmode
8346 && CONST_INT_P (XEXP (y, 1)))
8347 {
8348 offset = XEXP (y, 1);
8349 y = XEXP (y, 0);
8350 }
8351
8352 if (GET_CODE (y) == UNSPEC
8353 && XINT (y, 1) == UNSPEC_TOCREL)
8354 {
8355 y = XVECEXP (y, 0, 0);
8356
8357 #ifdef HAVE_AS_TLS
8358 /* Do not associate thread-local symbols with the original
8359 constant pool symbol. */
8360 if (TARGET_XCOFF
8361 && GET_CODE (y) == SYMBOL_REF
8362 && CONSTANT_POOL_ADDRESS_P (y)
8363 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8364 return orig_x;
8365 #endif
8366
8367 if (offset != NULL_RTX)
8368 y = gen_rtx_PLUS (Pmode, y, offset);
8369 if (!MEM_P (orig_x))
8370 return y;
8371 else
8372 return replace_equiv_address_nv (orig_x, y);
8373 }
8374
8375 if (TARGET_MACHO
8376 && GET_CODE (orig_x) == LO_SUM
8377 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8378 {
8379 y = XEXP (XEXP (orig_x, 1), 0);
8380 if (GET_CODE (y) == UNSPEC
8381 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8382 return XVECEXP (y, 0, 0);
8383 }
8384
8385 return orig_x;
8386 }
8387
8388 /* Return true if X shouldn't be emitted into the debug info.
8389 The linker doesn't like .toc section references from
8390 .debug_* sections, so reject .toc section symbols. */
8391
8392 static bool
8393 rs6000_const_not_ok_for_debug_p (rtx x)
8394 {
8395 if (GET_CODE (x) == UNSPEC)
8396 return true;
8397 if (GET_CODE (x) == SYMBOL_REF
8398 && CONSTANT_POOL_ADDRESS_P (x))
8399 {
8400 rtx c = get_pool_constant (x);
8401 machine_mode cmode = get_pool_mode (x);
8402 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8403 return true;
8404 }
8405
8406 return false;
8407 }
8408
8409 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8410
8411 static bool
8412 rs6000_legitimate_combined_insn (rtx_insn *insn)
8413 {
8414 int icode = INSN_CODE (insn);
8415
8416 /* Reject creating doloop insns. Combine should not be allowed
8417 to create these for a number of reasons:
8418 1) In a nested loop, if combine creates one of these in an
8419 outer loop and the register allocator happens to allocate ctr
8420 to the outer loop insn, then the inner loop can't use ctr.
8421 Inner loops ought to be more highly optimized.
8422 2) Combine often wants to create one of these from what was
8423 originally a three insn sequence, first combining the three
8424 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8425 allocated ctr, the splitter takes us back to the three insn
8426 sequence. It's better to stop combine at the two insn
8427 sequence.
8428 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8429 insns, the register allocator sometimes uses floating point
8430 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8431 jump insn and output reloads are not implemented for jumps,
8432 the ctrsi/ctrdi splitters need to handle all possible cases.
8433 That's a pain, and it gets to be seriously difficult when a
8434 splitter that runs after reload needs memory to transfer from
8435 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8436 for the difficult case. It's better to not create problems
8437 in the first place. */
8438 if (icode != CODE_FOR_nothing
8439 && (icode == CODE_FOR_bdz_si
8440 || icode == CODE_FOR_bdz_di
8441 || icode == CODE_FOR_bdnz_si
8442 || icode == CODE_FOR_bdnz_di
8443 || icode == CODE_FOR_bdztf_si
8444 || icode == CODE_FOR_bdztf_di
8445 || icode == CODE_FOR_bdnztf_si
8446 || icode == CODE_FOR_bdnztf_di))
8447 return false;
8448
8449 return true;
8450 }
8451
8452 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8453
8454 static GTY(()) rtx rs6000_tls_symbol;
8455 static rtx
8456 rs6000_tls_get_addr (void)
8457 {
8458 if (!rs6000_tls_symbol)
8459 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8460
8461 return rs6000_tls_symbol;
8462 }
8463
8464 /* Construct the SYMBOL_REF for TLS GOT references. */
8465
8466 static GTY(()) rtx rs6000_got_symbol;
8467 static rtx
8468 rs6000_got_sym (void)
8469 {
8470 if (!rs6000_got_symbol)
8471 {
8472 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8473 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8474 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8475 }
8476
8477 return rs6000_got_symbol;
8478 }
8479
8480 /* AIX Thread-Local Address support. */
8481
8482 static rtx
8483 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8484 {
8485 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8486 const char *name;
8487 char *tlsname;
8488
8489 name = XSTR (addr, 0);
8490 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8491 or the symbol will be in TLS private data section. */
8492 if (name[strlen (name) - 1] != ']'
8493 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8494 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8495 {
8496 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8497 strcpy (tlsname, name);
8498 strcat (tlsname,
8499 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8500 tlsaddr = copy_rtx (addr);
8501 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8502 }
8503 else
8504 tlsaddr = addr;
8505
8506 /* Place addr into TOC constant pool. */
8507 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8508
8509 /* Output the TOC entry and create the MEM referencing the value. */
8510 if (constant_pool_expr_p (XEXP (sym, 0))
8511 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8512 {
8513 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8514 mem = gen_const_mem (Pmode, tocref);
8515 set_mem_alias_set (mem, get_TOC_alias_set ());
8516 }
8517 else
8518 return sym;
8519
8520 /* Use global-dynamic for local-dynamic. */
8521 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8522 || model == TLS_MODEL_LOCAL_DYNAMIC)
8523 {
8524 /* Create new TOC reference for @m symbol. */
8525 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8526 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8527 strcpy (tlsname, "*LCM");
8528 strcat (tlsname, name + 3);
8529 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8530 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8531 tocref = create_TOC_reference (modaddr, NULL_RTX);
8532 rtx modmem = gen_const_mem (Pmode, tocref);
8533 set_mem_alias_set (modmem, get_TOC_alias_set ());
8534
8535 rtx modreg = gen_reg_rtx (Pmode);
8536 emit_insn (gen_rtx_SET (modreg, modmem));
8537
8538 tmpreg = gen_reg_rtx (Pmode);
8539 emit_insn (gen_rtx_SET (tmpreg, mem));
8540
8541 dest = gen_reg_rtx (Pmode);
8542 if (TARGET_32BIT)
8543 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8544 else
8545 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8546 return dest;
8547 }
8548 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8549 else if (TARGET_32BIT)
8550 {
8551 tlsreg = gen_reg_rtx (SImode);
8552 emit_insn (gen_tls_get_tpointer (tlsreg));
8553 }
8554 else
8555 tlsreg = gen_rtx_REG (DImode, 13);
8556
8557 /* Load the TOC value into temporary register. */
8558 tmpreg = gen_reg_rtx (Pmode);
8559 emit_insn (gen_rtx_SET (tmpreg, mem));
8560 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8561 gen_rtx_MINUS (Pmode, addr, tlsreg));
8562
8563 /* Add TOC symbol value to TLS pointer. */
8564 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8565
8566 return dest;
8567 }
8568
8569 /* Mess with a call, to make it look like the tls_gdld insns when
8570 !TARGET_TLS_MARKERS. These insns have an extra unspec to
8571 differentiate them from standard calls, because they need to emit
8572 the arg setup insns as well as the actual call. That keeps the
8573 arg setup insns immediately adjacent to the branch and link. */
8574
8575 static void
8576 edit_tls_call_insn (rtx arg)
8577 {
8578 rtx call_insn = last_call_insn ();
8579 if (!TARGET_TLS_MARKERS)
8580 {
8581 rtx patt = PATTERN (call_insn);
8582 gcc_assert (GET_CODE (patt) == PARALLEL);
8583 rtvec orig = XVEC (patt, 0);
8584 rtvec v = rtvec_alloc (GET_NUM_ELEM (orig) + 1);
8585 gcc_assert (GET_NUM_ELEM (orig) > 0);
8586 /* The (set (..) (call (mem ..))). */
8587 RTVEC_ELT (v, 0) = RTVEC_ELT (orig, 0);
8588 /* The extra unspec. */
8589 RTVEC_ELT (v, 1) = arg;
8590 /* All other assorted call pattern pieces. */
8591 for (int i = 1; i < GET_NUM_ELEM (orig); i++)
8592 RTVEC_ELT (v, i + 1) = RTVEC_ELT (orig, i);
8593 XVEC (patt, 0) = v;
8594 }
8595 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8596 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8597 pic_offset_table_rtx);
8598 }
8599
8600 /* Passes the tls arg value for the global dynamic and local dynamic
8601 emit_library_call_value calls in rs6000_legitimize_tls_address down
8602 to rs6000_call_aix and rs6000_call_sysv. This is used to emit the
8603 marker relocs put on __tls_get_addr calls. */
8604 static rtx global_tlsarg;
8605
8606 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8607 this (thread-local) address. */
8608
8609 static rtx
8610 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8611 {
8612 rtx dest, insn;
8613
8614 if (TARGET_XCOFF)
8615 return rs6000_legitimize_tls_address_aix (addr, model);
8616
8617 dest = gen_reg_rtx (Pmode);
8618 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8619 {
8620 rtx tlsreg;
8621
8622 if (TARGET_64BIT)
8623 {
8624 tlsreg = gen_rtx_REG (Pmode, 13);
8625 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8626 }
8627 else
8628 {
8629 tlsreg = gen_rtx_REG (Pmode, 2);
8630 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8631 }
8632 emit_insn (insn);
8633 }
8634 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8635 {
8636 rtx tlsreg, tmp;
8637
8638 tmp = gen_reg_rtx (Pmode);
8639 if (TARGET_64BIT)
8640 {
8641 tlsreg = gen_rtx_REG (Pmode, 13);
8642 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8643 }
8644 else
8645 {
8646 tlsreg = gen_rtx_REG (Pmode, 2);
8647 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8648 }
8649 emit_insn (insn);
8650 if (TARGET_64BIT)
8651 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8652 else
8653 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8654 emit_insn (insn);
8655 }
8656 else
8657 {
8658 rtx got, tga, tmp1, tmp2;
8659
8660 /* We currently use relocations like @got@tlsgd for tls, which
8661 means the linker will handle allocation of tls entries, placing
8662 them in the .got section. So use a pointer to the .got section,
8663 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8664 or to secondary GOT sections used by 32-bit -fPIC. */
8665 if (TARGET_64BIT)
8666 got = gen_rtx_REG (Pmode, 2);
8667 else
8668 {
8669 if (flag_pic == 1)
8670 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8671 else
8672 {
8673 rtx gsym = rs6000_got_sym ();
8674 got = gen_reg_rtx (Pmode);
8675 if (flag_pic == 0)
8676 rs6000_emit_move (got, gsym, Pmode);
8677 else
8678 {
8679 rtx mem, lab;
8680
8681 tmp1 = gen_reg_rtx (Pmode);
8682 tmp2 = gen_reg_rtx (Pmode);
8683 mem = gen_const_mem (Pmode, tmp1);
8684 lab = gen_label_rtx ();
8685 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8686 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8687 if (TARGET_LINK_STACK)
8688 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8689 emit_move_insn (tmp2, mem);
8690 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8691 set_unique_reg_note (last, REG_EQUAL, gsym);
8692 }
8693 }
8694 }
8695
8696 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8697 {
8698 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8699 UNSPEC_TLSGD);
8700 global_tlsarg = arg;
8701 rtx argreg = const0_rtx;
8702 if (TARGET_TLS_MARKERS)
8703 {
8704 argreg = gen_rtx_REG (Pmode, 3);
8705 emit_insn (gen_rtx_SET (argreg, arg));
8706 }
8707
8708 tga = rs6000_tls_get_addr ();
8709 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8710 argreg, Pmode);
8711 global_tlsarg = NULL_RTX;
8712
8713 edit_tls_call_insn (arg);
8714 }
8715 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8716 {
8717 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got),
8718 UNSPEC_TLSLD);
8719 global_tlsarg = arg;
8720 rtx argreg = const0_rtx;
8721 if (TARGET_TLS_MARKERS)
8722 {
8723 argreg = gen_rtx_REG (Pmode, 3);
8724 emit_insn (gen_rtx_SET (argreg, arg));
8725 }
8726
8727 tga = rs6000_tls_get_addr ();
8728 tmp1 = gen_reg_rtx (Pmode);
8729 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8730 argreg, Pmode);
8731 global_tlsarg = NULL_RTX;
8732
8733 edit_tls_call_insn (arg);
8734
8735 if (rs6000_tls_size == 16)
8736 {
8737 if (TARGET_64BIT)
8738 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8739 else
8740 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8741 }
8742 else if (rs6000_tls_size == 32)
8743 {
8744 tmp2 = gen_reg_rtx (Pmode);
8745 if (TARGET_64BIT)
8746 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8747 else
8748 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8749 emit_insn (insn);
8750 if (TARGET_64BIT)
8751 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8752 else
8753 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8754 }
8755 else
8756 {
8757 tmp2 = gen_reg_rtx (Pmode);
8758 if (TARGET_64BIT)
8759 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8760 else
8761 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8762 emit_insn (insn);
8763 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8764 }
8765 emit_insn (insn);
8766 }
8767 else
8768 {
8769 /* IE, or 64-bit offset LE. */
8770 tmp2 = gen_reg_rtx (Pmode);
8771 if (TARGET_64BIT)
8772 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8773 else
8774 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8775 emit_insn (insn);
8776 if (TARGET_64BIT)
8777 insn = gen_tls_tls_64 (dest, tmp2, addr);
8778 else
8779 insn = gen_tls_tls_32 (dest, tmp2, addr);
8780 emit_insn (insn);
8781 }
8782 }
8783
8784 return dest;
8785 }
8786
8787 /* Only create the global variable for the stack protect guard if we are using
8788 the global flavor of that guard. */
8789 static tree
8790 rs6000_init_stack_protect_guard (void)
8791 {
8792 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8793 return default_stack_protect_guard ();
8794
8795 return NULL_TREE;
8796 }
8797
8798 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8799
8800 static bool
8801 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8802 {
8803 if (GET_CODE (x) == HIGH
8804 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8805 return true;
8806
8807 /* A TLS symbol in the TOC cannot contain a sum. */
8808 if (GET_CODE (x) == CONST
8809 && GET_CODE (XEXP (x, 0)) == PLUS
8810 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8811 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8812 return true;
8813
8814 /* Do not place an ELF TLS symbol in the constant pool. */
8815 return TARGET_ELF && tls_referenced_p (x);
8816 }
8817
8818 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8819 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8820 can be addressed relative to the toc pointer. */
8821
8822 static bool
8823 use_toc_relative_ref (rtx sym, machine_mode mode)
8824 {
8825 return ((constant_pool_expr_p (sym)
8826 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8827 get_pool_mode (sym)))
8828 || (TARGET_CMODEL == CMODEL_MEDIUM
8829 && SYMBOL_REF_LOCAL_P (sym)
8830 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8831 }
8832
8833 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8834 replace the input X, or the original X if no replacement is called for.
8835 The output parameter *WIN is 1 if the calling macro should goto WIN,
8836 0 if it should not.
8837
8838 For RS/6000, we wish to handle large displacements off a base
8839 register by splitting the addend across an addis and the mem insn.
8840 This cuts the number of extra insns needed from 3 to 1.
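For example (an illustrative sketch; register numbers are arbitrary),
a word load at offset 0x12340 from r3 splits into a 16-bit high part
carried by an addis and a 16-bit low part left in the mem insn:
    addis r9,r3,0x1        # r9 = r3 + 0x10000
    lwz r10,0x2340(r9)     # load from r9 + 0x2340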
8841
8842 On Darwin, we use this to generate code for floating point constants.
8843 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8844 The Darwin code is inside #if TARGET_MACHO because only then are the
8845 machopic_* functions defined. */
8846 static rtx
8847 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8848 int opnum, int type,
8849 int ind_levels ATTRIBUTE_UNUSED, int *win)
8850 {
8851 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8852 bool quad_offset_p = mode_supports_dq_form (mode);
8853
8854 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8855 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8856 if (reg_offset_p
8857 && opnum == 1
8858 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8859 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8860 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8861 && TARGET_P9_VECTOR)
8862 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8863 && TARGET_P9_VECTOR)))
8864 reg_offset_p = false;
8865
8866 /* We must recognize output that we have already generated ourselves. */
8867 if (GET_CODE (x) == PLUS
8868 && GET_CODE (XEXP (x, 0)) == PLUS
8869 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8870 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8871 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8872 {
8873 if (TARGET_DEBUG_ADDR)
8874 {
8875 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8876 debug_rtx (x);
8877 }
8878 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8879 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8880 opnum, (enum reload_type) type);
8881 *win = 1;
8882 return x;
8883 }
8884
8885 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8886 if (GET_CODE (x) == LO_SUM
8887 && GET_CODE (XEXP (x, 0)) == HIGH)
8888 {
8889 if (TARGET_DEBUG_ADDR)
8890 {
8891 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8892 debug_rtx (x);
8893 }
8894 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8895 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8896 opnum, (enum reload_type) type);
8897 *win = 1;
8898 return x;
8899 }
8900
8901 #if TARGET_MACHO
8902 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8903 && GET_CODE (x) == LO_SUM
8904 && GET_CODE (XEXP (x, 0)) == PLUS
8905 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8906 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8907 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8908 && machopic_operand_p (XEXP (x, 1)))
8909 {
8910 /* Result of previous invocation of this function on Darwin
8911 floating point constant. */
8912 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8913 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8914 opnum, (enum reload_type) type);
8915 *win = 1;
8916 return x;
8917 }
8918 #endif
8919
8920 if (TARGET_CMODEL != CMODEL_SMALL
8921 && reg_offset_p
8922 && !quad_offset_p
8923 && small_toc_ref (x, VOIDmode))
8924 {
8925 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8926 x = gen_rtx_LO_SUM (Pmode, hi, x);
8927 if (TARGET_DEBUG_ADDR)
8928 {
8929 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8930 debug_rtx (x);
8931 }
8932 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8933 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8934 opnum, (enum reload_type) type);
8935 *win = 1;
8936 return x;
8937 }
8938
8939 if (GET_CODE (x) == PLUS
8940 && REG_P (XEXP (x, 0))
8941 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
8942 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
8943 && CONST_INT_P (XEXP (x, 1))
8944 && reg_offset_p
8945 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
8946 {
8947 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
8948 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
8949 HOST_WIDE_INT high
8950 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
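/* The XOR/subtract pairs sign-extend the 16-bit low part and the
   32-bit high part. Worked example: val = 0x18000 gives
   low = ((0x8000 ^ 0x8000) - 0x8000) = -0x8000 and high = 0x20000,
   and indeed 0x20000 + (-0x8000) = 0x18000.  */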
8951
8952 /* Check for 32-bit overflow or quad addresses with one of the
8953 four least significant bits set. */
8954 if (high + low != val
8955 || (quad_offset_p && (low & 0xf)))
8956 {
8957 *win = 0;
8958 return x;
8959 }
8960
8961 /* Reload the high part into a base reg; leave the low part
8962 in the mem directly. */
8963
8964 x = gen_rtx_PLUS (GET_MODE (x),
8965 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8966 GEN_INT (high)),
8967 GEN_INT (low));
8968
8969 if (TARGET_DEBUG_ADDR)
8970 {
8971 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
8972 debug_rtx (x);
8973 }
8974 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8975 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8976 opnum, (enum reload_type) type);
8977 *win = 1;
8978 return x;
8979 }
8980
8981 if (GET_CODE (x) == SYMBOL_REF
8982 && reg_offset_p
8983 && !quad_offset_p
8984 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
8985 #if TARGET_MACHO
8986 && DEFAULT_ABI == ABI_DARWIN
8987 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
8988 && machopic_symbol_defined_p (x)
8989 #else
8990 && DEFAULT_ABI == ABI_V4
8991 && !flag_pic
8992 #endif
8993 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
8994 The same goes for DImode without 64-bit gprs and DFmode and DDmode
8995 without fprs.
8996 ??? Assume floating point reg based on mode? This assumption is
8997 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
8998 where reload ends up doing a DFmode load of a constant from
8999 mem using two gprs. Unfortunately, at this point reload
9000 hasn't yet selected regs so poking around in reload data
9001 won't help and even if we could figure out the regs reliably,
9002 we'd still want to allow this transformation when the mem is
9003 naturally aligned. Since we say the address is good here, we
9004 can't disable offsets from LO_SUMs in mem_operand_gpr.
9005 FIXME: Allow offset from lo_sum for other modes too, when
9006 mem is sufficiently aligned.
9007
9008 Also disallow this if the type can go in VMX/Altivec registers, since
9009 those registers do not have d-form (reg+offset) address modes. */
9010 && !reg_addr[mode].scalar_in_vmx_p
9011 && mode != TFmode
9012 && mode != TDmode
9013 && mode != IFmode
9014 && mode != KFmode
9015 && (mode != TImode || !TARGET_VSX)
9016 && mode != PTImode
9017 && (mode != DImode || TARGET_POWERPC64)
9018 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9019 || TARGET_HARD_FLOAT))
9020 {
9021 #if TARGET_MACHO
9022 if (flag_pic)
9023 {
9024 rtx offset = machopic_gen_offset (x);
9025 x = gen_rtx_LO_SUM (GET_MODE (x),
9026 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9027 gen_rtx_HIGH (Pmode, offset)), offset);
9028 }
9029 else
9030 #endif
9031 x = gen_rtx_LO_SUM (GET_MODE (x),
9032 gen_rtx_HIGH (Pmode, x), x);
9033
9034 if (TARGET_DEBUG_ADDR)
9035 {
9036 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9037 debug_rtx (x);
9038 }
9039 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9040 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9041 opnum, (enum reload_type) type);
9042 *win = 1;
9043 return x;
9044 }
9045
9046 /* Reload an offset address wrapped by an AND that represents the
9047 masking of the lower bits. Strip the outer AND and let reload
9048 convert the offset address into an indirect address. For VSX,
9049 force reload to create the address with an AND in a separate
9050 register, because we can't guarantee an altivec register will
9051 be used. */
9052 if (VECTOR_MEM_ALTIVEC_P (mode)
9053 && GET_CODE (x) == AND
9054 && GET_CODE (XEXP (x, 0)) == PLUS
9055 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9056 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9057 && GET_CODE (XEXP (x, 1)) == CONST_INT
9058 && INTVAL (XEXP (x, 1)) == -16)
9059 {
9060 x = XEXP (x, 0);
9061 *win = 1;
9062 return x;
9063 }
9064
9065 if (TARGET_TOC
9066 && reg_offset_p
9067 && !quad_offset_p
9068 && GET_CODE (x) == SYMBOL_REF
9069 && use_toc_relative_ref (x, mode))
9070 {
9071 x = create_TOC_reference (x, NULL_RTX);
9072 if (TARGET_CMODEL != CMODEL_SMALL)
9073 {
9074 if (TARGET_DEBUG_ADDR)
9075 {
9076 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9077 debug_rtx (x);
9078 }
9079 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9080 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9081 opnum, (enum reload_type) type);
9082 }
9083 *win = 1;
9084 return x;
9085 }
9086 *win = 0;
9087 return x;
9088 }
9089
9090 /* Debug version of rs6000_legitimize_reload_address. */
9091 static rtx
9092 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9093 int opnum, int type,
9094 int ind_levels, int *win)
9095 {
9096 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9097 ind_levels, win);
9098 fprintf (stderr,
9099 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9100 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9101 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9102 debug_rtx (x);
9103
9104 if (x == ret)
9105 fprintf (stderr, "Same address returned\n");
9106 else if (!ret)
9107 fprintf (stderr, "NULL returned\n");
9108 else
9109 {
9110 fprintf (stderr, "New address:\n");
9111 debug_rtx (ret);
9112 }
9113
9114 return ret;
9115 }
9116
9117 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9118 that is a valid memory address for an instruction.
9119 The MODE argument is the machine mode for the MEM expression
9120 that wants to use this address.
9121
9122 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9123 refers to a constant pool entry of an address (or the sum of it
9124 plus a constant), a short (16-bit signed) constant plus a register,
9125 the sum of two registers, or a register indirect, possibly with an
9126 auto-increment. For DFmode, DDmode and DImode with a constant plus
9127 register, we must ensure that both words are addressable, or on
9128 PowerPC64 that the offset is word aligned.
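
For example (an illustrative sketch; register numbers are arbitrary):
    (mem:SI (reg:SI 3))                          register indirect
    (mem:SI (plus:SI (reg:SI 3) (const_int 8)))  reg + 16-bit constant
    (mem:SI (plus:SI (reg:SI 3) (reg:SI 4)))     reg + reg (indexed)
    (mem:SI (pre_inc:SI (reg:SI 3)))             auto-increment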
9129
9130 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9131 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9132 because adjacent memory cells are accessed by adding word-sized offsets
9133 during assembly output. */
9134 static bool
9135 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9136 {
9137 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9138 bool quad_offset_p = mode_supports_dq_form (mode);
9139
9140 /* If this is an unaligned stvx/lvx type address, discard the outer AND. */
9141 if (VECTOR_MEM_ALTIVEC_P (mode)
9142 && GET_CODE (x) == AND
9143 && GET_CODE (XEXP (x, 1)) == CONST_INT
9144 && INTVAL (XEXP (x, 1)) == -16)
9145 x = XEXP (x, 0);
9146
9147 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9148 return 0;
9149 if (legitimate_indirect_address_p (x, reg_ok_strict))
9150 return 1;
9151 if (TARGET_UPDATE
9152 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9153 && mode_supports_pre_incdec_p (mode)
9154 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9155 return 1;
9156 /* Handle restricted vector d-form offsets in ISA 3.0. */
9157 if (quad_offset_p)
9158 {
9159 if (quad_address_p (x, mode, reg_ok_strict))
9160 return 1;
9161 }
9162 else if (virtual_stack_registers_memory_p (x))
9163 return 1;
9164
9165 else if (reg_offset_p)
9166 {
9167 if (legitimate_small_data_p (mode, x))
9168 return 1;
9169 if (legitimate_constant_pool_address_p (x, mode,
9170 reg_ok_strict || lra_in_progress))
9171 return 1;
9172 }
9173
9174 /* For TImode, if we have TImode in VSX registers, only allow register
9175 indirect addresses. This will allow the values to go in either GPRs
9176 or VSX registers without reloading. The vector types would tend to
9177 go into VSX registers, so we allow REG+REG, while TImode seems
9178 somewhat split, in that some uses are GPR based, and some VSX based. */
9179 /* FIXME: We could loosen this by changing the following to
9180 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9181 but currently we cannot allow REG+REG addressing for TImode. See
9182 PR72827 for complete details on how this ends up hoodwinking DSE. */
9183 if (mode == TImode && TARGET_VSX)
9184 return 0;
9185 /* If not REG_OK_STRICT (before reload), accept any stack offset. */
9186 if (! reg_ok_strict
9187 && reg_offset_p
9188 && GET_CODE (x) == PLUS
9189 && GET_CODE (XEXP (x, 0)) == REG
9190 && (XEXP (x, 0) == virtual_stack_vars_rtx
9191 || XEXP (x, 0) == arg_pointer_rtx)
9192 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9193 return 1;
9194 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9195 return 1;
9196 if (!FLOAT128_2REG_P (mode)
9197 && (TARGET_HARD_FLOAT
9198 || TARGET_POWERPC64
9199 || (mode != DFmode && mode != DDmode))
9200 && (TARGET_POWERPC64 || mode != DImode)
9201 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9202 && mode != PTImode
9203 && !avoiding_indexed_address_p (mode)
9204 && legitimate_indexed_address_p (x, reg_ok_strict))
9205 return 1;
9206 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9207 && mode_supports_pre_modify_p (mode)
9208 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9209 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9210 reg_ok_strict, false)
9211 || (!avoiding_indexed_address_p (mode)
9212 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9213 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9214 return 1;
9215 if (reg_offset_p && !quad_offset_p
9216 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9217 return 1;
9218 return 0;
9219 }
9220
9221 /* Debug version of rs6000_legitimate_address_p. */
9222 static bool
9223 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9224 bool reg_ok_strict)
9225 {
9226 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9227 fprintf (stderr,
9228 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9229 "strict = %d, reload = %s, code = %s\n",
9230 ret ? "true" : "false",
9231 GET_MODE_NAME (mode),
9232 reg_ok_strict,
9233 (reload_completed ? "after" : "before"),
9234 GET_RTX_NAME (GET_CODE (x)));
9235 debug_rtx (x);
9236
9237 return ret;
9238 }
9239
9240 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9241
9242 static bool
9243 rs6000_mode_dependent_address_p (const_rtx addr,
9244 addr_space_t as ATTRIBUTE_UNUSED)
9245 {
9246 return rs6000_mode_dependent_address_ptr (addr);
9247 }
9248
9249 /* Go to LABEL if ADDR (a legitimate address expression)
9250 has an effect that depends on the machine mode it is used for.
9251
9252 On the RS/6000 this is true of all integral offsets (since AltiVec
9253 and VSX modes don't allow them) and of pre-increment and decrement
addresses.
9254
9255 ??? Except that due to conceptual problems in offsettable_address_p
9256 we can't really report the problems of integral offsets. So leave
9257 this assuming that the adjustable offset must be valid for the
9258 sub-words of a TFmode operand, which is what we had before. */
9259
9260 static bool
9261 rs6000_mode_dependent_address (const_rtx addr)
9262 {
9263 switch (GET_CODE (addr))
9264 {
9265 case PLUS:
9266 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9267 is considered a legitimate address before reload, so there
9268 are no offset restrictions in that case. Note that this
9269 condition is safe in strict mode because any address involving
9270 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9271 been rejected as illegitimate. */
9272 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9273 && XEXP (addr, 0) != arg_pointer_rtx
9274 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9275 {
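/* The offset must remain a valid 16-bit signed displacement even
   after adding the largest sub-word offset used when a multi-word
   access is split (8 bytes with 64-bit regs, else 12; see the
   TFmode note above). Worked example: on PowerPC64, val = 32760
   (0x7ff8) gives val + 0x8000 = 0xfff8 and 0x10000 - 8 = 0xfff8,
   so such an address is reported as mode dependent.  */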
9276 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9277 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9278 }
9279 break;
9280
9281 case LO_SUM:
9282 /* Anything in the constant pool is sufficiently aligned that
9283 all bytes have the same high part address. */
9284 return !legitimate_constant_pool_address_p (addr, QImode, false);
9285
9286 /* Auto-increment cases are now treated generically in recog.c. */
9287 case PRE_MODIFY:
9288 return TARGET_UPDATE;
9289
9290 /* AND is only allowed in Altivec loads. */
9291 case AND:
9292 return true;
9293
9294 default:
9295 break;
9296 }
9297
9298 return false;
9299 }
9300
9301 /* Debug version of rs6000_mode_dependent_address. */
9302 static bool
9303 rs6000_debug_mode_dependent_address (const_rtx addr)
9304 {
9305 bool ret = rs6000_mode_dependent_address (addr);
9306
9307 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9308 ret ? "true" : "false");
9309 debug_rtx (addr);
9310
9311 return ret;
9312 }
9313
9314 /* Implement FIND_BASE_TERM. */
9315
9316 rtx
9317 rs6000_find_base_term (rtx op)
9318 {
9319 rtx base;
9320
9321 base = op;
9322 if (GET_CODE (base) == CONST)
9323 base = XEXP (base, 0);
9324 if (GET_CODE (base) == PLUS)
9325 base = XEXP (base, 0);
9326 if (GET_CODE (base) == UNSPEC)
9327 switch (XINT (base, 1))
9328 {
9329 case UNSPEC_TOCREL:
9330 case UNSPEC_MACHOPIC_OFFSET:
9331 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9332 for aliasing purposes. */
9333 return XVECEXP (base, 0, 0);
9334 }
9335
9336 return op;
9337 }
9338
9339 /* More elaborate version of recog's offsettable_memref_p predicate
9340 that works around the ??? note of rs6000_mode_dependent_address.
9341 In particular it accepts
9342
9343 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9344
9345 in 32-bit mode, which the recog predicate rejects. */
9346
9347 static bool
9348 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9349 {
9350 bool worst_case;
9351
9352 if (!MEM_P (op))
9353 return false;
9354
9355 /* First mimic offsettable_memref_p. */
9356 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9357 return true;
9358
9359 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9360 the latter predicate knows nothing about the mode of the memory
9361 reference and, therefore, assumes that it is the largest supported
9362 mode (TFmode). As a consequence, legitimate offsettable memory
9363 references are rejected. rs6000_legitimate_offset_address_p contains
9364 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9365 at least with a little bit of help here given that we know the
9366 actual registers used. */
9367 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9368 || GET_MODE_SIZE (reg_mode) == 4);
9369 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9370 strict, worst_case);
9371 }
9372
9373 /* Determine the reassociation width to be used in reassociate_bb.
9374 This takes into account how many parallel operations we
9375 can actually do of a given type, and also the latency.
9376 P8:
9377 int add/sub 6/cycle
9378 mul 2/cycle
9379 vect add/sub/mul 2/cycle
9380 fp add/sub/mul 2/cycle
9381 dfp 1/cycle
9382 */
9383
9384 static int
9385 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9386 machine_mode mode)
9387 {
9388 switch (rs6000_tune)
9389 {
9390 case PROCESSOR_POWER8:
9391 case PROCESSOR_POWER9:
9392 if (DECIMAL_FLOAT_MODE_P (mode))
9393 return 1;
9394 if (VECTOR_MODE_P (mode))
9395 return 4;
9396 if (INTEGRAL_MODE_P (mode))
9397 return 1;
9398 if (FLOAT_MODE_P (mode))
9399 return 4;
9400 break;
9401 default:
9402 break;
9403 }
9404 return 1;
9405 }
9406
9407 /* Change register usage conditional on target flags. */
9408 static void
9409 rs6000_conditional_register_usage (void)
9410 {
9411 int i;
9412
9413 if (TARGET_DEBUG_TARGET)
9414 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9415
9416 /* Set MQ register fixed (already call_used) so that it will not be
9417 allocated. */
9418 fixed_regs[64] = 1;
9419
9420 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9421 if (TARGET_64BIT)
9422 fixed_regs[13] = call_used_regs[13]
9423 = call_really_used_regs[13] = 1;
9424
9425 /* Conditionally disable FPRs. */
9426 if (TARGET_SOFT_FLOAT)
9427 for (i = 32; i < 64; i++)
9428 fixed_regs[i] = call_used_regs[i]
9429 = call_really_used_regs[i] = 1;
9430
9431 /* The TOC register is not killed across calls in a way that is
9432 visible to the compiler. */
9433 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9434 call_really_used_regs[2] = 0;
9435
9436 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9437 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9438
9439 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9440 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9441 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9442 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9443
9444 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9445 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9446 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9447 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9448
9449 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9450 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9451 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9452
9453 if (!TARGET_ALTIVEC && !TARGET_VSX)
9454 {
9455 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9456 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9457 call_really_used_regs[VRSAVE_REGNO] = 1;
9458 }
9459
9460 if (TARGET_ALTIVEC || TARGET_VSX)
9461 global_regs[VSCR_REGNO] = 1;
9462
9463 if (TARGET_ALTIVEC_ABI)
9464 {
9465 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9466 call_used_regs[i] = call_really_used_regs[i] = 1;
9467
9468 /* AIX reserves VR20:31 in non-extended ABI mode. */
9469 if (TARGET_XCOFF)
9470 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9471 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9472 }
9473 }
9474
9475 \f
9476 /* Output insns to set DEST equal to the constant SOURCE as a series of
9477 lis, ori and sldi instructions and return TRUE. */
9478
9479 bool
9480 rs6000_emit_set_const (rtx dest, rtx source)
9481 {
9482 machine_mode mode = GET_MODE (dest);
9483 rtx temp, set;
9484 rtx_insn *insn;
9485 HOST_WIDE_INT c;
9486
9487 gcc_checking_assert (CONST_INT_P (source));
9488 c = INTVAL (source);
9489 switch (mode)
9490 {
9491 case E_QImode:
9492 case E_HImode:
9493 emit_insn (gen_rtx_SET (dest, source));
9494 return true;
9495
9496 case E_SImode:
9497 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9498
9499 emit_insn (gen_rtx_SET (copy_rtx (temp),
9500 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9501 emit_insn (gen_rtx_SET (dest,
9502 gen_rtx_IOR (SImode, copy_rtx (temp),
9503 GEN_INT (c & 0xffff))));
9504 break;
9505
9506 case E_DImode:
9507 if (!TARGET_POWERPC64)
9508 {
9509 rtx hi, lo;
9510
9511 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9512 DImode);
9513 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9514 DImode);
9515 emit_move_insn (hi, GEN_INT (c >> 32));
9516 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9517 emit_move_insn (lo, GEN_INT (c));
9518 }
9519 else
9520 rs6000_emit_set_long_const (dest, c);
9521 break;
9522
9523 default:
9524 gcc_unreachable ();
9525 }
9526
9527 insn = get_last_insn ();
9528 set = single_set (insn);
9529 if (! CONSTANT_P (SET_SRC (set)))
9530 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9531
9532 return true;
9533 }
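/* For example (an illustrative sketch), the E_SImode case above
   builds c = 0x12345678 as
     lis rD,0x1234       # rD = 0x12340000
     ori rD,rD,0x5678    # rD |= 0x5678
   matching the two SETs it emits.  */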
9534
9535 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9536 Output insns to set DEST equal to the constant C as a series of
9537 lis, ori and sldi instructions. */
9538
9539 static void
9540 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9541 {
9542 rtx temp;
9543 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9544
9545 ud1 = c & 0xffff;
9546 c = c >> 16;
9547 ud2 = c & 0xffff;
9548 c = c >> 16;
9549 ud3 = c & 0xffff;
9550 c = c >> 16;
9551 ud4 = c & 0xffff;
9552
9553 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9554 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9555 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9556
9557 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9558 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9559 {
9560 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9561
9562 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9563 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9564 if (ud1 != 0)
9565 emit_move_insn (dest,
9566 gen_rtx_IOR (DImode, copy_rtx (temp),
9567 GEN_INT (ud1)));
9568 }
9569 else if (ud3 == 0 && ud4 == 0)
9570 {
9571 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9572
9573 gcc_assert (ud2 & 0x8000);
9574 emit_move_insn (copy_rtx (temp),
9575 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9576 if (ud1 != 0)
9577 emit_move_insn (copy_rtx (temp),
9578 gen_rtx_IOR (DImode, copy_rtx (temp),
9579 GEN_INT (ud1)));
9580 emit_move_insn (dest,
9581 gen_rtx_ZERO_EXTEND (DImode,
9582 gen_lowpart (SImode,
9583 copy_rtx (temp))));
9584 }
9585 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9586 || (ud4 == 0 && ! (ud3 & 0x8000)))
9587 {
9588 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9589
9590 emit_move_insn (copy_rtx (temp),
9591 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9592 if (ud2 != 0)
9593 emit_move_insn (copy_rtx (temp),
9594 gen_rtx_IOR (DImode, copy_rtx (temp),
9595 GEN_INT (ud2)));
9596 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9597 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9598 GEN_INT (16)));
9599 if (ud1 != 0)
9600 emit_move_insn (dest,
9601 gen_rtx_IOR (DImode, copy_rtx (temp),
9602 GEN_INT (ud1)));
9603 }
9604 else
9605 {
9606 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9607
9608 emit_move_insn (copy_rtx (temp),
9609 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9610 if (ud3 != 0)
9611 emit_move_insn (copy_rtx (temp),
9612 gen_rtx_IOR (DImode, copy_rtx (temp),
9613 GEN_INT (ud3)));
9614
9615 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9616 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9617 GEN_INT (32)));
9618 if (ud2 != 0)
9619 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9620 gen_rtx_IOR (DImode, copy_rtx (temp),
9621 GEN_INT (ud2 << 16)));
9622 if (ud1 != 0)
9623 emit_move_insn (dest,
9624 gen_rtx_IOR (DImode, copy_rtx (temp),
9625 GEN_INT (ud1)));
9626 }
9627 }
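/* A worked example for the general (four-halfword) case above:
   c = 0x123456789abcdef0 splits into ud4 = 0x1234, ud3 = 0x5678,
   ud2 = 0x9abc, ud1 = 0xdef0 and is built with five insns:
     lis  rD,0x1234
     ori  rD,rD,0x5678
     sldi rD,rD,32
     oris rD,rD,0x9abc
     ori  rD,rD,0xdef0  */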
9628
9629 /* Helper for the following. Get rid of [r+r] memory refs
9630 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9631
9632 static void
9633 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9634 {
9635 if (GET_CODE (operands[0]) == MEM
9636 && GET_CODE (XEXP (operands[0], 0)) != REG
9637 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9638 GET_MODE (operands[0]), false))
9639 operands[0]
9640 = replace_equiv_address (operands[0],
9641 copy_addr_to_reg (XEXP (operands[0], 0)));
9642
9643 if (GET_CODE (operands[1]) == MEM
9644 && GET_CODE (XEXP (operands[1], 0)) != REG
9645 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9646 GET_MODE (operands[1]), false))
9647 operands[1]
9648 = replace_equiv_address (operands[1],
9649 copy_addr_to_reg (XEXP (operands[1], 0)));
9650 }
9651
9652 /* Generate a vector of constants to permute MODE for a little-endian
9653 storage operation by swapping the two halves of a vector. */
9654 static rtvec
9655 rs6000_const_vec (machine_mode mode)
9656 {
9657 int i, subparts;
9658 rtvec v;
9659
9660 switch (mode)
9661 {
9662 case E_V1TImode:
9663 subparts = 1;
9664 break;
9665 case E_V2DFmode:
9666 case E_V2DImode:
9667 subparts = 2;
9668 break;
9669 case E_V4SFmode:
9670 case E_V4SImode:
9671 subparts = 4;
9672 break;
9673 case E_V8HImode:
9674 subparts = 8;
9675 break;
9676 case E_V16QImode:
9677 subparts = 16;
9678 break;
9679 default:
9680 gcc_unreachable();
9681 }
9682
9683 v = rtvec_alloc (subparts);
9684
9685 for (i = 0; i < subparts / 2; ++i)
9686 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9687 for (i = subparts / 2; i < subparts; ++i)
9688 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9689
9690 return v;
9691 }
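/* For example, for V4SImode the vector built above is {2, 3, 0, 1},
   a permutation that swaps the two 64-bit halves of the 128-bit
   vector.  */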
9692
9693 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9694 store operation. */
9695 void
9696 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9697 {
9698 /* Scalar permutations are easier to express in integer modes than in
9699 floating-point modes, so cast them here. We use V1TImode instead
9700 of TImode to ensure that the values don't go through GPRs. */
9701 if (FLOAT128_VECTOR_P (mode))
9702 {
9703 dest = gen_lowpart (V1TImode, dest);
9704 source = gen_lowpart (V1TImode, source);
9705 mode = V1TImode;
9706 }
9707
9708 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9709 scalar. */
9710 if (mode == TImode || mode == V1TImode)
9711 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9712 GEN_INT (64))));
9713 else
9714 {
9715 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9716 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9717 }
9718 }
9719
9720 /* Emit a little-endian load from vector memory location SOURCE to VSX
9721 register DEST in mode MODE. The load is done with two permuting
9722 insns that represent an lxvd2x and an xxpermdi. */
9723 void
9724 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9725 {
9726 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9727 V1TImode). */
9728 if (mode == TImode || mode == V1TImode)
9729 {
9730 mode = V2DImode;
9731 dest = gen_lowpart (V2DImode, dest);
9732 source = adjust_address (source, V2DImode, 0);
9733 }
9734
9735 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9736 rs6000_emit_le_vsx_permute (tmp, source, mode);
9737 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9738 }
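/* The first permuting SET above has a MEM source and so matches the
   lxvd2x pattern; the second is register to register and matches
   xxpermdi. Together they leave DEST with the elements in proper
   little-endian order.  */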
9739
9740 /* Emit a little-endian store to vector memory location DEST from VSX
9741 register SOURCE in mode MODE. The store is done with two permuting
9742 insns that represent an xxpermdi and an stxvd2x. */
9743 void
9744 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9745 {
9746 /* This should never be called during or after LRA, because it does
9747 not re-permute the source register. It is intended only for use
9748 during expand. */
9749 gcc_assert (!lra_in_progress && !reload_completed);
9750
9751 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9752 V1TImode). */
9753 if (mode == TImode || mode == V1TImode)
9754 {
9755 mode = V2DImode;
9756 dest = adjust_address (dest, V2DImode, 0);
9757 source = gen_lowpart (V2DImode, source);
9758 }
9759
9760 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9761 rs6000_emit_le_vsx_permute (tmp, source, mode);
9762 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9763 }
9764
9765 /* Emit a sequence representing a little-endian VSX load or store,
9766 moving data from SOURCE to DEST in mode MODE. This is done
9767 separately from rs6000_emit_move to ensure it is called only
9768 during expand. LE VSX loads and stores introduced later are
9769 handled with a split. The expand-time RTL generation allows
9770 us to optimize away redundant pairs of register-permutes. */
9771 void
9772 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9773 {
9774 gcc_assert (!BYTES_BIG_ENDIAN
9775 && VECTOR_MEM_VSX_P (mode)
9776 && !TARGET_P9_VECTOR
9777 && !gpr_or_gpr_p (dest, source)
9778 && (MEM_P (source) ^ MEM_P (dest)));
9779
9780 if (MEM_P (source))
9781 {
9782 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
9783 rs6000_emit_le_vsx_load (dest, source, mode);
9784 }
9785 else
9786 {
9787 if (!REG_P (source))
9788 source = force_reg (mode, source);
9789 rs6000_emit_le_vsx_store (dest, source, mode);
9790 }
9791 }
9792
9793 /* Return whether an SFmode or SImode move can be done without converting one
9794 mode to another. This arises when we have:
9795
9796 (SUBREG:SF (REG:SI ...))
9797 (SUBREG:SI (REG:SF ...))
9798
9799 and one of the values is in a floating point/vector register, where SFmode
9800 scalars are stored in DFmode format. */
9801
9802 bool
9803 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9804 {
9805 if (TARGET_ALLOW_SF_SUBREG)
9806 return true;
9807
9808 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9809 return true;
9810
9811 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9812 return true;
9813
9814 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9815 if (SUBREG_P (dest))
9816 {
9817 rtx dest_subreg = SUBREG_REG (dest);
9818 rtx src_subreg = SUBREG_REG (src);
9819 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9820 }
9821
9822 return false;
9823 }
9824
9825
9826 /* Helper function to change moves with:
9827
9828 (SUBREG:SF (REG:SI)) and
9829 (SUBREG:SI (REG:SF))
9830
9831 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9832 values are stored as DFmode values in the VSX registers. We need to convert
9833 the bits before we can use a direct move or operate on the bits in the
9834 vector register as an integer type.
9835
9836 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
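/* A minimal sketch of the effect (assuming power8 direct moves): for
     (set (reg:SI rI) (subreg:SI (reg:SF rF) 0))
   we emit movsi_from_sf instead, whose pattern converts the
   DFmode-format bits in the VSX register to SFmode format before
   moving them to the GPR.  */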
9837
9838 static bool
9839 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9840 {
9841 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
9842 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9843 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9844 {
9845 rtx inner_source = SUBREG_REG (source);
9846 machine_mode inner_mode = GET_MODE (inner_source);
9847
9848 if (mode == SImode && inner_mode == SFmode)
9849 {
9850 emit_insn (gen_movsi_from_sf (dest, inner_source));
9851 return true;
9852 }
9853
9854 if (mode == SFmode && inner_mode == SImode)
9855 {
9856 emit_insn (gen_movsf_from_si (dest, inner_source));
9857 return true;
9858 }
9859 }
9860
9861 return false;
9862 }
9863
9864 /* Emit a move from SOURCE to DEST in mode MODE. */
9865 void
9866 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9867 {
9868 rtx operands[2];
9869 operands[0] = dest;
9870 operands[1] = source;
9871
9872 if (TARGET_DEBUG_ADDR)
9873 {
9874 fprintf (stderr,
9875 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9876 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9877 GET_MODE_NAME (mode),
9878 lra_in_progress,
9879 reload_completed,
9880 can_create_pseudo_p ());
9881 debug_rtx (dest);
9882 fprintf (stderr, "source:\n");
9883 debug_rtx (source);
9884 }
9885
9886 /* Check that we get CONST_WIDE_INT only when we should. */
9887 if (CONST_WIDE_INT_P (operands[1])
9888 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9889 gcc_unreachable ();
9890
9891 #ifdef HAVE_AS_GNU_ATTRIBUTE
9892 /* If we use a long double type, set the flags in .gnu_attribute that say
9893 what the long double type is. This is to allow the linker's warning
9894 message for the wrong long double to be useful, even if the function does
9895 not do a call (for example, doing a 128-bit add on power9 if the long
9896 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
9897 is used and it is not the default long double type. */
9898 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9899 {
9900 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9901 rs6000_passes_float = rs6000_passes_long_double = true;
9902
9903 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9904 rs6000_passes_float = rs6000_passes_long_double = true;
9905 }
9906 #endif
9907
9908 /* See if we need to special case SImode/SFmode SUBREG moves. */
9909 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9910 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9911 return;
9912
9913 /* Check if GCC is setting up a block move that will end up using FP
9914 registers as temporaries. We must make sure this is acceptable. */
9915 if (GET_CODE (operands[0]) == MEM
9916 && GET_CODE (operands[1]) == MEM
9917 && mode == DImode
9918 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9919 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9920 && ! (rs6000_slow_unaligned_access (SImode,
9921 (MEM_ALIGN (operands[0]) > 32
9922 ? 32 : MEM_ALIGN (operands[0])))
9923 || rs6000_slow_unaligned_access (SImode,
9924 (MEM_ALIGN (operands[1]) > 32
9925 ? 32 : MEM_ALIGN (operands[1]))))
9926 && ! MEM_VOLATILE_P (operands [0])
9927 && ! MEM_VOLATILE_P (operands [1]))
9928 {
9929 emit_move_insn (adjust_address (operands[0], SImode, 0),
9930 adjust_address (operands[1], SImode, 0));
9931 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9932 adjust_address (copy_rtx (operands[1]), SImode, 4));
9933 return;
9934 }
9935
9936 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
9937 && !gpc_reg_operand (operands[1], mode))
9938 operands[1] = force_reg (mode, operands[1]);
9939
9940 /* Recognize the case where operand[1] is a reference to thread-local
9941 data and load its address to a register. */
9942 if (tls_referenced_p (operands[1]))
9943 {
9944 enum tls_model model;
9945 rtx tmp = operands[1];
9946 rtx addend = NULL;
9947
9948 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9949 {
9950 addend = XEXP (XEXP (tmp, 0), 1);
9951 tmp = XEXP (XEXP (tmp, 0), 0);
9952 }
9953
9954 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
9955 model = SYMBOL_REF_TLS_MODEL (tmp);
9956 gcc_assert (model != 0);
9957
9958 tmp = rs6000_legitimize_tls_address (tmp, model);
9959 if (addend)
9960 {
9961 tmp = gen_rtx_PLUS (mode, tmp, addend);
9962 tmp = force_operand (tmp, operands[0]);
9963 }
9964 operands[1] = tmp;
9965 }
9966
9967 /* 128-bit constant floating-point values on Darwin should really be loaded
9968 as two parts. However, this premature splitting is a problem when DFmode
9969 values can go into Altivec registers. */
9970 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
9971 && !reg_addr[DFmode].scalar_in_vmx_p)
9972 {
9973 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9974 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9975 DFmode);
9976 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9977 GET_MODE_SIZE (DFmode)),
9978 simplify_gen_subreg (DFmode, operands[1], mode,
9979 GET_MODE_SIZE (DFmode)),
9980 DFmode);
9981 return;
9982 }
9983
9984 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9985 p1:SD) if p1 is not of floating point class and p0 is spilled, since
9986 there is no analogous movsd_store for this. */
9987 if (lra_in_progress && mode == DDmode
9988 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
9989 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9990 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
9991 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9992 {
9993 enum reg_class cl;
9994 int regno = REGNO (SUBREG_REG (operands[1]));
9995
9996 if (regno >= FIRST_PSEUDO_REGISTER)
9997 {
9998 cl = reg_preferred_class (regno);
9999 regno = reg_renumber[regno];
10000 if (regno < 0)
10001 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10002 }
10003 if (regno >= 0 && ! FP_REGNO_P (regno))
10004 {
10005 mode = SDmode;
10006 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10007 operands[1] = SUBREG_REG (operands[1]);
10008 }
10009 }
10010 if (lra_in_progress
10011 && mode == SDmode
10012 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10013 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10014 && (REG_P (operands[1])
10015 || (GET_CODE (operands[1]) == SUBREG
10016 && REG_P (SUBREG_REG (operands[1])))))
10017 {
10018 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10019 ? SUBREG_REG (operands[1]) : operands[1]);
10020 enum reg_class cl;
10021
10022 if (regno >= FIRST_PSEUDO_REGISTER)
10023 {
10024 cl = reg_preferred_class (regno);
10025 gcc_assert (cl != NO_REGS);
10026 regno = reg_renumber[regno];
10027 if (regno < 0)
10028 regno = ira_class_hard_regs[cl][0];
10029 }
10030 if (FP_REGNO_P (regno))
10031 {
10032 if (GET_MODE (operands[0]) != DDmode)
10033 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10034 emit_insn (gen_movsd_store (operands[0], operands[1]));
10035 }
10036 else if (INT_REGNO_P (regno))
10037 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10038 else
10039 gcc_unreachable();
10040 return;
10041 }
10042 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10043 p1:DD)) if p0 is not of floating point class and p1 is spilled, since
10044 there is no analogous movsd_load for this. */
10045 if (lra_in_progress && mode == DDmode
10046 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10047 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10048 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10049 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10050 {
10051 enum reg_class cl;
10052 int regno = REGNO (SUBREG_REG (operands[0]));
10053
10054 if (regno >= FIRST_PSEUDO_REGISTER)
10055 {
10056 cl = reg_preferred_class (regno);
10057 regno = reg_renumber[regno];
10058 if (regno < 0)
10059 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10060 }
10061 if (regno >= 0 && ! FP_REGNO_P (regno))
10062 {
10063 mode = SDmode;
10064 operands[0] = SUBREG_REG (operands[0]);
10065 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10066 }
10067 }
10068 if (lra_in_progress
10069 && mode == SDmode
10070 && (REG_P (operands[0])
10071 || (GET_CODE (operands[0]) == SUBREG
10072 && REG_P (SUBREG_REG (operands[0]))))
10073 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10074 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10075 {
10076 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10077 ? SUBREG_REG (operands[0]) : operands[0]);
10078 enum reg_class cl;
10079
10080 if (regno >= FIRST_PSEUDO_REGISTER)
10081 {
10082 cl = reg_preferred_class (regno);
10083 gcc_assert (cl != NO_REGS);
10084 regno = reg_renumber[regno];
10085 if (regno < 0)
10086 regno = ira_class_hard_regs[cl][0];
10087 }
10088 if (FP_REGNO_P (regno))
10089 {
10090 if (GET_MODE (operands[1]) != DDmode)
10091 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10092 emit_insn (gen_movsd_load (operands[0], operands[1]));
10093 }
10094 else if (INT_REGNO_P (regno))
10095 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10096 else
10097 gcc_unreachable();
10098 return;
10099 }
10100
10101 /* FIXME: In the long term, this switch statement should go away
10102 and be replaced by a sequence of tests based on things like
10103 mode == Pmode. */
10104 switch (mode)
10105 {
10106 case E_HImode:
10107 case E_QImode:
10108 if (CONSTANT_P (operands[1])
10109 && GET_CODE (operands[1]) != CONST_INT)
10110 operands[1] = force_const_mem (mode, operands[1]);
10111 break;
10112
10113 case E_TFmode:
10114 case E_TDmode:
10115 case E_IFmode:
10116 case E_KFmode:
10117 if (FLOAT128_2REG_P (mode))
10118 rs6000_eliminate_indexed_memrefs (operands);
10119 /* fall through */
10120
10121 case E_DFmode:
10122 case E_DDmode:
10123 case E_SFmode:
10124 case E_SDmode:
10125 if (CONSTANT_P (operands[1])
10126 && ! easy_fp_constant (operands[1], mode))
10127 operands[1] = force_const_mem (mode, operands[1]);
10128 break;
10129
10130 case E_V16QImode:
10131 case E_V8HImode:
10132 case E_V4SFmode:
10133 case E_V4SImode:
10134 case E_V2DFmode:
10135 case E_V2DImode:
10136 case E_V1TImode:
10137 if (CONSTANT_P (operands[1])
10138 && !easy_vector_constant (operands[1], mode))
10139 operands[1] = force_const_mem (mode, operands[1]);
10140 break;
10141
10142 case E_SImode:
10143 case E_DImode:
10144 /* Use the default pattern for an address of ELF small data. */
10145 if (TARGET_ELF
10146 && mode == Pmode
10147 && DEFAULT_ABI == ABI_V4
10148 && (GET_CODE (operands[1]) == SYMBOL_REF
10149 || GET_CODE (operands[1]) == CONST)
10150 && small_data_operand (operands[1], mode))
10151 {
10152 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10153 return;
10154 }
10155
10156 if (DEFAULT_ABI == ABI_V4
10157 && mode == Pmode && mode == SImode
10158 && flag_pic == 1 && got_operand (operands[1], mode))
10159 {
10160 emit_insn (gen_movsi_got (operands[0], operands[1]));
10161 return;
10162 }
10163
10164 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10165 && TARGET_NO_TOC
10166 && ! flag_pic
10167 && mode == Pmode
10168 && CONSTANT_P (operands[1])
10169 && GET_CODE (operands[1]) != HIGH
10170 && GET_CODE (operands[1]) != CONST_INT)
10171 {
10172 rtx target = (!can_create_pseudo_p ()
10173 ? operands[0]
10174 : gen_reg_rtx (mode));
10175
10176 /* If this is a function address on -mcall-aixdesc,
10177 convert it to the address of the descriptor. */
10178 if (DEFAULT_ABI == ABI_AIX
10179 && GET_CODE (operands[1]) == SYMBOL_REF
10180 && XSTR (operands[1], 0)[0] == '.')
10181 {
10182 const char *name = XSTR (operands[1], 0);
10183 rtx new_ref;
10184 while (*name == '.')
10185 name++;
10186 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10187 CONSTANT_POOL_ADDRESS_P (new_ref)
10188 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10189 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10190 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10191 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10192 operands[1] = new_ref;
10193 }
10194
10195 if (DEFAULT_ABI == ABI_DARWIN)
10196 {
10197 #if TARGET_MACHO
10198 if (MACHO_DYNAMIC_NO_PIC_P)
10199 {
10200 /* Take care of any required data indirection. */
10201 operands[1] = rs6000_machopic_legitimize_pic_address (
10202 operands[1], mode, operands[0]);
10203 if (operands[0] != operands[1])
10204 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10205 return;
10206 }
10207 #endif
10208 emit_insn (gen_macho_high (target, operands[1]));
10209 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10210 return;
10211 }
10212
10213 emit_insn (gen_elf_high (target, operands[1]));
10214 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10215 return;
10216 }
10217
10218 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10219 and we have put it in the TOC, we just need to make a TOC-relative
10220 reference to it. */
10221 if (TARGET_TOC
10222 && GET_CODE (operands[1]) == SYMBOL_REF
10223 && use_toc_relative_ref (operands[1], mode))
10224 operands[1] = create_TOC_reference (operands[1], operands[0]);
10225 else if (mode == Pmode
10226 && CONSTANT_P (operands[1])
10227 && GET_CODE (operands[1]) != HIGH
10228 && ((REG_P (operands[0])
10229 && FP_REGNO_P (REGNO (operands[0])))
10230 || !CONST_INT_P (operands[1])
10231 || (num_insns_constant (operands[1], mode)
10232 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10233 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10234 && (TARGET_CMODEL == CMODEL_SMALL
10235 || can_create_pseudo_p ()
10236 || (REG_P (operands[0])
10237 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10238 {
10239
10240 #if TARGET_MACHO
10241 /* Darwin uses a special PIC legitimizer. */
10242 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10243 {
10244 operands[1] =
10245 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10246 operands[0]);
10247 if (operands[0] != operands[1])
10248 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10249 return;
10250 }
10251 #endif
10252
10253 /* If we are to limit the number of things we put in the TOC and
10254 this is a symbol plus a constant we can add in one insn,
10255 just put the symbol in the TOC and add the constant. */
10256 if (GET_CODE (operands[1]) == CONST
10257 && TARGET_NO_SUM_IN_TOC
10258 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10259 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10260 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10261 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10262 && ! side_effects_p (operands[0]))
10263 {
10264 rtx sym =
10265 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10266 rtx other = XEXP (XEXP (operands[1], 0), 1);
10267
10268 sym = force_reg (mode, sym);
10269 emit_insn (gen_add3_insn (operands[0], sym, other));
10270 return;
10271 }
10272
10273 operands[1] = force_const_mem (mode, operands[1]);
10274
10275 if (TARGET_TOC
10276 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10277 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10278 {
10279 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10280 operands[0]);
10281 operands[1] = gen_const_mem (mode, tocref);
10282 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10283 }
10284 }
10285 break;
10286
10287 case E_TImode:
10288 if (!VECTOR_MEM_VSX_P (TImode))
10289 rs6000_eliminate_indexed_memrefs (operands);
10290 break;
10291
10292 case E_PTImode:
10293 rs6000_eliminate_indexed_memrefs (operands);
10294 break;
10295
10296 default:
10297 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10298 }
10299
10300 /* Above, we may have called force_const_mem which may have returned
10301 an invalid address. If we can, fix this up; otherwise, reload will
10302 have to deal with it. */
10303 if (GET_CODE (operands[1]) == MEM)
10304 operands[1] = validize_mem (operands[1]);
10305
10306 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10307 }
10308 \f
10309 /* Nonzero if we can use a floating-point register to pass this arg. */
10310 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10311 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10312 && (CUM)->fregno <= FP_ARG_MAX_REG \
10313 && TARGET_HARD_FLOAT)
10314
10315 /* Nonzero if we can use an AltiVec register to pass this arg. */
10316 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10317 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10318 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10319 && TARGET_ALTIVEC_ABI \
10320 && (NAMED))
10321
10322 /* Walk down the type tree of TYPE counting consecutive base elements.
10323 If *MODEP is VOIDmode, then set it to the first valid floating point
10324 or vector type. If a non-floating point or vector type is found, or
10325 if a floating point or vector type that doesn't match a non-VOIDmode
10326 *MODEP is found, then return -1, otherwise return the count in the
10327 sub-tree. */
10328
10329 static int
10330 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10331 {
10332 machine_mode mode;
10333 HOST_WIDE_INT size;
10334
10335 switch (TREE_CODE (type))
10336 {
10337 case REAL_TYPE:
10338 mode = TYPE_MODE (type);
10339 if (!SCALAR_FLOAT_MODE_P (mode))
10340 return -1;
10341
10342 if (*modep == VOIDmode)
10343 *modep = mode;
10344
10345 if (*modep == mode)
10346 return 1;
10347
10348 break;
10349
10350 case COMPLEX_TYPE:
10351 mode = TYPE_MODE (TREE_TYPE (type));
10352 if (!SCALAR_FLOAT_MODE_P (mode))
10353 return -1;
10354
10355 if (*modep == VOIDmode)
10356 *modep = mode;
10357
10358 if (*modep == mode)
10359 return 2;
10360
10361 break;
10362
10363 case VECTOR_TYPE:
10364 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10365 return -1;
10366
10367 /* Use V4SImode as representative of all 128-bit vector types. */
10368 size = int_size_in_bytes (type);
10369 switch (size)
10370 {
10371 case 16:
10372 mode = V4SImode;
10373 break;
10374 default:
10375 return -1;
10376 }
10377
10378 if (*modep == VOIDmode)
10379 *modep = mode;
10380
10381 /* Vector modes are considered to be opaque: two vectors are
10382 equivalent for the purposes of being homogeneous aggregates
10383 if they are the same size. */
10384 if (*modep == mode)
10385 return 1;
10386
10387 break;
10388
10389 case ARRAY_TYPE:
10390 {
10391 int count;
10392 tree index = TYPE_DOMAIN (type);
10393
10394 /* Can't handle incomplete types or sizes that are not
10395 fixed. */
10396 if (!COMPLETE_TYPE_P (type)
10397 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10398 return -1;
10399
10400 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10401 if (count == -1
10402 || !index
10403 || !TYPE_MAX_VALUE (index)
10404 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10405 || !TYPE_MIN_VALUE (index)
10406 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10407 || count < 0)
10408 return -1;
10409
10410 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10411 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10412
10413 /* There must be no padding. */
10414 if (wi::to_wide (TYPE_SIZE (type))
10415 != count * GET_MODE_BITSIZE (*modep))
10416 return -1;
10417
10418 return count;
10419 }
10420
10421 case RECORD_TYPE:
10422 {
10423 int count = 0;
10424 int sub_count;
10425 tree field;
10426
10427 /* Can't handle incomplete types nor sizes that are not
10428 fixed. */
10429 if (!COMPLETE_TYPE_P (type)
10430 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10431 return -1;
10432
10433 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10434 {
10435 if (TREE_CODE (field) != FIELD_DECL)
10436 continue;
10437
10438 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10439 if (sub_count < 0)
10440 return -1;
10441 count += sub_count;
10442 }
10443
10444 /* There must be no padding. */
10445 if (wi::to_wide (TYPE_SIZE (type))
10446 != count * GET_MODE_BITSIZE (*modep))
10447 return -1;
10448
10449 return count;
10450 }
10451
10452 case UNION_TYPE:
10453 case QUAL_UNION_TYPE:
10454 {
10455 /* These aren't very interesting except in a degenerate case. */
10456 int count = 0;
10457 int sub_count;
10458 tree field;
10459
10460 /* Can't handle incomplete types nor sizes that are not
10461 fixed. */
10462 if (!COMPLETE_TYPE_P (type)
10463 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10464 return -1;
10465
10466 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10467 {
10468 if (TREE_CODE (field) != FIELD_DECL)
10469 continue;
10470
10471 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10472 if (sub_count < 0)
10473 return -1;
10474 count = count > sub_count ? count : sub_count;
10475 }
10476
10477 /* There must be no padding. */
10478 if (wi::to_wide (TYPE_SIZE (type))
10479 != count * GET_MODE_BITSIZE (*modep))
10480 return -1;
10481
10482 return count;
10483 }
10484
10485 default:
10486 break;
10487 }
10488
10489 return -1;
10490 }
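/* Illustrative examples of the walk above (hypothetical types, shown as a
   sketch rather than taken from real callers):

     struct hfa { double x, y, z; };      counts 3, *modep == DFmode
     struct hva { __vector int a, b; };   counts 2, *modep == V4SImode
     double arr[4];                       ARRAY_TYPE: counts 4, DFmode
     struct bad { double d; int i; };     returns -1 (the int field rejects it)

   A union counts as its largest member, and any padding anywhere in the
   aggregate also makes the walk return -1.  */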
10491
10492 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10493 float or vector aggregate that shall be passed in FP/vector registers
10494 according to the ELFv2 ABI, return the homogeneous element mode in
10495 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10496
10497 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10498
10499 static bool
10500 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10501 machine_mode *elt_mode,
10502 int *n_elts)
10503 {
10504 /* Note that we do not accept complex types at the top level as
10505 homogeneous aggregates; these types are handled via the
10506 targetm.calls.split_complex_arg mechanism. Complex types
10507 can be elements of homogeneous aggregates, however. */
10508 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10509 && AGGREGATE_TYPE_P (type))
10510 {
10511 machine_mode field_mode = VOIDmode;
10512 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10513
10514 if (field_count > 0)
10515 {
10516 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10517 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10518
10519 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10520 up to AGGR_ARG_NUM_REG registers. */
10521 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10522 {
10523 if (elt_mode)
10524 *elt_mode = field_mode;
10525 if (n_elts)
10526 *n_elts = field_count;
10527 return true;
10528 }
10529 }
10530 }
10531
10532 if (elt_mode)
10533 *elt_mode = mode;
10534 if (n_elts)
10535 *n_elts = 1;
10536 return false;
10537 }
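/* A sketch of the size limit above, assuming AGGR_ARG_NUM_REG == 8 as the
   ELFv2 ABI specifies.  The types below are hypothetical examples, not
   from this file.  */
#if 0
struct ok  { double a[8]; };  /* 8 * 8 bytes <= 8 FPRs * 8 bytes: accepted.  */
struct big { double a[9]; };  /* 9 elements: falls back to normal handling.  */
#endif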
10538
10539 /* Return a nonzero value to say to return the function value in
10540 memory, just as large structures are always returned. TYPE will be
10541 the data type of the value, and FNTYPE will be the type of the
10542 function doing the returning, or @code{NULL} for libcalls.
10543
10544 The AIX ABI for the RS/6000 specifies that all structures are
10545 returned in memory. The Darwin ABI does the same.
10546
10547 For the Darwin 64 Bit ABI, a function result can be returned in
10548 registers or in memory, depending on the size of the return data
10549 type. If it is returned in registers, the value occupies the same
10550 registers as it would if it were the first and only function
10551 argument. Otherwise, the function places its result in memory at
10552 the location pointed to by GPR3.
10553
10554 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10555 but a draft put them in memory, and GCC used to implement the draft
10556 instead of the final standard. Therefore, aix_struct_return
10557 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10558 compatibility can change DRAFT_V4_STRUCT_RET to override the
10559 default, and -m switches get the final word. See
10560 rs6000_option_override_internal for more details.
10561
10562 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10563 long double support is enabled. These values are returned in memory.
10564
10565 int_size_in_bytes returns -1 for variable-size objects, which always
10566 go in memory. The cast to unsigned makes -1 > 8. */
10567
10568 static bool
10569 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10570 {
10571 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10572 if (TARGET_MACHO
10573 && rs6000_darwin64_abi
10574 && TREE_CODE (type) == RECORD_TYPE
10575 && int_size_in_bytes (type) > 0)
10576 {
10577 CUMULATIVE_ARGS valcum;
10578 rtx valret;
10579
10580 valcum.words = 0;
10581 valcum.fregno = FP_ARG_MIN_REG;
10582 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10583 /* Do a trial code generation as if this were going to be passed
10584 as an argument; if any part goes in memory, we return NULL. */
10585 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10586 if (valret)
10587 return false;
10588 /* Otherwise fall through to more conventional ABI rules. */
10589 }
10590
10591 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
10592 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10593 NULL, NULL))
10594 return false;
10595
10596 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
10597 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10598 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10599 return false;
10600
10601 if (AGGREGATE_TYPE_P (type)
10602 && (aix_struct_return
10603 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10604 return true;
10605
10606 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10607 modes only exist for GCC vector types if -maltivec. */
10608 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10609 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10610 return false;
10611
10612 /* Return synthetic vectors in memory. */
10613 if (TREE_CODE (type) == VECTOR_TYPE
10614 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10615 {
10616 static bool warned_for_return_big_vectors = false;
10617 if (!warned_for_return_big_vectors)
10618 {
10619 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10620 "non-standard ABI extension with no compatibility "
10621 "guarantee");
10622 warned_for_return_big_vectors = true;
10623 }
10624 return true;
10625 }
10626
10627 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10628 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10629 return true;
10630
10631 return false;
10632 }
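/* Some illustrative outcomes of the rules above (hypothetical types):
   under ELFv2, struct { long a, b; } (16 bytes) and the homogeneous
   struct { double x, y; } both come back in registers, while
   struct { long a, b, c; } (24 bytes, not homogeneous) goes to memory.
   Under the AIX rules, with aix_struct_return set, all three would be
   returned in memory.  */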
10633
10634 /* Specify whether values returned in registers should be at the most
10635 significant end of a register. We want aggregates returned by
10636 value to match the way aggregates are passed to functions. */
10637
10638 static bool
10639 rs6000_return_in_msb (const_tree valtype)
10640 {
10641 return (DEFAULT_ABI == ABI_ELFv2
10642 && BYTES_BIG_ENDIAN
10643 && AGGREGATE_TYPE_P (valtype)
10644 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10645 == PAD_UPWARD));
10646 }
10647
10648 #ifdef HAVE_AS_GNU_ATTRIBUTE
10649 /* Return TRUE if a call to function FNDECL may be one that
10650 potentially affects the function calling ABI of the object file. */
10651
10652 static bool
10653 call_ABI_of_interest (tree fndecl)
10654 {
10655 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10656 {
10657 struct cgraph_node *c_node;
10658
10659 /* Libcalls are always interesting. */
10660 if (fndecl == NULL_TREE)
10661 return true;
10662
10663 /* Any call to an external function is interesting. */
10664 if (DECL_EXTERNAL (fndecl))
10665 return true;
10666
10667 /* Interesting functions that we are emitting in this object file. */
10668 c_node = cgraph_node::get (fndecl);
10669 c_node = c_node->ultimate_alias_target ();
10670 return !c_node->only_called_directly_p ();
10671 }
10672 return false;
10673 }
10674 #endif
10675
10676 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10677 for a call to a function whose data type is FNTYPE.
10678 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
10679
10680 For incoming args we set the number of arguments in the prototype large
10681 so we never return a PARALLEL. */
10682
10683 void
10684 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10685 rtx libname ATTRIBUTE_UNUSED, int incoming,
10686 int libcall, int n_named_args,
10687 tree fndecl,
10688 machine_mode return_mode ATTRIBUTE_UNUSED)
10689 {
10690 static CUMULATIVE_ARGS zero_cumulative;
10691
10692 *cum = zero_cumulative;
10693 cum->words = 0;
10694 cum->fregno = FP_ARG_MIN_REG;
10695 cum->vregno = ALTIVEC_ARG_MIN_REG;
10696 cum->prototype = (fntype && prototype_p (fntype));
10697 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10698 ? CALL_LIBCALL : CALL_NORMAL);
10699 cum->sysv_gregno = GP_ARG_MIN_REG;
10700 cum->stdarg = stdarg_p (fntype);
10701 cum->libcall = libcall;
10702
10703 cum->nargs_prototype = 0;
10704 if (incoming || cum->prototype)
10705 cum->nargs_prototype = n_named_args;
10706
10707 /* Check for a longcall attribute. */
10708 if ((!fntype && rs6000_default_long_calls)
10709 || (fntype
10710 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10711 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10712 cum->call_cookie |= CALL_LONG;
10713 else if (DEFAULT_ABI != ABI_DARWIN)
10714 {
10715 bool is_local = (fndecl
10716 && !DECL_EXTERNAL (fndecl)
10717 && !DECL_WEAK (fndecl)
10718 && (*targetm.binds_local_p) (fndecl));
10719 if (is_local)
10720 ;
10721 else if (flag_plt)
10722 {
10723 if (fntype
10724 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10725 cum->call_cookie |= CALL_LONG;
10726 }
10727 else
10728 {
10729 if (!(fntype
10730 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10731 cum->call_cookie |= CALL_LONG;
10732 }
10733 }
10734
10735 if (TARGET_DEBUG_ARG)
10736 {
10737 fprintf (stderr, "\ninit_cumulative_args:");
10738 if (fntype)
10739 {
10740 tree ret_type = TREE_TYPE (fntype);
10741 fprintf (stderr, " ret code = %s,",
10742 get_tree_code_name (TREE_CODE (ret_type)));
10743 }
10744
10745 if (cum->call_cookie & CALL_LONG)
10746 fprintf (stderr, " longcall,");
10747
10748 fprintf (stderr, " proto = %d, nargs = %d\n",
10749 cum->prototype, cum->nargs_prototype);
10750 }
10751
10752 #ifdef HAVE_AS_GNU_ATTRIBUTE
10753 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10754 {
10755 cum->escapes = call_ABI_of_interest (fndecl);
10756 if (cum->escapes)
10757 {
10758 tree return_type;
10759
10760 if (fntype)
10761 {
10762 return_type = TREE_TYPE (fntype);
10763 return_mode = TYPE_MODE (return_type);
10764 }
10765 else
10766 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10767
10768 if (return_type != NULL)
10769 {
10770 if (TREE_CODE (return_type) == RECORD_TYPE
10771 && TYPE_TRANSPARENT_AGGR (return_type))
10772 {
10773 return_type = TREE_TYPE (first_field (return_type));
10774 return_mode = TYPE_MODE (return_type);
10775 }
10776 if (AGGREGATE_TYPE_P (return_type)
10777 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10778 <= 8))
10779 rs6000_returns_struct = true;
10780 }
10781 if (SCALAR_FLOAT_MODE_P (return_mode))
10782 {
10783 rs6000_passes_float = true;
10784 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10785 && (FLOAT128_IBM_P (return_mode)
10786 || FLOAT128_IEEE_P (return_mode)
10787 || (return_type != NULL
10788 && (TYPE_MAIN_VARIANT (return_type)
10789 == long_double_type_node))))
10790 rs6000_passes_long_double = true;
10791
10792 /* Note if we pass or return an IEEE 128-bit type. We changed
10793 the mangling for these types, and we may need to make an alias
10794 with the old mangling. */
10795 if (FLOAT128_IEEE_P (return_mode))
10796 rs6000_passes_ieee128 = true;
10797 }
10798 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10799 rs6000_passes_vector = true;
10800 }
10801 }
10802 #endif
10803
10804 if (fntype
10805 && !TARGET_ALTIVEC
10806 && TARGET_ALTIVEC_ABI
10807 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10808 {
10809 error ("cannot return value in vector register because"
10810 " altivec instructions are disabled, use %qs"
10811 " to enable them", "-maltivec");
10812 }
10813 }
10814 \f
10815 /* The mode the ABI uses for a word. This is not the same as word_mode
10816 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10817
10818 static scalar_int_mode
10819 rs6000_abi_word_mode (void)
10820 {
10821 return TARGET_32BIT ? SImode : DImode;
10822 }
10823
10824 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10825 static char *
10826 rs6000_offload_options (void)
10827 {
10828 if (TARGET_64BIT)
10829 return xstrdup ("-foffload-abi=lp64");
10830 else
10831 return xstrdup ("-foffload-abi=ilp32");
10832 }
10833
10834 /* On rs6000, function arguments are promoted, as are function return
10835 values. */
10836
10837 static machine_mode
10838 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10839 machine_mode mode,
10840 int *punsignedp ATTRIBUTE_UNUSED,
10841 const_tree, int)
10842 {
10843 PROMOTE_MODE (mode, *punsignedp, type);
10844
10845 return mode;
10846 }
10847
10848 /* Return true if TYPE must be passed on the stack and not in registers. */
10849
10850 static bool
10851 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10852 {
10853 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10854 return must_pass_in_stack_var_size (mode, type);
10855 else
10856 return must_pass_in_stack_var_size_or_pad (mode, type);
10857 }
10858
10859 static inline bool
10860 is_complex_IBM_long_double (machine_mode mode)
10861 {
10862 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10863 }
10864
10865 /* Whether ABI_V4 passes MODE args to a function in floating point
10866 registers. */
10867
10868 static bool
10869 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10870 {
10871 if (!TARGET_HARD_FLOAT)
10872 return false;
10873 if (mode == DFmode)
10874 return true;
10875 if (mode == SFmode && named)
10876 return true;
10877 /* ABI_V4 passes complex IBM long double in 8 gprs.
10878 Stupid, but we can't change the ABI now. */
10879 if (is_complex_IBM_long_double (mode))
10880 return false;
10881 if (FLOAT128_2REG_P (mode))
10882 return true;
10883 if (DECIMAL_FLOAT_MODE_P (mode))
10884 return true;
10885 return false;
10886 }
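/* Summary sketch of the decision above, assuming TARGET_HARD_FLOAT:

     DFmode                     -> FPR
     SFmode, named              -> FPR
     SFmode, unnamed (varargs)  -> GPRs/stack
     ICmode, IBM TCmode         -> GPRs (8 of them, per the comment above)
     FLOAT128_2REG_P modes      -> FPRs
     SDmode/DDmode/TDmode       -> FPRs (decimal float)
     everything else            -> GPRs/stack  */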
10887
10888 /* Implement TARGET_FUNCTION_ARG_PADDING.
10889
10890 For the AIX ABI, structs are always stored left-justified in their
10891 argument slot. */
10892
10893 static pad_direction
10894 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10895 {
10896 #ifndef AGGREGATE_PADDING_FIXED
10897 #define AGGREGATE_PADDING_FIXED 0
10898 #endif
10899 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10900 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10901 #endif
10902
10903 if (!AGGREGATE_PADDING_FIXED)
10904 {
10905 /* GCC used to pass structures of the same size as integer types as
10906 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10907 i.e., structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10908 passed padded downward, except that -mstrict-align further
10909 muddied the water in that multi-component structures of 2 and 4
10910 bytes in size were passed padded upward.
10911
10912 The following arranges for best compatibility with previous
10913 versions of gcc, but removes the -mstrict-align dependency. */
10914 if (BYTES_BIG_ENDIAN)
10915 {
10916 HOST_WIDE_INT size = 0;
10917
10918 if (mode == BLKmode)
10919 {
10920 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10921 size = int_size_in_bytes (type);
10922 }
10923 else
10924 size = GET_MODE_SIZE (mode);
10925
10926 if (size == 1 || size == 2 || size == 4)
10927 return PAD_DOWNWARD;
10928 }
10929 return PAD_UPWARD;
10930 }
10931
10932 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10933 {
10934 if (type != 0 && AGGREGATE_TYPE_P (type))
10935 return PAD_UPWARD;
10936 }
10937
10938 /* Fall back to the default. */
10939 return default_function_arg_padding (mode, type);
10940 }
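/* Illustration of the big-endian compatibility rule above, using
   hypothetical types:  */
#if 0
struct two   { char a, b; };    /* size 2: PAD_DOWNWARD, i.e. right-justified
                                   in its slot like a 16-bit integer.  */
struct three { char a, b, c; }; /* size 3: PAD_UPWARD, left-justified.  */
#endif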
10941
10942 /* If defined, a C expression that gives the alignment boundary, in bits,
10943 of an argument with the specified mode and type. If it is not defined,
10944 PARM_BOUNDARY is used for all arguments.
10945
10946 V.4 wants long longs and doubles to be double word aligned. Just
10947 testing the mode size is a boneheaded way to do this as it means
10948 that other types such as complex int are also double word aligned.
10949 However, we're stuck with this because changing the ABI might break
10950 existing library interfaces.
10951
10952 Quadword align Altivec/VSX vectors.
10953 Quadword align large synthetic vector types. */
10954
10955 static unsigned int
10956 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10957 {
10958 machine_mode elt_mode;
10959 int n_elts;
10960
10961 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10962
10963 if (DEFAULT_ABI == ABI_V4
10964 && (GET_MODE_SIZE (mode) == 8
10965 || (TARGET_HARD_FLOAT
10966 && !is_complex_IBM_long_double (mode)
10967 && FLOAT128_2REG_P (mode))))
10968 return 64;
10969 else if (FLOAT128_VECTOR_P (mode))
10970 return 128;
10971 else if (type && TREE_CODE (type) == VECTOR_TYPE
10972 && int_size_in_bytes (type) >= 8
10973 && int_size_in_bytes (type) < 16)
10974 return 64;
10975 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10976 || (type && TREE_CODE (type) == VECTOR_TYPE
10977 && int_size_in_bytes (type) >= 16))
10978 return 128;
10979
10980 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10981 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10982 -mcompat-align-parm is used. */
10983 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10984 || DEFAULT_ABI == ABI_ELFv2)
10985 && type && TYPE_ALIGN (type) > 64)
10986 {
10987 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10988 or homogeneous float/vector aggregates here. We already handled
10989 vector aggregates above, but still need to check for float here. */
10990 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10991 && !SCALAR_FLOAT_MODE_P (elt_mode));
10992
10993 /* We used to check for BLKmode instead of the above aggregate type
10994 check. Warn when this results in any difference to the ABI. */
10995 if (aggregate_p != (mode == BLKmode))
10996 {
10997 static bool warned;
10998 if (!warned && warn_psabi)
10999 {
11000 warned = true;
11001 inform (input_location,
11002 "the ABI of passing aggregates with %d-byte alignment"
11003 " has changed in GCC 5",
11004 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11005 }
11006 }
11007
11008 if (aggregate_p)
11009 return 128;
11010 }
11011
11012 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11013 implement the "aggregate type" check as a BLKmode check here; this
11014 means certain aggregate types are in fact not aligned. */
11015 if (TARGET_MACHO && rs6000_darwin64_abi
11016 && mode == BLKmode
11017 && type && TYPE_ALIGN (type) > 64)
11018 return 128;
11019
11020 return PARM_BOUNDARY;
11021 }
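/* A few illustrative results (a sketch, not exhaustive): under ABI_V4,
   a double or long long argument gets 64-bit alignment; any AltiVec/VSX
   vector gets 128; a plain int falls through to PARM_BOUNDARY.  Under
   ELFv2, a struct declared with __attribute__ ((aligned (32))) that is
   not a homogeneous float/vector aggregate also gets 128.  */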
11022
11023 /* The offset in words to the start of the parameter save area. */
11024
11025 static unsigned int
11026 rs6000_parm_offset (void)
11027 {
11028 return (DEFAULT_ABI == ABI_V4 ? 2
11029 : DEFAULT_ABI == ABI_ELFv2 ? 4
11030 : 6);
11031 }
11032
11033 /* For a function parm of MODE and TYPE, return the starting word in
11034 the parameter area. NWORDS of the parameter area are already used. */
11035
11036 static unsigned int
11037 rs6000_parm_start (machine_mode mode, const_tree type,
11038 unsigned int nwords)
11039 {
11040 unsigned int align;
11041
11042 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11043 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11044 }
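/* Worked example of the expression above (illustrative numbers): under
   ELFv2 in 64-bit mode, rs6000_parm_offset () is 4 and PARM_BOUNDARY is
   64, so a quadword-aligned argument has ALIGN == 128 / 64 - 1 == 1.
   With NWORDS == 3 already used, the start word is
   3 + (-(4 + 3) & 1) == 4, i.e. one padding word is skipped so the
   argument lands on an even doubleword.  */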
11045
11046 /* Compute the size (in words) of a function argument. */
11047
11048 static unsigned long
11049 rs6000_arg_size (machine_mode mode, const_tree type)
11050 {
11051 unsigned long size;
11052
11053 if (mode != BLKmode)
11054 size = GET_MODE_SIZE (mode);
11055 else
11056 size = int_size_in_bytes (type);
11057
11058 if (TARGET_32BIT)
11059 return (size + 3) >> 2;
11060 else
11061 return (size + 7) >> 3;
11062 }
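/* For instance (illustrative): a 9-byte BLKmode struct occupies
   (9 + 7) >> 3 == 2 doublewords under a 64-bit ABI, but
   (9 + 3) >> 2 == 3 words under a 32-bit ABI.  */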
11063 \f
11064 /* Use this to flush pending int fields. */
11065
11066 static void
11067 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11068 HOST_WIDE_INT bitpos, int final)
11069 {
11070 unsigned int startbit, endbit;
11071 int intregs, intoffset;
11072
11073 /* Handle the situation where a float is taking up the first half
11074 of the GPR, and the other half is empty (typically due to
11075 alignment restrictions). We can detect this by an 8-byte-aligned
11076 int field, or by seeing that this is the final flush for this
11077 argument. Count the word and continue on. */
11078 if (cum->floats_in_gpr == 1
11079 && (cum->intoffset % 64 == 0
11080 || (cum->intoffset == -1 && final)))
11081 {
11082 cum->words++;
11083 cum->floats_in_gpr = 0;
11084 }
11085
11086 if (cum->intoffset == -1)
11087 return;
11088
11089 intoffset = cum->intoffset;
11090 cum->intoffset = -1;
11091 cum->floats_in_gpr = 0;
11092
11093 if (intoffset % BITS_PER_WORD != 0)
11094 {
11095 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11096 if (!int_mode_for_size (bits, 0).exists ())
11097 {
11098 /* We couldn't find an appropriate mode, which happens,
11099 e.g., in packed structs when there are 3 bytes to load.
11100 Back intoffset back to the beginning of the word in this
11101 case. */
11102 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11103 }
11104 }
11105
11106 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11107 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11108 intregs = (endbit - startbit) / BITS_PER_WORD;
11109 cum->words += intregs;
11110 /* words should be unsigned. */
11111 if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
11112 {
11113 int pad = (endbit / BITS_PER_WORD) - cum->words;
11114 cum->words += pad;
11115 }
11116 }
11117
11118 /* The darwin64 ABI calls for us to recurse down through structs,
11119 looking for elements passed in registers. Unfortunately, we have
11120 to track int register count here also because of misalignments
11121 in powerpc alignment mode. */
11122
11123 static void
11124 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11125 const_tree type,
11126 HOST_WIDE_INT startbitpos)
11127 {
11128 tree f;
11129
11130 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11131 if (TREE_CODE (f) == FIELD_DECL)
11132 {
11133 HOST_WIDE_INT bitpos = startbitpos;
11134 tree ftype = TREE_TYPE (f);
11135 machine_mode mode;
11136 if (ftype == error_mark_node)
11137 continue;
11138 mode = TYPE_MODE (ftype);
11139
11140 if (DECL_SIZE (f) != 0
11141 && tree_fits_uhwi_p (bit_position (f)))
11142 bitpos += int_bit_position (f);
11143
11144 /* ??? FIXME: else assume zero offset. */
11145
11146 if (TREE_CODE (ftype) == RECORD_TYPE)
11147 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11148 else if (USE_FP_FOR_ARG_P (cum, mode))
11149 {
11150 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11151 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11152 cum->fregno += n_fpregs;
11153 /* Single-precision floats present a special problem for
11154 us, because they are smaller than an 8-byte GPR, and so
11155 the structure-packing rules combined with the standard
11156 varargs behavior mean that we want to pack float/float
11157 and float/int combinations into a single register's
11158 space. This is complicated by the arg advance flushing,
11159 which works on arbitrarily large groups of int-type
11160 fields. */
11161 if (mode == SFmode)
11162 {
11163 if (cum->floats_in_gpr == 1)
11164 {
11165 /* Two floats in a word; count the word and reset
11166 the float count. */
11167 cum->words++;
11168 cum->floats_in_gpr = 0;
11169 }
11170 else if (bitpos % 64 == 0)
11171 {
11172 /* A float at the beginning of an 8-byte word;
11173 count it and put off adjusting cum->words until
11174 we see if a arg advance flush is going to do it
11175 for us. */
11176 cum->floats_in_gpr++;
11177 }
11178 else
11179 {
11180 /* The float is at the end of a word, preceded
11181 by integer fields, so the arg advance flush
11182 just above has already set cum->words and
11183 everything is taken care of. */
11184 }
11185 }
11186 else
11187 cum->words += n_fpregs;
11188 }
11189 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11190 {
11191 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11192 cum->vregno++;
11193 cum->words += 2;
11194 }
11195 else if (cum->intoffset == -1)
11196 cum->intoffset = bitpos;
11197 }
11198 }
11199
11200 /* Check for an item that needs to be considered specially under the Darwin
11201 64-bit ABI. These are record types where the mode is BLKmode or the structure is
11202 8 bytes in size. */
11203 static int
11204 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11205 {
11206 return rs6000_darwin64_abi
11207 && ((mode == BLKmode
11208 && TREE_CODE (type) == RECORD_TYPE
11209 && int_size_in_bytes (type) > 0)
11210 || (type && TREE_CODE (type) == RECORD_TYPE
11211 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11212 }
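/* Hypothetical examples that match the predicate above:  */
#if 0
struct blk   { char c[17]; };   /* BLKmode record with a positive size.  */
struct eight { int a, b; };     /* exactly 8 bytes, whatever its mode.  */
#endif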
11213
11214 /* Update the data in CUM to advance over an argument
11215 of mode MODE and data type TYPE.
11216 (TYPE is null for libcalls where that information may not be available.)
11217
11218 Note that for args passed by reference, function_arg will be called
11219 with MODE and TYPE set to that of the pointer to the arg, not the arg
11220 itself. */
11221
11222 static void
11223 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11224 const_tree type, bool named, int depth)
11225 {
11226 machine_mode elt_mode;
11227 int n_elts;
11228
11229 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11230
11231 /* Only tick off an argument if we're not recursing. */
11232 if (depth == 0)
11233 cum->nargs_prototype--;
11234
11235 #ifdef HAVE_AS_GNU_ATTRIBUTE
11236 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11237 && cum->escapes)
11238 {
11239 if (SCALAR_FLOAT_MODE_P (mode))
11240 {
11241 rs6000_passes_float = true;
11242 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11243 && (FLOAT128_IBM_P (mode)
11244 || FLOAT128_IEEE_P (mode)
11245 || (type != NULL
11246 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11247 rs6000_passes_long_double = true;
11248
11249 /* Note if we pass or return an IEEE 128-bit type. We changed the
11250 mangling for these types, and we may need to make an alias with
11251 the old mangling. */
11252 if (FLOAT128_IEEE_P (mode))
11253 rs6000_passes_ieee128 = true;
11254 }
11255 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11256 rs6000_passes_vector = true;
11257 }
11258 #endif
11259
11260 if (TARGET_ALTIVEC_ABI
11261 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11262 || (type && TREE_CODE (type) == VECTOR_TYPE
11263 && int_size_in_bytes (type) == 16)))
11264 {
11265 bool stack = false;
11266
11267 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11268 {
11269 cum->vregno += n_elts;
11270
11271 if (!TARGET_ALTIVEC)
11272 error ("cannot pass argument in vector register because"
11273 " altivec instructions are disabled, use %qs"
11274 " to enable them", "-maltivec");
11275
11276 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11277 even if it is going to be passed in a vector register.
11278 Darwin does the same for variable-argument functions. */
11279 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11280 && TARGET_64BIT)
11281 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11282 stack = true;
11283 }
11284 else
11285 stack = true;
11286
11287 if (stack)
11288 {
11289 int align;
11290
11291 /* Vector parameters must be 16-byte aligned. In 32-bit
11292 mode this means we need to take into account the offset
11293 to the parameter save area. In 64-bit mode, they just
11294 have to start on an even word, since the parameter save
11295 area is 16-byte aligned. */
11296 if (TARGET_32BIT)
11297 align = -(rs6000_parm_offset () + cum->words) & 3;
11298 else
11299 align = cum->words & 1;
11300 cum->words += align + rs6000_arg_size (mode, type);
11301
11302 if (TARGET_DEBUG_ARG)
11303 {
11304 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11305 cum->words, align);
11306 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11307 cum->nargs_prototype, cum->prototype,
11308 GET_MODE_NAME (mode));
11309 }
11310 }
11311 }
11312 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11313 {
11314 int size = int_size_in_bytes (type);
11315 /* Variable sized types have size == -1 and are
11316 treated as if consisting entirely of ints.
11317 Pad to a 16-byte boundary if needed. */
11318 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11319 && (cum->words % 2) != 0)
11320 cum->words++;
11321 /* For varargs, we can just go up by the size of the struct. */
11322 if (!named)
11323 cum->words += (size + 7) / 8;
11324 else
11325 {
11326 /* It is tempting to say int register count just goes up by
11327 sizeof(type)/8, but this is wrong in a case such as
11328 { int; double; int; } [powerpc alignment]. We have to
11329 grovel through the fields for these too. */
11330 cum->intoffset = 0;
11331 cum->floats_in_gpr = 0;
11332 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11333 rs6000_darwin64_record_arg_advance_flush (cum,
11334 size * BITS_PER_UNIT, 1);
11335 }
11336 if (TARGET_DEBUG_ARG)
11337 {
11338 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11339 cum->words, TYPE_ALIGN (type), size);
11340 fprintf (stderr,
11341 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11342 cum->nargs_prototype, cum->prototype,
11343 GET_MODE_NAME (mode));
11344 }
11345 }
11346 else if (DEFAULT_ABI == ABI_V4)
11347 {
11348 if (abi_v4_pass_in_fpr (mode, named))
11349 {
11350 /* _Decimal128 must use an even/odd register pair. This assumes
11351 that the register number is odd when fregno is odd. */
11352 if (mode == TDmode && (cum->fregno % 2) == 1)
11353 cum->fregno++;
11354
11355 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11356 <= FP_ARG_V4_MAX_REG)
11357 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11358 else
11359 {
11360 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11361 if (mode == DFmode || FLOAT128_IBM_P (mode)
11362 || mode == DDmode || mode == TDmode)
11363 cum->words += cum->words & 1;
11364 cum->words += rs6000_arg_size (mode, type);
11365 }
11366 }
11367 else
11368 {
11369 int n_words = rs6000_arg_size (mode, type);
11370 int gregno = cum->sysv_gregno;
11371
11372 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11373 So is any other 2-word item, such as complex int, due to a
11374 historical mistake. */
11375 if (n_words == 2)
11376 gregno += (1 - gregno) & 1;
11377
11378 /* Multi-reg args are not split between registers and stack. */
11379 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11380 {
11381 /* Long long is aligned on the stack. So are other 2-word
11382 items, such as complex int, due to a historical mistake. */
11383 if (n_words == 2)
11384 cum->words += cum->words & 1;
11385 cum->words += n_words;
11386 }
11387
11388 /* Note: we keep accumulating gregno even after we've started
11389 spilling to the stack; this is how expand_builtin_saveregs
11390 detects that spilling has begun. */
11391 cum->sysv_gregno = gregno + n_words;
11392 }
11393
11394 if (TARGET_DEBUG_ARG)
11395 {
11396 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11397 cum->words, cum->fregno);
11398 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11399 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11400 fprintf (stderr, "mode = %4s, named = %d\n",
11401 GET_MODE_NAME (mode), named);
11402 }
11403 }
11404 else
11405 {
11406 int n_words = rs6000_arg_size (mode, type);
11407 int start_words = cum->words;
11408 int align_words = rs6000_parm_start (mode, type, start_words);
11409
11410 cum->words = align_words + n_words;
11411
11412 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11413 {
11414 /* _Decimal128 must be passed in an even/odd float register pair.
11415 This assumes that the register number is odd when fregno is
11416 odd. */
11417 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11418 cum->fregno++;
11419 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11420 }
11421
11422 if (TARGET_DEBUG_ARG)
11423 {
11424 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11425 cum->words, cum->fregno);
11426 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11427 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11428 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11429 named, align_words - start_words, depth);
11430 }
11431 }
11432 }
11433
11434 static void
11435 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11436 const_tree type, bool named)
11437 {
11438 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11439 0);
11440 }
11441
11442 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11443 structure between cum->intoffset and bitpos to integer registers. */
11444
11445 static void
11446 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11447 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11448 {
11449 machine_mode mode;
11450 unsigned int regno;
11451 unsigned int startbit, endbit;
11452 int this_regno, intregs, intoffset;
11453 rtx reg;
11454
11455 if (cum->intoffset == -1)
11456 return;
11457
11458 intoffset = cum->intoffset;
11459 cum->intoffset = -1;
11460
11461 /* If this is the trailing part of a word, try to only load that
11462 much into the register. Otherwise load the whole register. Note
11463 that in the latter case we may pick up unwanted bits. It's not a
11464 problem at the moment, but we may wish to revisit this. */
11465
11466 if (intoffset % BITS_PER_WORD != 0)
11467 {
11468 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11469 if (!int_mode_for_size (bits, 0).exists (&mode))
11470 {
11471 /* We couldn't find an appropriate mode, which happens,
11472 e.g., in packed structs when there are 3 bytes to load.
11473 Move intoffset back to the beginning of the word in this
11474 case. */
11475 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11476 mode = word_mode;
11477 }
11478 }
11479 else
11480 mode = word_mode;
11481
11482 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11483 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11484 intregs = (endbit - startbit) / BITS_PER_WORD;
11485 this_regno = cum->words + intoffset / BITS_PER_WORD;
11486
11487 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11488 cum->use_stack = 1;
11489
11490 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11491 if (intregs <= 0)
11492 return;
11493
11494 intoffset /= BITS_PER_UNIT;
11495 do
11496 {
11497 regno = GP_ARG_MIN_REG + this_regno;
11498 reg = gen_rtx_REG (mode, regno);
11499 rvec[(*k)++] =
11500 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11501
11502 this_regno += 1;
11503 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11504 mode = word_mode;
11505 intregs -= 1;
11506 }
11507 while (intregs > 0);
11508 }
11509
11510 /* Recursive workhorse for the following. */
11511
11512 static void
11513 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11514 HOST_WIDE_INT startbitpos, rtx rvec[],
11515 int *k)
11516 {
11517 tree f;
11518
11519 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11520 if (TREE_CODE (f) == FIELD_DECL)
11521 {
11522 HOST_WIDE_INT bitpos = startbitpos;
11523 tree ftype = TREE_TYPE (f);
11524 machine_mode mode;
11525 if (ftype == error_mark_node)
11526 continue;
11527 mode = TYPE_MODE (ftype);
11528
11529 if (DECL_SIZE (f) != 0
11530 && tree_fits_uhwi_p (bit_position (f)))
11531 bitpos += int_bit_position (f);
11532
11533 /* ??? FIXME: else assume zero offset. */
11534
11535 if (TREE_CODE (ftype) == RECORD_TYPE)
11536 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11537 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11538 {
11539 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11540 #if 0
11541 switch (mode)
11542 {
11543 case E_SCmode: mode = SFmode; break;
11544 case E_DCmode: mode = DFmode; break;
11545 case E_TCmode: mode = TFmode; break;
11546 default: break;
11547 }
11548 #endif
11549 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11550 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11551 {
11552 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11553 && (mode == TFmode || mode == TDmode));
11554 /* Long double or _Decimal128 split over regs and memory. */
11555 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11556 cum->use_stack = 1;
11557 }
11558 rvec[(*k)++]
11559 = gen_rtx_EXPR_LIST (VOIDmode,
11560 gen_rtx_REG (mode, cum->fregno++),
11561 GEN_INT (bitpos / BITS_PER_UNIT));
11562 if (FLOAT128_2REG_P (mode))
11563 cum->fregno++;
11564 }
11565 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11566 {
11567 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11568 rvec[(*k)++]
11569 = gen_rtx_EXPR_LIST (VOIDmode,
11570 gen_rtx_REG (mode, cum->vregno++),
11571 GEN_INT (bitpos / BITS_PER_UNIT));
11572 }
11573 else if (cum->intoffset == -1)
11574 cum->intoffset = bitpos;
11575 }
11576 }
11577
11578 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11579 the register(s) to be used for each field and subfield of a struct
11580 being passed by value, along with the offset of where the
11581 register's value may be found in the block. FP fields go in FP
11582 register, vector fields go in vector registers, and everything
11583 else goes in int registers, packed as in memory.
11584
11585 This code is also used for function return values. RETVAL indicates
11586 whether this is the case.
11587
11588 Much of this is taken from the SPARC V9 port, which has a similar
11589 calling convention. */
11590
11591 static rtx
11592 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11593 bool named, bool retval)
11594 {
11595 rtx rvec[FIRST_PSEUDO_REGISTER];
11596 int k = 1, kbase = 1;
11597 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11598 /* This is a copy; modifications are not visible to our caller. */
11599 CUMULATIVE_ARGS copy_cum = *orig_cum;
11600 CUMULATIVE_ARGS *cum = &copy_cum;
11601
11602 /* Pad to a 16-byte boundary if needed. */
11603 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11604 && (cum->words % 2) != 0)
11605 cum->words++;
11606
11607 cum->intoffset = 0;
11608 cum->use_stack = 0;
11609 cum->named = named;
11610
11611 /* Put entries into rvec[] for individual FP and vector fields, and
11612 for the chunks of memory that go in int regs. Note we start at
11613 element 1; 0 is reserved for an indication of using memory, and
11614 may or may not be filled in below. */
11615 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11616 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11617
11618 /* If any part of the struct went on the stack put all of it there.
11619 This hack is because the generic code for
11620 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11621 parts of the struct are not at the beginning. */
11622 if (cum->use_stack)
11623 {
11624 if (retval)
11625 return NULL_RTX; /* doesn't go in registers at all */
11626 kbase = 0;
11627 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11628 }
11629 if (k > 1 || cum->use_stack)
11630 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11631 else
11632 return NULL_RTX;
11633 }
11634
11635 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11636
11637 static rtx
11638 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11639 int align_words)
11640 {
11641 int n_units;
11642 int i, k;
11643 rtx rvec[GP_ARG_NUM_REG + 1];
11644
11645 if (align_words >= GP_ARG_NUM_REG)
11646 return NULL_RTX;
11647
11648 n_units = rs6000_arg_size (mode, type);
11649
11650 /* Optimize the simple case where the arg fits in one gpr, except in
11651 the case of BLKmode due to assign_parms assuming that registers are
11652 BITS_PER_WORD wide. */
11653 if (n_units == 0
11654 || (n_units == 1 && mode != BLKmode))
11655 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11656
11657 k = 0;
11658 if (align_words + n_units > GP_ARG_NUM_REG)
11659 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11660 using a magic NULL_RTX component.
11661 This is not strictly correct. Only some of the arg belongs in
11662 memory, not all of it. However, the normal scheme using
11663 function_arg_partial_nregs can result in unusual subregs, eg.
11664 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11665 store the whole arg to memory is often more efficient than code
11666 to store pieces, and we know that space is available in the right
11667 place for the whole arg. */
11668 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11669
11670 i = 0;
11671 do
11672 {
11673 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11674 rtx off = GEN_INT (i++ * 4);
11675 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11676 }
11677 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11678
11679 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11680 }
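/* Illustrative: with -m32 -mpowerpc64, a DFmode argument arriving at
   align_words == 7 has n_units == 2 but only one GPR (r10) left, so the
   result is a PARALLEL of a NULL_RTX memory marker plus
   (reg:SI r10) at offset 0; the remaining half of the value goes to
   memory.  */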
11681
11682 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11683 but must also be copied into the parameter save area starting at
11684 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11685 to the GPRs and/or memory. Return the number of elements used. */
11686
11687 static int
11688 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11689 int align_words, rtx *rvec)
11690 {
11691 int k = 0;
11692
11693 if (align_words < GP_ARG_NUM_REG)
11694 {
11695 int n_words = rs6000_arg_size (mode, type);
11696
11697 if (align_words + n_words > GP_ARG_NUM_REG
11698 || mode == BLKmode
11699 || (TARGET_32BIT && TARGET_POWERPC64))
11700 {
11701 /* If this is partially on the stack, then we only
11702 include the portion actually in registers here. */
11703 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11704 int i = 0;
11705
11706 if (align_words + n_words > GP_ARG_NUM_REG)
11707 {
11708 /* Not all of the arg fits in gprs. Say that it goes in memory
11709 too, using a magic NULL_RTX component. Also see comment in
11710 rs6000_mixed_function_arg for why the normal
11711 function_arg_partial_nregs scheme doesn't work in this case. */
11712 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11713 }
11714
11715 do
11716 {
11717 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11718 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11719 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11720 }
11721 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11722 }
11723 else
11724 {
11725 /* The whole arg fits in gprs. */
11726 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11727 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11728 }
11729 }
11730 else
11731 {
11732 /* It's entirely in memory. */
11733 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11734 }
11735
11736 return k;
11737 }
11738
11739 /* RVEC is a vector of K components of an argument of mode MODE.
11740 Construct the final function_arg return value from it. */
11741
11742 static rtx
11743 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11744 {
11745 gcc_assert (k >= 1);
11746
11747 /* Avoid returning a PARALLEL in the trivial cases. */
11748 if (k == 1)
11749 {
11750 if (XEXP (rvec[0], 0) == NULL_RTX)
11751 return NULL_RTX;
11752
11753 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11754 return XEXP (rvec[0], 0);
11755 }
11756
11757 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11758 }
11759
11760 /* Determine where to put an argument to a function.
11761 Value is zero to push the argument on the stack,
11762 or a hard register in which to store the argument.
11763
11764 MODE is the argument's machine mode.
11765 TYPE is the data type of the argument (as a tree).
11766 This is null for libcalls where that information may
11767 not be available.
11768 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11769 the preceding args and about the function being called. It is
11770 not modified in this routine.
11771 NAMED is nonzero if this argument is a named parameter
11772 (otherwise it is an extra parameter matching an ellipsis).
11773
11774 On RS/6000 the first eight words of non-FP are normally in registers
11775 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11776 Under V.4, the first 8 FP args are in registers.
11777
11778 If this is floating-point and no prototype is specified, we use
11779 both an FP and integer register (or possibly FP reg and stack). Library
11780 functions (when CALL_LIBCALL is set) always have the proper types for args,
11781 so we can pass the FP value just in one register. emit_library_function
11782 doesn't support PARALLEL anyway.
11783
11784 Note that for args passed by reference, function_arg will be called
11785 with MODE and TYPE set to that of the pointer to the arg, not the arg
11786 itself. */
11787
11788 static rtx
11789 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11790 const_tree type, bool named)
11791 {
11792 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11793 enum rs6000_abi abi = DEFAULT_ABI;
11794 machine_mode elt_mode;
11795 int n_elts;
11796
11797 /* Return a marker indicating whether to set or clear the CR1 bit
11798 that V.4 uses to say fp args were passed in registers.
11799 Assume that we don't need the marker for software floating point,
11800 or compiler generated library calls. */
11801 if (mode == VOIDmode)
11802 {
11803 if (abi == ABI_V4
11804 && (cum->call_cookie & CALL_LIBCALL) == 0
11805 && (cum->stdarg
11806 || (cum->nargs_prototype < 0
11807 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11808 && TARGET_HARD_FLOAT)
11809 return GEN_INT (cum->call_cookie
11810 | ((cum->fregno == FP_ARG_MIN_REG)
11811 ? CALL_V4_SET_FP_ARGS
11812 : CALL_V4_CLEAR_FP_ARGS));
11813
11814 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11815 }
11816
11817 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11818
11819 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11820 {
11821 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11822 if (rslt != NULL_RTX)
11823 return rslt;
11824 /* Else fall through to usual handling. */
11825 }
11826
11827 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11828 {
11829 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11830 rtx r, off;
11831 int i, k = 0;
11832
11833 /* Do we also need to pass this argument in the parameter save area?
11834 Library support functions for IEEE 128-bit are assumed to not need the
11835 value passed both in GPRs and in vector registers. */
11836 if (TARGET_64BIT && !cum->prototype
11837 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11838 {
11839 int align_words = ROUND_UP (cum->words, 2);
11840 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11841 }
11842
11843 /* Describe where this argument goes in the vector registers. */
11844 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11845 {
11846 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11847 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11848 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11849 }
11850
11851 return rs6000_finish_function_arg (mode, rvec, k);
11852 }
11853 else if (TARGET_ALTIVEC_ABI
11854 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11855 || (type && TREE_CODE (type) == VECTOR_TYPE
11856 && int_size_in_bytes (type) == 16)))
11857 {
11858 if (named || abi == ABI_V4)
11859 return NULL_RTX;
11860 else
11861 {
11862 /* Vector parameters to varargs functions under AIX or Darwin
11863 get passed in memory and possibly also in GPRs. */
11864 int align, align_words, n_words;
11865 machine_mode part_mode;
11866
11867 /* Vector parameters must be 16-byte aligned. In 32-bit
11868 mode this means we need to take into account the offset
11869 to the parameter save area. In 64-bit mode, they just
11870 have to start on an even word, since the parameter save
11871 area is 16-byte aligned. */
11872 if (TARGET_32BIT)
11873 align = -(rs6000_parm_offset () + cum->words) & 3;
11874 else
11875 align = cum->words & 1;
11876 align_words = cum->words + align;
11877
11878 /* Out of registers? Memory, then. */
11879 if (align_words >= GP_ARG_NUM_REG)
11880 return NULL_RTX;
11881
11882 if (TARGET_32BIT && TARGET_POWERPC64)
11883 return rs6000_mixed_function_arg (mode, type, align_words);
11884
11885 /* The vector value goes in GPRs. Only the part of the
11886 value in GPRs is reported here. */
11887 part_mode = mode;
11888 n_words = rs6000_arg_size (mode, type);
11889 if (align_words + n_words > GP_ARG_NUM_REG)
11890 /* Fortunately, there are only two possibilities, the value
11891 is either wholly in GPRs or half in GPRs and half not. */
11892 part_mode = DImode;
11893
11894 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11895 }
11896 }
11897
11898 else if (abi == ABI_V4)
11899 {
11900 if (abi_v4_pass_in_fpr (mode, named))
11901 {
11902 /* _Decimal128 must use an even/odd register pair. This assumes
11903 that the register number is odd when fregno is odd. */
11904 if (mode == TDmode && (cum->fregno % 2) == 1)
11905 cum->fregno++;
11906
11907 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11908 <= FP_ARG_V4_MAX_REG)
11909 return gen_rtx_REG (mode, cum->fregno);
11910 else
11911 return NULL_RTX;
11912 }
11913 else
11914 {
11915 int n_words = rs6000_arg_size (mode, type);
11916 int gregno = cum->sysv_gregno;
11917
11918 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11919 So is any other 2-word item, such as complex int, due to a
11920 historical mistake. */
11921 if (n_words == 2)
11922 gregno += (1 - gregno) & 1;
11923
11924 /* Multi-reg args are not split between registers and stack. */
11925 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11926 return NULL_RTX;
11927
11928 if (TARGET_32BIT && TARGET_POWERPC64)
11929 return rs6000_mixed_function_arg (mode, type,
11930 gregno - GP_ARG_MIN_REG);
11931 return gen_rtx_REG (mode, gregno);
11932 }
11933 }
11934 else
11935 {
11936 int align_words = rs6000_parm_start (mode, type, cum->words);
11937
11938 /* _Decimal128 must be passed in an even/odd float register pair.
11939 This assumes that the register number is odd when fregno is odd. */
11940 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11941 cum->fregno++;
11942
11943 if (USE_FP_FOR_ARG_P (cum, elt_mode))
11944 {
11945 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11946 rtx r, off;
11947 int i, k = 0;
11948 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11949 int fpr_words;
11950
11951 /* Do we also need to pass this argument in the parameter
11952 save area? */
11953 if (type && (cum->nargs_prototype <= 0
11954 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11955 && TARGET_XL_COMPAT
11956 && align_words >= GP_ARG_NUM_REG)))
11957 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11958
11959 /* Describe where this argument goes in the fprs. */
11960 for (i = 0; i < n_elts
11961 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11962 {
11963 /* Check if the argument is split over registers and memory.
11964 This can only ever happen for long double or _Decimal128;
11965 complex types are handled via split_complex_arg. */
11966 machine_mode fmode = elt_mode;
11967 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11968 {
11969 gcc_assert (FLOAT128_2REG_P (fmode));
11970 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11971 }
11972
11973 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11974 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11975 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11976 }
11977
11978 /* If there were not enough FPRs to hold the argument, the rest
11979 usually goes into memory. However, if the current position
11980 is still within the register parameter area, a portion may
11981 actually have to go into GPRs.
11982
11983 Note that it may happen that the portion of the argument
11984 passed in the first "half" of the first GPR was already
11985 passed in the last FPR as well.
11986
11987 For unnamed arguments, we already set up GPRs to cover the
11988 whole argument in rs6000_psave_function_arg, so there is
11989 nothing further to do at this point. */
11990 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11991 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11992 && cum->nargs_prototype > 0)
11993 {
11994 static bool warned;
11995
11996 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11997 int n_words = rs6000_arg_size (mode, type);
11998
11999 align_words += fpr_words;
12000 n_words -= fpr_words;
12001
12002 do
12003 {
12004 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12005 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12006 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12007 }
12008 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12009
12010 if (!warned && warn_psabi)
12011 {
12012 warned = true;
12013 inform (input_location,
12014 "the ABI of passing homogeneous float aggregates"
12015 " has changed in GCC 5");
12016 }
12017 }
12018
12019 return rs6000_finish_function_arg (mode, rvec, k);
12020 }
12021 else if (align_words < GP_ARG_NUM_REG)
12022 {
12023 if (TARGET_32BIT && TARGET_POWERPC64)
12024 return rs6000_mixed_function_arg (mode, type, align_words);
12025
12026 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12027 }
12028 else
12029 return NULL_RTX;
12030 }
12031 }
12032 \f
12033 /* For an arg passed partly in registers and partly in memory, this is
12034 the number of bytes passed in registers. For args passed entirely in
12035 registers or entirely in memory, zero. When an arg is described by a
12036 PARALLEL, perhaps using more than one register type, this function
12037 returns the number of bytes used by the first element of the PARALLEL. */
12038
12039 static int
12040 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12041 tree type, bool named)
12042 {
12043 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12044 bool passed_in_gprs = true;
12045 int ret = 0;
12046 int align_words;
12047 machine_mode elt_mode;
12048 int n_elts;
12049
12050 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12051
12052 if (DEFAULT_ABI == ABI_V4)
12053 return 0;
12054
12055 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12056 {
12057 /* If we are passing this arg in the fixed parameter save area (gprs or
12058 memory) as well as VRs, we do not use the partial bytes mechanism;
12059 instead, rs6000_function_arg will return a PARALLEL including a memory
12060 element as necessary. Library support functions for IEEE 128-bit are
12061 assumed to not need the value passed both in GPRs and in vector
12062 registers. */
12063 if (TARGET_64BIT && !cum->prototype
12064 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12065 return 0;
12066
12067 /* Otherwise, we pass in VRs only. Check for partial copies. */
12068 passed_in_gprs = false;
12069 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12070 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12071 }
12072
12073 /* In this complicated case we just disable the partial_nregs code. */
12074 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12075 return 0;
12076
12077 align_words = rs6000_parm_start (mode, type, cum->words);
12078
12079 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12080 {
12081 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12082
12083 /* If we are passing this arg in the fixed parameter save area
12084 (gprs or memory) as well as FPRs, we do not use the partial
12085 bytes mechanism; instead, rs6000_function_arg will return a
12086 PARALLEL including a memory element as necessary. */
12087 if (type
12088 && (cum->nargs_prototype <= 0
12089 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12090 && TARGET_XL_COMPAT
12091 && align_words >= GP_ARG_NUM_REG)))
12092 return 0;
12093
12094 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12095 passed_in_gprs = false;
12096 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12097 {
12098 /* Compute number of bytes / words passed in FPRs. If there
12099 is still space available in the register parameter area
12100 *after* that amount, a part of the argument will be passed
12101 in GPRs. In that case, the total amount passed in any
12102 registers is equal to the amount that would have been passed
12103 in GPRs if everything were passed there, so we fall back to
12104 the GPR code below to compute the appropriate value. */
12105 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12106 * MIN (8, GET_MODE_SIZE (elt_mode)));
12107 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12108
12109 if (align_words + fpr_words < GP_ARG_NUM_REG)
12110 passed_in_gprs = true;
12111 else
12112 ret = fpr;
12113 }
12114 }
12115
12116 if (passed_in_gprs
12117 && align_words < GP_ARG_NUM_REG
12118 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12119 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12120
12121 if (ret != 0 && TARGET_DEBUG_ARG)
12122 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12123
12124 return ret;
12125 }
12126 \f
12127 /* A C expression that indicates when an argument must be passed by
12128 reference. If nonzero for an argument, a copy of that argument is
12129 made in memory and a pointer to the argument is passed instead of
12130 the argument itself. The pointer is passed in whatever way is
12131 appropriate for passing a pointer to that type.
12132
12133 Under V.4, aggregates and long double are passed by reference.
12134
12135 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12136 reference unless the AltiVec vector extension ABI is in force.
12137
12138 As an extension to all ABIs, variable sized types are passed by
12139 reference. */
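/* For example, under the 32-bit SVR4 ABI a call f (struct S s) always
   receives the address of a caller-made copy, whatever sizeof (struct S)
   is, while scalar arguments such as int or double are passed by
   value. */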
12140
12141 static bool
12142 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12143 machine_mode mode, const_tree type,
12144 bool named ATTRIBUTE_UNUSED)
12145 {
12146 if (!type)
12147 return 0;
12148
12149 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12150 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12151 {
12152 if (TARGET_DEBUG_ARG)
12153 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12154 return 1;
12155 }
12156
12157 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12158 {
12159 if (TARGET_DEBUG_ARG)
12160 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12161 return 1;
12162 }
12163
12164 if (int_size_in_bytes (type) < 0)
12165 {
12166 if (TARGET_DEBUG_ARG)
12167 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12168 return 1;
12169 }
12170
12171 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12172 modes only exist for GCC vector types if -maltivec. */
12173 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12174 {
12175 if (TARGET_DEBUG_ARG)
12176 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12177 return 1;
12178 }
12179
12180 /* Pass synthetic vectors in memory. */
12181 if (TREE_CODE (type) == VECTOR_TYPE
12182 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12183 {
12184 static bool warned_for_pass_big_vectors = false;
12185 if (TARGET_DEBUG_ARG)
12186 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12187 if (!warned_for_pass_big_vectors)
12188 {
12189 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12190 "non-standard ABI extension with no compatibility "
12191 "guarantee");
12192 warned_for_pass_big_vectors = true;
12193 }
12194 return 1;
12195 }
12196
12197 return 0;
12198 }
12199
12200 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12201 already processed. Return true if the parameter must be passed
12202 (fully or partially) on the stack. */
12203
12204 static bool
12205 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12206 {
12207 machine_mode mode;
12208 int unsignedp;
12209 rtx entry_parm;
12210
12211 /* Catch errors. */
12212 if (type == NULL || type == error_mark_node)
12213 return true;
12214
12215 /* Handle types with no storage requirement. */
12216 if (TYPE_MODE (type) == VOIDmode)
12217 return false;
12218
12219 /* Handle complex types. */
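/* The two identical-looking calls below are intentional: each one also
   advances ARGS_SO_FAR past the registers used by one half of the
   complex value. */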
12220 if (TREE_CODE (type) == COMPLEX_TYPE)
12221 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12222 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12223
12224 /* Handle transparent aggregates. */
12225 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12226 && TYPE_TRANSPARENT_AGGR (type))
12227 type = TREE_TYPE (first_field (type));
12228
12229 /* See if this arg was passed by invisible reference. */
12230 if (pass_by_reference (get_cumulative_args (args_so_far),
12231 TYPE_MODE (type), type, true))
12232 type = build_pointer_type (type);
12233
12234 /* Find mode as it is passed by the ABI. */
12235 unsignedp = TYPE_UNSIGNED (type);
12236 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12237
12238 /* If we must pass in stack, we need a stack. */
12239 if (rs6000_must_pass_in_stack (mode, type))
12240 return true;
12241
12242 /* If there is no incoming register, we need a stack. */
12243 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12244 if (entry_parm == NULL)
12245 return true;
12246
12247 /* Likewise if we need to pass both in registers and on the stack. */
12248 if (GET_CODE (entry_parm) == PARALLEL
12249 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12250 return true;
12251
12252 /* Also true if we're partially in registers and partially not. */
12253 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12254 return true;
12255
12256 /* Update info on where next arg arrives in registers. */
12257 rs6000_function_arg_advance (args_so_far, mode, type, true);
12258 return false;
12259 }
12260
12261 /* Return true if FUN has no prototype, has a variable argument
12262 list, or passes any parameter in memory. */
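/* This is what lets an ELFv2 caller omit the parameter save area; see
   rs6000_reg_parm_stack_space below. */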
12263
12264 static bool
12265 rs6000_function_parms_need_stack (tree fun, bool incoming)
12266 {
12267 tree fntype, result;
12268 CUMULATIVE_ARGS args_so_far_v;
12269 cumulative_args_t args_so_far;
12270
12271 if (!fun)
12272 /* Must be a libcall, all of which only use reg parms. */
12273 return false;
12274
12275 fntype = fun;
12276 if (!TYPE_P (fun))
12277 fntype = TREE_TYPE (fun);
12278
12279 /* Varargs functions need the parameter save area. */
12280 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12281 return true;
12282
12283 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12284 args_so_far = pack_cumulative_args (&args_so_far_v);
12285
12286 /* When incoming, we will have been passed the function decl.
12287 It is necessary to use the decl to handle K&R style functions,
12288 where TYPE_ARG_TYPES may not be available. */
12289 if (incoming)
12290 {
12291 gcc_assert (DECL_P (fun));
12292 result = DECL_RESULT (fun);
12293 }
12294 else
12295 result = TREE_TYPE (fntype);
12296
12297 if (result && aggregate_value_p (result, fntype))
12298 {
12299 if (!TYPE_P (result))
12300 result = TREE_TYPE (result);
12301 result = build_pointer_type (result);
12302 rs6000_parm_needs_stack (args_so_far, result);
12303 }
12304
12305 if (incoming)
12306 {
12307 tree parm;
12308
12309 for (parm = DECL_ARGUMENTS (fun);
12310 parm && parm != void_list_node;
12311 parm = TREE_CHAIN (parm))
12312 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12313 return true;
12314 }
12315 else
12316 {
12317 function_args_iterator args_iter;
12318 tree arg_type;
12319
12320 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12321 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12322 return true;
12323 }
12324
12325 return false;
12326 }
12327
12328 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12329 usually a constant depending on the ABI. However, in the ELFv2 ABI
12330 the register parameter area is optional when calling a function that
12331 has a prototype in scope, has no variable argument list, and passes
12332 all parameters in registers. */
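/* For example, a 64-bit ELFv2 call to a fully prototyped
   int f (int, int) needs no parameter save area, so this returns 0,
   while any varargs or unprototyped callee gets the full 64 bytes. */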
12333
12334 int
12335 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12336 {
12337 int reg_parm_stack_space;
12338
12339 switch (DEFAULT_ABI)
12340 {
12341 default:
12342 reg_parm_stack_space = 0;
12343 break;
12344
12345 case ABI_AIX:
12346 case ABI_DARWIN:
12347 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12348 break;
12349
12350 case ABI_ELFv2:
12351 /* ??? Recomputing this every time is a bit expensive. Is there
12352 a place to cache this information? */
12353 if (rs6000_function_parms_need_stack (fun, incoming))
12354 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12355 else
12356 reg_parm_stack_space = 0;
12357 break;
12358 }
12359
12360 return reg_parm_stack_space;
12361 }
12362
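/* Store NREGS consecutive hard registers starting at REGNO into the
   memory block X, one word (SImode for -m32, DImode for -m64) at a
   time. */
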
12363 static void
12364 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12365 {
12366 int i;
12367 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12368
12369 if (nregs == 0)
12370 return;
12371
12372 for (i = 0; i < nregs; i++)
12373 {
12374 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12375 if (reload_completed)
12376 {
12377 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12378 tem = NULL_RTX;
12379 else
12380 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12381 i * GET_MODE_SIZE (reg_mode));
12382 }
12383 else
12384 tem = replace_equiv_address (tem, XEXP (tem, 0));
12385
12386 gcc_assert (tem);
12387
12388 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12389 }
12390 }
12391 \f
12392 /* Perform any actions needed for a function that is receiving a
12393 variable number of arguments.
12394
12395 CUM is as above.
12396
12397 MODE and TYPE are the mode and type of the current parameter.
12398
12399 PRETEND_SIZE is a variable that should be set to the amount of stack
12400 that must be pushed by the prolog to pretend that our caller pushed
12401 it.
12402
12403 Normally, this macro will push all remaining incoming registers on the
12404 stack and set PRETEND_SIZE to the length of the registers pushed. */
12405
12406 static void
12407 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12408 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12409 int no_rtl)
12410 {
12411 CUMULATIVE_ARGS next_cum;
12412 int reg_size = TARGET_32BIT ? 4 : 8;
12413 rtx save_area = NULL_RTX, mem;
12414 int first_reg_offset;
12415 alias_set_type set;
12416
12417 /* Skip the last named argument. */
12418 next_cum = *get_cumulative_args (cum);
12419 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12420
12421 if (DEFAULT_ABI == ABI_V4)
12422 {
12423 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12424
12425 if (! no_rtl)
12426 {
12427 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12428 HOST_WIDE_INT offset = 0;
12429
12430 /* Try to optimize the size of the varargs save area.
12431 The ABI requires that ap.reg_save_area is doubleword
12432 aligned, but we don't need to allocate space for all
12433 the bytes, only for those to which we will actually save
12434 anything. */
12435 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12436 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12437 if (TARGET_HARD_FLOAT
12438 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12439 && cfun->va_list_fpr_size)
12440 {
12441 if (gpr_reg_num)
12442 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12443 * UNITS_PER_FP_WORD;
12444 if (cfun->va_list_fpr_size
12445 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12446 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12447 else
12448 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12449 * UNITS_PER_FP_WORD;
12450 }
12451 if (gpr_reg_num)
12452 {
12453 offset = -((first_reg_offset * reg_size) & ~7);
12454 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12455 {
12456 gpr_reg_num = cfun->va_list_gpr_size;
12457 if (reg_size == 4 && (first_reg_offset & 1))
12458 gpr_reg_num++;
12459 }
12460 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12461 }
12462 else if (fpr_size)
12463 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12464 * UNITS_PER_FP_WORD
12465 - (int) (GP_ARG_NUM_REG * reg_size);
12466
12467 if (gpr_size + fpr_size)
12468 {
12469 rtx reg_save_area
12470 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12471 gcc_assert (GET_CODE (reg_save_area) == MEM);
12472 reg_save_area = XEXP (reg_save_area, 0);
12473 if (GET_CODE (reg_save_area) == PLUS)
12474 {
12475 gcc_assert (XEXP (reg_save_area, 0)
12476 == virtual_stack_vars_rtx);
12477 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12478 offset += INTVAL (XEXP (reg_save_area, 1));
12479 }
12480 else
12481 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12482 }
12483
12484 cfun->machine->varargs_save_offset = offset;
12485 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12486 }
12487 }
12488 else
12489 {
12490 first_reg_offset = next_cum.words;
12491 save_area = crtl->args.internal_arg_pointer;
12492
12493 if (targetm.calls.must_pass_in_stack (mode, type))
12494 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12495 }
12496
12497 set = get_varargs_alias_set ();
12498 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12499 && cfun->va_list_gpr_size)
12500 {
12501 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12502
12503 if (va_list_gpr_counter_field)
12504 /* V4 va_list_gpr_size counts number of registers needed. */
12505 n_gpr = cfun->va_list_gpr_size;
12506 else
12507 /* char * va_list instead counts number of bytes needed. */
12508 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12509
12510 if (nregs > n_gpr)
12511 nregs = n_gpr;
12512
12513 mem = gen_rtx_MEM (BLKmode,
12514 plus_constant (Pmode, save_area,
12515 first_reg_offset * reg_size));
12516 MEM_NOTRAP_P (mem) = 1;
12517 set_mem_alias_set (mem, set);
12518 set_mem_align (mem, BITS_PER_WORD);
12519
12520 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12521 nregs);
12522 }
12523
12524 /* Save FP registers if needed. */
12525 if (DEFAULT_ABI == ABI_V4
12526 && TARGET_HARD_FLOAT
12527 && ! no_rtl
12528 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12529 && cfun->va_list_fpr_size)
12530 {
12531 int fregno = next_cum.fregno, nregs;
12532 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12533 rtx lab = gen_label_rtx ();
12534 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12535 * UNITS_PER_FP_WORD);
12536
12537 emit_jump_insn
12538 (gen_rtx_SET (pc_rtx,
12539 gen_rtx_IF_THEN_ELSE (VOIDmode,
12540 gen_rtx_NE (VOIDmode, cr1,
12541 const0_rtx),
12542 gen_rtx_LABEL_REF (VOIDmode, lab),
12543 pc_rtx)));
12544
12545 for (nregs = 0;
12546 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12547 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12548 {
12549 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12550 plus_constant (Pmode, save_area, off));
12551 MEM_NOTRAP_P (mem) = 1;
12552 set_mem_alias_set (mem, set);
12553 set_mem_align (mem, GET_MODE_ALIGNMENT (
12554 TARGET_HARD_FLOAT ? DFmode : SFmode));
12555 emit_move_insn (mem, gen_rtx_REG (
12556 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12557 }
12558
12559 emit_label (lab);
12560 }
12561 }
12562
12563 /* Create the va_list data type. */
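/* For ABI_V4 the record built below corresponds roughly to

     typedef struct __va_list_tag {
       unsigned char gpr;          // GPRs consumed so far (0..8)
       unsigned char fpr;          // FPRs consumed so far (0..8)
       unsigned short reserved;    // named padding
       void *overflow_arg_area;    // next argument on the stack
       void *reg_save_area;        // base of the register save block
     } __va_list_tag;

   declared as a one-element array type (a sketch for illustration; the
   field order must match rs6000_va_start and rs6000_gimplify_va_arg). */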
12564
12565 static tree
12566 rs6000_build_builtin_va_list (void)
12567 {
12568 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12569
12570 /* For AIX, prefer 'char *' because that's what the system
12571 header files like. */
12572 if (DEFAULT_ABI != ABI_V4)
12573 return build_pointer_type (char_type_node);
12574
12575 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12576 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12577 get_identifier ("__va_list_tag"), record);
12578
12579 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12580 unsigned_char_type_node);
12581 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12582 unsigned_char_type_node);
12583 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12584 every user file. */
12585 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12586 get_identifier ("reserved"), short_unsigned_type_node);
12587 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12588 get_identifier ("overflow_arg_area"),
12589 ptr_type_node);
12590 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12591 get_identifier ("reg_save_area"),
12592 ptr_type_node);
12593
12594 va_list_gpr_counter_field = f_gpr;
12595 va_list_fpr_counter_field = f_fpr;
12596
12597 DECL_FIELD_CONTEXT (f_gpr) = record;
12598 DECL_FIELD_CONTEXT (f_fpr) = record;
12599 DECL_FIELD_CONTEXT (f_res) = record;
12600 DECL_FIELD_CONTEXT (f_ovf) = record;
12601 DECL_FIELD_CONTEXT (f_sav) = record;
12602
12603 TYPE_STUB_DECL (record) = type_decl;
12604 TYPE_NAME (record) = type_decl;
12605 TYPE_FIELDS (record) = f_gpr;
12606 DECL_CHAIN (f_gpr) = f_fpr;
12607 DECL_CHAIN (f_fpr) = f_res;
12608 DECL_CHAIN (f_res) = f_ovf;
12609 DECL_CHAIN (f_ovf) = f_sav;
12610
12611 layout_type (record);
12612
12613 /* The correct type is an array type of one element. */
12614 return build_array_type (record, build_index_type (size_zero_node));
12615 }
12616
12617 /* Implement va_start. */
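/* Under ABI_V4 this conceptually expands to

     ap->gpr = <number of GPRs used by named args, capped at 8>;
     ap->fpr = <number of FPRs used by named args, capped at 8>;
     ap->overflow_arg_area = incoming_arg_ptr + words * word_size;
     ap->reg_save_area = frame_base + varargs_save_offset;

   (illustrative pseudo-C; the trees are built explicitly below). */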
12618
12619 static void
12620 rs6000_va_start (tree valist, rtx nextarg)
12621 {
12622 HOST_WIDE_INT words, n_gpr, n_fpr;
12623 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12624 tree gpr, fpr, ovf, sav, t;
12625
12626 /* Only SVR4 needs something special. */
12627 if (DEFAULT_ABI != ABI_V4)
12628 {
12629 std_expand_builtin_va_start (valist, nextarg);
12630 return;
12631 }
12632
12633 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12634 f_fpr = DECL_CHAIN (f_gpr);
12635 f_res = DECL_CHAIN (f_fpr);
12636 f_ovf = DECL_CHAIN (f_res);
12637 f_sav = DECL_CHAIN (f_ovf);
12638
12639 valist = build_simple_mem_ref (valist);
12640 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12641 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12642 f_fpr, NULL_TREE);
12643 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12644 f_ovf, NULL_TREE);
12645 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12646 f_sav, NULL_TREE);
12647
12648 /* Count number of gp and fp argument registers used. */
12649 words = crtl->args.info.words;
12650 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12651 GP_ARG_NUM_REG);
12652 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12653 FP_ARG_NUM_REG);
12654
12655 if (TARGET_DEBUG_ARG)
12656 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12657 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12658 words, n_gpr, n_fpr);
12659
12660 if (cfun->va_list_gpr_size)
12661 {
12662 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12663 build_int_cst (NULL_TREE, n_gpr));
12664 TREE_SIDE_EFFECTS (t) = 1;
12665 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12666 }
12667
12668 if (cfun->va_list_fpr_size)
12669 {
12670 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12671 build_int_cst (NULL_TREE, n_fpr));
12672 TREE_SIDE_EFFECTS (t) = 1;
12673 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12674
12675 #ifdef HAVE_AS_GNU_ATTRIBUTE
12676 if (call_ABI_of_interest (cfun->decl))
12677 rs6000_passes_float = true;
12678 #endif
12679 }
12680
12681 /* Find the overflow area. */
12682 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12683 if (words != 0)
12684 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12685 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12686 TREE_SIDE_EFFECTS (t) = 1;
12687 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12688
12689 /* If there were no va_arg invocations, don't set up the register
12690 save area. */
12691 if (!cfun->va_list_gpr_size
12692 && !cfun->va_list_fpr_size
12693 && n_gpr < GP_ARG_NUM_REG
12694 && n_fpr < FP_ARG_V4_MAX_REG)
12695 return;
12696
12697 /* Find the register save area. */
12698 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12699 if (cfun->machine->varargs_save_offset)
12700 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12701 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12702 TREE_SIDE_EFFECTS (t) = 1;
12703 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12704 }
12705
12706 /* Implement va_arg. */
12707
12708 static tree
12709 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12710 gimple_seq *post_p)
12711 {
12712 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12713 tree gpr, fpr, ovf, sav, reg, t, u;
12714 int size, rsize, n_reg, sav_ofs, sav_scale;
12715 tree lab_false, lab_over, addr;
12716 int align;
12717 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12718 int regalign = 0;
12719 gimple *stmt;
12720
12721 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12722 {
12723 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12724 return build_va_arg_indirect_ref (t);
12725 }
12726
12727 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12728 earlier version of gcc, with the property that it always applied alignment
12729 adjustments to the va-args (even for zero-sized types). The cheapest way
12730 to deal with this is to replicate the effect of the part of
12731 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12732 of relevance.
12733 We don't need to check for pass-by-reference because of the test above.
12734 We can return a simplifed answer, since we know there's no offset to add. */
12735
12736 if (((TARGET_MACHO
12737 && rs6000_darwin64_abi)
12738 || DEFAULT_ABI == ABI_ELFv2
12739 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12740 && integer_zerop (TYPE_SIZE (type)))
12741 {
12742 unsigned HOST_WIDE_INT align, boundary;
12743 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12744 align = PARM_BOUNDARY / BITS_PER_UNIT;
12745 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12746 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12747 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12748 boundary /= BITS_PER_UNIT;
12749 if (boundary > align)
12750 {
12751 tree t;
12752 /* This updates arg ptr by the amount that would be necessary
12753 to align the zero-sized (but not zero-alignment) item. */
12754 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12755 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12756 gimplify_and_add (t, pre_p);
12757
12758 t = fold_convert (sizetype, valist_tmp);
12759 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12760 fold_convert (TREE_TYPE (valist),
12761 fold_build2 (BIT_AND_EXPR, sizetype, t,
12762 size_int (-boundary))));
12763 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12764 gimplify_and_add (t, pre_p);
12765 }
12766 /* Since it is zero-sized there's no increment for the item itself. */
12767 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12768 return build_va_arg_indirect_ref (valist_tmp);
12769 }
12770
12771 if (DEFAULT_ABI != ABI_V4)
12772 {
12773 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12774 {
12775 tree elem_type = TREE_TYPE (type);
12776 machine_mode elem_mode = TYPE_MODE (elem_type);
12777 int elem_size = GET_MODE_SIZE (elem_mode);
12778
12779 if (elem_size < UNITS_PER_WORD)
12780 {
12781 tree real_part, imag_part;
12782 gimple_seq post = NULL;
12783
12784 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12785 &post);
12786 /* Copy the value into a temporary, lest the formal temporary
12787 be reused out from under us. */
12788 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12789 gimple_seq_add_seq (pre_p, post);
12790
12791 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12792 post_p);
12793
12794 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12795 }
12796 }
12797
12798 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12799 }
12800
12801 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12802 f_fpr = DECL_CHAIN (f_gpr);
12803 f_res = DECL_CHAIN (f_fpr);
12804 f_ovf = DECL_CHAIN (f_res);
12805 f_sav = DECL_CHAIN (f_ovf);
12806
12807 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12808 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12809 f_fpr, NULL_TREE);
12810 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12811 f_ovf, NULL_TREE);
12812 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12813 f_sav, NULL_TREE);
12814
12815 size = int_size_in_bytes (type);
12816 rsize = (size + 3) / 4;
12817 int pad = 4 * rsize - size;
12818 align = 1;
12819
12820 machine_mode mode = TYPE_MODE (type);
12821 if (abi_v4_pass_in_fpr (mode, false))
12822 {
12823 /* FP args go in FP registers, if present. */
12824 reg = fpr;
12825 n_reg = (size + 7) / 8;
12826 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12827 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12828 if (mode != SFmode && mode != SDmode)
12829 align = 8;
12830 }
12831 else
12832 {
12833 /* Otherwise into GP registers. */
12834 reg = gpr;
12835 n_reg = rsize;
12836 sav_ofs = 0;
12837 sav_scale = 4;
12838 if (n_reg == 2)
12839 align = 8;
12840 }
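/* Example (V4 with hard float): for va_arg (ap, double) we get
   reg = fpr, n_reg = 1, sav_ofs = 32 and sav_scale = 8, so while
   fpr < 8 the value is loaded from reg_save_area + 32 + fpr * 8. */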
12841
12842 /* Pull the value out of the saved registers.... */
12843
12844 lab_over = NULL;
12845 addr = create_tmp_var (ptr_type_node, "addr");
12846
12847 /* AltiVec vectors never go in registers when -mabi=altivec. */
12848 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12849 align = 16;
12850 else
12851 {
12852 lab_false = create_artificial_label (input_location);
12853 lab_over = create_artificial_label (input_location);
12854
12855 /* Long long is aligned in the registers, as is any other two-GPR
12856 item such as complex int, due to a historical mistake. */
12857 u = reg;
12858 if (n_reg == 2 && reg == gpr)
12859 {
12860 regalign = 1;
12861 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12862 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12863 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12864 unshare_expr (reg), u);
12865 }
12866 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12867 reg number is 0 for f1, so we want to make it odd. */
12868 else if (reg == fpr && mode == TDmode)
12869 {
12870 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12871 build_int_cst (TREE_TYPE (reg), 1));
12872 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12873 }
12874
12875 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12876 t = build2 (GE_EXPR, boolean_type_node, u, t);
12877 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12878 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12879 gimplify_and_add (t, pre_p);
12880
12881 t = sav;
12882 if (sav_ofs)
12883 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12884
12885 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12886 build_int_cst (TREE_TYPE (reg), n_reg));
12887 u = fold_convert (sizetype, u);
12888 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12889 t = fold_build_pointer_plus (t, u);
12890
12891 /* _Decimal32 varargs are located in the second word of the 64-bit
12892 FP register for 32-bit binaries. */
12893 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12894 t = fold_build_pointer_plus_hwi (t, size);
12895
12896 /* Args are passed right-aligned. */
12897 if (BYTES_BIG_ENDIAN)
12898 t = fold_build_pointer_plus_hwi (t, pad);
12899
12900 gimplify_assign (addr, t, pre_p);
12901
12902 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12903
12904 stmt = gimple_build_label (lab_false);
12905 gimple_seq_add_stmt (pre_p, stmt);
12906
12907 if ((n_reg == 2 && !regalign) || n_reg > 2)
12908 {
12909 /* Ensure that we don't find any more args in regs.
12910 Alignment has been taken care of for the special cases. */
12911 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12912 }
12913 }
12914
12915 /* ... otherwise out of the overflow area. */
12916
12917 /* Care for on-stack alignment if needed. */
12918 t = ovf;
12919 if (align != 1)
12920 {
12921 t = fold_build_pointer_plus_hwi (t, align - 1);
12922 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12923 build_int_cst (TREE_TYPE (t), -align));
12924 }
12925
12926 /* Args are passed right-aligned. */
12927 if (BYTES_BIG_ENDIAN)
12928 t = fold_build_pointer_plus_hwi (t, pad);
12929
12930 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12931
12932 gimplify_assign (unshare_expr (addr), t, pre_p);
12933
12934 t = fold_build_pointer_plus_hwi (t, size);
12935 gimplify_assign (unshare_expr (ovf), t, pre_p);
12936
12937 if (lab_over)
12938 {
12939 stmt = gimple_build_label (lab_over);
12940 gimple_seq_add_stmt (pre_p, stmt);
12941 }
12942
12943 if (STRICT_ALIGNMENT
12944 && (TYPE_ALIGN (type)
12945 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12946 {
12947 /* The value (of type complex double, for example) may not be
12948 aligned in memory in the saved registers, so copy via a
12949 temporary. (This is the same code as used for SPARC.) */
12950 tree tmp = create_tmp_var (type, "va_arg_tmp");
12951 tree dest_addr = build_fold_addr_expr (tmp);
12952
12953 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12954 3, dest_addr, addr, size_int (rsize * 4));
12955 TREE_ADDRESSABLE (tmp) = 1;
12956
12957 gimplify_and_add (copy, pre_p);
12958 addr = dest_addr;
12959 }
12960
12961 addr = fold_convert (ptrtype, addr);
12962 return build_va_arg_indirect_ref (addr);
12963 }
12964
12965 /* Builtins. */
12966
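/* Register the target-specific builtin NAME with type TYPE under the
   index CODE, and tag the resulting decl as const, pure or
   floating-point sensitive according to its classification in
   rs6000_builtin_info. */
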
12967 static void
12968 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12969 {
12970 tree t;
12971 unsigned classify = rs6000_builtin_info[(int)code].attr;
12972 const char *attr_string = "";
12973
12974 gcc_assert (name != NULL);
12975 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
12976
12977 if (rs6000_builtin_decls[(int)code])
12978 fatal_error (input_location,
12979 "internal error: builtin function %qs already processed",
12980 name);
12981
12982 rs6000_builtin_decls[(int)code] = t =
12983 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12984
12985 /* Set any special attributes. */
12986 if ((classify & RS6000_BTC_CONST) != 0)
12987 {
12988 /* const function, function only depends on the inputs. */
12989 TREE_READONLY (t) = 1;
12990 TREE_NOTHROW (t) = 1;
12991 attr_string = ", const";
12992 }
12993 else if ((classify & RS6000_BTC_PURE) != 0)
12994 {
12995 /* pure function, function can read global memory, but does not set any
12996 external state. */
12997 DECL_PURE_P (t) = 1;
12998 TREE_NOTHROW (t) = 1;
12999 attr_string = ", pure";
13000 }
13001 else if ((classify & RS6000_BTC_FP) != 0)
13002 {
13003 /* Function is a math function. If -frounding-math is in effect, treat the
13004 function as not reading global memory, but it can have arbitrary side
13005 effects. If it is off, then assume the function is a const function.
13006 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13007 builtin-attribute.def that is used for the math functions. */
13008 TREE_NOTHROW (t) = 1;
13009 if (flag_rounding_math)
13010 {
13011 DECL_PURE_P (t) = 1;
13012 DECL_IS_NOVOPS (t) = 1;
13013 attr_string = ", fp, pure";
13014 }
13015 else
13016 {
13017 TREE_READONLY (t) = 1;
13018 attr_string = ", fp, const";
13019 }
13020 }
13021 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13022 gcc_unreachable ();
13023
13024 if (TARGET_DEBUG_BUILTIN)
13025 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13026 (int)code, name, attr_string);
13027 }
13028
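/* The bdesc_* tables below are built with an X-macro trick: before
   each re-inclusion of rs6000-builtin.def, exactly one of the
   RS6000_BUILTIN_* macros is redefined to expand to a table row, so
   each table collects only the builtins of the matching kind. */
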
13029 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13030
13031 #undef RS6000_BUILTIN_0
13032 #undef RS6000_BUILTIN_1
13033 #undef RS6000_BUILTIN_2
13034 #undef RS6000_BUILTIN_3
13035 #undef RS6000_BUILTIN_A
13036 #undef RS6000_BUILTIN_D
13037 #undef RS6000_BUILTIN_H
13038 #undef RS6000_BUILTIN_P
13039 #undef RS6000_BUILTIN_X
13040
13041 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13042 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13043 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13044 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13045 { MASK, ICODE, NAME, ENUM },
13046
13047 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13048 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13049 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13050 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13051 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13052
13053 static const struct builtin_description bdesc_3arg[] =
13054 {
13055 #include "rs6000-builtin.def"
13056 };
13057
13058 /* DST operations: void foo (void *, const int, const char). */
13059
13060 #undef RS6000_BUILTIN_0
13061 #undef RS6000_BUILTIN_1
13062 #undef RS6000_BUILTIN_2
13063 #undef RS6000_BUILTIN_3
13064 #undef RS6000_BUILTIN_A
13065 #undef RS6000_BUILTIN_D
13066 #undef RS6000_BUILTIN_H
13067 #undef RS6000_BUILTIN_P
13068 #undef RS6000_BUILTIN_X
13069
13070 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13071 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13072 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13073 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13074 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13075 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13076 { MASK, ICODE, NAME, ENUM },
13077
13078 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13079 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13080 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13081
13082 static const struct builtin_description bdesc_dst[] =
13083 {
13084 #include "rs6000-builtin.def"
13085 };
13086
13087 /* Simple binary operations: VECc = foo (VECa, VECb). */
13088
13089 #undef RS6000_BUILTIN_0
13090 #undef RS6000_BUILTIN_1
13091 #undef RS6000_BUILTIN_2
13092 #undef RS6000_BUILTIN_3
13093 #undef RS6000_BUILTIN_A
13094 #undef RS6000_BUILTIN_D
13095 #undef RS6000_BUILTIN_H
13096 #undef RS6000_BUILTIN_P
13097 #undef RS6000_BUILTIN_X
13098
13099 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13100 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13101 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13102 { MASK, ICODE, NAME, ENUM },
13103
13104 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13105 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13106 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13107 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13108 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13109 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13110
13111 static const struct builtin_description bdesc_2arg[] =
13112 {
13113 #include "rs6000-builtin.def"
13114 };
13115
13116 #undef RS6000_BUILTIN_0
13117 #undef RS6000_BUILTIN_1
13118 #undef RS6000_BUILTIN_2
13119 #undef RS6000_BUILTIN_3
13120 #undef RS6000_BUILTIN_A
13121 #undef RS6000_BUILTIN_D
13122 #undef RS6000_BUILTIN_H
13123 #undef RS6000_BUILTIN_P
13124 #undef RS6000_BUILTIN_X
13125
13126 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13127 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13128 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13129 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13130 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13131 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13132 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13133 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13134 { MASK, ICODE, NAME, ENUM },
13135
13136 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13137
13138 /* AltiVec predicates. */
13139
13140 static const struct builtin_description bdesc_altivec_preds[] =
13141 {
13142 #include "rs6000-builtin.def"
13143 };
13144
13145 /* ABS* operations. */
13146
13147 #undef RS6000_BUILTIN_0
13148 #undef RS6000_BUILTIN_1
13149 #undef RS6000_BUILTIN_2
13150 #undef RS6000_BUILTIN_3
13151 #undef RS6000_BUILTIN_A
13152 #undef RS6000_BUILTIN_D
13153 #undef RS6000_BUILTIN_H
13154 #undef RS6000_BUILTIN_P
13155 #undef RS6000_BUILTIN_X
13156
13157 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13158 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13159 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13160 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13161 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13162 { MASK, ICODE, NAME, ENUM },
13163
13164 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13165 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13166 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13167 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13168
13169 static const struct builtin_description bdesc_abs[] =
13170 {
13171 #include "rs6000-builtin.def"
13172 };
13173
13174 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13175 foo (VECa). */
13176
13177 #undef RS6000_BUILTIN_0
13178 #undef RS6000_BUILTIN_1
13179 #undef RS6000_BUILTIN_2
13180 #undef RS6000_BUILTIN_3
13181 #undef RS6000_BUILTIN_A
13182 #undef RS6000_BUILTIN_D
13183 #undef RS6000_BUILTIN_H
13184 #undef RS6000_BUILTIN_P
13185 #undef RS6000_BUILTIN_X
13186
13187 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13188 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13189 { MASK, ICODE, NAME, ENUM },
13190
13191 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13192 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13193 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13194 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13195 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13196 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13197 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13198
13199 static const struct builtin_description bdesc_1arg[] =
13200 {
13201 #include "rs6000-builtin.def"
13202 };
13203
13204 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13205
13206 #undef RS6000_BUILTIN_0
13207 #undef RS6000_BUILTIN_1
13208 #undef RS6000_BUILTIN_2
13209 #undef RS6000_BUILTIN_3
13210 #undef RS6000_BUILTIN_A
13211 #undef RS6000_BUILTIN_D
13212 #undef RS6000_BUILTIN_H
13213 #undef RS6000_BUILTIN_P
13214 #undef RS6000_BUILTIN_X
13215
13216 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13217 { MASK, ICODE, NAME, ENUM },
13218
13219 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13220 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13221 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13222 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13223 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13224 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13225 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13226 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13227
13228 static const struct builtin_description bdesc_0arg[] =
13229 {
13230 #include "rs6000-builtin.def"
13231 };
13232
13233 /* HTM builtins. */
13234 #undef RS6000_BUILTIN_0
13235 #undef RS6000_BUILTIN_1
13236 #undef RS6000_BUILTIN_2
13237 #undef RS6000_BUILTIN_3
13238 #undef RS6000_BUILTIN_A
13239 #undef RS6000_BUILTIN_D
13240 #undef RS6000_BUILTIN_H
13241 #undef RS6000_BUILTIN_P
13242 #undef RS6000_BUILTIN_X
13243
13244 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13245 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13246 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13247 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13248 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13249 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13250 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13251 { MASK, ICODE, NAME, ENUM },
13252
13253 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13254 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13255
13256 static const struct builtin_description bdesc_htm[] =
13257 {
13258 #include "rs6000-builtin.def"
13259 };
13260
13261 #undef RS6000_BUILTIN_0
13262 #undef RS6000_BUILTIN_1
13263 #undef RS6000_BUILTIN_2
13264 #undef RS6000_BUILTIN_3
13265 #undef RS6000_BUILTIN_A
13266 #undef RS6000_BUILTIN_D
13267 #undef RS6000_BUILTIN_H
13268 #undef RS6000_BUILTIN_P
13269
13270 /* Return true if a builtin function is overloaded. */
13271 bool
13272 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13273 {
13274 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13275 }
13276
13277 const char *
13278 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13279 {
13280 return rs6000_builtin_info[(int)fncode].name;
13281 }
13282
13283 /* Expand a builtin that takes no arguments; emit its insn and return
the result in TARGET. */
13284 static rtx
13285 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13286 {
13287 rtx pat;
13288 machine_mode tmode = insn_data[icode].operand[0].mode;
13289
13290 if (icode == CODE_FOR_nothing)
13291 /* Builtin not supported on this processor. */
13292 return 0;
13293
13294 if (icode == CODE_FOR_rs6000_mffsl
13295 && rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
13296 {
13297 error ("__builtin_mffsl() not supported with -msoft-float");
13298 return const0_rtx;
13299 }
13300
13301 if (target == 0
13302 || GET_MODE (target) != tmode
13303 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13304 target = gen_reg_rtx (tmode);
13305
13306 pat = GEN_FCN (icode) (target);
13307 if (! pat)
13308 return 0;
13309 emit_insn (pat);
13310
13311 return target;
13312 }
13313
13314
13315 static rtx
13316 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13317 {
13318 rtx pat;
13319 tree arg0 = CALL_EXPR_ARG (exp, 0);
13320 tree arg1 = CALL_EXPR_ARG (exp, 1);
13321 rtx op0 = expand_normal (arg0);
13322 rtx op1 = expand_normal (arg1);
13323 machine_mode mode0 = insn_data[icode].operand[0].mode;
13324 machine_mode mode1 = insn_data[icode].operand[1].mode;
13325
13326 if (icode == CODE_FOR_nothing)
13327 /* Builtin not supported on this processor. */
13328 return 0;
13329
13330 /* If we got invalid arguments bail out before generating bad rtl. */
13331 if (arg0 == error_mark_node || arg1 == error_mark_node)
13332 return const0_rtx;
13333
13334 if (GET_CODE (op0) != CONST_INT
13335 || INTVAL (op0) > 255
13336 || INTVAL (op0) < 0)
13337 {
13338 error ("argument 1 must be an 8-bit field value");
13339 return const0_rtx;
13340 }
13341
13342 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13343 op0 = copy_to_mode_reg (mode0, op0);
13344
13345 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13346 op1 = copy_to_mode_reg (mode1, op1);
13347
13348 pat = GEN_FCN (icode) (op0, op1);
13349 if (!pat)
13350 return const0_rtx;
13351 emit_insn (pat);
13352
13353 return NULL_RTX;
13354 }
13355
13356 static rtx
13357 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13358 {
13359 rtx pat;
13360 tree arg0 = CALL_EXPR_ARG (exp, 0);
13361 rtx op0 = expand_normal (arg0);
13362
13363 if (icode == CODE_FOR_nothing)
13364 /* Builtin not supported on this processor. */
13365 return 0;
13366
13367 if (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
13368 {
13369 error ("__builtin_mtfsb0 and __builtin_mtfsb1 not supported with -msoft-float");
13370 return const0_rtx;
13371 }
13372
13373 /* If we got invalid arguments bail out before generating bad rtl. */
13374 if (arg0 == error_mark_node)
13375 return const0_rtx;
13376
13377 /* Only allow bit numbers 0 to 31. */
13378 if (!u5bit_cint_operand (op0, VOIDmode))
13379 {
13380 error ("Argument must be a constant between 0 and 31.");
13381 return const0_rtx;
13382 }
13383
13384 pat = GEN_FCN (icode) (op0);
13385 if (!pat)
13386 return const0_rtx;
13387 emit_insn (pat);
13388
13389 return NULL_RTX;
13390 }
13391
13392 static rtx
13393 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13394 {
13395 rtx pat;
13396 tree arg0 = CALL_EXPR_ARG (exp, 0);
13397 rtx op0 = expand_normal (arg0);
13398 machine_mode mode0 = insn_data[icode].operand[0].mode;
13399
13400 if (icode == CODE_FOR_nothing)
13401 /* Builtin not supported on this processor. */
13402 return 0;
13403
13404 if (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
13405 {
13406 error ("__builtin_set_fpscr_rn not supported with -msoft-float");
13407 return const0_rtx;
13408 }
13409
13410 /* If we got invalid arguments bail out before generating bad rtl. */
13411 if (arg0 == error_mark_node)
13412 return const0_rtx;
13413
13414 /* If the argument is a constant, check the range. The argument can
13415 only be a 2-bit value. Unfortunately, we can't check the range of the
13416 value at compile time if the argument is a variable. The least
13417 significant two bits of the argument, regardless of type, are used to
13418 set the rounding mode. All other bits are ignored. */
13419 if (GET_CODE (op0) == CONST_INT && !const_0_to_3_operand(op0, VOIDmode))
13420 {
13421 error ("Argument must be a value between 0 and 3.");
13422 return const0_rtx;
13423 }
13424
13425 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13426 op0 = copy_to_mode_reg (mode0, op0);
13427
13428 pat = GEN_FCN (icode) (op0);
13429 if (!pat)
13430 return const0_rtx;
13431 emit_insn (pat);
13432
13433 return NULL_RTX;
13434 }

13435 static rtx
13436 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13437 {
13438 rtx pat;
13439 tree arg0 = CALL_EXPR_ARG (exp, 0);
13440 rtx op0 = expand_normal (arg0);
13441 machine_mode mode0 = insn_data[icode].operand[0].mode;
13442
13443 if (TARGET_32BIT)
13444 /* Builtin not supported in 32-bit mode. */
13445 fatal_error (input_location,
13446 "__builtin_set_fpscr_drn is not supported in 32-bit mode.");
13447
13448 if (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
13449 {
13450 error ("__builtin_set_fpscr_drn not supported with -msoft-float");
13451 return const0_rtx;
13452 }
13453
13454 if (icode == CODE_FOR_nothing)
13455 /* Builtin not supported on this processor. */
13456 return 0;
13457
13458 /* If we got invalid arguments bail out before generating bad rtl. */
13459 if (arg0 == error_mark_node)
13460 return const0_rtx;
13461
13462 /* If the argument is a constant, check the range. The argument can
13463 only be a 3-bit value. Unfortunately, we can't check the range of the
13464 value at compile time if the argument is a variable. The least
13465 significant three bits of the argument, regardless of type, are used to
13466 set the rounding mode. All other bits are ignored. */
13467 if (GET_CODE (op0) == CONST_INT && !const_0_to_7_operand(op0, VOIDmode))
13468 {
13469 error ("Argument must be a value between 0 and 7.");
13470 return const0_rtx;
13471 }
13472
13473 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13474 op0 = copy_to_mode_reg (mode0, op0);
13475
13476 pat = GEN_FCN (icode) (op0);
13477 if (! pat)
13478 return const0_rtx;
13479 emit_insn (pat);
13480
13481 return NULL_RTX;
13482 }
13483
13484 static rtx
13485 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13486 {
13487 rtx pat;
13488 tree arg0 = CALL_EXPR_ARG (exp, 0);
13489 rtx op0 = expand_normal (arg0);
13490 machine_mode tmode = insn_data[icode].operand[0].mode;
13491 machine_mode mode0 = insn_data[icode].operand[1].mode;
13492
13493 if (icode == CODE_FOR_nothing)
13494 /* Builtin not supported on this processor. */
13495 return 0;
13496
13497 /* If we got invalid arguments bail out before generating bad rtl. */
13498 if (arg0 == error_mark_node)
13499 return const0_rtx;
13500
13501 if (icode == CODE_FOR_altivec_vspltisb
13502 || icode == CODE_FOR_altivec_vspltish
13503 || icode == CODE_FOR_altivec_vspltisw)
13504 {
13505 /* Only allow 5-bit *signed* literals. */
13506 if (GET_CODE (op0) != CONST_INT
13507 || INTVAL (op0) > 15
13508 || INTVAL (op0) < -16)
13509 {
13510 error ("argument 1 must be a 5-bit signed literal");
13511 return CONST0_RTX (tmode);
13512 }
13513 }
13514
13515 if (target == 0
13516 || GET_MODE (target) != tmode
13517 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13518 target = gen_reg_rtx (tmode);
13519
13520 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13521 op0 = copy_to_mode_reg (mode0, op0);
13522
13523 pat = GEN_FCN (icode) (target, op0);
13524 if (! pat)
13525 return 0;
13526 emit_insn (pat);
13527
13528 return target;
13529 }
13530
13531 static rtx
13532 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13533 {
13534 rtx pat, scratch1, scratch2;
13535 tree arg0 = CALL_EXPR_ARG (exp, 0);
13536 rtx op0 = expand_normal (arg0);
13537 machine_mode tmode = insn_data[icode].operand[0].mode;
13538 machine_mode mode0 = insn_data[icode].operand[1].mode;
13539
13540 /* If we have invalid arguments, bail out before generating bad rtl. */
13541 if (arg0 == error_mark_node)
13542 return const0_rtx;
13543
13544 if (target == 0
13545 || GET_MODE (target) != tmode
13546 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13547 target = gen_reg_rtx (tmode);
13548
13549 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13550 op0 = copy_to_mode_reg (mode0, op0);
13551
13552 scratch1 = gen_reg_rtx (mode0);
13553 scratch2 = gen_reg_rtx (mode0);
13554
13555 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13556 if (! pat)
13557 return 0;
13558 emit_insn (pat);
13559
13560 return target;
13561 }
13562
13563 static rtx
13564 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13565 {
13566 rtx pat;
13567 tree arg0 = CALL_EXPR_ARG (exp, 0);
13568 tree arg1 = CALL_EXPR_ARG (exp, 1);
13569 rtx op0 = expand_normal (arg0);
13570 rtx op1 = expand_normal (arg1);
13571 machine_mode tmode = insn_data[icode].operand[0].mode;
13572 machine_mode mode0 = insn_data[icode].operand[1].mode;
13573 machine_mode mode1 = insn_data[icode].operand[2].mode;
13574
13575 if (icode == CODE_FOR_nothing)
13576 /* Builtin not supported on this processor. */
13577 return 0;
13578
13579 /* If we got invalid arguments bail out before generating bad rtl. */
13580 if (arg0 == error_mark_node || arg1 == error_mark_node)
13581 return const0_rtx;
13582
13583 if (icode == CODE_FOR_unpackv1ti
13584 || icode == CODE_FOR_unpackkf
13585 || icode == CODE_FOR_unpacktf
13586 || icode == CODE_FOR_unpackif
13587 || icode == CODE_FOR_unpacktd)
13588 {
13589 /* Only allow 1-bit unsigned literals. */
13590 STRIP_NOPS (arg1);
13591 if (TREE_CODE (arg1) != INTEGER_CST
13592 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13593 {
13594 error ("argument 2 must be a 1-bit unsigned literal");
13595 return CONST0_RTX (tmode);
13596 }
13597 }
13598 else if (icode == CODE_FOR_altivec_vspltw)
13599 {
13600 /* Only allow 2-bit unsigned literals. */
13601 STRIP_NOPS (arg1);
13602 if (TREE_CODE (arg1) != INTEGER_CST
13603 || TREE_INT_CST_LOW (arg1) & ~3)
13604 {
13605 error ("argument 2 must be a 2-bit unsigned literal");
13606 return CONST0_RTX (tmode);
13607 }
13608 }
13609 else if (icode == CODE_FOR_altivec_vsplth)
13610 {
13611 /* Only allow 3-bit unsigned literals. */
13612 STRIP_NOPS (arg1);
13613 if (TREE_CODE (arg1) != INTEGER_CST
13614 || TREE_INT_CST_LOW (arg1) & ~7)
13615 {
13616 error ("argument 2 must be a 3-bit unsigned literal");
13617 return CONST0_RTX (tmode);
13618 }
13619 }
13620 else if (icode == CODE_FOR_altivec_vspltb)
13621 {
13622 /* Only allow 4-bit unsigned literals. */
13623 STRIP_NOPS (arg1);
13624 if (TREE_CODE (arg1) != INTEGER_CST
13625 || TREE_INT_CST_LOW (arg1) & ~15)
13626 {
13627 error ("argument 2 must be a 4-bit unsigned literal");
13628 return CONST0_RTX (tmode);
13629 }
13630 }
13631 else if (icode == CODE_FOR_altivec_vcfux
13632 || icode == CODE_FOR_altivec_vcfsx
13633 || icode == CODE_FOR_altivec_vctsxs
13634 || icode == CODE_FOR_altivec_vctuxs)
13635 {
13636 /* Only allow 5-bit unsigned literals. */
13637 STRIP_NOPS (arg1);
13638 if (TREE_CODE (arg1) != INTEGER_CST
13639 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13640 {
13641 error ("argument 2 must be a 5-bit unsigned literal");
13642 return CONST0_RTX (tmode);
13643 }
13644 }
13645 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13646 || icode == CODE_FOR_dfptstsfi_lt_dd
13647 || icode == CODE_FOR_dfptstsfi_gt_dd
13648 || icode == CODE_FOR_dfptstsfi_unordered_dd
13649 || icode == CODE_FOR_dfptstsfi_eq_td
13650 || icode == CODE_FOR_dfptstsfi_lt_td
13651 || icode == CODE_FOR_dfptstsfi_gt_td
13652 || icode == CODE_FOR_dfptstsfi_unordered_td)
13653 {
13654 /* Only allow 6-bit unsigned literals. */
13655 STRIP_NOPS (arg0);
13656 if (TREE_CODE (arg0) != INTEGER_CST
13657 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13658 {
13659 error ("argument 1 must be a 6-bit unsigned literal");
13660 return CONST0_RTX (tmode);
13661 }
13662 }
13663 else if (icode == CODE_FOR_xststdcqp_kf
13664 || icode == CODE_FOR_xststdcqp_tf
13665 || icode == CODE_FOR_xststdcdp
13666 || icode == CODE_FOR_xststdcsp
13667 || icode == CODE_FOR_xvtstdcdp
13668 || icode == CODE_FOR_xvtstdcsp)
13669 {
13670 /* Only allow 7-bit unsigned literals. */
13671 STRIP_NOPS (arg1);
13672 if (TREE_CODE (arg1) != INTEGER_CST
13673 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13674 {
13675 error ("argument 2 must be a 7-bit unsigned literal");
13676 return CONST0_RTX (tmode);
13677 }
13678 }
13679
13680 if (target == 0
13681 || GET_MODE (target) != tmode
13682 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13683 target = gen_reg_rtx (tmode);
13684
13685 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13686 op0 = copy_to_mode_reg (mode0, op0);
13687 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13688 op1 = copy_to_mode_reg (mode1, op1);
13689
13690 pat = GEN_FCN (icode) (target, op0, op1);
13691 if (! pat)
13692 return 0;
13693 emit_insn (pat);
13694
13695 return target;
13696 }
13697
13698 static rtx
13699 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13700 {
13701 rtx pat, scratch;
13702 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13703 tree arg0 = CALL_EXPR_ARG (exp, 1);
13704 tree arg1 = CALL_EXPR_ARG (exp, 2);
13705 rtx op0 = expand_normal (arg0);
13706 rtx op1 = expand_normal (arg1);
13707 machine_mode tmode = SImode;
13708 machine_mode mode0 = insn_data[icode].operand[1].mode;
13709 machine_mode mode1 = insn_data[icode].operand[2].mode;
13710 int cr6_form_int;
13711
13712 if (TREE_CODE (cr6_form) != INTEGER_CST)
13713 {
13714 error ("argument 1 of %qs must be a constant",
13715 "__builtin_altivec_predicate");
13716 return const0_rtx;
13717 }
13718 else
13719 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13720
13721 gcc_assert (mode0 == mode1);
13722
13723 /* If we have invalid arguments, bail out before generating bad rtl. */
13724 if (arg0 == error_mark_node || arg1 == error_mark_node)
13725 return const0_rtx;
13726
13727 if (target == 0
13728 || GET_MODE (target) != tmode
13729 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13730 target = gen_reg_rtx (tmode);
13731
13732 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13733 op0 = copy_to_mode_reg (mode0, op0);
13734 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13735 op1 = copy_to_mode_reg (mode1, op1);
13736
13737 /* Note that for many of the relevant operations (e.g. cmpne or
13738 cmpeq) with float or double operands, it makes more sense for the
13739 mode of the allocated scratch register to be a vector of
13740 integers. But the choice to copy the mode of operand 0 was made
13741 long ago and there are no plans to change it. */
13742 scratch = gen_reg_rtx (mode0);
13743
13744 pat = GEN_FCN (icode) (scratch, op0, op1);
13745 if (! pat)
13746 return 0;
13747 emit_insn (pat);
13748
13749 /* The vec_any* and vec_all* predicates use the same opcodes for two
13750 different operations, but the bits in CR6 will be different
13751 depending on what information we want. So we have to play tricks
13752 with CR6 to get the right bits out.
13753
13754 If you think this is disgusting, look at the specs for the
13755 AltiVec predicates. */
13756
13757 switch (cr6_form_int)
13758 {
13759 case 0:
13760 emit_insn (gen_cr6_test_for_zero (target));
13761 break;
13762 case 1:
13763 emit_insn (gen_cr6_test_for_zero_reverse (target));
13764 break;
13765 case 2:
13766 emit_insn (gen_cr6_test_for_lt (target));
13767 break;
13768 case 3:
13769 emit_insn (gen_cr6_test_for_lt_reverse (target));
13770 break;
13771 default:
13772 error ("argument 1 of %qs is out of range",
13773 "__builtin_altivec_predicate");
13774 break;
13775 }
13776
13777 return target;
13778 }
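/* Editorial sketch (not compiler code): the user-level intrinsics that
   reach this expander, assuming -maltivec and <altivec.h>.  The cr6_form
   argument selects which CR6 bit, and in which sense, is tested after
   the compare:

     #include <altivec.h>

     int all_eq (vector signed int a, vector signed int b)
     {
       return vec_all_eq (a, b);   // one vcmpequw. plus a CR6 test
     }

     int any_eq (vector signed int a, vector signed int b)
     {
       return vec_any_eq (a, b);   // same compare, different CR6 bit
     }
*/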
13779
13780 rtx
13781 swap_endian_selector_for_mode (machine_mode mode)
13782 {
13783 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13784 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13785 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13786 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13787
13788 unsigned int *swaparray, i;
13789 rtx perm[16];
13790
13791 switch (mode)
13792 {
13793 case E_V1TImode:
13794 swaparray = swap1;
13795 break;
13796 case E_V2DFmode:
13797 case E_V2DImode:
13798 swaparray = swap2;
13799 break;
13800 case E_V4SFmode:
13801 case E_V4SImode:
13802 swaparray = swap4;
13803 break;
13804 case E_V8HImode:
13805 swaparray = swap8;
13806 break;
13807 default:
13808 gcc_unreachable ();
13809 }
13810
13811 for (i = 0; i < 16; ++i)
13812 perm[i] = GEN_INT (swaparray[i]);
13813
13814 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13815 gen_rtvec_v (16, perm)));
13816 }
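/* Editorial example: for V4SImode the constant built above is the byte
   selector {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}, i.e. a vperm
   control that byte-reverses each 32-bit element.  A user-level sketch
   of the same permutation, assuming <altivec.h> and a vector v in scope:

     vector unsigned char sel =
       { 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12 };
     vector unsigned int swapped = vec_perm (v, v, sel);
*/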
13817
13818 static rtx
13819 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13820 {
13821 rtx pat, addr;
13822 tree arg0 = CALL_EXPR_ARG (exp, 0);
13823 tree arg1 = CALL_EXPR_ARG (exp, 1);
13824 machine_mode tmode = insn_data[icode].operand[0].mode;
13825 machine_mode mode0 = Pmode;
13826 machine_mode mode1 = Pmode;
13827 rtx op0 = expand_normal (arg0);
13828 rtx op1 = expand_normal (arg1);
13829
13830 if (icode == CODE_FOR_nothing)
13831 /* Builtin not supported on this processor. */
13832 return 0;
13833
13834 /* If we got invalid arguments bail out before generating bad rtl. */
13835 if (arg0 == error_mark_node || arg1 == error_mark_node)
13836 return const0_rtx;
13837
13838 if (target == 0
13839 || GET_MODE (target) != tmode
13840 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13841 target = gen_reg_rtx (tmode);
13842
13843 op1 = copy_to_mode_reg (mode1, op1);
13844
13845 /* For LVX, express the RTL accurately by ANDing the address with -16.
13846 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13847 so the raw address is fine. */
13848 if (icode == CODE_FOR_altivec_lvx_v1ti
13849 || icode == CODE_FOR_altivec_lvx_v2df
13850 || icode == CODE_FOR_altivec_lvx_v2di
13851 || icode == CODE_FOR_altivec_lvx_v4sf
13852 || icode == CODE_FOR_altivec_lvx_v4si
13853 || icode == CODE_FOR_altivec_lvx_v8hi
13854 || icode == CODE_FOR_altivec_lvx_v16qi)
13855 {
13856 rtx rawaddr;
13857 if (op0 == const0_rtx)
13858 rawaddr = op1;
13859 else
13860 {
13861 op0 = copy_to_mode_reg (mode0, op0);
13862 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13863 }
13864 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13865 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13866
13867 emit_insn (gen_rtx_SET (target, addr));
13868 }
13869 else
13870 {
13871 if (op0 == const0_rtx)
13872 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13873 else
13874 {
13875 op0 = copy_to_mode_reg (mode0, op0);
13876 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13877 gen_rtx_PLUS (Pmode, op1, op0));
13878 }
13879
13880 pat = GEN_FCN (icode) (target, addr);
13881 if (! pat)
13882 return 0;
13883 emit_insn (pat);
13884 }
13885
13886 return target;
13887 }
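/* Editorial note: the AND with -16 above models the LVX hardware
   behavior of ignoring the low four address bits, so the load is always
   16-byte aligned.  A minimal user-level sketch, assuming <altivec.h>:

     vector signed int load16 (const signed int *p)
     {
       return vec_ld (0, p);   // loads from p rounded down to 16 bytes
     }
*/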
13888
13889 static rtx
13890 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13891 {
13892 rtx pat;
13893 tree arg0 = CALL_EXPR_ARG (exp, 0);
13894 tree arg1 = CALL_EXPR_ARG (exp, 1);
13895 tree arg2 = CALL_EXPR_ARG (exp, 2);
13896 rtx op0 = expand_normal (arg0);
13897 rtx op1 = expand_normal (arg1);
13898 rtx op2 = expand_normal (arg2);
13899 machine_mode mode0 = insn_data[icode].operand[0].mode;
13900 machine_mode mode1 = insn_data[icode].operand[1].mode;
13901 machine_mode mode2 = insn_data[icode].operand[2].mode;
13902
13903 if (icode == CODE_FOR_nothing)
13904 /* Builtin not supported on this processor. */
13905 return NULL_RTX;
13906
13907 /* If we got invalid arguments bail out before generating bad rtl. */
13908 if (arg0 == error_mark_node
13909 || arg1 == error_mark_node
13910 || arg2 == error_mark_node)
13911 return NULL_RTX;
13912
13913 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13914 op0 = copy_to_mode_reg (mode0, op0);
13915 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13916 op1 = copy_to_mode_reg (mode1, op1);
13917 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13918 op2 = copy_to_mode_reg (mode2, op2);
13919
13920 pat = GEN_FCN (icode) (op0, op1, op2);
13921 if (pat)
13922 emit_insn (pat);
13923
13924 return NULL_RTX;
13925 }
13926
13927 static rtx
13928 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13929 {
13930 tree arg0 = CALL_EXPR_ARG (exp, 0);
13931 tree arg1 = CALL_EXPR_ARG (exp, 1);
13932 tree arg2 = CALL_EXPR_ARG (exp, 2);
13933 rtx op0 = expand_normal (arg0);
13934 rtx op1 = expand_normal (arg1);
13935 rtx op2 = expand_normal (arg2);
13936 rtx pat, addr, rawaddr;
13937 machine_mode tmode = insn_data[icode].operand[0].mode;
13938 machine_mode smode = insn_data[icode].operand[1].mode;
13939 machine_mode mode1 = Pmode;
13940 machine_mode mode2 = Pmode;
13941
13942 /* Invalid arguments; bail out before generating bad rtl. */
13943 if (arg0 == error_mark_node
13944 || arg1 == error_mark_node
13945 || arg2 == error_mark_node)
13946 return const0_rtx;
13947
13948 op2 = copy_to_mode_reg (mode2, op2);
13949
13950 /* For STVX, express the RTL accurately by ANDing the address with -16.
13951 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13952 so the raw address is fine. */
13953 if (icode == CODE_FOR_altivec_stvx_v2df
13954 || icode == CODE_FOR_altivec_stvx_v2di
13955 || icode == CODE_FOR_altivec_stvx_v4sf
13956 || icode == CODE_FOR_altivec_stvx_v4si
13957 || icode == CODE_FOR_altivec_stvx_v8hi
13958 || icode == CODE_FOR_altivec_stvx_v16qi)
13959 {
13960 if (op1 == const0_rtx)
13961 rawaddr = op2;
13962 else
13963 {
13964 op1 = copy_to_mode_reg (mode1, op1);
13965 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13966 }
13967
13968 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13969 addr = gen_rtx_MEM (tmode, addr);
13970
13971 op0 = copy_to_mode_reg (tmode, op0);
13972
13973 emit_insn (gen_rtx_SET (addr, op0));
13974 }
13975 else
13976 {
13977 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13978 op0 = copy_to_mode_reg (smode, op0);
13979
13980 if (op1 == const0_rtx)
13981 addr = gen_rtx_MEM (tmode, op2);
13982 else
13983 {
13984 op1 = copy_to_mode_reg (mode1, op1);
13985 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13986 }
13987
13988 pat = GEN_FCN (icode) (addr, op0);
13989 if (pat)
13990 emit_insn (pat);
13991 }
13992
13993 return NULL_RTX;
13994 }
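/* The store side mirrors the load side above; a user-level sketch,
   assuming <altivec.h>:

     void store16 (vector signed int v, signed int *p)
     {
       vec_st (v, 0, p);   // STVX: address masked to 16-byte alignment
     }
*/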
13995
13996 /* Return the appropriate SPR number associated with the given builtin. */
13997 static inline HOST_WIDE_INT
13998 htm_spr_num (enum rs6000_builtins code)
13999 {
14000 if (code == HTM_BUILTIN_GET_TFHAR
14001 || code == HTM_BUILTIN_SET_TFHAR)
14002 return TFHAR_SPR;
14003 else if (code == HTM_BUILTIN_GET_TFIAR
14004 || code == HTM_BUILTIN_SET_TFIAR)
14005 return TFIAR_SPR;
14006 else if (code == HTM_BUILTIN_GET_TEXASR
14007 || code == HTM_BUILTIN_SET_TEXASR)
14008 return TEXASR_SPR;
14009 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14010 || code == HTM_BUILTIN_SET_TEXASRU);
14011 return TEXASRU_SPR;
14012 }
14013
14014 /* Return the appropriate SPR regno associated with the given builtin. */
14015 static inline HOST_WIDE_INT
14016 htm_spr_regno (enum rs6000_builtins code)
14017 {
14018 if (code == HTM_BUILTIN_GET_TFHAR
14019 || code == HTM_BUILTIN_SET_TFHAR)
14020 return TFHAR_REGNO;
14021 else if (code == HTM_BUILTIN_GET_TFIAR
14022 || code == HTM_BUILTIN_SET_TFIAR)
14023 return TFIAR_REGNO;
14024 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14025 || code == HTM_BUILTIN_SET_TEXASR
14026 || code == HTM_BUILTIN_GET_TEXASRU
14027 || code == HTM_BUILTIN_SET_TEXASRU);
14028 return TEXASR_REGNO;
14029 }
14030
14031 /* Return the correct ICODE value depending on whether we are
14032 setting or reading the HTM SPRs. */
14033 static inline enum insn_code
14034 rs6000_htm_spr_icode (bool nonvoid)
14035 {
14036 if (nonvoid)
14037 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14038 else
14039 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14040 }
14041
14042 /* Expand the HTM builtin in EXP and store the result in TARGET.
14043 Store true in *EXPANDEDP if we found a builtin to expand. */
14044 static rtx
14045 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14046 {
14047 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14048 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14049 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14050 const struct builtin_description *d;
14051 size_t i;
14052
14053 *expandedp = true;
14054
14055 if (!TARGET_POWERPC64
14056 && (fcode == HTM_BUILTIN_TABORTDC
14057 || fcode == HTM_BUILTIN_TABORTDCI))
14058 {
14059 size_t uns_fcode = (size_t)fcode;
14060 const char *name = rs6000_builtin_info[uns_fcode].name;
14061 error ("builtin %qs is only valid in 64-bit mode", name);
14062 return const0_rtx;
14063 }
14064
14065 /* Expand the HTM builtins. */
14066 d = bdesc_htm;
14067 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14068 if (d->code == fcode)
14069 {
14070 rtx op[MAX_HTM_OPERANDS], pat;
14071 int nopnds = 0;
14072 tree arg;
14073 call_expr_arg_iterator iter;
14074 unsigned attr = rs6000_builtin_info[fcode].attr;
14075 enum insn_code icode = d->icode;
14076 const struct insn_operand_data *insn_op;
14077 bool uses_spr = (attr & RS6000_BTC_SPR);
14078 rtx cr = NULL_RTX;
14079
14080 if (uses_spr)
14081 icode = rs6000_htm_spr_icode (nonvoid);
14082 insn_op = &insn_data[icode].operand[0];
14083
14084 if (nonvoid)
14085 {
14086 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14087 if (!target
14088 || GET_MODE (target) != tmode
14089 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14090 target = gen_reg_rtx (tmode);
14091 if (uses_spr)
14092 op[nopnds++] = target;
14093 }
14094
14095 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14096 {
14097 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14098 return const0_rtx;
14099
14100 insn_op = &insn_data[icode].operand[nopnds];
14101
14102 op[nopnds] = expand_normal (arg);
14103
14104 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14105 {
14106 if (!strcmp (insn_op->constraint, "n"))
14107 {
14108 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14109 if (!CONST_INT_P (op[nopnds]))
14110 error ("argument %d must be an unsigned literal", arg_num);
14111 else
14112 error ("argument %d is an unsigned literal that is "
14113 "out of range", arg_num);
14114 return const0_rtx;
14115 }
14116 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14117 }
14118
14119 nopnds++;
14120 }
14121
14122 /* Handle the builtins for extended mnemonics. These accept
14123 no arguments, but map to builtins that take arguments. */
14124 switch (fcode)
14125 {
14126 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14127 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14128 op[nopnds++] = GEN_INT (1);
14129 if (flag_checking)
14130 attr |= RS6000_BTC_UNARY;
14131 break;
14132 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14133 op[nopnds++] = GEN_INT (0);
14134 if (flag_checking)
14135 attr |= RS6000_BTC_UNARY;
14136 break;
14137 default:
14138 break;
14139 }
14140
14141 /* If this builtin accesses SPRs, then pass in the appropriate
14142 SPR number and SPR regno as the last two operands. */
14143 if (uses_spr)
14144 {
14145 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14146 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14147 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14148 }
14149 /* If this builtin accesses a CR, then pass in a scratch
14150 CR as the last operand. */
14151 else if (attr & RS6000_BTC_CR)
14152 {
14153 op[nopnds++] = cr = gen_reg_rtx (CCmode);
14154 }
14155
14156 if (flag_checking)
14157 {
14158 int expected_nopnds = 0;
14159 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14160 expected_nopnds = 1;
14161 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14162 expected_nopnds = 2;
14163 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14164 expected_nopnds = 3;
14165 if (!(attr & RS6000_BTC_VOID))
14166 expected_nopnds += 1;
14167 if (uses_spr)
14168 expected_nopnds += 2;
14169
14170 gcc_assert (nopnds == expected_nopnds
14171 && nopnds <= MAX_HTM_OPERANDS);
14172 }
14173
14174 switch (nopnds)
14175 {
14176 case 1:
14177 pat = GEN_FCN (icode) (op[0]);
14178 break;
14179 case 2:
14180 pat = GEN_FCN (icode) (op[0], op[1]);
14181 break;
14182 case 3:
14183 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14184 break;
14185 case 4:
14186 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14187 break;
14188 default:
14189 gcc_unreachable ();
14190 }
14191 if (!pat)
14192 return NULL_RTX;
14193 emit_insn (pat);
14194
14195 if (attr & RS6000_BTC_CR)
14196 {
14197 if (fcode == HTM_BUILTIN_TBEGIN)
14198 {
14199 /* Emit code to set TARGET to true or false depending on
14200 whether the tbegin. instruction succeeded or failed
14201 to start a transaction. We do this by placing the 1's
14202 complement of CR's EQ bit into TARGET. */
14203 rtx scratch = gen_reg_rtx (SImode);
14204 emit_insn (gen_rtx_SET (scratch,
14205 gen_rtx_EQ (SImode, cr,
14206 const0_rtx)));
14207 emit_insn (gen_rtx_SET (target,
14208 gen_rtx_XOR (SImode, scratch,
14209 GEN_INT (1))));
14210 }
14211 else
14212 {
14213 /* Emit code to copy the 4-bit condition register field
14214 CR into the least significant end of register TARGET. */
14215 rtx scratch1 = gen_reg_rtx (SImode);
14216 rtx scratch2 = gen_reg_rtx (SImode);
14217 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14218 emit_insn (gen_movcc (subreg, cr));
14219 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14220 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14221 }
14222 }
14223
14224 if (nonvoid)
14225 return target;
14226 return const0_rtx;
14227 }
14228
14229 *expandedp = false;
14230 return NULL_RTX;
14231 }
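/* Editorial sketch of the usage pattern these HTM builtins serve; this
   mirrors the documented GCC example and assumes -mhtm.  Per the CR
   handling above, __builtin_tbegin returns nonzero when the transaction
   starts:

     static int counter;

     void bump (void)
     {
       if (__builtin_tbegin (0))
         {
           counter++;		// transactional path
           __builtin_tend (0);
         }
       else
         counter++;		// fallback after transaction failure
     }
*/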
14232
14233 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14234
14235 static rtx
14236 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14237 rtx target)
14238 {
14239 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14240 if (fcode == RS6000_BUILTIN_CPU_INIT)
14241 return const0_rtx;
14242
14243 if (target == 0 || GET_MODE (target) != SImode)
14244 target = gen_reg_rtx (SImode);
14245
14246 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14247 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14248 /* The target_clones attribute creates an ARRAY_REF instead of a
14249 STRING_CST; convert it back to a STRING_CST. */
14250 if (TREE_CODE (arg) == ARRAY_REF
14251 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14252 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14253 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14254 arg = TREE_OPERAND (arg, 0);
14255
14256 if (TREE_CODE (arg) != STRING_CST)
14257 {
14258 error ("builtin %qs only accepts a string argument",
14259 rs6000_builtin_info[(size_t) fcode].name);
14260 return const0_rtx;
14261 }
14262
14263 if (fcode == RS6000_BUILTIN_CPU_IS)
14264 {
14265 const char *cpu = TREE_STRING_POINTER (arg);
14266 rtx cpuid = NULL_RTX;
14267 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14268 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14269 {
14270 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14271 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14272 break;
14273 }
14274 if (cpuid == NULL_RTX)
14275 {
14276 /* Invalid CPU argument. */
14277 error ("cpu %qs is an invalid argument to builtin %qs",
14278 cpu, rs6000_builtin_info[(size_t) fcode].name);
14279 return const0_rtx;
14280 }
14281
14282 rtx platform = gen_reg_rtx (SImode);
14283 rtx tcbmem = gen_const_mem (SImode,
14284 gen_rtx_PLUS (Pmode,
14285 gen_rtx_REG (Pmode, TLS_REGNUM),
14286 GEN_INT (TCB_PLATFORM_OFFSET)));
14287 emit_move_insn (platform, tcbmem);
14288 emit_insn (gen_eqsi3 (target, platform, cpuid));
14289 }
14290 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14291 {
14292 const char *hwcap = TREE_STRING_POINTER (arg);
14293 rtx mask = NULL_RTX;
14294 int hwcap_offset;
14295 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14296 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14297 {
14298 mask = GEN_INT (cpu_supports_info[i].mask);
14299 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14300 break;
14301 }
14302 if (mask == NULL_RTX)
14303 {
14304 /* Invalid HWCAP argument. */
14305 error ("%s %qs is an invalid argument to builtin %qs",
14306 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14307 return const0_rtx;
14308 }
14309
14310 rtx tcb_hwcap = gen_reg_rtx (SImode);
14311 rtx tcbmem = gen_const_mem (SImode,
14312 gen_rtx_PLUS (Pmode,
14313 gen_rtx_REG (Pmode, TLS_REGNUM),
14314 GEN_INT (hwcap_offset)));
14315 emit_move_insn (tcb_hwcap, tcbmem);
14316 rtx scratch1 = gen_reg_rtx (SImode);
14317 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14318 rtx scratch2 = gen_reg_rtx (SImode);
14319 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14320 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14321 }
14322 else
14323 gcc_unreachable ();
14324
14325 /* Record that we have expanded a CPU builtin, so that we can later
14326 emit a reference to the special symbol exported by LIBC to ensure we
14327 do not link against an old LIBC that doesn't support this feature. */
14328 cpu_builtin_p = true;
14329
14330 #else
14331 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14332 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14333
14334 /* For old LIBCs, always return FALSE. */
14335 emit_move_insn (target, GEN_INT (0));
14336 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14337
14338 return target;
14339 }
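/* Editorial usage sketch of the documented builtins handled above
   (requires GLIBC 2.23+ so the TCB fields exist; "power9" and "vsx" are
   example argument strings, and the helper functions are hypothetical):

     __builtin_cpu_init ();		// nop on powerpc, kept for portability
     if (__builtin_cpu_is ("power9"))
       use_power9_path ();		// hypothetical helper
     else if (__builtin_cpu_supports ("vsx"))
       use_vsx_path ();			// hypothetical helper
*/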
14340
14341 static rtx
14342 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14343 {
14344 rtx pat;
14345 tree arg0 = CALL_EXPR_ARG (exp, 0);
14346 tree arg1 = CALL_EXPR_ARG (exp, 1);
14347 tree arg2 = CALL_EXPR_ARG (exp, 2);
14348 rtx op0 = expand_normal (arg0);
14349 rtx op1 = expand_normal (arg1);
14350 rtx op2 = expand_normal (arg2);
14351 machine_mode tmode = insn_data[icode].operand[0].mode;
14352 machine_mode mode0 = insn_data[icode].operand[1].mode;
14353 machine_mode mode1 = insn_data[icode].operand[2].mode;
14354 machine_mode mode2 = insn_data[icode].operand[3].mode;
14355
14356 if (icode == CODE_FOR_nothing)
14357 /* Builtin not supported on this processor. */
14358 return 0;
14359
14360 /* If we got invalid arguments bail out before generating bad rtl. */
14361 if (arg0 == error_mark_node
14362 || arg1 == error_mark_node
14363 || arg2 == error_mark_node)
14364 return const0_rtx;
14365
14366 /* Check and prepare arguments depending on the instruction code.
14367
14368 Note that a switch statement instead of this sequence of tests
14369 would be incorrect, because several of the CODE_FOR values may
14370 be CODE_FOR_nothing, which would yield multiple case labels
14371 with identical values. We could never reach here at runtime
14372 in that situation anyway. */
14373 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14374 || icode == CODE_FOR_altivec_vsldoi_v2df
14375 || icode == CODE_FOR_altivec_vsldoi_v4si
14376 || icode == CODE_FOR_altivec_vsldoi_v8hi
14377 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14378 {
14379 /* Only allow 4-bit unsigned literals. */
14380 STRIP_NOPS (arg2);
14381 if (TREE_CODE (arg2) != INTEGER_CST
14382 || TREE_INT_CST_LOW (arg2) & ~0xf)
14383 {
14384 error ("argument 3 must be a 4-bit unsigned literal");
14385 return CONST0_RTX (tmode);
14386 }
14387 }
14388 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14389 || icode == CODE_FOR_vsx_xxpermdi_v2di
14390 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14391 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14392 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14393 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14394 || icode == CODE_FOR_vsx_xxpermdi_v4si
14395 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14396 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14397 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14398 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14399 || icode == CODE_FOR_vsx_xxsldwi_v4si
14400 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14401 || icode == CODE_FOR_vsx_xxsldwi_v2di
14402 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14403 {
14404 /* Only allow 2-bit unsigned literals. */
14405 STRIP_NOPS (arg2);
14406 if (TREE_CODE (arg2) != INTEGER_CST
14407 || TREE_INT_CST_LOW (arg2) & ~0x3)
14408 {
14409 error ("argument 3 must be a 2-bit unsigned literal");
14410 return CONST0_RTX (tmode);
14411 }
14412 }
14413 else if (icode == CODE_FOR_vsx_set_v2df
14414 || icode == CODE_FOR_vsx_set_v2di
14415 || icode == CODE_FOR_bcdadd
14416 || icode == CODE_FOR_bcdadd_lt
14417 || icode == CODE_FOR_bcdadd_eq
14418 || icode == CODE_FOR_bcdadd_gt
14419 || icode == CODE_FOR_bcdsub
14420 || icode == CODE_FOR_bcdsub_lt
14421 || icode == CODE_FOR_bcdsub_eq
14422 || icode == CODE_FOR_bcdsub_gt)
14423 {
14424 /* Only allow 1-bit unsigned literals. */
14425 STRIP_NOPS (arg2);
14426 if (TREE_CODE (arg2) != INTEGER_CST
14427 || TREE_INT_CST_LOW (arg2) & ~0x1)
14428 {
14429 error ("argument 3 must be a 1-bit unsigned literal");
14430 return CONST0_RTX (tmode);
14431 }
14432 }
14433 else if (icode == CODE_FOR_dfp_ddedpd_dd
14434 || icode == CODE_FOR_dfp_ddedpd_td)
14435 {
14436 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14437 STRIP_NOPS (arg0);
14438 if (TREE_CODE (arg0) != INTEGER_CST
14439 || TREE_INT_CST_LOW (arg0) & ~0x3)
14440 {
14441 error ("argument 1 must be 0 or 2");
14442 return CONST0_RTX (tmode);
14443 }
14444 }
14445 else if (icode == CODE_FOR_dfp_denbcd_dd
14446 || icode == CODE_FOR_dfp_denbcd_td)
14447 {
14448 /* Only allow 1-bit unsigned literals. */
14449 STRIP_NOPS (arg0);
14450 if (TREE_CODE (arg0) != INTEGER_CST
14451 || TREE_INT_CST_LOW (arg0) & ~0x1)
14452 {
14453 error ("argument 1 must be a 1-bit unsigned literal");
14454 return CONST0_RTX (tmode);
14455 }
14456 }
14457 else if (icode == CODE_FOR_dfp_dscli_dd
14458 || icode == CODE_FOR_dfp_dscli_td
14459 || icode == CODE_FOR_dfp_dscri_dd
14460 || icode == CODE_FOR_dfp_dscri_td)
14461 {
14462 /* Only allow 6-bit unsigned literals. */
14463 STRIP_NOPS (arg1);
14464 if (TREE_CODE (arg1) != INTEGER_CST
14465 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14466 {
14467 error ("argument 2 must be a 6-bit unsigned literal");
14468 return CONST0_RTX (tmode);
14469 }
14470 }
14471 else if (icode == CODE_FOR_crypto_vshasigmaw
14472 || icode == CODE_FOR_crypto_vshasigmad)
14473 {
14474 /* Check whether the 2nd and 3rd arguments are integer constants and in
14475 range and prepare arguments. */
14476 STRIP_NOPS (arg1);
14477 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14478 {
14479 error ("argument 2 must be 0 or 1");
14480 return CONST0_RTX (tmode);
14481 }
14482
14483 STRIP_NOPS (arg2);
14484 if (TREE_CODE (arg2) != INTEGER_CST
14485 || wi::geu_p (wi::to_wide (arg2), 16))
14486 {
14487 error ("argument 3 must be in the range 0..15");
14488 return CONST0_RTX (tmode);
14489 }
14490 }
14491
14492 if (target == 0
14493 || GET_MODE (target) != tmode
14494 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14495 target = gen_reg_rtx (tmode);
14496
14497 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14498 op0 = copy_to_mode_reg (mode0, op0);
14499 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14500 op1 = copy_to_mode_reg (mode1, op1);
14501 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14502 op2 = copy_to_mode_reg (mode2, op2);
14503
14504 pat = GEN_FCN (icode) (target, op0, op1, op2);
14505 if (! pat)
14506 return 0;
14507 emit_insn (pat);
14508
14509 return target;
14510 }
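/* Editorial example of the literal-range checks above: vec_sld maps to
   altivec_vsldoi_* and therefore needs a 4-bit compile-time constant
   (sketch, assuming <altivec.h> and vectors a, b in scope):

     vector signed int r = vec_sld (a, b, 3);	// OK: literal in 0..15

   Passing a non-constant (or out-of-range) shift instead produces the
   "argument 3 must be a 4-bit unsigned literal" diagnostic emitted
   above.  */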
14511
14512
14513 /* Expand the dst builtins. */
14514 static rtx
14515 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14516 bool *expandedp)
14517 {
14518 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14519 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14520 tree arg0, arg1, arg2;
14521 machine_mode mode0, mode1;
14522 rtx pat, op0, op1, op2;
14523 const struct builtin_description *d;
14524 size_t i;
14525
14526 *expandedp = false;
14527
14528 /* Handle DST variants. */
14529 d = bdesc_dst;
14530 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14531 if (d->code == fcode)
14532 {
14533 arg0 = CALL_EXPR_ARG (exp, 0);
14534 arg1 = CALL_EXPR_ARG (exp, 1);
14535 arg2 = CALL_EXPR_ARG (exp, 2);
14536 op0 = expand_normal (arg0);
14537 op1 = expand_normal (arg1);
14538 op2 = expand_normal (arg2);
14539 mode0 = insn_data[d->icode].operand[0].mode;
14540 mode1 = insn_data[d->icode].operand[1].mode;
14541
14542 /* Invalid arguments, bail out before generating bad rtl. */
14543 if (arg0 == error_mark_node
14544 || arg1 == error_mark_node
14545 || arg2 == error_mark_node)
14546 return const0_rtx;
14547
14548 *expandedp = true;
14549 STRIP_NOPS (arg2);
14550 if (TREE_CODE (arg2) != INTEGER_CST
14551 || TREE_INT_CST_LOW (arg2) & ~0x3)
14552 {
14553 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14554 return const0_rtx;
14555 }
14556
14557 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14558 op0 = copy_to_mode_reg (Pmode, op0);
14559 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14560 op1 = copy_to_mode_reg (mode1, op1);
14561
14562 pat = GEN_FCN (d->icode) (op0, op1, op2);
14563 if (pat != 0)
14564 emit_insn (pat);
14565
14566 return NULL_RTX;
14567 }
14568
14569 return NULL_RTX;
14570 }
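/* Editorial sketch: the dst family takes a 2-bit stream-ID literal as
   its last argument, matching the check above (assuming <altivec.h>;
   p and ctl are assumed in scope):

     vec_dst (p, ctl, 0);	// start a data-stream touch on stream 0
     vec_dss (0);		// stop stream 0; vec_dssall stops them all
*/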
14571
14572 /* Expand vec_init builtin. */
14573 static rtx
14574 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14575 {
14576 machine_mode tmode = TYPE_MODE (type);
14577 machine_mode inner_mode = GET_MODE_INNER (tmode);
14578 int i, n_elt = GET_MODE_NUNITS (tmode);
14579
14580 gcc_assert (VECTOR_MODE_P (tmode));
14581 gcc_assert (n_elt == call_expr_nargs (exp));
14582
14583 if (!target || !register_operand (target, tmode))
14584 target = gen_reg_rtx (tmode);
14585
14586 /* If we have a vector composed of a single element, such as V1TImode, do
14587 the initialization directly. */
14588 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14589 {
14590 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14591 emit_move_insn (target, gen_lowpart (tmode, x));
14592 }
14593 else
14594 {
14595 rtvec v = rtvec_alloc (n_elt);
14596
14597 for (i = 0; i < n_elt; ++i)
14598 {
14599 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14600 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14601 }
14602
14603 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14604 }
14605
14606 return target;
14607 }
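/* Editorial note: at the source level this expander is typically
   reached through a vector constructor with non-constant elements,
   e.g. (sketch, assuming -maltivec):

     vector int make4 (int a, int b, int c, int d)
     {
       return (vector int) {a, b, c, d};	// VEC_INIT_V4SI
     }
*/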
14608
14609 /* Return the integer constant in ARG. Constrain it to be in the range
14610 of the subparts of VEC_TYPE; issue an error if not. */
14611
14612 static int
14613 get_element_number (tree vec_type, tree arg)
14614 {
14615 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14616
14617 if (!tree_fits_uhwi_p (arg)
14618 || (elt = tree_to_uhwi (arg), elt > max))
14619 {
14620 error ("selector must be an integer constant in the range 0..%wi", max);
14621 return 0;
14622 }
14623
14624 return elt;
14625 }
14626
14627 /* Expand vec_set builtin. */
14628 static rtx
14629 altivec_expand_vec_set_builtin (tree exp)
14630 {
14631 machine_mode tmode, mode1;
14632 tree arg0, arg1, arg2;
14633 int elt;
14634 rtx op0, op1;
14635
14636 arg0 = CALL_EXPR_ARG (exp, 0);
14637 arg1 = CALL_EXPR_ARG (exp, 1);
14638 arg2 = CALL_EXPR_ARG (exp, 2);
14639
14640 tmode = TYPE_MODE (TREE_TYPE (arg0));
14641 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14642 gcc_assert (VECTOR_MODE_P (tmode));
14643
14644 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14645 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14646 elt = get_element_number (TREE_TYPE (arg0), arg2);
14647
14648 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14649 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14650
14651 op0 = force_reg (tmode, op0);
14652 op1 = force_reg (mode1, op1);
14653
14654 rs6000_expand_vector_set (op0, op1, elt);
14655
14656 return op0;
14657 }
14658
14659 /* Expand vec_ext builtin. */
14660 static rtx
14661 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14662 {
14663 machine_mode tmode, mode0;
14664 tree arg0, arg1;
14665 rtx op0;
14666 rtx op1;
14667
14668 arg0 = CALL_EXPR_ARG (exp, 0);
14669 arg1 = CALL_EXPR_ARG (exp, 1);
14670
14671 op0 = expand_normal (arg0);
14672 op1 = expand_normal (arg1);
14673
14674 /* Call get_element_number to validate arg1 if it is a constant. */
14675 if (TREE_CODE (arg1) == INTEGER_CST)
14676 (void) get_element_number (TREE_TYPE (arg0), arg1);
14677
14678 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14679 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14680 gcc_assert (VECTOR_MODE_P (mode0));
14681
14682 op0 = force_reg (mode0, op0);
14683
14684 if (optimize || !target || !register_operand (target, tmode))
14685 target = gen_reg_rtx (tmode);
14686
14687 rs6000_expand_vector_extract (target, op0, op1);
14688
14689 return target;
14690 }
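/* Editorial sketch of the user-level forms served by the vec_set and
   vec_ext expanders above (assuming <altivec.h>):

     vector signed int poke (vector signed int v, int x)
     {
       return vec_insert (x, v, 2);	// replace element 2
     }

     int peek (vector signed int v)
     {
       return vec_extract (v, 2);	// read element 2
     }
*/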
14691
14692 /* Expand the builtin in EXP and store the result in TARGET. Store
14693 true in *EXPANDEDP if we found a builtin to expand. */
14694 static rtx
14695 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14696 {
14697 const struct builtin_description *d;
14698 size_t i;
14699 enum insn_code icode;
14700 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14701 tree arg0, arg1, arg2;
14702 rtx op0, pat;
14703 machine_mode tmode, mode0;
14704 enum rs6000_builtins fcode
14705 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14706
14707 if (rs6000_overloaded_builtin_p (fcode))
14708 {
14709 *expandedp = true;
14710 error ("unresolved overload for Altivec builtin %qF", fndecl);
14711
14712 /* Given it is invalid, just generate a normal call. */
14713 return expand_call (exp, target, false);
14714 }
14715
14716 target = altivec_expand_dst_builtin (exp, target, expandedp);
14717 if (*expandedp)
14718 return target;
14719
14720 *expandedp = true;
14721
14722 switch (fcode)
14723 {
14724 case ALTIVEC_BUILTIN_STVX_V2DF:
14725 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14726 case ALTIVEC_BUILTIN_STVX_V2DI:
14727 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14728 case ALTIVEC_BUILTIN_STVX_V4SF:
14729 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14730 case ALTIVEC_BUILTIN_STVX:
14731 case ALTIVEC_BUILTIN_STVX_V4SI:
14732 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14733 case ALTIVEC_BUILTIN_STVX_V8HI:
14734 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14735 case ALTIVEC_BUILTIN_STVX_V16QI:
14736 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14737 case ALTIVEC_BUILTIN_STVEBX:
14738 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14739 case ALTIVEC_BUILTIN_STVEHX:
14740 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14741 case ALTIVEC_BUILTIN_STVEWX:
14742 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14743 case ALTIVEC_BUILTIN_STVXL_V2DF:
14744 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14745 case ALTIVEC_BUILTIN_STVXL_V2DI:
14746 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14747 case ALTIVEC_BUILTIN_STVXL_V4SF:
14748 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14749 case ALTIVEC_BUILTIN_STVXL:
14750 case ALTIVEC_BUILTIN_STVXL_V4SI:
14751 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14752 case ALTIVEC_BUILTIN_STVXL_V8HI:
14753 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14754 case ALTIVEC_BUILTIN_STVXL_V16QI:
14755 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14756
14757 case ALTIVEC_BUILTIN_STVLX:
14758 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14759 case ALTIVEC_BUILTIN_STVLXL:
14760 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14761 case ALTIVEC_BUILTIN_STVRX:
14762 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14763 case ALTIVEC_BUILTIN_STVRXL:
14764 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14765
14766 case P9V_BUILTIN_STXVL:
14767 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14768
14769 case P9V_BUILTIN_XST_LEN_R:
14770 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14771
14772 case VSX_BUILTIN_STXVD2X_V1TI:
14773 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14774 case VSX_BUILTIN_STXVD2X_V2DF:
14775 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14776 case VSX_BUILTIN_STXVD2X_V2DI:
14777 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14778 case VSX_BUILTIN_STXVW4X_V4SF:
14779 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14780 case VSX_BUILTIN_STXVW4X_V4SI:
14781 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14782 case VSX_BUILTIN_STXVW4X_V8HI:
14783 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14784 case VSX_BUILTIN_STXVW4X_V16QI:
14785 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14786
14787 /* For the following on big endian, it's ok to use any appropriate
14788 unaligned-supporting store, so use a generic expander. For
14789 little-endian, the exact element-reversing instruction must
14790 be used. */
14791 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14792 {
14793 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14794 : CODE_FOR_vsx_st_elemrev_v1ti);
14795 return altivec_expand_stv_builtin (code, exp);
14796 }
14797 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14798 {
14799 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14800 : CODE_FOR_vsx_st_elemrev_v2df);
14801 return altivec_expand_stv_builtin (code, exp);
14802 }
14803 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14804 {
14805 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14806 : CODE_FOR_vsx_st_elemrev_v2di);
14807 return altivec_expand_stv_builtin (code, exp);
14808 }
14809 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14810 {
14811 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14812 : CODE_FOR_vsx_st_elemrev_v4sf);
14813 return altivec_expand_stv_builtin (code, exp);
14814 }
14815 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14816 {
14817 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14818 : CODE_FOR_vsx_st_elemrev_v4si);
14819 return altivec_expand_stv_builtin (code, exp);
14820 }
14821 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14822 {
14823 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14824 : CODE_FOR_vsx_st_elemrev_v8hi);
14825 return altivec_expand_stv_builtin (code, exp);
14826 }
14827 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14828 {
14829 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14830 : CODE_FOR_vsx_st_elemrev_v16qi);
14831 return altivec_expand_stv_builtin (code, exp);
14832 }
14833
14834 case ALTIVEC_BUILTIN_MFVSCR:
14835 icode = CODE_FOR_altivec_mfvscr;
14836 tmode = insn_data[icode].operand[0].mode;
14837
14838 if (target == 0
14839 || GET_MODE (target) != tmode
14840 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14841 target = gen_reg_rtx (tmode);
14842
14843 pat = GEN_FCN (icode) (target);
14844 if (! pat)
14845 return 0;
14846 emit_insn (pat);
14847 return target;
14848
14849 case ALTIVEC_BUILTIN_MTVSCR:
14850 icode = CODE_FOR_altivec_mtvscr;
14851 arg0 = CALL_EXPR_ARG (exp, 0);
14852 op0 = expand_normal (arg0);
14853 mode0 = insn_data[icode].operand[0].mode;
14854
14855 /* If we got invalid arguments bail out before generating bad rtl. */
14856 if (arg0 == error_mark_node)
14857 return const0_rtx;
14858
14859 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14860 op0 = copy_to_mode_reg (mode0, op0);
14861
14862 pat = GEN_FCN (icode) (op0);
14863 if (pat)
14864 emit_insn (pat);
14865 return NULL_RTX;
14866
14867 case ALTIVEC_BUILTIN_DSSALL:
14868 emit_insn (gen_altivec_dssall ());
14869 return NULL_RTX;
14870
14871 case ALTIVEC_BUILTIN_DSS:
14872 icode = CODE_FOR_altivec_dss;
14873 arg0 = CALL_EXPR_ARG (exp, 0);
14874 STRIP_NOPS (arg0);
14875 op0 = expand_normal (arg0);
14876 mode0 = insn_data[icode].operand[0].mode;
14877
14878 /* If we got invalid arguments bail out before generating bad rtl. */
14879 if (arg0 == error_mark_node)
14880 return const0_rtx;
14881
14882 if (TREE_CODE (arg0) != INTEGER_CST
14883 || TREE_INT_CST_LOW (arg0) & ~0x3)
14884 {
14885 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14886 return const0_rtx;
14887 }
14888
14889 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14890 op0 = copy_to_mode_reg (mode0, op0);
14891
14892 emit_insn (gen_altivec_dss (op0));
14893 return NULL_RTX;
14894
14895 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14896 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14897 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14898 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14899 case VSX_BUILTIN_VEC_INIT_V2DF:
14900 case VSX_BUILTIN_VEC_INIT_V2DI:
14901 case VSX_BUILTIN_VEC_INIT_V1TI:
14902 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14903
14904 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14905 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14906 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14907 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14908 case VSX_BUILTIN_VEC_SET_V2DF:
14909 case VSX_BUILTIN_VEC_SET_V2DI:
14910 case VSX_BUILTIN_VEC_SET_V1TI:
14911 return altivec_expand_vec_set_builtin (exp);
14912
14913 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14914 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14915 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14916 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14917 case VSX_BUILTIN_VEC_EXT_V2DF:
14918 case VSX_BUILTIN_VEC_EXT_V2DI:
14919 case VSX_BUILTIN_VEC_EXT_V1TI:
14920 return altivec_expand_vec_ext_builtin (exp, target);
14921
14922 case P9V_BUILTIN_VEC_EXTRACT4B:
14923 arg1 = CALL_EXPR_ARG (exp, 1);
14924 STRIP_NOPS (arg1);
14925
14926 /* Generate a normal call if it is invalid. */
14927 if (arg1 == error_mark_node)
14928 return expand_call (exp, target, false);
14929
14930 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14931 {
14932 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14933 return expand_call (exp, target, false);
14934 }
14935 break;
14936
14937 case P9V_BUILTIN_VEC_INSERT4B:
14938 arg2 = CALL_EXPR_ARG (exp, 2);
14939 STRIP_NOPS (arg2);
14940
14941 /* Generate a normal call if it is invalid. */
14942 if (arg2 == error_mark_node)
14943 return expand_call (exp, target, false);
14944
14945 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
14946 {
14947 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
14948 return expand_call (exp, target, false);
14949 }
14950 break;
14951
14952 default:
14953 /* Fall through to the expanders below the switch. */
14954 break;
14955 }
14956
14957 /* Expand abs* operations. */
14958 d = bdesc_abs;
14959 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14960 if (d->code == fcode)
14961 return altivec_expand_abs_builtin (d->icode, exp, target);
14962
14963 /* Expand the AltiVec predicates. */
14964 d = bdesc_altivec_preds;
14965 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14966 if (d->code == fcode)
14967 return altivec_expand_predicate_builtin (d->icode, exp, target);
14968
14969 /* LV* are funky. We initialized them differently. */
14970 switch (fcode)
14971 {
14972 case ALTIVEC_BUILTIN_LVSL:
14973 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14974 exp, target, false);
14975 case ALTIVEC_BUILTIN_LVSR:
14976 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14977 exp, target, false);
14978 case ALTIVEC_BUILTIN_LVEBX:
14979 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14980 exp, target, false);
14981 case ALTIVEC_BUILTIN_LVEHX:
14982 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14983 exp, target, false);
14984 case ALTIVEC_BUILTIN_LVEWX:
14985 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14986 exp, target, false);
14987 case ALTIVEC_BUILTIN_LVXL_V2DF:
14988 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14989 exp, target, false);
14990 case ALTIVEC_BUILTIN_LVXL_V2DI:
14991 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14992 exp, target, false);
14993 case ALTIVEC_BUILTIN_LVXL_V4SF:
14994 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14995 exp, target, false);
14996 case ALTIVEC_BUILTIN_LVXL:
14997 case ALTIVEC_BUILTIN_LVXL_V4SI:
14998 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14999 exp, target, false);
15000 case ALTIVEC_BUILTIN_LVXL_V8HI:
15001 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15002 exp, target, false);
15003 case ALTIVEC_BUILTIN_LVXL_V16QI:
15004 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15005 exp, target, false);
15006 case ALTIVEC_BUILTIN_LVX_V1TI:
15007 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15008 exp, target, false);
15009 case ALTIVEC_BUILTIN_LVX_V2DF:
15010 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15011 exp, target, false);
15012 case ALTIVEC_BUILTIN_LVX_V2DI:
15013 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15014 exp, target, false);
15015 case ALTIVEC_BUILTIN_LVX_V4SF:
15016 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15017 exp, target, false);
15018 case ALTIVEC_BUILTIN_LVX:
15019 case ALTIVEC_BUILTIN_LVX_V4SI:
15020 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15021 exp, target, false);
15022 case ALTIVEC_BUILTIN_LVX_V8HI:
15023 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15024 exp, target, false);
15025 case ALTIVEC_BUILTIN_LVX_V16QI:
15026 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15027 exp, target, false);
15028 case ALTIVEC_BUILTIN_LVLX:
15029 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15030 exp, target, true);
15031 case ALTIVEC_BUILTIN_LVLXL:
15032 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15033 exp, target, true);
15034 case ALTIVEC_BUILTIN_LVRX:
15035 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15036 exp, target, true);
15037 case ALTIVEC_BUILTIN_LVRXL:
15038 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15039 exp, target, true);
15040 case VSX_BUILTIN_LXVD2X_V1TI:
15041 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15042 exp, target, false);
15043 case VSX_BUILTIN_LXVD2X_V2DF:
15044 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15045 exp, target, false);
15046 case VSX_BUILTIN_LXVD2X_V2DI:
15047 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15048 exp, target, false);
15049 case VSX_BUILTIN_LXVW4X_V4SF:
15050 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15051 exp, target, false);
15052 case VSX_BUILTIN_LXVW4X_V4SI:
15053 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15054 exp, target, false);
15055 case VSX_BUILTIN_LXVW4X_V8HI:
15056 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15057 exp, target, false);
15058 case VSX_BUILTIN_LXVW4X_V16QI:
15059 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15060 exp, target, false);
15061 /* For the following on big endian, it's ok to use any appropriate
15062 unaligned-supporting load, so use a generic expander. For
15063 little-endian, the exact element-reversing instruction must
15064 be used. */
15065 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15066 {
15067 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15068 : CODE_FOR_vsx_ld_elemrev_v2df);
15069 return altivec_expand_lv_builtin (code, exp, target, false);
15070 }
15071 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15072 {
15073 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15074 : CODE_FOR_vsx_ld_elemrev_v1ti);
15075 return altivec_expand_lv_builtin (code, exp, target, false);
15076 }
15077 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15078 {
15079 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15080 : CODE_FOR_vsx_ld_elemrev_v2di);
15081 return altivec_expand_lv_builtin (code, exp, target, false);
15082 }
15083 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15084 {
15085 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15086 : CODE_FOR_vsx_ld_elemrev_v4sf);
15087 return altivec_expand_lv_builtin (code, exp, target, false);
15088 }
15089 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15090 {
15091 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15092 : CODE_FOR_vsx_ld_elemrev_v4si);
15093 return altivec_expand_lv_builtin (code, exp, target, false);
15094 }
15095 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15096 {
15097 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15098 : CODE_FOR_vsx_ld_elemrev_v8hi);
15099 return altivec_expand_lv_builtin (code, exp, target, false);
15100 }
15101 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15102 {
15103 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15104 : CODE_FOR_vsx_ld_elemrev_v16qi);
15105 return altivec_expand_lv_builtin (code, exp, target, false);
15106 }
15107 break;
15108 default:
15109 /* Fall through to the code after the switch. */
15110 break;
15111 }
15112
15113 *expandedp = false;
15114 return NULL_RTX;
15115 }
15116
15117 /* Check whether a builtin function is supported in this target
15118 configuration. */
15119 bool
15120 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15121 {
15122 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
15123 return (fnmask & rs6000_builtin_mask) == fnmask;
15127 }
15128
15129 /* Raise an error message for a builtin function that is called without the
15130 appropriate target options being set. */
15131
15132 static void
15133 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15134 {
15135 size_t uns_fncode = (size_t) fncode;
15136 const char *name = rs6000_builtin_info[uns_fncode].name;
15137 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15138
15139 gcc_assert (name != NULL);
15140 if ((fnmask & RS6000_BTM_CELL) != 0)
15141 error ("builtin function %qs is only valid for the cell processor", name);
15142 else if ((fnmask & RS6000_BTM_VSX) != 0)
15143 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15144 else if ((fnmask & RS6000_BTM_HTM) != 0)
15145 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15146 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15147 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15148 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15149 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15150 error ("builtin function %qs requires the %qs and %qs options",
15151 name, "-mhard-dfp", "-mpower8-vector");
15152 else if ((fnmask & RS6000_BTM_DFP) != 0)
15153 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15154 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15155 error ("builtin function %qs requires the %qs option", name,
15156 "-mpower8-vector");
15157 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15158 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15159 error ("builtin function %qs requires the %qs and %qs options",
15160 name, "-mcpu=power9", "-m64");
15161 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15162 error ("builtin function %qs requires the %qs option", name,
15163 "-mcpu=power9");
15164 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15165 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15166 error ("builtin function %qs requires the %qs and %qs options",
15167 name, "-mcpu=power9", "-m64");
15168 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15169 error ("builtin function %qs requires the %qs option", name,
15170 "-mcpu=power9");
15171 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15172 {
15173 if (!TARGET_HARD_FLOAT)
15174 error ("builtin function %qs requires the %qs option", name,
15175 "-mhard-float");
15176 else
15177 error ("builtin function %qs requires the %qs option", name,
15178 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15179 }
15180 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15181 error ("builtin function %qs requires the %qs option", name,
15182 "-mhard-float");
15183 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15184 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15185 name);
15186 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15187 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
15188 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15189 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15190 error ("builtin function %qs requires the %qs (or newer), and "
15191 "%qs or %qs options",
15192 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15193 else
15194 error ("builtin function %qs is not supported with the current options",
15195 name);
15196 }
15197
15198 /* Target hook for early folding of built-ins, shamelessly stolen
15199 from ia64.c. */
15200
15201 static tree
15202 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15203 int n_args ATTRIBUTE_UNUSED,
15204 tree *args ATTRIBUTE_UNUSED,
15205 bool ignore ATTRIBUTE_UNUSED)
15206 {
15207 #ifdef SUBTARGET_FOLD_BUILTIN
15208 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15209 #else
15210 return NULL_TREE;
15211 #endif
15212 }
15213
15214 /* Helper function to sort out which built-ins may be valid without having
15215 a LHS. */
15216 static bool
15217 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15218 {
15219 switch (fn_code)
15220 {
15221 case ALTIVEC_BUILTIN_STVX_V16QI:
15222 case ALTIVEC_BUILTIN_STVX_V8HI:
15223 case ALTIVEC_BUILTIN_STVX_V4SI:
15224 case ALTIVEC_BUILTIN_STVX_V4SF:
15225 case ALTIVEC_BUILTIN_STVX_V2DI:
15226 case ALTIVEC_BUILTIN_STVX_V2DF:
15227 case VSX_BUILTIN_STXVW4X_V16QI:
15228 case VSX_BUILTIN_STXVW4X_V8HI:
15229 case VSX_BUILTIN_STXVW4X_V4SF:
15230 case VSX_BUILTIN_STXVW4X_V4SI:
15231 case VSX_BUILTIN_STXVD2X_V2DF:
15232 case VSX_BUILTIN_STXVD2X_V2DI:
15233 return true;
15234 default:
15235 return false;
15236 }
15237 }
15238
15239 /* Helper function to handle the gimple folding of a vector compare
15240 operation. This sets up true/false vectors, and uses the
15241 VEC_COND_EXPR operation.
15242 CODE indicates which comparison is to be made. (EQ, GT, ...).
15243 TYPE indicates the type of the result. */
15244 static tree
15245 fold_build_vec_cmp (tree_code code, tree type,
15246 tree arg0, tree arg1)
15247 {
15248 tree cmp_type = build_same_sized_truth_vector_type (type);
15249 tree zero_vec = build_zero_cst (type);
15250 tree minus_one_vec = build_minus_one_cst (type);
15251 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15252 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15253 }
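/* Editorial note: elementwise, the fold above implements
   res[i] = (a[i] OP b[i]) ? -1 : 0, which is why the "true" vector is
   all-ones rather than 1.  Scalar model for vec_cmpeq on V4SI (sketch;
   a, b and res are assumed int[4] arrays):

     for (int i = 0; i < 4; i++)
       res[i] = (a[i] == b[i]) ? -1 : 0;
*/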
15254
15255 /* Helper function to handle the in-between steps for the
15256 vector compare built-ins. */
15257 static void
15258 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15259 {
15260 tree arg0 = gimple_call_arg (stmt, 0);
15261 tree arg1 = gimple_call_arg (stmt, 1);
15262 tree lhs = gimple_call_lhs (stmt);
15263 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15264 gimple *g = gimple_build_assign (lhs, cmp);
15265 gimple_set_location (g, gimple_location (stmt));
15266 gsi_replace (gsi, g, true);
15267 }
15268
15269 /* Helper function to map V2DF and V4SF types to their
15270 integral equivalents (V2DI and V4SI). */
15271 tree map_to_integral_tree_type (tree input_tree_type)
15272 {
15273 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15274 return input_tree_type;
15275 else
15276 {
15277 if (types_compatible_p (TREE_TYPE (input_tree_type),
15278 TREE_TYPE (V2DF_type_node)))
15279 return V2DI_type_node;
15280 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15281 TREE_TYPE (V4SF_type_node)))
15282 return V4SI_type_node;
15283 else
15284 gcc_unreachable ();
15285 }
15286 }
15287
15288 /* Helper function to handle the vector merge[hl] built-ins. The
15289 implementation difference between the h and l versions lies in
15290 the values used when building the permute vector for a high-word
15291 versus a low-word merge, keyed off the use_high parameter. */
15292 static void
15293 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15294 {
15295 tree arg0 = gimple_call_arg (stmt, 0);
15296 tree arg1 = gimple_call_arg (stmt, 1);
15297 tree lhs = gimple_call_lhs (stmt);
15298 tree lhs_type = TREE_TYPE (lhs);
15299 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15300 int midpoint = n_elts / 2;
15301 int offset = 0;
15302
15303 if (use_high == 1)
15304 offset = midpoint;
15305
15306 /* The permute_type will match the lhs for integral types. For double and
15307 float types, the permute type needs to map to the V2 or V4 type that
15308 matches size. */
15309 tree permute_type;
15310 permute_type = map_to_integral_tree_type (lhs_type);
15311 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15312
15313 for (int i = 0; i < midpoint; i++)
15314 {
15315 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15316 offset + i));
15317 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15318 offset + n_elts + i));
15319 }
15320
15321 tree permute = elts.build ();
15322
15323 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15324 gimple_set_location (g, gimple_location (stmt));
15325 gsi_replace (gsi, g, true);
15326 }
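/* Editorial example: for a V4SI merge, the loop above produces the
   permute selector {0,4,1,5} when offset is 0 (the vec_mergeh pattern,
   in big-endian element numbering: {a[0],b[0],a[1],b[1]}) and
   {2,6,3,7} when offset is the midpoint (the vec_mergel pattern).  */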
15327
15328 /* Helper function to handle the vector merge[eo] built-ins. */
15329 static void
15330 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15331 {
15332 tree arg0 = gimple_call_arg (stmt, 0);
15333 tree arg1 = gimple_call_arg (stmt, 1);
15334 tree lhs = gimple_call_lhs (stmt);
15335 tree lhs_type = TREE_TYPE (lhs);
15336 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15337
15338 /* The permute_type will match the lhs for integral types. For double and
15339 float types, the permute type needs to map to the V2 or V4 type that
15340 matches size. */
15341 tree permute_type;
15342 permute_type = map_to_integral_tree_type (lhs_type);
15343
15344 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15345
15346 /* Build the permute vector. */
15347 for (int i = 0; i < n_elts / 2; i++)
15348 {
15349 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15350 2*i + use_odd));
15351 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15352 2*i + use_odd + n_elts));
15353 }
15354
15355 tree permute = elts.build ();
15356
15357 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15358 gimple_set_location (g, gimple_location (stmt));
15359 gsi_replace (gsi, g, true);
15360 }
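/* Editorial example: for V4SI the selector built above is {0,4,2,6}
   when use_odd is 0 (vec_mergee: {a[0],b[0],a[2],b[2]}) and {1,5,3,7}
   when use_odd is 1 (vec_mergeo).  */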
15361
15362 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15363 a constant, use rs6000_fold_builtin.) */
15364
15365 bool
15366 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15367 {
15368 gimple *stmt = gsi_stmt (*gsi);
15369 tree fndecl = gimple_call_fndecl (stmt);
15370 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15371 enum rs6000_builtins fn_code
15372 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15373 tree arg0, arg1, lhs, temp;
15374 enum tree_code bcode;
15375 gimple *g;
15376
15377 size_t uns_fncode = (size_t) fn_code;
15378 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15379 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15380 const char *fn_name2 = (icode != CODE_FOR_nothing)
15381 ? get_insn_name ((int) icode)
15382 : "nothing";
15383
15384 if (TARGET_DEBUG_BUILTIN)
15385 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15386 fn_code, fn_name1, fn_name2);
15387
15388 if (!rs6000_fold_gimple)
15389 return false;
15390
15391 /* Prevent gimple folding for code that does not have a LHS, unless it is
15392 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15393 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15394 return false;
15395
15396 /* Don't fold invalid builtins; let rs6000_expand_builtin diagnose them. */
15397 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15398 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15399 if (!func_valid_p)
15400 return false;
15401
15402 switch (fn_code)
15403 {
15404 /* Flavors of vec_add. We deliberately don't expand
15405 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15406 TImode, resulting in much poorer code generation. */
15407 case ALTIVEC_BUILTIN_VADDUBM:
15408 case ALTIVEC_BUILTIN_VADDUHM:
15409 case ALTIVEC_BUILTIN_VADDUWM:
15410 case P8V_BUILTIN_VADDUDM:
15411 case ALTIVEC_BUILTIN_VADDFP:
15412 case VSX_BUILTIN_XVADDDP:
15413 bcode = PLUS_EXPR;
15414 do_binary:
15415 arg0 = gimple_call_arg (stmt, 0);
15416 arg1 = gimple_call_arg (stmt, 1);
15417 lhs = gimple_call_lhs (stmt);
15418 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15419 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15420 {
15421 /* Ensure the binary operation is performed in a type
15422 that wraps if it is an integral type. */
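/* E.g. a vec_add on vector signed int is view-converted to the
   corresponding unsigned vector type, added there, and converted
   back, so the fold cannot introduce undefined behavior on signed
   overflow. */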
15423 gimple_seq stmts = NULL;
15424 tree type = unsigned_type_for (TREE_TYPE (lhs));
15425 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15426 type, arg0);
15427 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15428 type, arg1);
15429 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15430 type, uarg0, uarg1);
15431 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15432 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15433 build1 (VIEW_CONVERT_EXPR,
15434 TREE_TYPE (lhs), res));
15435 gsi_replace (gsi, g, true);
15436 return true;
15437 }
15438 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15439 gimple_set_location (g, gimple_location (stmt));
15440 gsi_replace (gsi, g, true);
15441 return true;
15442 /* Flavors of vec_sub. We deliberately don't expand
15443 P8V_BUILTIN_VSUBUQM. */
15444 case ALTIVEC_BUILTIN_VSUBUBM:
15445 case ALTIVEC_BUILTIN_VSUBUHM:
15446 case ALTIVEC_BUILTIN_VSUBUWM:
15447 case P8V_BUILTIN_VSUBUDM:
15448 case ALTIVEC_BUILTIN_VSUBFP:
15449 case VSX_BUILTIN_XVSUBDP:
15450 bcode = MINUS_EXPR;
15451 goto do_binary;
15452 case VSX_BUILTIN_XVMULSP:
15453 case VSX_BUILTIN_XVMULDP:
15454 arg0 = gimple_call_arg (stmt, 0);
15455 arg1 = gimple_call_arg (stmt, 1);
15456 lhs = gimple_call_lhs (stmt);
15457 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15458 gimple_set_location (g, gimple_location (stmt));
15459 gsi_replace (gsi, g, true);
15460 return true;
15461 /* Even element flavors of vec_mul (signed). */
15462 case ALTIVEC_BUILTIN_VMULESB:
15463 case ALTIVEC_BUILTIN_VMULESH:
15464 case P8V_BUILTIN_VMULESW:
15465 /* Even element flavors of vec_mul (unsigned). */
15466 case ALTIVEC_BUILTIN_VMULEUB:
15467 case ALTIVEC_BUILTIN_VMULEUH:
15468 case P8V_BUILTIN_VMULEUW:
15469 arg0 = gimple_call_arg (stmt, 0);
15470 arg1 = gimple_call_arg (stmt, 1);
15471 lhs = gimple_call_lhs (stmt);
15472 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15473 gimple_set_location (g, gimple_location (stmt));
15474 gsi_replace (gsi, g, true);
15475 return true;
15476 /* Odd element flavors of vec_mul (signed). */
15477 case ALTIVEC_BUILTIN_VMULOSB:
15478 case ALTIVEC_BUILTIN_VMULOSH:
15479 case P8V_BUILTIN_VMULOSW:
15480 /* Odd element flavors of vec_mul (unsigned). */
15481 case ALTIVEC_BUILTIN_VMULOUB:
15482 case ALTIVEC_BUILTIN_VMULOUH:
15483 case P8V_BUILTIN_VMULOUW:
15484 arg0 = gimple_call_arg (stmt, 0);
15485 arg1 = gimple_call_arg (stmt, 1);
15486 lhs = gimple_call_lhs (stmt);
15487 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15488 gimple_set_location (g, gimple_location (stmt));
15489 gsi_replace (gsi, g, true);
15490 return true;
15491 /* Flavors of vec_div (Integer). */
15492 case VSX_BUILTIN_DIV_V2DI:
15493 case VSX_BUILTIN_UDIV_V2DI:
15494 arg0 = gimple_call_arg (stmt, 0);
15495 arg1 = gimple_call_arg (stmt, 1);
15496 lhs = gimple_call_lhs (stmt);
15497 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15498 gimple_set_location (g, gimple_location (stmt));
15499 gsi_replace (gsi, g, true);
15500 return true;
15501 /* Flavors of vec_div (Float). */
15502 case VSX_BUILTIN_XVDIVSP:
15503 case VSX_BUILTIN_XVDIVDP:
15504 arg0 = gimple_call_arg (stmt, 0);
15505 arg1 = gimple_call_arg (stmt, 1);
15506 lhs = gimple_call_lhs (stmt);
15507 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15508 gimple_set_location (g, gimple_location (stmt));
15509 gsi_replace (gsi, g, true);
15510 return true;
15511 /* Flavors of vec_and. */
15512 case ALTIVEC_BUILTIN_VAND:
15513 arg0 = gimple_call_arg (stmt, 0);
15514 arg1 = gimple_call_arg (stmt, 1);
15515 lhs = gimple_call_lhs (stmt);
15516 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15517 gimple_set_location (g, gimple_location (stmt));
15518 gsi_replace (gsi, g, true);
15519 return true;
15520 /* Flavors of vec_andc. */
15521 case ALTIVEC_BUILTIN_VANDC:
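/* vec_andc (a, b) computes a & ~b: complement arg1 first, then AND
   it with arg0. */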
15522 arg0 = gimple_call_arg (stmt, 0);
15523 arg1 = gimple_call_arg (stmt, 1);
15524 lhs = gimple_call_lhs (stmt);
15525 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15526 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15527 gimple_set_location (g, gimple_location (stmt));
15528 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15529 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15530 gimple_set_location (g, gimple_location (stmt));
15531 gsi_replace (gsi, g, true);
15532 return true;
15533 /* Flavors of vec_nand. */
15534 case P8V_BUILTIN_VEC_NAND:
15535 case P8V_BUILTIN_NAND_V16QI:
15536 case P8V_BUILTIN_NAND_V8HI:
15537 case P8V_BUILTIN_NAND_V4SI:
15538 case P8V_BUILTIN_NAND_V4SF:
15539 case P8V_BUILTIN_NAND_V2DF:
15540 case P8V_BUILTIN_NAND_V2DI:
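/* vec_nand (a, b) computes ~(a & b): AND the operands, then
   complement the result. */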
15541 arg0 = gimple_call_arg (stmt, 0);
15542 arg1 = gimple_call_arg (stmt, 1);
15543 lhs = gimple_call_lhs (stmt);
15544 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15545 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15546 gimple_set_location (g, gimple_location (stmt));
15547 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15548 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15549 gimple_set_location (g, gimple_location (stmt));
15550 gsi_replace (gsi, g, true);
15551 return true;
15552 /* Flavors of vec_or. */
15553 case ALTIVEC_BUILTIN_VOR:
15554 arg0 = gimple_call_arg (stmt, 0);
15555 arg1 = gimple_call_arg (stmt, 1);
15556 lhs = gimple_call_lhs (stmt);
15557 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15558 gimple_set_location (g, gimple_location (stmt));
15559 gsi_replace (gsi, g, true);
15560 return true;
15561 /* Flavors of vec_orc. */
15562 case P8V_BUILTIN_ORC_V16QI:
15563 case P8V_BUILTIN_ORC_V8HI:
15564 case P8V_BUILTIN_ORC_V4SI:
15565 case P8V_BUILTIN_ORC_V4SF:
15566 case P8V_BUILTIN_ORC_V2DF:
15567 case P8V_BUILTIN_ORC_V2DI:
15568 arg0 = gimple_call_arg (stmt, 0);
15569 arg1 = gimple_call_arg (stmt, 1);
15570 lhs = gimple_call_lhs (stmt);
15571 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15572 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15573 gimple_set_location (g, gimple_location (stmt));
15574 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15575 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15576 gimple_set_location (g, gimple_location (stmt));
15577 gsi_replace (gsi, g, true);
15578 return true;
15579 /* Flavors of vec_xor. */
15580 case ALTIVEC_BUILTIN_VXOR:
15581 arg0 = gimple_call_arg (stmt, 0);
15582 arg1 = gimple_call_arg (stmt, 1);
15583 lhs = gimple_call_lhs (stmt);
15584 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15585 gimple_set_location (g, gimple_location (stmt));
15586 gsi_replace (gsi, g, true);
15587 return true;
15588 /* Flavors of vec_nor. */
15589 case ALTIVEC_BUILTIN_VNOR:
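/* vec_nor (a, b) computes ~(a | b): OR the operands, then complement
   the result. */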
15590 arg0 = gimple_call_arg (stmt, 0);
15591 arg1 = gimple_call_arg (stmt, 1);
15592 lhs = gimple_call_lhs (stmt);
15593 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15594 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15595 gimple_set_location (g, gimple_location (stmt));
15596 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15597 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15598 gimple_set_location (g, gimple_location (stmt));
15599 gsi_replace (gsi, g, true);
15600 return true;
15601 /* Flavors of vec_abs. */
15602 case ALTIVEC_BUILTIN_ABS_V16QI:
15603 case ALTIVEC_BUILTIN_ABS_V8HI:
15604 case ALTIVEC_BUILTIN_ABS_V4SI:
15605 case ALTIVEC_BUILTIN_ABS_V4SF:
15606 case P8V_BUILTIN_ABS_V2DI:
15607 case VSX_BUILTIN_XVABSDP:
15608 arg0 = gimple_call_arg (stmt, 0);
15609 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15610 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15611 return false;
15612 lhs = gimple_call_lhs (stmt);
15613 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15614 gimple_set_location (g, gimple_location (stmt));
15615 gsi_replace (gsi, g, true);
15616 return true;
15617 /* Flavors of vec_min. */
15618 case VSX_BUILTIN_XVMINDP:
15619 case P8V_BUILTIN_VMINSD:
15620 case P8V_BUILTIN_VMINUD:
15621 case ALTIVEC_BUILTIN_VMINSB:
15622 case ALTIVEC_BUILTIN_VMINSH:
15623 case ALTIVEC_BUILTIN_VMINSW:
15624 case ALTIVEC_BUILTIN_VMINUB:
15625 case ALTIVEC_BUILTIN_VMINUH:
15626 case ALTIVEC_BUILTIN_VMINUW:
15627 case ALTIVEC_BUILTIN_VMINFP:
15628 arg0 = gimple_call_arg (stmt, 0);
15629 arg1 = gimple_call_arg (stmt, 1);
15630 lhs = gimple_call_lhs (stmt);
15631 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15632 gimple_set_location (g, gimple_location (stmt));
15633 gsi_replace (gsi, g, true);
15634 return true;
15635 /* Flavors of vec_max. */
15636 case VSX_BUILTIN_XVMAXDP:
15637 case P8V_BUILTIN_VMAXSD:
15638 case P8V_BUILTIN_VMAXUD:
15639 case ALTIVEC_BUILTIN_VMAXSB:
15640 case ALTIVEC_BUILTIN_VMAXSH:
15641 case ALTIVEC_BUILTIN_VMAXSW:
15642 case ALTIVEC_BUILTIN_VMAXUB:
15643 case ALTIVEC_BUILTIN_VMAXUH:
15644 case ALTIVEC_BUILTIN_VMAXUW:
15645 case ALTIVEC_BUILTIN_VMAXFP:
15646 arg0 = gimple_call_arg (stmt, 0);
15647 arg1 = gimple_call_arg (stmt, 1);
15648 lhs = gimple_call_lhs (stmt);
15649 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15650 gimple_set_location (g, gimple_location (stmt));
15651 gsi_replace (gsi, g, true);
15652 return true;
15653 /* Flavors of vec_eqv. */
15654 case P8V_BUILTIN_EQV_V16QI:
15655 case P8V_BUILTIN_EQV_V8HI:
15656 case P8V_BUILTIN_EQV_V4SI:
15657 case P8V_BUILTIN_EQV_V4SF:
15658 case P8V_BUILTIN_EQV_V2DF:
15659 case P8V_BUILTIN_EQV_V2DI:
15660 arg0 = gimple_call_arg (stmt, 0);
15661 arg1 = gimple_call_arg (stmt, 1);
15662 lhs = gimple_call_lhs (stmt);
15663 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15664 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15665 gimple_set_location (g, gimple_location (stmt));
15666 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15667 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15668 gimple_set_location (g, gimple_location (stmt));
15669 gsi_replace (gsi, g, true);
15670 return true;
15671 /* Flavors of vec_rotate_left. */
15672 case ALTIVEC_BUILTIN_VRLB:
15673 case ALTIVEC_BUILTIN_VRLH:
15674 case ALTIVEC_BUILTIN_VRLW:
15675 case P8V_BUILTIN_VRLD:
15676 arg0 = gimple_call_arg (stmt, 0);
15677 arg1 = gimple_call_arg (stmt, 1);
15678 lhs = gimple_call_lhs (stmt);
15679 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15680 gimple_set_location (g, gimple_location (stmt));
15681 gsi_replace (gsi, g, true);
15682 return true;
15683 /* Flavors of vector shift right algebraic.
15684 vec_sra{b,h,w} -> vsra{b,h,w}. */
15685 case ALTIVEC_BUILTIN_VSRAB:
15686 case ALTIVEC_BUILTIN_VSRAH:
15687 case ALTIVEC_BUILTIN_VSRAW:
15688 case P8V_BUILTIN_VSRAD:
15689 arg0 = gimple_call_arg (stmt, 0);
15690 arg1 = gimple_call_arg (stmt, 1);
15691 lhs = gimple_call_lhs (stmt);
15692 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
15693 gimple_set_location (g, gimple_location (stmt));
15694 gsi_replace (gsi, g, true);
15695 return true;
15696 /* Flavors of vector shift left.
15697 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15698 case ALTIVEC_BUILTIN_VSLB:
15699 case ALTIVEC_BUILTIN_VSLH:
15700 case ALTIVEC_BUILTIN_VSLW:
15701 case P8V_BUILTIN_VSLD:
15702 {
15703 location_t loc;
15704 gimple_seq stmts = NULL;
15705 arg0 = gimple_call_arg (stmt, 0);
15706 tree arg0_type = TREE_TYPE (arg0);
15707 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15708 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15709 return false;
15710 arg1 = gimple_call_arg (stmt, 1);
15711 tree arg1_type = TREE_TYPE (arg1);
15712 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15713 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15714 loc = gimple_location (stmt);
15715 lhs = gimple_call_lhs (stmt);
15716 /* Force arg1 into the valid range matching the arg0 type. */
15717 /* Build a vector consisting of the max valid bit-size values. */
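/* For example, a V16QI operand has 128/16 = 8-bit elements, so each
   shift amount is reduced modulo 8 below; this matches the builtin's
   semantics, where the shift count is taken modulo the element width
   in bits. */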
15718 int n_elts = VECTOR_CST_NELTS (arg1);
15719 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15720 * BITS_PER_UNIT;
15721 tree element_size = build_int_cst (unsigned_element_type,
15722 tree_size_in_bits / n_elts);
15723 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15724 for (int i = 0; i < n_elts; i++)
15725 elts.safe_push (element_size);
15726 tree modulo_tree = elts.build ();
15727 /* Modulo the provided shift value against that vector. */
15728 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15729 unsigned_arg1_type, arg1);
15730 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15731 unsigned_arg1_type, unsigned_arg1,
15732 modulo_tree);
15733 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15734 /* And finally, do the shift. */
15735 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15736 gimple_set_location (g, gimple_location (stmt));
15737 gsi_replace (gsi, g, true);
15738 return true;
15739 }
15740 /* Flavors of vector shift right. */
15741 case ALTIVEC_BUILTIN_VSRB:
15742 case ALTIVEC_BUILTIN_VSRH:
15743 case ALTIVEC_BUILTIN_VSRW:
15744 case P8V_BUILTIN_VSRD:
15745 {
15746 arg0 = gimple_call_arg (stmt, 0);
15747 arg1 = gimple_call_arg (stmt, 1);
15748 lhs = gimple_call_lhs (stmt);
15749 gimple_seq stmts = NULL;
15750 /* Convert arg0 to unsigned. */
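/* The vsr[bhwd] instructions perform a logical shift, and RSHIFT_EXPR
   on an unsigned type shifts in zeros, so do the shift on the
   unsigned view of the operand. */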
15751 tree arg0_unsigned
15752 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15753 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15754 tree res
15755 = gimple_build (&stmts, RSHIFT_EXPR,
15756 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
15757 /* Convert result back to the lhs type. */
15758 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15759 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15760 update_call_from_tree (gsi, res);
15761 return true;
15762 }
15763 /* Vector loads. */
15764 case ALTIVEC_BUILTIN_LVX_V16QI:
15765 case ALTIVEC_BUILTIN_LVX_V8HI:
15766 case ALTIVEC_BUILTIN_LVX_V4SI:
15767 case ALTIVEC_BUILTIN_LVX_V4SF:
15768 case ALTIVEC_BUILTIN_LVX_V2DI:
15769 case ALTIVEC_BUILTIN_LVX_V2DF:
15770 case ALTIVEC_BUILTIN_LVX_V1TI:
15771 {
15772 arg0 = gimple_call_arg (stmt, 0); // offset
15773 arg1 = gimple_call_arg (stmt, 1); // address
15774 lhs = gimple_call_lhs (stmt);
15775 location_t loc = gimple_location (stmt);
15776 /* Since arg1 may be cast to a different type, just use ptr_type_node
15777 here instead of trying to enforce TBAA on pointer types. */
15778 tree arg1_type = ptr_type_node;
15779 tree lhs_type = TREE_TYPE (lhs);
15780 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15781 the tree using the value from arg0. The resulting type will match
15782 the type of arg1. */
15783 gimple_seq stmts = NULL;
15784 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15785 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15786 arg1_type, arg1, temp_offset);
15787 /* Mask off any lower bits from the address. */
15788 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15789 arg1_type, temp_addr,
15790 build_int_cst (arg1_type, -16));
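/* E.g. an address of 0x1000F is masked down to 0x10000 here,
   mirroring the implicit 16-byte alignment that the lvx instruction
   applies to its effective address. */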
15791 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15792 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15793 take an offset, but since we've already incorporated the offset
15794 above, here we just pass in a zero. */
15795 gimple *g
15796 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15797 build_int_cst (arg1_type, 0)));
15798 gimple_set_location (g, loc);
15799 gsi_replace (gsi, g, true);
15800 return true;
15801 }
15802 /* Vector stores. */
15803 case ALTIVEC_BUILTIN_STVX_V16QI:
15804 case ALTIVEC_BUILTIN_STVX_V8HI:
15805 case ALTIVEC_BUILTIN_STVX_V4SI:
15806 case ALTIVEC_BUILTIN_STVX_V4SF:
15807 case ALTIVEC_BUILTIN_STVX_V2DI:
15808 case ALTIVEC_BUILTIN_STVX_V2DF:
15809 {
15810 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15811 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15812 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15813 location_t loc = gimple_location (stmt);
15814 tree arg0_type = TREE_TYPE (arg0);
15815 /* Use ptr_type_node (no TBAA) for the arg2_type.
15816 FIXME: (Richard) "A proper fix would be to transition this type as
15817 seen from the frontend to GIMPLE, for example in a similar way we
15818 do for MEM_REFs by piggy-backing that on an extra argument, a
15819 constant zero pointer of the alias pointer type to use (which would
15820 also serve as a type indicator of the store itself). I'd use a
15821 target specific internal function for this (not sure if we can have
15822 those target specific, but I guess if it's folded away then that's
15823 fine) and get away with the overload set." */
15824 tree arg2_type = ptr_type_node;
15825 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15826 the tree using the value from arg1. The resulting type will match
15827 the type of arg2. */
15828 gimple_seq stmts = NULL;
15829 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15830 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15831 arg2_type, arg2, temp_offset);
15832 /* Mask off any lower bits from the address. */
15833 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15834 arg2_type, temp_addr,
15835 build_int_cst (arg2_type, -16));
15836 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15837 /* The desired gimple result should be similar to:
15838 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15839 gimple *g
15840 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15841 build_int_cst (arg2_type, 0)), arg0);
15842 gimple_set_location (g, loc);
15843 gsi_replace (gsi, g, true);
15844 return true;
15845 }
15846
15847 /* Unaligned vector loads. */
15848 case VSX_BUILTIN_LXVW4X_V16QI:
15849 case VSX_BUILTIN_LXVW4X_V8HI:
15850 case VSX_BUILTIN_LXVW4X_V4SF:
15851 case VSX_BUILTIN_LXVW4X_V4SI:
15852 case VSX_BUILTIN_LXVD2X_V2DF:
15853 case VSX_BUILTIN_LXVD2X_V2DI:
15854 {
15855 arg0 = gimple_call_arg (stmt, 0); // offset
15856 arg1 = gimple_call_arg (stmt, 1); // address
15857 lhs = gimple_call_lhs (stmt);
15858 location_t loc = gimple_location (stmt);
15859 /* Since arg1 may be cast to a different type, just use ptr_type_node
15860 here instead of trying to enforce TBAA on pointer types. */
15861 tree arg1_type = ptr_type_node;
15862 tree lhs_type = TREE_TYPE (lhs);
15863 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15864 required alignment (power) is 4 bytes regardless of data type. */
15865 tree align_ltype = build_aligned_type (lhs_type, 4);
15866 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15867 the tree using the value from arg0. The resulting type will match
15868 the type of arg1. */
15869 gimple_seq stmts = NULL;
15870 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15871 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15872 arg1_type, arg1, temp_offset);
15873 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15874 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15875 take an offset, but since we've already incorporated the offset
15876 above, here we just pass in a zero. */
15877 gimple *g;
15878 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15879 build_int_cst (arg1_type, 0)));
15880 gimple_set_location (g, loc);
15881 gsi_replace (gsi, g, true);
15882 return true;
15883 }
15884
15885 /* Unaligned vector stores. */
15886 case VSX_BUILTIN_STXVW4X_V16QI:
15887 case VSX_BUILTIN_STXVW4X_V8HI:
15888 case VSX_BUILTIN_STXVW4X_V4SF:
15889 case VSX_BUILTIN_STXVW4X_V4SI:
15890 case VSX_BUILTIN_STXVD2X_V2DF:
15891 case VSX_BUILTIN_STXVD2X_V2DI:
15892 {
15893 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15894 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15895 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15896 location_t loc = gimple_location (stmt);
15897 tree arg0_type = TREE_TYPE (arg0);
15898 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15899 tree arg2_type = ptr_type_node;
15900 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15901 required alignment (power) is 4 bytes regardless of data type. */
15902 tree align_stype = build_aligned_type (arg0_type, 4);
15903 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15904 the tree using the value from arg1. */
15905 gimple_seq stmts = NULL;
15906 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15907 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15908 arg2_type, arg2, temp_offset);
15909 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15910 gimple *g;
15911 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15912 build_int_cst (arg2_type, 0)), arg0);
15913 gimple_set_location (g, loc);
15914 gsi_replace (gsi, g, true);
15915 return true;
15916 }
15917
15918 /* Vector Fused multiply-add (fma). */
15919 case ALTIVEC_BUILTIN_VMADDFP:
15920 case VSX_BUILTIN_XVMADDDP:
15921 case ALTIVEC_BUILTIN_VMLADDUHM:
15922 {
15923 arg0 = gimple_call_arg (stmt, 0);
15924 arg1 = gimple_call_arg (stmt, 1);
15925 tree arg2 = gimple_call_arg (stmt, 2);
15926 lhs = gimple_call_lhs (stmt);
15927 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15928 gimple_call_set_lhs (g, lhs);
15929 gimple_call_set_nothrow (g, true);
15930 gimple_set_location (g, gimple_location (stmt));
15931 gsi_replace (gsi, g, true);
15932 return true;
15933 }
15934
15935 /* Vector compares; EQ, NE, GE, GT, LE. */
15936 case ALTIVEC_BUILTIN_VCMPEQUB:
15937 case ALTIVEC_BUILTIN_VCMPEQUH:
15938 case ALTIVEC_BUILTIN_VCMPEQUW:
15939 case P8V_BUILTIN_VCMPEQUD:
15940 fold_compare_helper (gsi, EQ_EXPR, stmt);
15941 return true;
15942
15943 case P9V_BUILTIN_CMPNEB:
15944 case P9V_BUILTIN_CMPNEH:
15945 case P9V_BUILTIN_CMPNEW:
15946 fold_compare_helper (gsi, NE_EXPR, stmt);
15947 return true;
15948
15949 case VSX_BUILTIN_CMPGE_16QI:
15950 case VSX_BUILTIN_CMPGE_U16QI:
15951 case VSX_BUILTIN_CMPGE_8HI:
15952 case VSX_BUILTIN_CMPGE_U8HI:
15953 case VSX_BUILTIN_CMPGE_4SI:
15954 case VSX_BUILTIN_CMPGE_U4SI:
15955 case VSX_BUILTIN_CMPGE_2DI:
15956 case VSX_BUILTIN_CMPGE_U2DI:
15957 fold_compare_helper (gsi, GE_EXPR, stmt);
15958 return true;
15959
15960 case ALTIVEC_BUILTIN_VCMPGTSB:
15961 case ALTIVEC_BUILTIN_VCMPGTUB:
15962 case ALTIVEC_BUILTIN_VCMPGTSH:
15963 case ALTIVEC_BUILTIN_VCMPGTUH:
15964 case ALTIVEC_BUILTIN_VCMPGTSW:
15965 case ALTIVEC_BUILTIN_VCMPGTUW:
15966 case P8V_BUILTIN_VCMPGTUD:
15967 case P8V_BUILTIN_VCMPGTSD:
15968 fold_compare_helper (gsi, GT_EXPR, stmt);
15969 return true;
15970
15971 case VSX_BUILTIN_CMPLE_16QI:
15972 case VSX_BUILTIN_CMPLE_U16QI:
15973 case VSX_BUILTIN_CMPLE_8HI:
15974 case VSX_BUILTIN_CMPLE_U8HI:
15975 case VSX_BUILTIN_CMPLE_4SI:
15976 case VSX_BUILTIN_CMPLE_U4SI:
15977 case VSX_BUILTIN_CMPLE_2DI:
15978 case VSX_BUILTIN_CMPLE_U2DI:
15979 fold_compare_helper (gsi, LE_EXPR, stmt);
15980 return true;
15981
15982 /* Flavors of vec_splat_[us]{8,16,32}. */
15983 case ALTIVEC_BUILTIN_VSPLTISB:
15984 case ALTIVEC_BUILTIN_VSPLTISH:
15985 case ALTIVEC_BUILTIN_VSPLTISW:
15986 {
15987 int size;
15988 if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
15989 size = 8;
15990 else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
15991 size = 16;
15992 else
15993 size = 32;
15994
15995 arg0 = gimple_call_arg (stmt, 0);
15996 lhs = gimple_call_lhs (stmt);
15997
15998 /* Only fold the vec_splat_*() if the lower bits of arg0 form a
15999 5-bit signed constant in the range -16 to +15. */
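/* E.g. vec_splat_s32 (-16) folds directly to the V4SI constant
   vector {-16,-16,-16,-16}. */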
16000 if (TREE_CODE (arg0) != INTEGER_CST
16001 || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
16002 -16, 15))
16003 return false;
16004 gimple_seq stmts = NULL;
16005 location_t loc = gimple_location (stmt);
16006 tree splat_value = gimple_convert (&stmts, loc,
16007 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16008 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16009 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16010 g = gimple_build_assign (lhs, splat_tree);
16011 gimple_set_location (g, gimple_location (stmt));
16012 gsi_replace (gsi, g, true);
16013 return true;
16014 }
16015
16016 /* Flavors of vec_splat. */
16017 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
16018 case ALTIVEC_BUILTIN_VSPLTB:
16019 case ALTIVEC_BUILTIN_VSPLTH:
16020 case ALTIVEC_BUILTIN_VSPLTW:
16021 case VSX_BUILTIN_XXSPLTD_V2DI:
16022 case VSX_BUILTIN_XXSPLTD_V2DF:
16023 {
16024 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
16025 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
16026 /* Only fold the vec_splat () if arg1 is both a constant value and
16027 a valid index into the arg0 vector. */
16028 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
16029 if (TREE_CODE (arg1) != INTEGER_CST
16030 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
16031 return false;
16032 lhs = gimple_call_lhs (stmt);
16033 tree lhs_type = TREE_TYPE (lhs);
16034 tree arg0_type = TREE_TYPE (arg0);
16035 tree splat;
16036 if (TREE_CODE (arg0) == VECTOR_CST)
16037 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
16038 else
16039 {
16040 /* Determine (in bits) the length and start location of the
16041 splat value for a call to the tree_vec_extract helper. */
16042 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
16043 * BITS_PER_UNIT / n_elts;
16044 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
16045 tree len = build_int_cst (bitsizetype, splat_elem_size);
16046 tree start = build_int_cst (bitsizetype, splat_start_bit);
16047 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
16048 len, start);
16049 }
16050 /* And finally, build the new vector. */
16051 tree splat_tree = build_vector_from_val (lhs_type, splat);
16052 g = gimple_build_assign (lhs, splat_tree);
16053 gimple_set_location (g, gimple_location (stmt));
16054 gsi_replace (gsi, g, true);
16055 return true;
16056 }
16057
16058 /* Flavors of vec_mergel. */
16059 case ALTIVEC_BUILTIN_VMRGLH:
16060 case ALTIVEC_BUILTIN_VMRGLW:
16061 case VSX_BUILTIN_XXMRGLW_4SI:
16062 case ALTIVEC_BUILTIN_VMRGLB:
16063 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16064 case VSX_BUILTIN_XXMRGLW_4SF:
16065 case VSX_BUILTIN_VEC_MERGEL_V2DF:
16066 fold_mergehl_helper (gsi, stmt, 1);
16067 return true;
16068 /* Flavors of vec_mergeh. */
16069 case ALTIVEC_BUILTIN_VMRGHH:
16070 case ALTIVEC_BUILTIN_VMRGHW:
16071 case VSX_BUILTIN_XXMRGHW_4SI:
16072 case ALTIVEC_BUILTIN_VMRGHB:
16073 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16074 case VSX_BUILTIN_XXMRGHW_4SF:
16075 case VSX_BUILTIN_VEC_MERGEH_V2DF:
16076 fold_mergehl_helper (gsi, stmt, 0);
16077 return true;
16078
16079 /* Flavors of vec_mergee. */
16080 case P8V_BUILTIN_VMRGEW_V4SI:
16081 case P8V_BUILTIN_VMRGEW_V2DI:
16082 case P8V_BUILTIN_VMRGEW_V4SF:
16083 case P8V_BUILTIN_VMRGEW_V2DF:
16084 fold_mergeeo_helper (gsi, stmt, 0);
16085 return true;
16086 /* Flavors of vec_mergeo. */
16087 case P8V_BUILTIN_VMRGOW_V4SI:
16088 case P8V_BUILTIN_VMRGOW_V2DI:
16089 case P8V_BUILTIN_VMRGOW_V4SF:
16090 case P8V_BUILTIN_VMRGOW_V2DF:
16091 fold_mergeeo_helper (gsi, stmt, 1);
16092 return true;
16093
16094 /* d = vec_pack (a, b) */
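/* E.g. packing two V2DI operands yields a V4SI whose elements are
   the truncated low-order halves of the four input elements;
   VEC_PACK_TRUNC_EXPR expresses exactly that narrowing. */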
16095 case P8V_BUILTIN_VPKUDUM:
16096 case ALTIVEC_BUILTIN_VPKUHUM:
16097 case ALTIVEC_BUILTIN_VPKUWUM:
16098 {
16099 arg0 = gimple_call_arg (stmt, 0);
16100 arg1 = gimple_call_arg (stmt, 1);
16101 lhs = gimple_call_lhs (stmt);
16102 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
16103 gimple_set_location (g, gimple_location (stmt));
16104 gsi_replace (gsi, g, true);
16105 return true;
16106 }
16107
16108 /* d = vec_unpackh (a) */
16109 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
16110 in this code is sensitive to endianness, and needs to be inverted to
16111 handle both LE and BE targets. */
16112 case ALTIVEC_BUILTIN_VUPKHSB:
16113 case ALTIVEC_BUILTIN_VUPKHSH:
16114 case P8V_BUILTIN_VUPKHSW:
16115 {
16116 arg0 = gimple_call_arg (stmt, 0);
16117 lhs = gimple_call_lhs (stmt);
16118 if (BYTES_BIG_ENDIAN)
16119 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16120 else
16121 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16122 gimple_set_location (g, gimple_location (stmt));
16123 gsi_replace (gsi, g, true);
16124 return true;
16125 }
16126 /* d = vec_unpackl (a) */
16127 case ALTIVEC_BUILTIN_VUPKLSB:
16128 case ALTIVEC_BUILTIN_VUPKLSH:
16129 case P8V_BUILTIN_VUPKLSW:
16130 {
16131 arg0 = gimple_call_arg (stmt, 0);
16132 lhs = gimple_call_lhs (stmt);
16133 if (BYTES_BIG_ENDIAN)
16134 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16135 else
16136 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16137 gimple_set_location (g, gimple_location (stmt));
16138 gsi_replace (gsi, g, true);
16139 return true;
16140 }
16141 /* There is no gimple type corresponding to pixel, so just return. */
16142 case ALTIVEC_BUILTIN_VUPKHPX:
16143 case ALTIVEC_BUILTIN_VUPKLPX:
16144 return false;
16145
16146 /* vec_perm. */
16147 case ALTIVEC_BUILTIN_VPERM_16QI:
16148 case ALTIVEC_BUILTIN_VPERM_8HI:
16149 case ALTIVEC_BUILTIN_VPERM_4SI:
16150 case ALTIVEC_BUILTIN_VPERM_2DI:
16151 case ALTIVEC_BUILTIN_VPERM_4SF:
16152 case ALTIVEC_BUILTIN_VPERM_2DF:
16153 {
16154 arg0 = gimple_call_arg (stmt, 0);
16155 arg1 = gimple_call_arg (stmt, 1);
16156 tree permute = gimple_call_arg (stmt, 2);
16157 lhs = gimple_call_lhs (stmt);
16158 location_t loc = gimple_location (stmt);
16159 gimple_seq stmts = NULL;
16160 // Convert arg0 and arg1 to match the type of the permute
16161 // for the VEC_PERM_EXPR operation.
16162 tree permute_type = (TREE_TYPE (permute));
16163 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
16164 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
16165 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
16166 permute_type, arg0_ptype, arg1_ptype,
16167 permute);
16168 // Convert the result back to the desired lhs type upon completion.
16169 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
16170 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16171 g = gimple_build_assign (lhs, temp);
16172 gimple_set_location (g, loc);
16173 gsi_replace (gsi, g, true);
16174 return true;
16175 }
16176
16177 default:
16178 if (TARGET_DEBUG_BUILTIN)
16179 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16180 fn_code, fn_name1, fn_name2);
16181 break;
16182 }
16183
16184 return false;
16185 }
16186
16187 /* Expand an expression EXP that calls a built-in function,
16188 with result going to TARGET if that's convenient
16189 (and in mode MODE if that's convenient).
16190 SUBTARGET may be used as the target for computing one of EXP's operands.
16191 IGNORE is nonzero if the value is to be ignored. */
16192
16193 static rtx
16194 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16195 machine_mode mode ATTRIBUTE_UNUSED,
16196 int ignore ATTRIBUTE_UNUSED)
16197 {
16198 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16199 enum rs6000_builtins fcode
16200 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16201 size_t uns_fcode = (size_t)fcode;
16202 const struct builtin_description *d;
16203 size_t i;
16204 rtx ret;
16205 bool success;
16206 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16207 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16208 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16209
16210 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16211 floating point type, depending on whether long double is the IBM extended
16212 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16213 we only define one variant of the built-in function, and switch the code
16214 when defining it, rather than defining two built-ins and using the
16215 overload table in rs6000-c.c to switch between the two. If we don't have
16216 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16217 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16218 if (FLOAT128_IEEE_P (TFmode))
16219 switch (icode)
16220 {
16221 default:
16222 break;
16223
16224 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16225 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16226 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16227 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16228 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16229 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16230 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16231 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16232 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16233 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16234 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16235 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16236 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16237 }
16238
16239 if (TARGET_DEBUG_BUILTIN)
16240 {
16241 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16242 const char *name2 = (icode != CODE_FOR_nothing)
16243 ? get_insn_name ((int) icode)
16244 : "nothing";
16245 const char *name3;
16246
16247 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16248 {
16249 default: name3 = "unknown"; break;
16250 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16251 case RS6000_BTC_UNARY: name3 = "unary"; break;
16252 case RS6000_BTC_BINARY: name3 = "binary"; break;
16253 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16254 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16255 case RS6000_BTC_ABS: name3 = "abs"; break;
16256 case RS6000_BTC_DST: name3 = "dst"; break;
16257 }
16258
16259
16260 fprintf (stderr,
16261 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16262 (name1) ? name1 : "---", fcode,
16263 (name2) ? name2 : "---", (int) icode,
16264 name3,
16265 func_valid_p ? "" : ", not valid");
16266 }
16267
16268 if (!func_valid_p)
16269 {
16270 rs6000_invalid_builtin (fcode);
16271
16272 /* Given it is invalid, just generate a normal call. */
16273 return expand_call (exp, target, ignore);
16274 }
16275
16276 switch (fcode)
16277 {
16278 case RS6000_BUILTIN_RECIP:
16279 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16280
16281 case RS6000_BUILTIN_RECIPF:
16282 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16283
16284 case RS6000_BUILTIN_RSQRTF:
16285 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16286
16287 case RS6000_BUILTIN_RSQRT:
16288 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16289
16290 case POWER7_BUILTIN_BPERMD:
16291 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16292 ? CODE_FOR_bpermd_di
16293 : CODE_FOR_bpermd_si), exp, target);
16294
16295 case RS6000_BUILTIN_GET_TB:
16296 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16297 target);
16298
16299 case RS6000_BUILTIN_MFTB:
16300 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16301 ? CODE_FOR_rs6000_mftb_di
16302 : CODE_FOR_rs6000_mftb_si),
16303 target);
16304
16305 case RS6000_BUILTIN_MFFS:
16306 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16307
16308 case RS6000_BUILTIN_MTFSB0:
16309 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16310
16311 case RS6000_BUILTIN_MTFSB1:
16312 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16313
16314 case RS6000_BUILTIN_SET_FPSCR_RN:
16315 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16316 exp);
16317
16318 case RS6000_BUILTIN_SET_FPSCR_DRN:
16319 return
16320 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16321 exp);
16322
16323 case RS6000_BUILTIN_MFFSL:
16324 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16325
16326 case RS6000_BUILTIN_MTFSF:
16327 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16328
16329 case RS6000_BUILTIN_CPU_INIT:
16330 case RS6000_BUILTIN_CPU_IS:
16331 case RS6000_BUILTIN_CPU_SUPPORTS:
16332 return cpu_expand_builtin (fcode, exp, target);
16333
16334 case MISC_BUILTIN_SPEC_BARRIER:
16335 {
16336 emit_insn (gen_speculation_barrier ());
16337 return NULL_RTX;
16338 }
16339
16340 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16341 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16342 {
16343 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16344 : (int) CODE_FOR_altivec_lvsl_direct);
16345 machine_mode tmode = insn_data[icode2].operand[0].mode;
16346 machine_mode mode = insn_data[icode2].operand[1].mode;
16347 tree arg;
16348 rtx op, addr, pat;
16349
16350 gcc_assert (TARGET_ALTIVEC);
16351
16352 arg = CALL_EXPR_ARG (exp, 0);
16353 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16354 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16355 addr = memory_address (mode, op);
16356 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16357 op = addr;
16358 else
16359 {
16360 /* For the load case we need to negate the address. */
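/* (For a misaligned address, lvsr of the negated address produces
   the same permute control vector as lvsl of the address itself,
   since (-addr) & 15 == 16 - (addr & 15).) */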
16361 op = gen_reg_rtx (GET_MODE (addr));
16362 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16363 }
16364 op = gen_rtx_MEM (mode, op);
16365
16366 if (target == 0
16367 || GET_MODE (target) != tmode
16368 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16369 target = gen_reg_rtx (tmode);
16370
16371 pat = GEN_FCN (icode2) (target, op);
16372 if (!pat)
16373 return 0;
16374 emit_insn (pat);
16375
16376 return target;
16377 }
16378
16379 case ALTIVEC_BUILTIN_VCFUX:
16380 case ALTIVEC_BUILTIN_VCFSX:
16381 case ALTIVEC_BUILTIN_VCTUXS:
16382 case ALTIVEC_BUILTIN_VCTSXS:
16383 /* FIXME: There's got to be a nicer way to handle this case than
16384 constructing a new CALL_EXPR. */
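/* Treat a one-argument call as having a scale of zero by appending
   integer_zero_node as the second operand. */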
16385 if (call_expr_nargs (exp) == 1)
16386 {
16387 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16388 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16389 }
16390 break;
16391
16392 /* For the pack and unpack int128 routines, fix up the builtin so it
16393 uses the correct IBM128 type. */
16394 case MISC_BUILTIN_PACK_IF:
16395 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16396 {
16397 icode = CODE_FOR_packtf;
16398 fcode = MISC_BUILTIN_PACK_TF;
16399 uns_fcode = (size_t)fcode;
16400 }
16401 break;
16402
16403 case MISC_BUILTIN_UNPACK_IF:
16404 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16405 {
16406 icode = CODE_FOR_unpacktf;
16407 fcode = MISC_BUILTIN_UNPACK_TF;
16408 uns_fcode = (size_t)fcode;
16409 }
16410 break;
16411
16412 default:
16413 break;
16414 }
16415
16416 if (TARGET_ALTIVEC)
16417 {
16418 ret = altivec_expand_builtin (exp, target, &success);
16419
16420 if (success)
16421 return ret;
16422 }
16423 if (TARGET_HTM)
16424 {
16425 ret = htm_expand_builtin (exp, target, &success);
16426
16427 if (success)
16428 return ret;
16429 }
16430
16431 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16432 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16433 gcc_assert (attr == RS6000_BTC_UNARY
16434 || attr == RS6000_BTC_BINARY
16435 || attr == RS6000_BTC_TERNARY
16436 || attr == RS6000_BTC_SPECIAL);
16437
16438 /* Handle simple unary operations. */
16439 d = bdesc_1arg;
16440 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16441 if (d->code == fcode)
16442 return rs6000_expand_unop_builtin (icode, exp, target);
16443
16444 /* Handle simple binary operations. */
16445 d = bdesc_2arg;
16446 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16447 if (d->code == fcode)
16448 return rs6000_expand_binop_builtin (icode, exp, target);
16449
16450 /* Handle simple ternary operations. */
16451 d = bdesc_3arg;
16452 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16453 if (d->code == fcode)
16454 return rs6000_expand_ternop_builtin (icode, exp, target);
16455
16456 /* Handle simple no-argument operations. */
16457 d = bdesc_0arg;
16458 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16459 if (d->code == fcode)
16460 return rs6000_expand_zeroop_builtin (icode, target);
16461
16462 gcc_unreachable ();
16463 }
16464
16465 /* Create a builtin vector type with a name, taking care not to give
16466 the canonical type a name. */
16467
16468 static tree
16469 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16470 {
16471 tree result = build_vector_type (elt_type, num_elts);
16472
16473 /* Copy so we don't give the canonical type a name. */
16474 result = build_variant_type_copy (result);
16475
16476 add_builtin_type (name, result);
16477
16478 return result;
16479 }
16480
16481 static void
16482 rs6000_init_builtins (void)
16483 {
16484 tree tdecl;
16485 tree ftype;
16486 machine_mode mode;
16487
16488 if (TARGET_DEBUG_BUILTIN)
16489 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16490 (TARGET_ALTIVEC) ? ", altivec" : "",
16491 (TARGET_VSX) ? ", vsx" : "");
16492
16493 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16494 : "__vector long long",
16495 intDI_type_node, 2);
16496 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16497 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16498 intSI_type_node, 4);
16499 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16500 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16501 intHI_type_node, 8);
16502 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16503 intQI_type_node, 16);
16504
16505 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16506 unsigned_intQI_type_node, 16);
16507 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16508 unsigned_intHI_type_node, 8);
16509 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16510 unsigned_intSI_type_node, 4);
16511 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16512 ? "__vector unsigned long"
16513 : "__vector unsigned long long",
16514 unsigned_intDI_type_node, 2);
16515
16516 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16517
16518 const_str_type_node
16519 = build_pointer_type (build_qualified_type (char_type_node,
16520 TYPE_QUAL_CONST));
16521
16522 /* We use V1TI mode as a special container to hold __int128_t items that
16523 must live in VSX registers. */
16524 if (intTI_type_node)
16525 {
16526 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16527 intTI_type_node, 1);
16528 unsigned_V1TI_type_node
16529 = rs6000_vector_type ("__vector unsigned __int128",
16530 unsigned_intTI_type_node, 1);
16531 }
16532
16533 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16534 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16535 'vector unsigned short'. */
16536
16537 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16538 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16539 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16540 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16541 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16542
16543 long_integer_type_internal_node = long_integer_type_node;
16544 long_unsigned_type_internal_node = long_unsigned_type_node;
16545 long_long_integer_type_internal_node = long_long_integer_type_node;
16546 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16547 intQI_type_internal_node = intQI_type_node;
16548 uintQI_type_internal_node = unsigned_intQI_type_node;
16549 intHI_type_internal_node = intHI_type_node;
16550 uintHI_type_internal_node = unsigned_intHI_type_node;
16551 intSI_type_internal_node = intSI_type_node;
16552 uintSI_type_internal_node = unsigned_intSI_type_node;
16553 intDI_type_internal_node = intDI_type_node;
16554 uintDI_type_internal_node = unsigned_intDI_type_node;
16555 intTI_type_internal_node = intTI_type_node;
16556 uintTI_type_internal_node = unsigned_intTI_type_node;
16557 float_type_internal_node = float_type_node;
16558 double_type_internal_node = double_type_node;
16559 long_double_type_internal_node = long_double_type_node;
16560 dfloat64_type_internal_node = dfloat64_type_node;
16561 dfloat128_type_internal_node = dfloat128_type_node;
16562 void_type_internal_node = void_type_node;
16563
16564 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16565 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16566 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16567 format that uses a pair of doubles, depending on the switches and
16568 defaults.
16569
16570 If we don't support either 128-bit IBM double-double or IEEE 128-bit
16571 floating point, we need to make sure the type is non-zero or else the
16572 self-test fails during bootstrap.
16573
16574 Always create __ibm128 as a separate type, even if the current long double
16575 format is IBM extended double.
16576
16577 For IEEE 128-bit floating point, always create the type __ieee128. If the
16578 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16579 __ieee128. */
16580 if (TARGET_FLOAT128_TYPE)
16581 {
16582 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16583 ibm128_float_type_node = long_double_type_node;
16584 else
16585 {
16586 ibm128_float_type_node = make_node (REAL_TYPE);
16587 TYPE_PRECISION (ibm128_float_type_node) = 128;
16588 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16589 layout_type (ibm128_float_type_node);
16590 }
16591
16592 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16593 "__ibm128");
16594
16595 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16596 ieee128_float_type_node = long_double_type_node;
16597 else
16598 ieee128_float_type_node = float128_type_node;
16599
16600 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16601 "__ieee128");
16602 }
16603
16604 else
16605 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16606
16607 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16608 tree type node. */
16609 builtin_mode_to_type[QImode][0] = integer_type_node;
16610 builtin_mode_to_type[HImode][0] = integer_type_node;
16611 builtin_mode_to_type[SImode][0] = intSI_type_node;
16612 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16613 builtin_mode_to_type[DImode][0] = intDI_type_node;
16614 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16615 builtin_mode_to_type[TImode][0] = intTI_type_node;
16616 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16617 builtin_mode_to_type[SFmode][0] = float_type_node;
16618 builtin_mode_to_type[DFmode][0] = double_type_node;
16619 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16620 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16621 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16622 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16623 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16624 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16625 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16626 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16627 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16628 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16629 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16630 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16631 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16632 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16633 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16634 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16635 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16636
16637 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16638 TYPE_NAME (bool_char_type_node) = tdecl;
16639
16640 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16641 TYPE_NAME (bool_short_type_node) = tdecl;
16642
16643 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16644 TYPE_NAME (bool_int_type_node) = tdecl;
16645
16646 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16647 TYPE_NAME (pixel_type_node) = tdecl;
16648
16649 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16650 bool_char_type_node, 16);
16651 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16652 bool_short_type_node, 8);
16653 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16654 bool_int_type_node, 4);
16655 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16656 ? "__vector __bool long"
16657 : "__vector __bool long long",
16658 bool_long_long_type_node, 2);
16659 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16660 pixel_type_node, 8);
16661
16662 /* Create Altivec and VSX builtins on machines with at least the
16663 general purpose extensions (970 and newer) to allow the use of
16664 the target attribute. */
16665 if (TARGET_EXTRA_BUILTINS)
16666 altivec_init_builtins ();
16667 if (TARGET_HTM)
16668 htm_init_builtins ();
16669
16670 if (TARGET_EXTRA_BUILTINS)
16671 rs6000_common_init_builtins ();
16672
16673 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16674 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16675 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16676
16677 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16678 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16679 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16680
16681 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16682 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16683 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16684
16685 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16686 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16687 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16688
16689 mode = (TARGET_64BIT) ? DImode : SImode;
16690 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16691 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16692 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16693
16694 ftype = build_function_type_list (unsigned_intDI_type_node,
16695 NULL_TREE);
16696 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16697
16698 if (TARGET_64BIT)
16699 ftype = build_function_type_list (unsigned_intDI_type_node,
16700 NULL_TREE);
16701 else
16702 ftype = build_function_type_list (unsigned_intSI_type_node,
16703 NULL_TREE);
16704 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16705
16706 ftype = build_function_type_list (double_type_node, NULL_TREE);
16707 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16708
16709 ftype = build_function_type_list (double_type_node, NULL_TREE);
16710 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16711
16712 ftype = build_function_type_list (void_type_node,
16713 intSI_type_node,
16714 NULL_TREE);
16715 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16716
16717 ftype = build_function_type_list (void_type_node,
16718 intSI_type_node,
16719 NULL_TREE);
16720 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16721
16722 ftype = build_function_type_list (void_type_node,
16723 intDI_type_node,
16724 NULL_TREE);
16725 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16726
16727 ftype = build_function_type_list (void_type_node,
16728 intDI_type_node,
16729 NULL_TREE);
16730 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16731
16732 ftype = build_function_type_list (void_type_node,
16733 intSI_type_node, double_type_node,
16734 NULL_TREE);
16735 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16736
16737 ftype = build_function_type_list (void_type_node, NULL_TREE);
16738 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16739 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16740 MISC_BUILTIN_SPEC_BARRIER);
16741
16742 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16743 NULL_TREE);
16744 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16745 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16746
16747 /* AIX libm provides clog as __clog. */
16748 if (TARGET_XCOFF
16749 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16750 set_user_assembler_name (tdecl, "__clog");
16751
16752 #ifdef SUBTARGET_INIT_BUILTINS
16753 SUBTARGET_INIT_BUILTINS;
16754 #endif
16755 }
16756
16757 /* Returns the rs6000 builtin decl for CODE. */
16758
16759 static tree
16760 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16761 {
16762 HOST_WIDE_INT fnmask;
16763
16764 if (code >= RS6000_BUILTIN_COUNT)
16765 return error_mark_node;
16766
16767 fnmask = rs6000_builtin_info[code].mask;
16768 if ((fnmask & rs6000_builtin_mask) != fnmask)
16769 {
16770 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16771 return error_mark_node;
16772 }
16773
16774 return rs6000_builtin_decls[code];
16775 }
16776
16777 static void
16778 altivec_init_builtins (void)
16779 {
16780 const struct builtin_description *d;
16781 size_t i;
16782 tree ftype;
16783 tree decl;
16784 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16785
16786 tree pvoid_type_node = build_pointer_type (void_type_node);
16787
16788 tree pcvoid_type_node
16789 = build_pointer_type (build_qualified_type (void_type_node,
16790 TYPE_QUAL_CONST));
16791
16792 tree int_ftype_opaque
16793 = build_function_type_list (integer_type_node,
16794 opaque_V4SI_type_node, NULL_TREE);
16795 tree opaque_ftype_opaque
16796 = build_function_type_list (integer_type_node, NULL_TREE);
16797 tree opaque_ftype_opaque_int
16798 = build_function_type_list (opaque_V4SI_type_node,
16799 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16800 tree opaque_ftype_opaque_opaque_int
16801 = build_function_type_list (opaque_V4SI_type_node,
16802 opaque_V4SI_type_node, opaque_V4SI_type_node,
16803 integer_type_node, NULL_TREE);
16804 tree opaque_ftype_opaque_opaque_opaque
16805 = build_function_type_list (opaque_V4SI_type_node,
16806 opaque_V4SI_type_node, opaque_V4SI_type_node,
16807 opaque_V4SI_type_node, NULL_TREE);
16808 tree opaque_ftype_opaque_opaque
16809 = build_function_type_list (opaque_V4SI_type_node,
16810 opaque_V4SI_type_node, opaque_V4SI_type_node,
16811 NULL_TREE);
16812 tree int_ftype_int_opaque_opaque
16813 = build_function_type_list (integer_type_node,
16814 integer_type_node, opaque_V4SI_type_node,
16815 opaque_V4SI_type_node, NULL_TREE);
16816 tree int_ftype_int_v4si_v4si
16817 = build_function_type_list (integer_type_node,
16818 integer_type_node, V4SI_type_node,
16819 V4SI_type_node, NULL_TREE);
16820 tree int_ftype_int_v2di_v2di
16821 = build_function_type_list (integer_type_node,
16822 integer_type_node, V2DI_type_node,
16823 V2DI_type_node, NULL_TREE);
16824 tree void_ftype_v4si
16825 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16826 tree v8hi_ftype_void
16827 = build_function_type_list (V8HI_type_node, NULL_TREE);
16828 tree void_ftype_void
16829 = build_function_type_list (void_type_node, NULL_TREE);
16830 tree void_ftype_int
16831 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16832
16833 tree opaque_ftype_long_pcvoid
16834 = build_function_type_list (opaque_V4SI_type_node,
16835 long_integer_type_node, pcvoid_type_node,
16836 NULL_TREE);
16837 tree v16qi_ftype_long_pcvoid
16838 = build_function_type_list (V16QI_type_node,
16839 long_integer_type_node, pcvoid_type_node,
16840 NULL_TREE);
16841 tree v8hi_ftype_long_pcvoid
16842 = build_function_type_list (V8HI_type_node,
16843 long_integer_type_node, pcvoid_type_node,
16844 NULL_TREE);
16845 tree v4si_ftype_long_pcvoid
16846 = build_function_type_list (V4SI_type_node,
16847 long_integer_type_node, pcvoid_type_node,
16848 NULL_TREE);
16849 tree v4sf_ftype_long_pcvoid
16850 = build_function_type_list (V4SF_type_node,
16851 long_integer_type_node, pcvoid_type_node,
16852 NULL_TREE);
16853 tree v2df_ftype_long_pcvoid
16854 = build_function_type_list (V2DF_type_node,
16855 long_integer_type_node, pcvoid_type_node,
16856 NULL_TREE);
16857 tree v2di_ftype_long_pcvoid
16858 = build_function_type_list (V2DI_type_node,
16859 long_integer_type_node, pcvoid_type_node,
16860 NULL_TREE);
16861 tree v1ti_ftype_long_pcvoid
16862 = build_function_type_list (V1TI_type_node,
16863 long_integer_type_node, pcvoid_type_node,
16864 NULL_TREE);
16865
16866 tree void_ftype_opaque_long_pvoid
16867 = build_function_type_list (void_type_node,
16868 opaque_V4SI_type_node, long_integer_type_node,
16869 pvoid_type_node, NULL_TREE);
16870 tree void_ftype_v4si_long_pvoid
16871 = build_function_type_list (void_type_node,
16872 V4SI_type_node, long_integer_type_node,
16873 pvoid_type_node, NULL_TREE);
16874 tree void_ftype_v16qi_long_pvoid
16875 = build_function_type_list (void_type_node,
16876 V16QI_type_node, long_integer_type_node,
16877 pvoid_type_node, NULL_TREE);
16878
16879 tree void_ftype_v16qi_pvoid_long
16880 = build_function_type_list (void_type_node,
16881 V16QI_type_node, pvoid_type_node,
16882 long_integer_type_node, NULL_TREE);
16883
16884 tree void_ftype_v8hi_long_pvoid
16885 = build_function_type_list (void_type_node,
16886 V8HI_type_node, long_integer_type_node,
16887 pvoid_type_node, NULL_TREE);
16888 tree void_ftype_v4sf_long_pvoid
16889 = build_function_type_list (void_type_node,
16890 V4SF_type_node, long_integer_type_node,
16891 pvoid_type_node, NULL_TREE);
16892 tree void_ftype_v2df_long_pvoid
16893 = build_function_type_list (void_type_node,
16894 V2DF_type_node, long_integer_type_node,
16895 pvoid_type_node, NULL_TREE);
16896 tree void_ftype_v1ti_long_pvoid
16897 = build_function_type_list (void_type_node,
16898 V1TI_type_node, long_integer_type_node,
16899 pvoid_type_node, NULL_TREE);
16900 tree void_ftype_v2di_long_pvoid
16901 = build_function_type_list (void_type_node,
16902 V2DI_type_node, long_integer_type_node,
16903 pvoid_type_node, NULL_TREE);
16904 tree int_ftype_int_v8hi_v8hi
16905 = build_function_type_list (integer_type_node,
16906 integer_type_node, V8HI_type_node,
16907 V8HI_type_node, NULL_TREE);
16908 tree int_ftype_int_v16qi_v16qi
16909 = build_function_type_list (integer_type_node,
16910 integer_type_node, V16QI_type_node,
16911 V16QI_type_node, NULL_TREE);
16912 tree int_ftype_int_v4sf_v4sf
16913 = build_function_type_list (integer_type_node,
16914 integer_type_node, V4SF_type_node,
16915 V4SF_type_node, NULL_TREE);
16916 tree int_ftype_int_v2df_v2df
16917 = build_function_type_list (integer_type_node,
16918 integer_type_node, V2DF_type_node,
16919 V2DF_type_node, NULL_TREE);
16920 tree v2di_ftype_v2di
16921 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16922 tree v4si_ftype_v4si
16923 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16924 tree v8hi_ftype_v8hi
16925 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16926 tree v16qi_ftype_v16qi
16927 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16928 tree v4sf_ftype_v4sf
16929 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16930 tree v2df_ftype_v2df
16931 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16932 tree void_ftype_pcvoid_int_int
16933 = build_function_type_list (void_type_node,
16934 pcvoid_type_node, integer_type_node,
16935 integer_type_node, NULL_TREE);
16936
16937 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16938 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16939 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16940 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16941 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16942 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16943 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16944 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16945 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16946 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16947 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16948 ALTIVEC_BUILTIN_LVXL_V2DF);
16949 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16950 ALTIVEC_BUILTIN_LVXL_V2DI);
16951 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16952 ALTIVEC_BUILTIN_LVXL_V4SF);
16953 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16954 ALTIVEC_BUILTIN_LVXL_V4SI);
16955 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16956 ALTIVEC_BUILTIN_LVXL_V8HI);
16957 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16958 ALTIVEC_BUILTIN_LVXL_V16QI);
16959 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16960 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16961 ALTIVEC_BUILTIN_LVX_V1TI);
16962 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16963 ALTIVEC_BUILTIN_LVX_V2DF);
16964 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16965 ALTIVEC_BUILTIN_LVX_V2DI);
16966 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16967 ALTIVEC_BUILTIN_LVX_V4SF);
16968 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16969 ALTIVEC_BUILTIN_LVX_V4SI);
16970 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16971 ALTIVEC_BUILTIN_LVX_V8HI);
16972 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16973 ALTIVEC_BUILTIN_LVX_V16QI);
16974 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16975 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16976 ALTIVEC_BUILTIN_STVX_V2DF);
16977 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16978 ALTIVEC_BUILTIN_STVX_V2DI);
16979 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16980 ALTIVEC_BUILTIN_STVX_V4SF);
16981 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16982 ALTIVEC_BUILTIN_STVX_V4SI);
16983 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16984 ALTIVEC_BUILTIN_STVX_V8HI);
16985 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16986 ALTIVEC_BUILTIN_STVX_V16QI);
16987 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16988 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16989 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16990 ALTIVEC_BUILTIN_STVXL_V2DF);
16991 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16992 ALTIVEC_BUILTIN_STVXL_V2DI);
16993 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16994 ALTIVEC_BUILTIN_STVXL_V4SF);
16995 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16996 ALTIVEC_BUILTIN_STVXL_V4SI);
16997 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16998 ALTIVEC_BUILTIN_STVXL_V8HI);
16999 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17000 ALTIVEC_BUILTIN_STVXL_V16QI);
17001 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17002 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17003 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17004 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17005 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17006 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17007 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17008 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17009 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17010 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17011 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17012 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17013 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17014 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17015 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17016 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17017
17018 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17019 VSX_BUILTIN_LXVD2X_V2DF);
17020 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17021 VSX_BUILTIN_LXVD2X_V2DI);
17022 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17023 VSX_BUILTIN_LXVW4X_V4SF);
17024 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17025 VSX_BUILTIN_LXVW4X_V4SI);
17026 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17027 VSX_BUILTIN_LXVW4X_V8HI);
17028 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17029 VSX_BUILTIN_LXVW4X_V16QI);
17030 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17031 VSX_BUILTIN_STXVD2X_V2DF);
17032 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17033 VSX_BUILTIN_STXVD2X_V2DI);
17034 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17035 VSX_BUILTIN_STXVW4X_V4SF);
17036 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17037 VSX_BUILTIN_STXVW4X_V4SI);
17038 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17039 VSX_BUILTIN_STXVW4X_V8HI);
17040 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17041 VSX_BUILTIN_STXVW4X_V16QI);
17042
17043 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17044 VSX_BUILTIN_LD_ELEMREV_V2DF);
17045 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17046 VSX_BUILTIN_LD_ELEMREV_V2DI);
17047 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17048 VSX_BUILTIN_LD_ELEMREV_V4SF);
17049 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17050 VSX_BUILTIN_LD_ELEMREV_V4SI);
17051 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17052 VSX_BUILTIN_LD_ELEMREV_V8HI);
17053 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17054 VSX_BUILTIN_LD_ELEMREV_V16QI);
17055 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17056 VSX_BUILTIN_ST_ELEMREV_V2DF);
17057 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17058 VSX_BUILTIN_ST_ELEMREV_V1TI);
17059 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17060 VSX_BUILTIN_ST_ELEMREV_V2DI);
17061 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17062 VSX_BUILTIN_ST_ELEMREV_V4SF);
17063 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17064 VSX_BUILTIN_ST_ELEMREV_V4SI);
17065 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17066 VSX_BUILTIN_ST_ELEMREV_V8HI);
17067 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17068 VSX_BUILTIN_ST_ELEMREV_V16QI);
17069
17070 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17071 VSX_BUILTIN_VEC_LD);
17072 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17073 VSX_BUILTIN_VEC_ST);
17074 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17075 VSX_BUILTIN_VEC_XL);
17076 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17077 VSX_BUILTIN_VEC_XL_BE);
17078 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17079 VSX_BUILTIN_VEC_XST);
17080 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17081 VSX_BUILTIN_VEC_XST_BE);
17082
17083 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17084 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17085 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17086
17087 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17088 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17089 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17090 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17091 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17092 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17093 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17094 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17095 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17096 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17097 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17098 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17099
17100 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17101 ALTIVEC_BUILTIN_VEC_ADDE);
17102 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17103 ALTIVEC_BUILTIN_VEC_ADDEC);
17104 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17105 ALTIVEC_BUILTIN_VEC_CMPNE);
17106 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17107 ALTIVEC_BUILTIN_VEC_MUL);
17108 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17109 ALTIVEC_BUILTIN_VEC_SUBE);
17110 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17111 ALTIVEC_BUILTIN_VEC_SUBEC);
17112
17113 /* Cell builtins. */
17114 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17115 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17116 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17117 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17118
17119 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17120 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17121 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17122 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17123
17124 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17125 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17126 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17127 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17128
17129 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17130 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17131 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17132 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17133
17134 if (TARGET_P9_VECTOR)
17135 {
17136 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17137 P9V_BUILTIN_STXVL);
17138 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17139 P9V_BUILTIN_XST_LEN_R);
17140 }
17141
17142 /* Add the DST variants. */
17143 d = bdesc_dst;
17144 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17145 {
17146 HOST_WIDE_INT mask = d->mask;
17147
17148 /* It is expected that these dst built-in functions may have
17149 d->icode equal to CODE_FOR_nothing. */
17150 if ((mask & builtin_mask) != mask)
17151 {
17152 if (TARGET_DEBUG_BUILTIN)
17153 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17154 d->name);
17155 continue;
17156 }
17157 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17158 }
17159
17160 /* Initialize the predicates. */
17161 d = bdesc_altivec_preds;
17162 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17163 {
17164 machine_mode mode1;
17165 tree type;
17166 HOST_WIDE_INT mask = d->mask;
17167
17168 if ((mask & builtin_mask) != mask)
17169 {
17170 if (TARGET_DEBUG_BUILTIN)
17171 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17172 d->name);
17173 continue;
17174 }
17175
17176 if (rs6000_overloaded_builtin_p (d->code))
17177 mode1 = VOIDmode;
17178 else
17179 {
17180 /* Cannot define builtin if the instruction is disabled. */
17181 gcc_assert (d->icode != CODE_FOR_nothing);
17182 mode1 = insn_data[d->icode].operand[1].mode;
17183 }
17184
17185 switch (mode1)
17186 {
17187 case E_VOIDmode:
17188 type = int_ftype_int_opaque_opaque;
17189 break;
17190 case E_V2DImode:
17191 type = int_ftype_int_v2di_v2di;
17192 break;
17193 case E_V4SImode:
17194 type = int_ftype_int_v4si_v4si;
17195 break;
17196 case E_V8HImode:
17197 type = int_ftype_int_v8hi_v8hi;
17198 break;
17199 case E_V16QImode:
17200 type = int_ftype_int_v16qi_v16qi;
17201 break;
17202 case E_V4SFmode:
17203 type = int_ftype_int_v4sf_v4sf;
17204 break;
17205 case E_V2DFmode:
17206 type = int_ftype_int_v2df_v2df;
17207 break;
17208 default:
17209 gcc_unreachable ();
17210 }
17211
17212 def_builtin (d->name, type, d->code);
17213 }
17214
17215 /* Initialize the abs* operators. */
17216 d = bdesc_abs;
17217 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17218 {
17219 machine_mode mode0;
17220 tree type;
17221 HOST_WIDE_INT mask = d->mask;
17222
17223 if ((mask & builtin_mask) != mask)
17224 {
17225 if (TARGET_DEBUG_BUILTIN)
17226 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17227 d->name);
17228 continue;
17229 }
17230
17231 /* Cannot define builtin if the instruction is disabled. */
17232 gcc_assert (d->icode != CODE_FOR_nothing);
17233 mode0 = insn_data[d->icode].operand[0].mode;
17234
17235 switch (mode0)
17236 {
17237 case E_V2DImode:
17238 type = v2di_ftype_v2di;
17239 break;
17240 case E_V4SImode:
17241 type = v4si_ftype_v4si;
17242 break;
17243 case E_V8HImode:
17244 type = v8hi_ftype_v8hi;
17245 break;
17246 case E_V16QImode:
17247 type = v16qi_ftype_v16qi;
17248 break;
17249 case E_V4SFmode:
17250 type = v4sf_ftype_v4sf;
17251 break;
17252 case E_V2DFmode:
17253 type = v2df_ftype_v2df;
17254 break;
17255 default:
17256 gcc_unreachable ();
17257 }
17258
17259 def_builtin (d->name, type, d->code);
17260 }
17261
17262 /* Initialize target builtin that implements
17263 targetm.vectorize.builtin_mask_for_load. */
17264
17265 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17266 v16qi_ftype_long_pcvoid,
17267 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17268 BUILT_IN_MD, NULL, NULL_TREE);
17269 TREE_READONLY (decl) = 1;
17270 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17271 altivec_builtin_mask_for_load = decl;
17272
17273 /* Access to the vec_init patterns. */
17274 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17275 integer_type_node, integer_type_node,
17276 integer_type_node, NULL_TREE);
17277 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17278
17279 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17280 short_integer_type_node,
17281 short_integer_type_node,
17282 short_integer_type_node,
17283 short_integer_type_node,
17284 short_integer_type_node,
17285 short_integer_type_node,
17286 short_integer_type_node, NULL_TREE);
17287 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17288
17289 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17290 char_type_node, char_type_node,
17291 char_type_node, char_type_node,
17292 char_type_node, char_type_node,
17293 char_type_node, char_type_node,
17294 char_type_node, char_type_node,
17295 char_type_node, char_type_node,
17296 char_type_node, char_type_node,
17297 char_type_node, NULL_TREE);
17298 def_builtin ("__builtin_vec_init_v16qi", ftype,
17299 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17300
17301 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17302 float_type_node, float_type_node,
17303 float_type_node, NULL_TREE);
17304 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17305
17306 /* VSX builtins. */
17307 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17308 double_type_node, NULL_TREE);
17309 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17310
17311 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17312 intDI_type_node, NULL_TREE);
17313 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17314
17315 /* Access to the vec_set patterns. */
17316 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17317 intSI_type_node,
17318 integer_type_node, NULL_TREE);
17319 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17320
17321 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17322 intHI_type_node,
17323 integer_type_node, NULL_TREE);
17324 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17325
17326 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17327 intQI_type_node,
17328 integer_type_node, NULL_TREE);
17329 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17330
17331 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17332 float_type_node,
17333 integer_type_node, NULL_TREE);
17334 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17335
17336 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17337 double_type_node,
17338 integer_type_node, NULL_TREE);
17339 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17340
17341 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17342 intDI_type_node,
17343 integer_type_node, NULL_TREE);
17344 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17345
17346 /* Access to the vec_extract patterns. */
17347 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17348 integer_type_node, NULL_TREE);
17349 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17350
17351 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17352 integer_type_node, NULL_TREE);
17353 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17354
17355 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17356 integer_type_node, NULL_TREE);
17357 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17358
17359 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17360 integer_type_node, NULL_TREE);
17361 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17362
17363 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17364 integer_type_node, NULL_TREE);
17365 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17366
17367 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17368 integer_type_node, NULL_TREE);
17369 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17370
17371
17372 if (V1TI_type_node)
17373 {
17374 tree v1ti_ftype_long_pcvoid
17375 = build_function_type_list (V1TI_type_node,
17376 long_integer_type_node, pcvoid_type_node,
17377 NULL_TREE);
17378 tree void_ftype_v1ti_long_pvoid
17379 = build_function_type_list (void_type_node,
17380 V1TI_type_node, long_integer_type_node,
17381 pvoid_type_node, NULL_TREE);
17382 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17383 VSX_BUILTIN_LD_ELEMREV_V1TI);
17384 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17385 VSX_BUILTIN_LXVD2X_V1TI);
17386 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17387 VSX_BUILTIN_STXVD2X_V1TI);
17388 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17389 NULL_TREE, NULL_TREE);
17390 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17391 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17392 intTI_type_node,
17393 integer_type_node, NULL_TREE);
17394 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17395 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17396 integer_type_node, NULL_TREE);
17397 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17398 }
17399
17400 }
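/* Illustrative sketch (hypothetical user code): <altivec.h> maps the
   generic vec_* operations onto the overloaded __builtin_vec_* entries
   registered above, which overload resolution then lowers to the
   type-specific variants, e.g.

	#include <altivec.h>

	vector int
	load_twice (const vector int *p)
	{
	  vector int v = vec_ld (0, p);	  /* -> __builtin_altivec_lvx_v4si  */
	  return vec_add (v, v);
	}
*/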
17401
17402 static void
17403 htm_init_builtins (void)
17404 {
17405 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17406 const struct builtin_description *d;
17407 size_t i;
17408
17409 d = bdesc_htm;
17410 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17411 {
17412 tree op[MAX_HTM_OPERANDS], type;
17413 HOST_WIDE_INT mask = d->mask;
17414 unsigned attr = rs6000_builtin_info[d->code].attr;
17415 bool void_func = (attr & RS6000_BTC_VOID);
17416 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17417 int nopnds = 0;
17418 tree gpr_type_node;
17419 tree rettype;
17420 tree argtype;
17421
17422 /* It is expected that these htm built-in functions may have
17423 d->icode equal to CODE_FOR_nothing. */
17424
17425 if (TARGET_32BIT && TARGET_POWERPC64)
17426 gpr_type_node = long_long_unsigned_type_node;
17427 else
17428 gpr_type_node = long_unsigned_type_node;
17429
17430 if (attr & RS6000_BTC_SPR)
17431 {
17432 rettype = gpr_type_node;
17433 argtype = gpr_type_node;
17434 }
17435 else if (d->code == HTM_BUILTIN_TABORTDC
17436 || d->code == HTM_BUILTIN_TABORTDCI)
17437 {
17438 rettype = unsigned_type_node;
17439 argtype = gpr_type_node;
17440 }
17441 else
17442 {
17443 rettype = unsigned_type_node;
17444 argtype = unsigned_type_node;
17445 }
17446
17447 if ((mask & builtin_mask) != mask)
17448 {
17449 if (TARGET_DEBUG_BUILTIN)
17450 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17451 continue;
17452 }
17453
17454 if (d->name == 0)
17455 {
17456 if (TARGET_DEBUG_BUILTIN)
17457 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
17458 (long unsigned) i);
17459 continue;
17460 }
17461
17462 op[nopnds++] = (void_func) ? void_type_node : rettype;
17463
17464 if (attr_args == RS6000_BTC_UNARY)
17465 op[nopnds++] = argtype;
17466 else if (attr_args == RS6000_BTC_BINARY)
17467 {
17468 op[nopnds++] = argtype;
17469 op[nopnds++] = argtype;
17470 }
17471 else if (attr_args == RS6000_BTC_TERNARY)
17472 {
17473 op[nopnds++] = argtype;
17474 op[nopnds++] = argtype;
17475 op[nopnds++] = argtype;
17476 }
17477
17478 switch (nopnds)
17479 {
17480 case 1:
17481 type = build_function_type_list (op[0], NULL_TREE);
17482 break;
17483 case 2:
17484 type = build_function_type_list (op[0], op[1], NULL_TREE);
17485 break;
17486 case 3:
17487 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17488 break;
17489 case 4:
17490 type = build_function_type_list (op[0], op[1], op[2], op[3],
17491 NULL_TREE);
17492 break;
17493 default:
17494 gcc_unreachable ();
17495 }
17496
17497 def_builtin (d->name, type, d->code);
17498 }
17499 }
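/* Illustrative sketch (hypothetical user code) of the HTM builtins
   defined above, following the usual tbegin/tend bracket; the fallback
   path is whatever non-transactional locking the caller provides:

	#include <htmintrin.h>

	if (__builtin_tbegin (0))
	  {
	    counter++;		/* transactional region  */
	    __builtin_tend (0);
	  }
	else
	  fallback_with_lock ();	/* hypothetical  */
*/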
17500
17501 /* Hash function for builtin functions with up to 3 arguments and a return
17502 type. */
17503 hashval_t
17504 builtin_hasher::hash (builtin_hash_struct *bh)
17505 {
17506 unsigned ret = 0;
17507 int i;
17508
17509 for (i = 0; i < 4; i++)
17510 {
17511 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17512 ret = (ret * 2) + bh->uns_p[i];
17513 }
17514
17515 return ret;
17516 }
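/* For example, a builtin with signature V4SI (V4SI, V4SI) and all-signed
   types hashes the sequence { V4SImode, V4SImode, V4SImode, VOIDmode }
   with a 0 signedness bit folded in after each mode; hash collisions
   between distinct signatures are still possible, which is why equal ()
   below compares the full mode/uns_p arrays.  */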
17517
17518 /* Compare builtin hash entries H1 and H2 for equivalence. */
17519 bool
17520 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17521 {
17522 return ((p1->mode[0] == p2->mode[0])
17523 && (p1->mode[1] == p2->mode[1])
17524 && (p1->mode[2] == p2->mode[2])
17525 && (p1->mode[3] == p2->mode[3])
17526 && (p1->uns_p[0] == p2->uns_p[0])
17527 && (p1->uns_p[1] == p2->uns_p[1])
17528 && (p1->uns_p[2] == p2->uns_p[2])
17529 && (p1->uns_p[3] == p2->uns_p[3]));
17530 }
17531
17532 /* Map types for builtin functions with an explicit return type and up to 3
17533 arguments.  Functions with fewer than 3 arguments pass VOIDmode as the
17534 mode of each unused argument slot.  */
17535 static tree
17536 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17537 machine_mode mode_arg1, machine_mode mode_arg2,
17538 enum rs6000_builtins builtin, const char *name)
17539 {
17540 struct builtin_hash_struct h;
17541 struct builtin_hash_struct *h2;
17542 int num_args = 3;
17543 int i;
17544 tree ret_type = NULL_TREE;
17545 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17546
17547 /* Create builtin_hash_table. */
17548 if (builtin_hash_table == NULL)
17549 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17550
17551 h.type = NULL_TREE;
17552 h.mode[0] = mode_ret;
17553 h.mode[1] = mode_arg0;
17554 h.mode[2] = mode_arg1;
17555 h.mode[3] = mode_arg2;
17556 h.uns_p[0] = 0;
17557 h.uns_p[1] = 0;
17558 h.uns_p[2] = 0;
17559 h.uns_p[3] = 0;
17560
17561 /* If the builtin produces unsigned results or takes unsigned arguments,
17562 and it is returned as a decl for the vectorizer (such as the widening
17563 multiplies and permutes), make sure the arguments and return value
17564 are type correct.  */
17565 switch (builtin)
17566 {
17567 /* unsigned 1 argument functions. */
17568 case CRYPTO_BUILTIN_VSBOX:
17569 case P8V_BUILTIN_VGBBD:
17570 case MISC_BUILTIN_CDTBCD:
17571 case MISC_BUILTIN_CBCDTD:
17572 h.uns_p[0] = 1;
17573 h.uns_p[1] = 1;
17574 break;
17575
17576 /* unsigned 2 argument functions. */
17577 case ALTIVEC_BUILTIN_VMULEUB:
17578 case ALTIVEC_BUILTIN_VMULEUH:
17579 case P8V_BUILTIN_VMULEUW:
17580 case ALTIVEC_BUILTIN_VMULOUB:
17581 case ALTIVEC_BUILTIN_VMULOUH:
17582 case P8V_BUILTIN_VMULOUW:
17583 case CRYPTO_BUILTIN_VCIPHER:
17584 case CRYPTO_BUILTIN_VCIPHERLAST:
17585 case CRYPTO_BUILTIN_VNCIPHER:
17586 case CRYPTO_BUILTIN_VNCIPHERLAST:
17587 case CRYPTO_BUILTIN_VPMSUMB:
17588 case CRYPTO_BUILTIN_VPMSUMH:
17589 case CRYPTO_BUILTIN_VPMSUMW:
17590 case CRYPTO_BUILTIN_VPMSUMD:
17591 case CRYPTO_BUILTIN_VPMSUM:
17592 case MISC_BUILTIN_ADDG6S:
17593 case MISC_BUILTIN_DIVWEU:
17594 case MISC_BUILTIN_DIVDEU:
17595 case VSX_BUILTIN_UDIV_V2DI:
17596 case ALTIVEC_BUILTIN_VMAXUB:
17597 case ALTIVEC_BUILTIN_VMINUB:
17598 case ALTIVEC_BUILTIN_VMAXUH:
17599 case ALTIVEC_BUILTIN_VMINUH:
17600 case ALTIVEC_BUILTIN_VMAXUW:
17601 case ALTIVEC_BUILTIN_VMINUW:
17602 case P8V_BUILTIN_VMAXUD:
17603 case P8V_BUILTIN_VMINUD:
17604 h.uns_p[0] = 1;
17605 h.uns_p[1] = 1;
17606 h.uns_p[2] = 1;
17607 break;
17608
17609 /* unsigned 3 argument functions. */
17610 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17611 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17612 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17613 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17614 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17615 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17616 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17617 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17618 case VSX_BUILTIN_VPERM_16QI_UNS:
17619 case VSX_BUILTIN_VPERM_8HI_UNS:
17620 case VSX_BUILTIN_VPERM_4SI_UNS:
17621 case VSX_BUILTIN_VPERM_2DI_UNS:
17622 case VSX_BUILTIN_XXSEL_16QI_UNS:
17623 case VSX_BUILTIN_XXSEL_8HI_UNS:
17624 case VSX_BUILTIN_XXSEL_4SI_UNS:
17625 case VSX_BUILTIN_XXSEL_2DI_UNS:
17626 case CRYPTO_BUILTIN_VPERMXOR:
17627 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17628 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17629 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17630 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17631 case CRYPTO_BUILTIN_VSHASIGMAW:
17632 case CRYPTO_BUILTIN_VSHASIGMAD:
17633 case CRYPTO_BUILTIN_VSHASIGMA:
17634 h.uns_p[0] = 1;
17635 h.uns_p[1] = 1;
17636 h.uns_p[2] = 1;
17637 h.uns_p[3] = 1;
17638 break;
17639
17640 /* signed permute functions with unsigned char mask. */
17641 case ALTIVEC_BUILTIN_VPERM_16QI:
17642 case ALTIVEC_BUILTIN_VPERM_8HI:
17643 case ALTIVEC_BUILTIN_VPERM_4SI:
17644 case ALTIVEC_BUILTIN_VPERM_4SF:
17645 case ALTIVEC_BUILTIN_VPERM_2DI:
17646 case ALTIVEC_BUILTIN_VPERM_2DF:
17647 case VSX_BUILTIN_VPERM_16QI:
17648 case VSX_BUILTIN_VPERM_8HI:
17649 case VSX_BUILTIN_VPERM_4SI:
17650 case VSX_BUILTIN_VPERM_4SF:
17651 case VSX_BUILTIN_VPERM_2DI:
17652 case VSX_BUILTIN_VPERM_2DF:
17653 h.uns_p[3] = 1;
17654 break;
17655
17656 /* unsigned args, signed return. */
17657 case VSX_BUILTIN_XVCVUXDSP:
17658 case VSX_BUILTIN_XVCVUXDDP_UNS:
17659 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17660 h.uns_p[1] = 1;
17661 break;
17662
17663 /* signed args, unsigned return. */
17664 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17665 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17666 case MISC_BUILTIN_UNPACK_TD:
17667 case MISC_BUILTIN_UNPACK_V1TI:
17668 h.uns_p[0] = 1;
17669 break;
17670
17671 /* unsigned arguments, bool return (compares). */
17672 case ALTIVEC_BUILTIN_VCMPEQUB:
17673 case ALTIVEC_BUILTIN_VCMPEQUH:
17674 case ALTIVEC_BUILTIN_VCMPEQUW:
17675 case P8V_BUILTIN_VCMPEQUD:
17676 case VSX_BUILTIN_CMPGE_U16QI:
17677 case VSX_BUILTIN_CMPGE_U8HI:
17678 case VSX_BUILTIN_CMPGE_U4SI:
17679 case VSX_BUILTIN_CMPGE_U2DI:
17680 case ALTIVEC_BUILTIN_VCMPGTUB:
17681 case ALTIVEC_BUILTIN_VCMPGTUH:
17682 case ALTIVEC_BUILTIN_VCMPGTUW:
17683 case P8V_BUILTIN_VCMPGTUD:
17684 h.uns_p[1] = 1;
17685 h.uns_p[2] = 1;
17686 break;
17687
17688 /* unsigned arguments for 128-bit pack instructions. */
17689 case MISC_BUILTIN_PACK_TD:
17690 case MISC_BUILTIN_PACK_V1TI:
17691 h.uns_p[1] = 1;
17692 h.uns_p[2] = 1;
17693 break;
17694
17695 /* unsigned second arguments (vector shift right). */
17696 case ALTIVEC_BUILTIN_VSRB:
17697 case ALTIVEC_BUILTIN_VSRH:
17698 case ALTIVEC_BUILTIN_VSRW:
17699 case P8V_BUILTIN_VSRD:
17700 h.uns_p[2] = 1;
17701 break;
17702
17703 default:
17704 break;
17705 }
17706
17707 /* Figure out how many args are present. */
17708 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17709 num_args--;
17710
17711 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17712 if (!ret_type && h.uns_p[0])
17713 ret_type = builtin_mode_to_type[h.mode[0]][0];
17714
17715 if (!ret_type)
17716 fatal_error (input_location,
17717 "internal error: builtin function %qs had an unexpected "
17718 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17719
17720 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17721 arg_type[i] = NULL_TREE;
17722
17723 for (i = 0; i < num_args; i++)
17724 {
17725 int m = (int) h.mode[i+1];
17726 int uns_p = h.uns_p[i+1];
17727
17728 arg_type[i] = builtin_mode_to_type[m][uns_p];
17729 if (!arg_type[i] && uns_p)
17730 arg_type[i] = builtin_mode_to_type[m][0];
17731
17732 if (!arg_type[i])
17733 fatal_error (input_location,
17734 "internal error: builtin function %qs, argument %d "
17735 "had unexpected argument type %qs", name, i,
17736 GET_MODE_NAME (m));
17737 }
17738
17739 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17740 if (*found == NULL)
17741 {
17742 h2 = ggc_alloc<builtin_hash_struct> ();
17743 *h2 = h;
17744 *found = h2;
17745
17746 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17747 arg_type[2], NULL_TREE);
17748 }
17749
17750 return (*found)->type;
17751 }
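/* For example, builtin_function_type (V4SImode, V4SImode, V4SImode,
   VOIDmode, ALTIVEC_BUILTIN_VCMPEQUW, "__builtin_altivec_vcmpequw")
   marks both argument slots unsigned in the switch above and yields
   (and caches) the type V4SI (unsigned V4SI, unsigned V4SI).  */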
17752
17753 static void
17754 rs6000_common_init_builtins (void)
17755 {
17756 const struct builtin_description *d;
17757 size_t i;
17758
17759 tree opaque_ftype_opaque = NULL_TREE;
17760 tree opaque_ftype_opaque_opaque = NULL_TREE;
17761 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17762 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17763
17764 /* Create Altivec and VSX builtins on machines with at least the
17765 general purpose extensions (970 and newer) to allow the use of
17766 the target attribute. */
17767
17768 if (TARGET_EXTRA_BUILTINS)
17769 builtin_mask |= RS6000_BTM_COMMON;
17770
17771 /* Add the ternary operators. */
17772 d = bdesc_3arg;
17773 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17774 {
17775 tree type;
17776 HOST_WIDE_INT mask = d->mask;
17777
17778 if ((mask & builtin_mask) != mask)
17779 {
17780 if (TARGET_DEBUG_BUILTIN)
17781 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17782 continue;
17783 }
17784
17785 if (rs6000_overloaded_builtin_p (d->code))
17786 {
17787 if (! (type = opaque_ftype_opaque_opaque_opaque))
17788 type = opaque_ftype_opaque_opaque_opaque
17789 = build_function_type_list (opaque_V4SI_type_node,
17790 opaque_V4SI_type_node,
17791 opaque_V4SI_type_node,
17792 opaque_V4SI_type_node,
17793 NULL_TREE);
17794 }
17795 else
17796 {
17797 enum insn_code icode = d->icode;
17798 if (d->name == 0)
17799 {
17800 if (TARGET_DEBUG_BUILTIN)
17801 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
17802 (long unsigned)i);
17803
17804 continue;
17805 }
17806
17807 if (icode == CODE_FOR_nothing)
17808 {
17809 if (TARGET_DEBUG_BUILTIN)
17810 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17811 d->name);
17812
17813 continue;
17814 }
17815
17816 type = builtin_function_type (insn_data[icode].operand[0].mode,
17817 insn_data[icode].operand[1].mode,
17818 insn_data[icode].operand[2].mode,
17819 insn_data[icode].operand[3].mode,
17820 d->code, d->name);
17821 }
17822
17823 def_builtin (d->name, type, d->code);
17824 }
17825
17826 /* Add the binary operators. */
17827 d = bdesc_2arg;
17828 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17829 {
17830 machine_mode mode0, mode1, mode2;
17831 tree type;
17832 HOST_WIDE_INT mask = d->mask;
17833
17834 if ((mask & builtin_mask) != mask)
17835 {
17836 if (TARGET_DEBUG_BUILTIN)
17837 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17838 continue;
17839 }
17840
17841 if (rs6000_overloaded_builtin_p (d->code))
17842 {
17843 if (! (type = opaque_ftype_opaque_opaque))
17844 type = opaque_ftype_opaque_opaque
17845 = build_function_type_list (opaque_V4SI_type_node,
17846 opaque_V4SI_type_node,
17847 opaque_V4SI_type_node,
17848 NULL_TREE);
17849 }
17850 else
17851 {
17852 enum insn_code icode = d->icode;
17853 if (d->name == 0)
17854 {
17855 if (TARGET_DEBUG_BUILTIN)
17856 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
17857 (long unsigned)i);
17858
17859 continue;
17860 }
17861
17862 if (icode == CODE_FOR_nothing)
17863 {
17864 if (TARGET_DEBUG_BUILTIN)
17865 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17866 d->name);
17867
17868 continue;
17869 }
17870
17871 mode0 = insn_data[icode].operand[0].mode;
17872 mode1 = insn_data[icode].operand[1].mode;
17873 mode2 = insn_data[icode].operand[2].mode;
17874
17875 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17876 d->code, d->name);
17877 }
17878
17879 def_builtin (d->name, type, d->code);
17880 }
17881
17882 /* Add the simple unary operators. */
17883 d = bdesc_1arg;
17884 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17885 {
17886 machine_mode mode0, mode1;
17887 tree type;
17888 HOST_WIDE_INT mask = d->mask;
17889
17890 if ((mask & builtin_mask) != mask)
17891 {
17892 if (TARGET_DEBUG_BUILTIN)
17893 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17894 continue;
17895 }
17896
17897 if (rs6000_overloaded_builtin_p (d->code))
17898 {
17899 if (! (type = opaque_ftype_opaque))
17900 type = opaque_ftype_opaque
17901 = build_function_type_list (opaque_V4SI_type_node,
17902 opaque_V4SI_type_node,
17903 NULL_TREE);
17904 }
17905 else
17906 {
17907 enum insn_code icode = d->icode;
17908 if (d->name == 0)
17909 {
17910 if (TARGET_DEBUG_BUILTIN)
17911 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
17912 (long unsigned)i);
17913
17914 continue;
17915 }
17916
17917 if (icode == CODE_FOR_nothing)
17918 {
17919 if (TARGET_DEBUG_BUILTIN)
17920 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17921 d->name);
17922
17923 continue;
17924 }
17925
17926 mode0 = insn_data[icode].operand[0].mode;
17927 mode1 = insn_data[icode].operand[1].mode;
17928
17929 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17930 d->code, d->name);
17931 }
17932
17933 def_builtin (d->name, type, d->code);
17934 }
17935
17936 /* Add the simple no-argument operators. */
17937 d = bdesc_0arg;
17938 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17939 {
17940 machine_mode mode0;
17941 tree type;
17942 HOST_WIDE_INT mask = d->mask;
17943
17944 if ((mask & builtin_mask) != mask)
17945 {
17946 if (TARGET_DEBUG_BUILTIN)
17947 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17948 continue;
17949 }
17950 if (rs6000_overloaded_builtin_p (d->code))
17951 {
17952 if (!opaque_ftype_opaque)
17953 opaque_ftype_opaque
17954 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17955 type = opaque_ftype_opaque;
17956 }
17957 else
17958 {
17959 enum insn_code icode = d->icode;
17960 if (d->name == 0)
17961 {
17962 if (TARGET_DEBUG_BUILTIN)
17963 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17964 (long unsigned) i);
17965 continue;
17966 }
17967 if (icode == CODE_FOR_nothing)
17968 {
17969 if (TARGET_DEBUG_BUILTIN)
17970 fprintf (stderr,
17971 "rs6000_builtin, skip no-argument %s (no code)\n",
17972 d->name);
17973 continue;
17974 }
17975 mode0 = insn_data[icode].operand[0].mode;
17976 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17977 d->code, d->name);
17978 }
17979 def_builtin (d->name, type, d->code);
17980 }
17981 }
17982
17983 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17984 static void
17985 init_float128_ibm (machine_mode mode)
17986 {
17987 if (!TARGET_XL_COMPAT)
17988 {
17989 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17990 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17991 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17992 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17993
17994 if (!TARGET_HARD_FLOAT)
17995 {
17996 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17997 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17998 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17999 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18000 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18001 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18002 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18003 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18004
18005 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18006 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18007 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18008 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18009 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18010 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18011 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18012 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18013 }
18014 }
18015 else
18016 {
18017 set_optab_libfunc (add_optab, mode, "_xlqadd");
18018 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18019 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18020 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18021 }
18022
18023 /* Add various conversions for IFmode to use the traditional TFmode
18024 names. */
18025 if (mode == IFmode)
18026 {
18027 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
18028 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
18029 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
18030 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
18031 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
18032 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
18033
18034 if (TARGET_POWERPC64)
18035 {
18036 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18037 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18038 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18039 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18040 }
18041 }
18042 }
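/* After the registrations above, a plain IBM-long-double addition such as
   this (illustrative user code)

	long double
	qadd (long double a, long double b)
	{
	  return a + b;
	}

   compiles (without -mxl-compat) to a call to __gcc_qadd rather than to
   inline code, since IBM extended double arithmetic is done by the
   __gcc_q* support routines.  */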
18043
18044 /* Create a decl for complex long double multiply or complex long double
18045 divide when long double is IEEE 128-bit floating point.  We can't use
18046 __multc3 and __divtc3, because those names are already taken by the
18047 original long double format (IBM extended double).  The complex multiply
18048 and divide functions are encoded as builtins with a complex result and 4 scalar inputs.  */
18049
18050 static void
18051 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
18052 {
18053 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
18054 name, NULL_TREE);
18055
18056 set_builtin_decl (fncode, fndecl, true);
18057
18058 if (TARGET_DEBUG_BUILTIN)
18059 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
18060 }
18063
18064 /* Set up IEEE 128-bit floating point routines. Use different names if the
18065 arguments can be passed in a vector register. The historical PowerPC
18066 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18067 continue to use that if we aren't using vector registers to pass IEEE
18068 128-bit floating point. */
18069
18070 static void
18071 init_float128_ieee (machine_mode mode)
18072 {
18073 if (FLOAT128_VECTOR_P (mode))
18074 {
18075 static bool complex_muldiv_init_p = false;
18076
18077 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
18078 we have clone or target attributes, this will be called a second
18079 time. We want to create the built-in function only once. */
18080 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
18081 {
18082 complex_muldiv_init_p = true;
18083 built_in_function fncode_mul =
18084 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
18085 - MIN_MODE_COMPLEX_FLOAT);
18086 built_in_function fncode_div =
18087 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
18088 - MIN_MODE_COMPLEX_FLOAT);
18089
18090 tree fntype = build_function_type_list (complex_long_double_type_node,
18091 long_double_type_node,
18092 long_double_type_node,
18093 long_double_type_node,
18094 long_double_type_node,
18095 NULL_TREE);
18096
18097 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
18098 create_complex_muldiv ("__divkc3", fncode_div, fntype);
18099 }
18100
18101 set_optab_libfunc (add_optab, mode, "__addkf3");
18102 set_optab_libfunc (sub_optab, mode, "__subkf3");
18103 set_optab_libfunc (neg_optab, mode, "__negkf2");
18104 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18105 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18106 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18107 set_optab_libfunc (abs_optab, mode, "__abskf2");
18108 set_optab_libfunc (powi_optab, mode, "__powikf2");
18109
18110 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18111 set_optab_libfunc (ne_optab, mode, "__nekf2");
18112 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18113 set_optab_libfunc (ge_optab, mode, "__gekf2");
18114 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18115 set_optab_libfunc (le_optab, mode, "__lekf2");
18116 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18117
18118 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18119 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18120 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18121 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18122
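      /* IBM extended double (IFmode) and IEEE 128-bit (KFmode/TFmode) are
	 both 128 bits wide, so converting between them is neither a true
	 extension nor a true truncation; the optab direction here is a
	 convention, and the libfunc names keep the historical TFmode
	 spelling.  */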
18123 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
18124 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18125 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
18126
18127 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
18128 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18129 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
18130
18131 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
18132 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
18133 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
18134 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
18135 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
18136 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
18137
18138 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18139 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18140 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18141 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18142
18143 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18144 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18145 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18146 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18147
18148 if (TARGET_POWERPC64)
18149 {
18150 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18151 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18152 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18153 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18154 }
18155 }
18156 else
18158 {
18159 set_optab_libfunc (add_optab, mode, "_q_add");
18160 set_optab_libfunc (sub_optab, mode, "_q_sub");
18161 set_optab_libfunc (neg_optab, mode, "_q_neg");
18162 set_optab_libfunc (smul_optab, mode, "_q_mul");
18163 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18164 if (TARGET_PPC_GPOPT)
18165 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18166
18167 set_optab_libfunc (eq_optab, mode, "_q_feq");
18168 set_optab_libfunc (ne_optab, mode, "_q_fne");
18169 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18170 set_optab_libfunc (ge_optab, mode, "_q_fge");
18171 set_optab_libfunc (lt_optab, mode, "_q_flt");
18172 set_optab_libfunc (le_optab, mode, "_q_fle");
18173
18174 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18175 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18176 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18177 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18178 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18179 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18180 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18181 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18182 }
18183 }
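/* Correspondingly, with -mabi=ieeelongdouble (or for an explicit
   __float128 operand) the same addition lowers to a call to __addkf3
   registered above, and on configurations where IEEE 128-bit values are
   not passed in vector registers to the historical _q_add instead.  */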
18184
18185 static void
18186 rs6000_init_libfuncs (void)
18187 {
18188 /* __float128 support. */
18189 if (TARGET_FLOAT128_TYPE)
18190 {
18191 init_float128_ibm (IFmode);
18192 init_float128_ieee (KFmode);
18193 }
18194
18195 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18196 if (TARGET_LONG_DOUBLE_128)
18197 {
18198 if (!TARGET_IEEEQUAD)
18199 init_float128_ibm (TFmode);
18200
18201 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18202 else
18203 init_float128_ieee (TFmode);
18204 }
18205 }
18206
18207 /* Emit a potentially record-form instruction, setting DST from SRC.
18208 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18209 signed comparison of DST with zero. If DOT is 1, the generated RTL
18210 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18211 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18212 a separate COMPARE. */
18213
18214 void
18215 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18216 {
18217 if (dot == 0)
18218 {
18219 emit_move_insn (dst, src);
18220 return;
18221 }
18222
18223 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18224 {
18225 emit_move_insn (dst, src);
18226 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18227 return;
18228 }
18229
18230 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18231 if (dot == 1)
18232 {
18233 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18234 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18235 }
18236 else
18237 {
18238 rtx set = gen_rtx_SET (dst, src);
18239 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18240 }
18241 }
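/* A sketch of the RTL shapes emitted above (illustrative, not a new
   pattern): with DOT == 1 and CCREG == cr0 the result is

	(parallel [(set (reg:CC cr0) (compare:CC (src) (const_int 0)))
		   (clobber (dst))])

   DOT == 2 replaces the clobber with (set (dst) (src)), and any other
   condition register falls back to the separate move and compare.  */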
18242
18243 \f
18244 /* A validation routine: say whether CODE, a condition code, and MODE
18245 match. The other alternatives either don't make sense or should
18246 never be generated. */
18247
18248 void
18249 validate_condition_mode (enum rtx_code code, machine_mode mode)
18250 {
18251 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18252 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18253 && GET_MODE_CLASS (mode) == MODE_CC);
18254
18255 /* These don't make sense. */
18256 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18257 || mode != CCUNSmode);
18258
18259 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18260 || mode == CCUNSmode);
18261
18262 gcc_assert (mode == CCFPmode
18263 || (code != ORDERED && code != UNORDERED
18264 && code != UNEQ && code != LTGT
18265 && code != UNGT && code != UNLT
18266 && code != UNGE && code != UNLE));
18267
18268 /* These should never be generated except for
18269 flag_finite_math_only. */
18270 gcc_assert (mode != CCFPmode
18271 || flag_finite_math_only
18272 || (code != LE && code != GE
18273 && code != UNEQ && code != LTGT
18274 && code != UNGT && code != UNLT));
18275
18276 /* These are invalid; the information is not there. */
18277 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18278 }
18279
18280 \f
18281 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18282 rldicl, rldicr, or rldic instruction in mode MODE.  If so, and if E is
18283 nonnull, store there the bit offset (counted from the right) where
18284 the single stretch of 1 bits begins; and similarly for B, the bit
18285 offset where it ends.  */
18286
18287 bool
18288 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18289 {
18290 unsigned HOST_WIDE_INT val = INTVAL (mask);
18291 unsigned HOST_WIDE_INT bit;
18292 int nb, ne;
18293 int n = GET_MODE_PRECISION (mode);
18294
18295 if (mode != DImode && mode != SImode)
18296 return false;
18297
18298 if (INTVAL (mask) >= 0)
18299 {
18300 bit = val & -val;
18301 ne = exact_log2 (bit);
18302 nb = exact_log2 (val + bit);
18303 }
18304 else if (val + 1 == 0)
18305 {
18306 nb = n;
18307 ne = 0;
18308 }
18309 else if (val & 1)
18310 {
18311 val = ~val;
18312 bit = val & -val;
18313 nb = exact_log2 (bit);
18314 ne = exact_log2 (val + bit);
18315 }
18316 else
18317 {
18318 bit = val & -val;
18319 ne = exact_log2 (bit);
18320 if (val + bit == 0)
18321 nb = n;
18322 else
18323 nb = 0;
18324 }
18325
18326 nb--;
18327
18328 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18329 return false;
18330
18331 if (b)
18332 *b = nb;
18333 if (e)
18334 *e = ne;
18335
18336 return true;
18337 }
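/* Worked example: for MODE == DImode and MASK == 0xff0000 the single
   stretch of ones runs from bit 16 through bit 23 (counting from the
   right), so this returns true with *E == 16 and *B == 23.  A mask such
   as 0xff00ff has two stretches of ones and is rejected.  */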
18338
18339 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18340 or rldicr instruction, to implement an AND with it in mode MODE. */
18341
18342 bool
18343 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18344 {
18345 int nb, ne;
18346
18347 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18348 return false;
18349
18350 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18351 does not wrap.  */
18352 if (mode == DImode)
18353 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18354
18355 /* For SImode, rlwinm can do everything. */
18356 if (mode == SImode)
18357 return (nb < 32 && ne < 32);
18358
18359 return false;
18360 }
18361
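/* Editorial examples, not from the original source.  For DImode,
   0x00000000ffffffff (nb == 31, ne == 0) is a valid rldicl mask and
   0xfffffffffffffff0 (nb == 63, ne == 4) a valid rldicr mask, since
   each stretch reaches one end of the register.  By contrast,
   0x0000ffff00000000 (nb == 47, ne == 32) is rejected: clearing bits
   on both sides would need the rotate of an rldic, which a plain AND
   cannot supply.  For SImode even a wrapping mask such as 0xff0000ff
   is fine, because rlwinm masks may wrap.  */
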
18362 /* Return the instruction template for an AND with mask in mode MODE, with
18363 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18364
18365 const char *
18366 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18367 {
18368 int nb, ne;
18369
18370 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18371 gcc_unreachable ();
18372
18373 if (mode == DImode && ne == 0)
18374 {
18375 operands[3] = GEN_INT (63 - nb);
18376 if (dot)
18377 return "rldicl. %0,%1,0,%3";
18378 return "rldicl %0,%1,0,%3";
18379 }
18380
18381 if (mode == DImode && nb == 63)
18382 {
18383 operands[3] = GEN_INT (63 - ne);
18384 if (dot)
18385 return "rldicr. %0,%1,0,%3";
18386 return "rldicr %0,%1,0,%3";
18387 }
18388
18389 if (nb < 32 && ne < 32)
18390 {
18391 operands[3] = GEN_INT (31 - nb);
18392 operands[4] = GEN_INT (31 - ne);
18393 if (dot)
18394 return "rlwinm. %0,%1,0,%3,%4";
18395 return "rlwinm %0,%1,0,%3,%4";
18396 }
18397
18398 gcc_unreachable ();
18399 }
18400
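/* Editorial example, not from the original source: for a DImode AND
   with 0x00000000ffffffff (nb == 31, ne == 0) the routine above emits
   "rldicl %0,%1,0,32", the classic clrldi clearing the upper 32 bits;
   for 0xfffffffffffffff0 (nb == 63, ne == 4) it emits
   "rldicr %0,%1,0,59", clearing the low four bits.  The third operand
   is the rotate count (0 for a plain AND) and the last the mask
   boundary in IBM bit numbering, where bit 0 is the MSB.  */
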
18401 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18402 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18403 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18404
18405 bool
18406 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18407 {
18408 int nb, ne;
18409
18410 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18411 return false;
18412
18413 int n = GET_MODE_PRECISION (mode);
18414 int sh = -1;
18415
18416 if (CONST_INT_P (XEXP (shift, 1)))
18417 {
18418 sh = INTVAL (XEXP (shift, 1));
18419 if (sh < 0 || sh >= n)
18420 return false;
18421 }
18422
18423 rtx_code code = GET_CODE (shift);
18424
18425 /* Convert any shift by 0 to a rotate, to simplify below code. */
18426 if (sh == 0)
18427 code = ROTATE;
18428
18429 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18430 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18431 code = ASHIFT;
18432 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18433 {
18434 code = LSHIFTRT;
18435 sh = n - sh;
18436 }
18437
18438 /* DImode rotates need rld*. */
18439 if (mode == DImode && code == ROTATE)
18440 return (nb == 63 || ne == 0 || ne == sh);
18441
18442 /* SImode rotates need rlw*. */
18443 if (mode == SImode && code == ROTATE)
18444 return (nb < 32 && ne < 32 && sh < 32);
18445
18446 /* Wrap-around masks are only okay for rotates. */
18447 if (ne > nb)
18448 return false;
18449
18450 /* Variable shifts are only okay for rotates. */
18451 if (sh < 0)
18452 return false;
18453
18454 /* Don't allow ASHIFT if the mask is wrong for that. */
18455 if (code == ASHIFT && ne < sh)
18456 return false;
18457
18458 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18459 if the mask is wrong for that. */
18460 if (nb < 32 && ne < 32 && sh < 32
18461 && !(code == LSHIFTRT && nb >= 32 - sh))
18462 return true;
18463
18464 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18465 if the mask is wrong for that. */
18466 if (code == LSHIFTRT)
18467 sh = 64 - sh;
18468 if (nb == 63 || ne == 0 || ne == sh)
18469 return !(code == LSHIFTRT && nb >= sh);
18470
18471 return false;
18472 }
18473
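/* Editorial example, not from the original source: the SImode
   combination (and (ashift:SI x 4) 0xfffffff0) is accepted here
   (nb == 31, ne == 4, sh == 4; an ASHIFT is fine whenever ne >= sh),
   and rs6000_insn_for_shift_mask below renders it as the single
   rotate-and-mask "rlwinm %0,%1,4,0,27".  A variable shift amount
   (sh == -1) is only accepted for rotates, per the checks above.  */
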
18474 /* Return the instruction template for a shift with mask in mode MODE, with
18475 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18476
18477 const char *
18478 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18479 {
18480 int nb, ne;
18481
18482 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18483 gcc_unreachable ();
18484
18485 if (mode == DImode && ne == 0)
18486 {
18487 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18488 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18489 operands[3] = GEN_INT (63 - nb);
18490 if (dot)
18491 return "rld%I2cl. %0,%1,%2,%3";
18492 return "rld%I2cl %0,%1,%2,%3";
18493 }
18494
18495 if (mode == DImode && nb == 63)
18496 {
18497 operands[3] = GEN_INT (63 - ne);
18498 if (dot)
18499 return "rld%I2cr. %0,%1,%2,%3";
18500 return "rld%I2cr %0,%1,%2,%3";
18501 }
18502
18503 if (mode == DImode
18504 && GET_CODE (operands[4]) != LSHIFTRT
18505 && CONST_INT_P (operands[2])
18506 && ne == INTVAL (operands[2]))
18507 {
18508 operands[3] = GEN_INT (63 - nb);
18509 if (dot)
18510 return "rld%I2c. %0,%1,%2,%3";
18511 return "rld%I2c %0,%1,%2,%3";
18512 }
18513
18514 if (nb < 32 && ne < 32)
18515 {
18516 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18517 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18518 operands[3] = GEN_INT (31 - nb);
18519 operands[4] = GEN_INT (31 - ne);
18520 /* This insn can also be a 64-bit rotate with mask that really makes
18521 it just a shift right (with mask); the %h below are to adjust for
18522 that situation (shift count is >= 32 in that case). */
18523 if (dot)
18524 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18525 return "rlw%I2nm %0,%1,%h2,%3,%4";
18526 }
18527
18528 gcc_unreachable ();
18529 }
18530
18531 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18532 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18533 ASHIFT, or LSHIFTRT) in mode MODE. */
18534
18535 bool
18536 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18537 {
18538 int nb, ne;
18539
18540 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18541 return false;
18542
18543 int n = GET_MODE_PRECISION (mode);
18544
18545 int sh = INTVAL (XEXP (shift, 1));
18546 if (sh < 0 || sh >= n)
18547 return false;
18548
18549 rtx_code code = GET_CODE (shift);
18550
18551 /* Convert any shift by 0 to a rotate, to simplify below code. */
18552 if (sh == 0)
18553 code = ROTATE;
18554
18555 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18556 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18557 code = ASHIFT;
18558 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18559 {
18560 code = LSHIFTRT;
18561 sh = n - sh;
18562 }
18563
18564 /* DImode rotates need rldimi. */
18565 if (mode == DImode && code == ROTATE)
18566 return (ne == sh);
18567
18568 /* SImode rotates need rlwimi. */
18569 if (mode == SImode && code == ROTATE)
18570 return (nb < 32 && ne < 32 && sh < 32);
18571
18572 /* Wrap-around masks are only okay for rotates. */
18573 if (ne > nb)
18574 return false;
18575
18576 /* Don't allow ASHIFT if the mask is wrong for that. */
18577 if (code == ASHIFT && ne < sh)
18578 return false;
18579
18580 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18581 if the mask is wrong for that. */
18582 if (nb < 32 && ne < 32 && sh < 32
18583 && !(code == LSHIFTRT && nb >= 32 - sh))
18584 return true;
18585
18586 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18587 if the mask is wrong for that. */
18588 if (code == LSHIFTRT)
18589 sh = 64 - sh;
18590 if (ne == sh)
18591 return !(code == LSHIFTRT && nb >= sh);
18592
18593 return false;
18594 }
18595
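/* Editorial example, not from the original source: inserting an
   8-bit field into bits 8..15 of a 32-bit word (mask 0x0000ff00,
   shift (ashift:SI x 8)) is valid here (nb == 15, ne == 8, sh == 8)
   and becomes "rlwimi %0,%1,8,16,23" in rs6000_insn_for_insert_mask
   below.  For the DImode rldimi form the mask must begin exactly at
   the shift count (ne == sh).  */
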
18596 /* Return the instruction template for an insert with mask in mode MODE, with
18597 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18598
18599 const char *
18600 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18601 {
18602 int nb, ne;
18603
18604 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18605 gcc_unreachable ();
18606
18607 /* Prefer rldimi because rlwimi is cracked. */
18608 if (TARGET_POWERPC64
18609 && (!dot || mode == DImode)
18610 && GET_CODE (operands[4]) != LSHIFTRT
18611 && ne == INTVAL (operands[2]))
18612 {
18613 operands[3] = GEN_INT (63 - nb);
18614 if (dot)
18615 return "rldimi. %0,%1,%2,%3";
18616 return "rldimi %0,%1,%2,%3";
18617 }
18618
18619 if (nb < 32 && ne < 32)
18620 {
18621 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18622 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18623 operands[3] = GEN_INT (31 - nb);
18624 operands[4] = GEN_INT (31 - ne);
18625 if (dot)
18626 return "rlwimi. %0,%1,%2,%3,%4";
18627 return "rlwimi %0,%1,%2,%3,%4";
18628 }
18629
18630 gcc_unreachable ();
18631 }
18632
18633 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18634 using two machine instructions. */
18635
18636 bool
18637 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18638 {
18639 /* There are two kinds of AND we can handle with two insns:
18640 1) those we can do with two rl* insns;
18641 2) ori[s];xori[s].
18642
18643 We do not handle that last case yet. */
18644
18645 /* If there is just one stretch of ones, we can do it. */
18646 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18647 return true;
18648
18649 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18650 one insn, we can do the whole thing with two. */
18651 unsigned HOST_WIDE_INT val = INTVAL (c);
18652 unsigned HOST_WIDE_INT bit1 = val & -val;
18653 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18654 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18655 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18656 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18657 }
18658
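/* Editorial walkthrough, not from the original source: for the SImode
   value 0xf0f0 (two stretches of ones) the bit tricks above compute

     bit1 = 0x0010   lowest set bit
     bit2 = 0x0100   lowest bit of the lowest hole
     val1 = 0xf000   VAL with the lowest stretch cleared
     bit3 = 0x1000   lowest set bit above that hole

   so val + bit3 - bit2 == 0xfff0 fills the hole, a single stretch
   that one rlwinm can AND; rs6000_emit_2insn_and pairs it with the
   complementary mask 0xfffff0ff (all ones except the hole).  */
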
18659 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18660 If EXPAND is true, split rotate-and-mask instructions we generate to
18661 their constituent parts as well (this is used during expand); if DOT
18662 is 1, make the last insn a record-form instruction clobbering the
18663 destination GPR and setting the CC reg (from operands[3]); if 2, set
18664 that GPR as well as the CC reg. */
18665
18666 void
18667 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18668 {
18669 gcc_assert (!(expand && dot));
18670
18671 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18672
18673 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18674 shift right. This generates better code than doing the masks without
18675 shifts, or shifting first right and then left. */
18676 int nb, ne;
18677 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18678 {
18679 gcc_assert (mode == DImode);
18680
18681 int shift = 63 - nb;
18682 if (expand)
18683 {
18684 rtx tmp1 = gen_reg_rtx (DImode);
18685 rtx tmp2 = gen_reg_rtx (DImode);
18686 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18687 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18688 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18689 }
18690 else
18691 {
18692 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18693 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18694 emit_move_insn (operands[0], tmp);
18695 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18696 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18697 }
18698 return;
18699 }
18700
18701 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18702 that does the rest. */
18703 unsigned HOST_WIDE_INT bit1 = val & -val;
18704 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18705 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18706 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18707
18708 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18709 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18710
18711 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18712
18713 /* Two "no-rotate"-and-mask instructions, for SImode. */
18714 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18715 {
18716 gcc_assert (mode == SImode);
18717
18718 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18719 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18720 emit_move_insn (reg, tmp);
18721 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18722 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18723 return;
18724 }
18725
18726 gcc_assert (mode == DImode);
18727
18728 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18729 insns; we have to do the first in SImode, because it wraps. */
18730 if (mask2 <= 0xffffffff
18731 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18732 {
18733 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18734 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18735 GEN_INT (mask1));
18736 rtx reg_low = gen_lowpart (SImode, reg);
18737 emit_move_insn (reg_low, tmp);
18738 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18739 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18740 return;
18741 }
18742
18743 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18744 at the top end), rotate back and clear the other hole. */
18745 int right = exact_log2 (bit3);
18746 int left = 64 - right;
18747
18748 /* Rotate the mask too. */
18749 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18750
18751 if (expand)
18752 {
18753 rtx tmp1 = gen_reg_rtx (DImode);
18754 rtx tmp2 = gen_reg_rtx (DImode);
18755 rtx tmp3 = gen_reg_rtx (DImode);
18756 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18757 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18758 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18759 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18760 }
18761 else
18762 {
18763 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18764 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18765 emit_move_insn (operands[0], tmp);
18766 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18767 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18768 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18769 }
18770 }
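
/* Editorial example, not from the original source: for a DImode AND
   with the single stretch 0x0000000000fff000 (nb == 23, ne == 12),
   shift == 63 - nb == 40, the path above emits roughly

     sldi   t1,src,40      # move the stretch to the top
     rldicr t2,t1,0,11     # AND with (mask << 40), which ends at the MSB
     srdi   dst,t2,40      # shift back down

   assuming the middle AND is matched as an rldicr: once shifted, the
   mask ends at bit 63 and so is a valid single rl* mask.  */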
18771 \f
18772 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
18773 for lfq and stfq insns iff the registers are hard registers. */
18774
18775 int
18776 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18777 {
18778 /* We might have been passed a SUBREG. */
18779 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
18780 return 0;
18781
18782 /* We might have been passed non-floating-point registers. */
18783 if (!FP_REGNO_P (REGNO (reg1))
18784 || !FP_REGNO_P (REGNO (reg2)))
18785 return 0;
18786
18787 return (REGNO (reg1) == REGNO (reg2) - 1);
18788 }
18789
18790 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18791 addr1 and addr2 must be in consecutive memory locations
18792 (addr2 == addr1 + 8). */
18793
18794 int
18795 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18796 {
18797 rtx addr1, addr2;
18798 unsigned int reg1, reg2;
18799 int offset1, offset2;
18800
18801 /* The mems cannot be volatile. */
18802 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18803 return 0;
18804
18805 addr1 = XEXP (mem1, 0);
18806 addr2 = XEXP (mem2, 0);
18807
18808 /* Extract an offset (if used) from the first addr. */
18809 if (GET_CODE (addr1) == PLUS)
18810 {
18811 /* If not a REG, return zero. */
18812 if (GET_CODE (XEXP (addr1, 0)) != REG)
18813 return 0;
18814 else
18815 {
18816 reg1 = REGNO (XEXP (addr1, 0));
18817 /* The offset must be constant! */
18818 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
18819 return 0;
18820 offset1 = INTVAL (XEXP (addr1, 1));
18821 }
18822 }
18823 else if (GET_CODE (addr1) != REG)
18824 return 0;
18825 else
18826 {
18827 reg1 = REGNO (addr1);
18828 /* This was a simple (mem (reg)) expression. Offset is 0. */
18829 offset1 = 0;
18830 }
18831
18832 /* And now for the second addr. */
18833 if (GET_CODE (addr2) == PLUS)
18834 {
18835 /* If not a REG, return zero. */
18836 if (GET_CODE (XEXP (addr2, 0)) != REG)
18837 return 0;
18838 else
18839 {
18840 reg2 = REGNO (XEXP (addr2, 0));
18841 /* The offset must be constant. */
18842 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
18843 return 0;
18844 offset2 = INTVAL (XEXP (addr2, 1));
18845 }
18846 }
18847 else if (GET_CODE (addr2) != REG)
18848 return 0;
18849 else
18850 {
18851 reg2 = REGNO (addr2);
18852 /* This was a simple (mem (reg)) expression. Offset is 0. */
18853 offset2 = 0;
18854 }
18855
18856 /* Both of these must have the same base register. */
18857 if (reg1 != reg2)
18858 return 0;
18859
18860 /* The offset for the second addr must be 8 more than the first addr. */
18861 if (offset2 != offset1 + 8)
18862 return 0;
18863
18864 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18865 instructions. */
18866 return 1;
18867 }
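
/* Editorial example, not from the original source: the pair
   (mem:DF (plus (reg 3) (const_int 16))) and
   (mem:DF (plus (reg 3) (const_int 24))) passes these checks, since
   both use base register 3 and the offsets differ by exactly 8.  A
   pair with different base registers, or with offsets 16 and 28,
   returns 0.  */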
18868 \f
18869 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18870 need to use DDmode, in all other cases we can use the same mode. */
18871 static machine_mode
18872 rs6000_secondary_memory_needed_mode (machine_mode mode)
18873 {
18874 if (lra_in_progress && mode == SDmode)
18875 return DDmode;
18876 return mode;
18877 }
18878
18879 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18880 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18881 only work on the traditional altivec registers, note if an altivec register
18882 was chosen. */
18883
18884 static enum rs6000_reg_type
18885 register_to_reg_type (rtx reg, bool *is_altivec)
18886 {
18887 HOST_WIDE_INT regno;
18888 enum reg_class rclass;
18889
18890 if (GET_CODE (reg) == SUBREG)
18891 reg = SUBREG_REG (reg);
18892
18893 if (!REG_P (reg))
18894 return NO_REG_TYPE;
18895
18896 regno = REGNO (reg);
18897 if (regno >= FIRST_PSEUDO_REGISTER)
18898 {
18899 if (!lra_in_progress && !reload_completed)
18900 return PSEUDO_REG_TYPE;
18901
18902 regno = true_regnum (reg);
18903 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
18904 return PSEUDO_REG_TYPE;
18905 }
18906
18907 gcc_assert (regno >= 0);
18908
18909 if (is_altivec && ALTIVEC_REGNO_P (regno))
18910 *is_altivec = true;
18911
18912 rclass = rs6000_regno_regclass[regno];
18913 return reg_class_to_reg_type[(int)rclass];
18914 }
18915
18916 /* Helper function to return the cost of adding a TOC entry address. */
18917
18918 static inline int
18919 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18920 {
18921 int ret;
18922
18923 if (TARGET_CMODEL != CMODEL_SMALL)
18924 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18925
18926 else
18927 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18928
18929 return ret;
18930 }
18931
18932 /* Helper function for rs6000_secondary_reload to determine whether the memory
18933 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18934 needs reloading. Return negative if the memory is not handled by the memory
18935 helper functions (so that a different reload method should be tried), 0 if
18936 no additional instructions are needed, and positive to give the extra cost
18937 of the memory access. */
18938
18939 static int
18940 rs6000_secondary_reload_memory (rtx addr,
18941 enum reg_class rclass,
18942 machine_mode mode)
18943 {
18944 int extra_cost = 0;
18945 rtx reg, and_arg, plus_arg0, plus_arg1;
18946 addr_mask_type addr_mask;
18947 const char *type = NULL;
18948 const char *fail_msg = NULL;
18949
18950 if (GPR_REG_CLASS_P (rclass))
18951 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18952
18953 else if (rclass == FLOAT_REGS)
18954 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18955
18956 else if (rclass == ALTIVEC_REGS)
18957 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18958
18959 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18960 else if (rclass == VSX_REGS)
18961 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18962 & ~RELOAD_REG_AND_M16);
18963
18964 /* If the register allocator hasn't made up its mind yet on the register
18965 class to use, fall back to the defaults. */
18966 else if (rclass == NO_REGS)
18967 {
18968 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18969 & ~RELOAD_REG_AND_M16);
18970
18971 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18972 addr_mask &= ~(RELOAD_REG_INDEXED
18973 | RELOAD_REG_PRE_INCDEC
18974 | RELOAD_REG_PRE_MODIFY);
18975 }
18976
18977 else
18978 addr_mask = 0;
18979
18980 /* If the register isn't valid in this register class, just return now. */
18981 if ((addr_mask & RELOAD_REG_VALID) == 0)
18982 {
18983 if (TARGET_DEBUG_ADDR)
18984 {
18985 fprintf (stderr,
18986 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18987 "not valid in class\n",
18988 GET_MODE_NAME (mode), reg_class_names[rclass]);
18989 debug_rtx (addr);
18990 }
18991
18992 return -1;
18993 }
18994
18995 switch (GET_CODE (addr))
18996 {
18997 /* Does the register class support auto update forms for this mode? We
18998 don't need a scratch register, since the powerpc only supports
18999 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19000 case PRE_INC:
19001 case PRE_DEC:
19002 reg = XEXP (addr, 0);
19003 if (!base_reg_operand (reg, GET_MODE (reg)))
19004 {
19005 fail_msg = "no base register #1";
19006 extra_cost = -1;
19007 }
19008
19009 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19010 {
19011 extra_cost = 1;
19012 type = "update";
19013 }
19014 break;
19015
19016 case PRE_MODIFY:
19017 reg = XEXP (addr, 0);
19018 plus_arg1 = XEXP (addr, 1);
19019 if (!base_reg_operand (reg, GET_MODE (reg))
19020 || GET_CODE (plus_arg1) != PLUS
19021 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19022 {
19023 fail_msg = "bad PRE_MODIFY";
19024 extra_cost = -1;
19025 }
19026
19027 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19028 {
19029 extra_cost = 1;
19030 type = "update";
19031 }
19032 break;
19033
19034 /* Do we need to simulate AND -16 to clear the bottom address bits used
19035 in VMX load/stores? Only allow the AND for vector sizes. */
19036 case AND:
19037 and_arg = XEXP (addr, 0);
19038 if (GET_MODE_SIZE (mode) != 16
19039 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19040 || INTVAL (XEXP (addr, 1)) != -16)
19041 {
19042 fail_msg = "bad Altivec AND #1";
19043 extra_cost = -1;
19044 }
19045
19046 if (rclass != ALTIVEC_REGS)
19047 {
19048 if (legitimate_indirect_address_p (and_arg, false))
19049 extra_cost = 1;
19050
19051 else if (legitimate_indexed_address_p (and_arg, false))
19052 extra_cost = 2;
19053
19054 else
19055 {
19056 fail_msg = "bad Altivec AND #2";
19057 extra_cost = -1;
19058 }
19059
19060 type = "and";
19061 }
19062 break;
19063
19064 /* If this is an indirect address, make sure it is a base register. */
19065 case REG:
19066 case SUBREG:
19067 if (!legitimate_indirect_address_p (addr, false))
19068 {
19069 extra_cost = 1;
19070 type = "move";
19071 }
19072 break;
19073
19074 /* If this is an indexed address, make sure the register class can handle
19075 indexed addresses for this mode. */
19076 case PLUS:
19077 plus_arg0 = XEXP (addr, 0);
19078 plus_arg1 = XEXP (addr, 1);
19079
19080 /* (plus (plus (reg) (constant)) (constant)) is generated during
19081 push_reload processing, so handle it now. */
19082 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19083 {
19084 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19085 {
19086 extra_cost = 1;
19087 type = "offset";
19088 }
19089 }
19090
19091 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19092 push_reload processing, so handle it now. */
19093 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19094 {
19095 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19096 {
19097 extra_cost = 1;
19098 type = "indexed #2";
19099 }
19100 }
19101
19102 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19103 {
19104 fail_msg = "no base register #2";
19105 extra_cost = -1;
19106 }
19107
19108 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19109 {
19110 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19111 || !legitimate_indexed_address_p (addr, false))
19112 {
19113 extra_cost = 1;
19114 type = "indexed";
19115 }
19116 }
19117
19118 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19119 && CONST_INT_P (plus_arg1))
19120 {
19121 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19122 {
19123 extra_cost = 1;
19124 type = "vector d-form offset";
19125 }
19126 }
19127
19128 /* Make sure the register class can handle offset addresses. */
19129 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19130 {
19131 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19132 {
19133 extra_cost = 1;
19134 type = "offset #2";
19135 }
19136 }
19137
19138 else
19139 {
19140 fail_msg = "bad PLUS";
19141 extra_cost = -1;
19142 }
19143
19144 break;
19145
19146 case LO_SUM:
19147 /* Quad offsets are restricted and can't handle normal addresses. */
19148 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19149 {
19150 extra_cost = -1;
19151 type = "vector d-form lo_sum";
19152 }
19153
19154 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19155 {
19156 fail_msg = "bad LO_SUM";
19157 extra_cost = -1;
19158 }
19159
19160 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19161 {
19162 extra_cost = 1;
19163 type = "lo_sum";
19164 }
19165 break;
19166
19167 /* Static addresses need to create a TOC entry. */
19168 case CONST:
19169 case SYMBOL_REF:
19170 case LABEL_REF:
19171 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19172 {
19173 extra_cost = -1;
19174 type = "vector d-form lo_sum #2";
19175 }
19176
19177 else
19178 {
19179 type = "address";
19180 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19181 }
19182 break;
19183
19184 /* TOC references look like offsettable memory. */
19185 case UNSPEC:
19186 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19187 {
19188 fail_msg = "bad UNSPEC";
19189 extra_cost = -1;
19190 }
19191
19192 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19193 {
19194 extra_cost = -1;
19195 type = "vector d-form lo_sum #3";
19196 }
19197
19198 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19199 {
19200 extra_cost = 1;
19201 type = "toc reference";
19202 }
19203 break;
19204
19205 default:
19206 {
19207 fail_msg = "bad address";
19208 extra_cost = -1;
19209 }
19210 }
19211
19212 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19213 {
19214 if (extra_cost < 0)
19215 fprintf (stderr,
19216 "rs6000_secondary_reload_memory error: mode = %s, "
19217 "class = %s, addr_mask = '%s', %s\n",
19218 GET_MODE_NAME (mode),
19219 reg_class_names[rclass],
19220 rs6000_debug_addr_mask (addr_mask, false),
19221 (fail_msg != NULL) ? fail_msg : "<bad address>");
19222
19223 else
19224 fprintf (stderr,
19225 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19226 "addr_mask = '%s', extra cost = %d, %s\n",
19227 GET_MODE_NAME (mode),
19228 reg_class_names[rclass],
19229 rs6000_debug_addr_mask (addr_mask, false),
19230 extra_cost,
19231 (type) ? type : "<none>");
19232
19233 debug_rtx (addr);
19234 }
19235
19236 return extra_cost;
19237 }
19238
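/* Editorial example, not from the original source: an Altivec-style
   address such as (and (plus (reg 3) (reg 4)) (const_int -16)) for a
   16-byte mode in VSX_REGS lands in the AND case above.  Because
   RELOAD_REG_AND_M16 is masked off for the combined VSX_REGS class,
   the indexed form under the AND reports extra_cost == 2 (the reload
   helper must materialize the sum and simulate the AND), while a
   plain (reg 3) under the AND would report 1.  */
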
19239 /* Helper function for rs6000_secondary_reload to return true if a move to a
19240 different register class is really a simple move. */
19241
19242 static bool
19243 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19244 enum rs6000_reg_type from_type,
19245 machine_mode mode)
19246 {
19247 int size = GET_MODE_SIZE (mode);
19248
19249 /* Check the various direct moves available. In this function, we only
19250 look at cases where we don't need any extra registers, and one or more
19251 simple move insns are issued. Originally, small integers are not allowed
19252 in FPR/VSX registers. Single precision binary floating point is not a
19253 simple move because we need to convert to the single precision memory
19254 layout. The 4-byte SDmode can be moved. TDmode values are disallowed
19255 since they need special direct move handling, which we do not support yet. */
19256 if (TARGET_DIRECT_MOVE
19257 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19258 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19259 {
19260 if (TARGET_POWERPC64)
19261 {
19262 /* ISA 2.07: MTVSRD or MFVSRD. */
19263 if (size == 8)
19264 return true;
19265
19266 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19267 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19268 return true;
19269 }
19270
19271 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19272 if (TARGET_P8_VECTOR)
19273 {
19274 if (mode == SImode)
19275 return true;
19276
19277 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19278 return true;
19279 }
19280
19281 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19282 if (mode == SDmode)
19283 return true;
19284 }
19285
19286 /* Power6+: MFTGPR or MFFGPR. */
19287 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19288 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19289 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19290 return true;
19291
19292 /* Move to/from SPR. */
19293 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19294 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19295 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19296 return true;
19297
19298 return false;
19299 }
19300
19301 /* Direct move helper function for rs6000_secondary_reload; handle all of the
19302 special direct moves that involve allocating an extra register. Return
19303 true if there is such a move, storing the insn code of the helper and its
19304 cost in SRI; return false if not. */
19305
19306 static bool
19307 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19308 enum rs6000_reg_type from_type,
19309 machine_mode mode,
19310 secondary_reload_info *sri,
19311 bool altivec_p)
19312 {
19313 bool ret = false;
19314 enum insn_code icode = CODE_FOR_nothing;
19315 int cost = 0;
19316 int size = GET_MODE_SIZE (mode);
19317
19318 if (TARGET_POWERPC64 && size == 16)
19319 {
19320 /* Handle moving 128-bit values from GPRs to VSX registers on
19321 ISA 2.07 (power8, power9) when running in 64-bit mode using
19322 XXPERMDI to glue the two 64-bit values back together. */
19323 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19324 {
19325 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19326 icode = reg_addr[mode].reload_vsx_gpr;
19327 }
19328
19329 /* Handle moving 128-bit values from VSX registers to GPRs on
19330 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19331 bottom 64-bit value. */
19332 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19333 {
19334 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19335 icode = reg_addr[mode].reload_gpr_vsx;
19336 }
19337 }
19338
19339 else if (TARGET_POWERPC64 && mode == SFmode)
19340 {
19341 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19342 {
19343 cost = 3; /* xscvdpspn, mfvsrd, and. */
19344 icode = reg_addr[mode].reload_gpr_vsx;
19345 }
19346
19347 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19348 {
19349 cost = 2; /* mtvsrwz, xscvspdpn. */
19350 icode = reg_addr[mode].reload_vsx_gpr;
19351 }
19352 }
19353
19354 else if (!TARGET_POWERPC64 && size == 8)
19355 {
19356 /* Handle moving 64-bit values from GPRs to floating point registers on
19357 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19358 32-bit values back together. Altivec register classes must be handled
19359 specially since a different instruction is used, and the secondary
19360 reload support requires a single instruction class in the scratch
19361 register constraint. However, right now TFmode is not allowed in
19362 Altivec registers, so the pattern will never match. */
19363 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19364 {
19365 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19366 icode = reg_addr[mode].reload_fpr_gpr;
19367 }
19368 }
19369
19370 if (icode != CODE_FOR_nothing)
19371 {
19372 ret = true;
19373 if (sri)
19374 {
19375 sri->icode = icode;
19376 sri->extra_cost = cost;
19377 }
19378 }
19379
19380 return ret;
19381 }
19382
19383 /* Return whether a move between two register classes can be done either
19384 directly (simple move) or via a pattern that uses a single extra temporary
19385 (using ISA 2.07's direct move in this case). */
19386
19387 static bool
19388 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19389 enum rs6000_reg_type from_type,
19390 machine_mode mode,
19391 secondary_reload_info *sri,
19392 bool altivec_p)
19393 {
19394 /* Fall back to load/store reloads if either type is not a register. */
19395 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19396 return false;
19397
19398 /* If we haven't allocated registers yet, assume the move can be done for the
19399 standard register types. */
19400 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19401 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19402 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19403 return true;
19404
19405 /* A move within the same set of registers is a simple move for non-specialized
19406 registers. */
19407 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19408 return true;
19409
19410 /* Check whether a simple move can be done directly. */
19411 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19412 {
19413 if (sri)
19414 {
19415 sri->icode = CODE_FOR_nothing;
19416 sri->extra_cost = 0;
19417 }
19418 return true;
19419 }
19420
19421 /* Now check if we can do it in a few steps. */
19422 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19423 altivec_p);
19424 }
19425
19426 /* Inform reload about cases where moving X with a mode MODE to a register in
19427 RCLASS requires an extra scratch or immediate register. Return the class
19428 needed for the immediate register.
19429
19430 For VSX and Altivec, we may need a register to convert sp+offset into
19431 reg+sp.
19432
19433 For misaligned 64-bit gpr loads and stores we need a register to
19434 convert an offset address to indirect. */
19435
19436 static reg_class_t
19437 rs6000_secondary_reload (bool in_p,
19438 rtx x,
19439 reg_class_t rclass_i,
19440 machine_mode mode,
19441 secondary_reload_info *sri)
19442 {
19443 enum reg_class rclass = (enum reg_class) rclass_i;
19444 reg_class_t ret = ALL_REGS;
19445 enum insn_code icode;
19446 bool default_p = false;
19447 bool done_p = false;
19448
19449 /* Allow subreg of memory before/during reload. */
19450 bool memory_p = (MEM_P (x)
19451 || (!reload_completed && GET_CODE (x) == SUBREG
19452 && MEM_P (SUBREG_REG (x))));
19453
19454 sri->icode = CODE_FOR_nothing;
19455 sri->t_icode = CODE_FOR_nothing;
19456 sri->extra_cost = 0;
19457 icode = ((in_p)
19458 ? reg_addr[mode].reload_load
19459 : reg_addr[mode].reload_store);
19460
19461 if (REG_P (x) || register_operand (x, mode))
19462 {
19463 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19464 bool altivec_p = (rclass == ALTIVEC_REGS);
19465 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19466
19467 if (!in_p)
19468 std::swap (to_type, from_type);
19469
19470 /* Can we do a direct move of some sort? */
19471 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19472 altivec_p))
19473 {
19474 icode = (enum insn_code)sri->icode;
19475 default_p = false;
19476 done_p = true;
19477 ret = NO_REGS;
19478 }
19479 }
19480
19481 /* Make sure 0.0 is not reloaded or forced into memory. */
19482 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19483 {
19484 ret = NO_REGS;
19485 default_p = false;
19486 done_p = true;
19487 }
19488
19489 /* If this is a scalar floating point value and we want to load it into the
19490 traditional Altivec registers, move it via a traditional floating point
19491 register, unless we have D-form addressing. Also make sure that non-zero
19492 constants use an FPR. */
19493 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19494 && !mode_supports_vmx_dform (mode)
19495 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19496 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19497 {
19498 ret = FLOAT_REGS;
19499 default_p = false;
19500 done_p = true;
19501 }
19502
19503 /* Handle reload of load/stores if we have reload helper functions. */
19504 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19505 {
19506 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19507 mode);
19508
19509 if (extra_cost >= 0)
19510 {
19511 done_p = true;
19512 ret = NO_REGS;
19513 if (extra_cost > 0)
19514 {
19515 sri->extra_cost = extra_cost;
19516 sri->icode = icode;
19517 }
19518 }
19519 }
19520
19521 /* Handle unaligned loads and stores of integer registers. */
19522 if (!done_p && TARGET_POWERPC64
19523 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19524 && memory_p
19525 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19526 {
19527 rtx addr = XEXP (x, 0);
19528 rtx off = address_offset (addr);
19529
19530 if (off != NULL_RTX)
19531 {
19532 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19533 unsigned HOST_WIDE_INT offset = INTVAL (off);
19534
19535 /* We need a secondary reload when our legitimate_address_p
19536 says the address is good (as otherwise the entire address
19537 will be reloaded), and the offset is not a multiple of
19538 four or we have an address wrap. Address wrap will only
19539 occur for LO_SUMs since legitimate_offset_address_p
19540 rejects addresses for 16-byte mems that will wrap. */
19541 if (GET_CODE (addr) == LO_SUM
19542 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19543 && ((offset & 3) != 0
19544 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19545 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19546 && (offset & 3) != 0))
19547 {
19548 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19549 if (in_p)
19550 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19551 : CODE_FOR_reload_di_load);
19552 else
19553 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19554 : CODE_FOR_reload_di_store);
19555 sri->extra_cost = 2;
19556 ret = NO_REGS;
19557 done_p = true;
19558 }
19559 else
19560 default_p = true;
19561 }
19562 else
19563 default_p = true;
19564 }
19565
19566 if (!done_p && !TARGET_POWERPC64
19567 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19568 && memory_p
19569 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19570 {
19571 rtx addr = XEXP (x, 0);
19572 rtx off = address_offset (addr);
19573
19574 if (off != NULL_RTX)
19575 {
19576 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19577 unsigned HOST_WIDE_INT offset = INTVAL (off);
19578
19579 /* We need a secondary reload when our legitimate_address_p
19580 says the address is good (as otherwise the entire address
19581 will be reloaded), and we have a wrap.
19582
19583 legitimate_lo_sum_address_p allows LO_SUM addresses to
19584 have any offset so test for wrap in the low 16 bits.
19585
19586 legitimate_offset_address_p checks for the range
19587 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19588 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19589 [0x7ff4,0x7fff] respectively, so test for the
19590 intersection of these ranges, [0x7ffc,0x7fff] and
19591 [0x7ff4,0x7ff7] respectively.
19592
19593 Note that the address we see here may have been
19594 manipulated by legitimize_reload_address. */
19595 if (GET_CODE (addr) == LO_SUM
19596 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19597 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19598 {
19599 if (in_p)
19600 sri->icode = CODE_FOR_reload_si_load;
19601 else
19602 sri->icode = CODE_FOR_reload_si_store;
19603 sri->extra_cost = 2;
19604 ret = NO_REGS;
19605 done_p = true;
19606 }
19607 else
19608 default_p = true;
19609 }
19610 else
19611 default_p = true;
19612 }
19613
19614 if (!done_p)
19615 default_p = true;
19616
19617 if (default_p)
19618 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19619
19620 gcc_assert (ret != ALL_REGS);
19621
19622 if (TARGET_DEBUG_ADDR)
19623 {
19624 fprintf (stderr,
19625 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19626 "mode = %s",
19627 reg_class_names[ret],
19628 in_p ? "true" : "false",
19629 reg_class_names[rclass],
19630 GET_MODE_NAME (mode));
19631
19632 if (reload_completed)
19633 fputs (", after reload", stderr);
19634
19635 if (!done_p)
19636 fputs (", done_p not set", stderr);
19637
19638 if (default_p)
19639 fputs (", default secondary reload", stderr);
19640
19641 if (sri->icode != CODE_FOR_nothing)
19642 fprintf (stderr, ", reload func = %s, extra cost = %d",
19643 insn_data[sri->icode].name, sri->extra_cost);
19644
19645 else if (sri->extra_cost > 0)
19646 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19647
19648 fputs ("\n", stderr);
19649 debug_rtx (x);
19650 }
19651
19652 return ret;
19653 }
19654
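/* Editorial note, not from the original source: the offset-wrap tests
   above rely on the identity that, for the signed 16-bit displacement
   d encoded in the low bits of OFFSET,

     ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra

   is the same as d + extra > 0x7fff.  The XOR biases d from
   [-0x8000, 0x7fff] onto [0, 0xffff], so the comparison asks whether
   the final word of the access, at displacement d + extra, would
   overflow the signed 16-bit range.  With extra == 8, for instance,
   low-bit displacements 0x7ff8 through 0x7fff wrap and need the
   secondary reload.  */
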
19655 /* Better tracing for rs6000_secondary_reload_inner. */
19656
19657 static void
19658 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19659 bool store_p)
19660 {
19661 rtx set, clobber;
19662
19663 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19664
19665 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19666 store_p ? "store" : "load");
19667
19668 if (store_p)
19669 set = gen_rtx_SET (mem, reg);
19670 else
19671 set = gen_rtx_SET (reg, mem);
19672
19673 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19674 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19675 }
19676
19677 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19678 ATTRIBUTE_NORETURN;
19679
19680 static void
19681 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19682 bool store_p)
19683 {
19684 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19685 gcc_unreachable ();
19686 }
19687
19688 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19689 reload helper functions. These were identified in
19690 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19691 reload, it calls the insns:
19692 reload_<RELOAD:mode>_<P:mptrsize>_store
19693 reload_<RELOAD:mode>_<P:mptrsize>_load
19694
19695 which in turn calls this function, to do whatever is necessary to create
19696 valid addresses. */
19697
19698 void
19699 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19700 {
19701 int regno = true_regnum (reg);
19702 machine_mode mode = GET_MODE (reg);
19703 addr_mask_type addr_mask;
19704 rtx addr;
19705 rtx new_addr;
19706 rtx op_reg, op0, op1;
19707 rtx and_op;
19708 rtx cc_clobber;
19709 rtvec rv;
19710
19711 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19712 || !base_reg_operand (scratch, GET_MODE (scratch)))
19713 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19714
19715 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19716 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19717
19718 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19719 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19720
19721 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19722 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19723
19724 else
19725 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19726
19727 /* Make sure the mode is valid in this register class. */
19728 if ((addr_mask & RELOAD_REG_VALID) == 0)
19729 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19730
19731 if (TARGET_DEBUG_ADDR)
19732 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19733
19734 new_addr = addr = XEXP (mem, 0);
19735 switch (GET_CODE (addr))
19736 {
19737 /* Does the register class support auto update forms for this mode? If
19738 not, do the update now. We don't need a scratch register, since the
19739 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19740 case PRE_INC:
19741 case PRE_DEC:
19742 op_reg = XEXP (addr, 0);
19743 if (!base_reg_operand (op_reg, Pmode))
19744 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19745
19746 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19747 {
19748 int delta = GET_MODE_SIZE (mode);
19749 if (GET_CODE (addr) == PRE_DEC)
19750 delta = -delta;
19751 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19752 new_addr = op_reg;
19753 }
19754 break;
19755
19756 case PRE_MODIFY:
19757 op0 = XEXP (addr, 0);
19758 op1 = XEXP (addr, 1);
19759 if (!base_reg_operand (op0, Pmode)
19760 || GET_CODE (op1) != PLUS
19761 || !rtx_equal_p (op0, XEXP (op1, 0)))
19762 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19763
19764 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19765 {
19766 emit_insn (gen_rtx_SET (op0, op1));
19767 new_addr = op0;
19768 }
19769 break;
19770
19771 /* Do we need to simulate AND -16 to clear the bottom address bits used
19772 in VMX load/stores? */
19773 case AND:
19774 op0 = XEXP (addr, 0);
19775 op1 = XEXP (addr, 1);
19776 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19777 {
19778 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
19779 op_reg = op0;
19780
19781 else if (GET_CODE (op1) == PLUS)
19782 {
19783 emit_insn (gen_rtx_SET (scratch, op1));
19784 op_reg = scratch;
19785 }
19786
19787 else
19788 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19789
19790 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19791 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19792 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19793 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19794 new_addr = scratch;
19795 }
19796 break;
19797
19798 /* If this is an indirect address, make sure it is a base register. */
19799 case REG:
19800 case SUBREG:
19801 if (!base_reg_operand (addr, GET_MODE (addr)))
19802 {
19803 emit_insn (gen_rtx_SET (scratch, addr));
19804 new_addr = scratch;
19805 }
19806 break;
19807
19808 /* If this is an indexed address, make sure the register class can handle
19809 indexed addresses for this mode. */
19810 case PLUS:
19811 op0 = XEXP (addr, 0);
19812 op1 = XEXP (addr, 1);
19813 if (!base_reg_operand (op0, Pmode))
19814 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19815
19816 else if (int_reg_operand (op1, Pmode))
19817 {
19818 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19819 {
19820 emit_insn (gen_rtx_SET (scratch, addr));
19821 new_addr = scratch;
19822 }
19823 }
19824
19825 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19826 {
19827 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19828 || !quad_address_p (addr, mode, false))
19829 {
19830 emit_insn (gen_rtx_SET (scratch, addr));
19831 new_addr = scratch;
19832 }
19833 }
19834
19835 /* Make sure the register class can handle offset addresses. */
19836 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19837 {
19838 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19839 {
19840 emit_insn (gen_rtx_SET (scratch, addr));
19841 new_addr = scratch;
19842 }
19843 }
19844
19845 else
19846 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19847
19848 break;
19849
19850 case LO_SUM:
19851 op0 = XEXP (addr, 0);
19852 op1 = XEXP (addr, 1);
19853 if (!base_reg_operand (op0, Pmode))
19854 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19855
19856 else if (int_reg_operand (op1, Pmode))
19857 {
19858 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19859 {
19860 emit_insn (gen_rtx_SET (scratch, addr));
19861 new_addr = scratch;
19862 }
19863 }
19864
19865 /* Quad offsets are restricted and can't handle normal addresses. */
19866 else if (mode_supports_dq_form (mode))
19867 {
19868 emit_insn (gen_rtx_SET (scratch, addr));
19869 new_addr = scratch;
19870 }
19871
19872 /* Make sure the register class can handle offset addresses. */
19873 else if (legitimate_lo_sum_address_p (mode, addr, false))
19874 {
19875 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19876 {
19877 emit_insn (gen_rtx_SET (scratch, addr));
19878 new_addr = scratch;
19879 }
19880 }
19881
19882 else
19883 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19884
19885 break;
19886
19887 case SYMBOL_REF:
19888 case CONST:
19889 case LABEL_REF:
19890 rs6000_emit_move (scratch, addr, Pmode);
19891 new_addr = scratch;
19892 break;
19893
19894 default:
19895 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19896 }
19897
19898 /* Adjust the address if it changed. */
19899 if (addr != new_addr)
19900 {
19901 mem = replace_equiv_address_nv (mem, new_addr);
19902 if (TARGET_DEBUG_ADDR)
19903 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19904 }
19905
19906 /* Now create the move. */
19907 if (store_p)
19908 emit_insn (gen_rtx_SET (mem, reg));
19909 else
19910 emit_insn (gen_rtx_SET (reg, mem));
19911
19912 return;
19913 }
19914
19915 /* Convert reloads involving 64-bit gprs and misaligned offset
19916 addressing, or multiple 32-bit gprs and offsets that are too large,
19917 to use indirect addressing. */
19918
19919 void
19920 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19921 {
19922 int regno = true_regnum (reg);
19923 enum reg_class rclass;
19924 rtx addr;
19925 rtx scratch_or_premodify = scratch;
19926
19927 if (TARGET_DEBUG_ADDR)
19928 {
19929 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19930 store_p ? "store" : "load");
19931 fprintf (stderr, "reg:\n");
19932 debug_rtx (reg);
19933 fprintf (stderr, "mem:\n");
19934 debug_rtx (mem);
19935 fprintf (stderr, "scratch:\n");
19936 debug_rtx (scratch);
19937 }
19938
19939 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
19940 gcc_assert (GET_CODE (mem) == MEM);
19941 rclass = REGNO_REG_CLASS (regno);
19942 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19943 addr = XEXP (mem, 0);
19944
19945 if (GET_CODE (addr) == PRE_MODIFY)
19946 {
19947 gcc_assert (REG_P (XEXP (addr, 0))
19948 && GET_CODE (XEXP (addr, 1)) == PLUS
19949 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19950 scratch_or_premodify = XEXP (addr, 0);
19951 addr = XEXP (addr, 1);
19952 }
19953 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19954
19955 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19956
19957 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19958
19959 /* Now create the move. */
19960 if (store_p)
19961 emit_insn (gen_rtx_SET (mem, reg));
19962 else
19963 emit_insn (gen_rtx_SET (reg, mem));
19964
19965 return;
19966 }
19967
19968 /* Given an rtx X being reloaded into a reg required to be
19969 in class CLASS, return the class of reg to actually use.
19970 In general this is just CLASS; but on some machines
19971 in some cases it is preferable to use a more restrictive class.
19972
19973 On the RS/6000, we have to return NO_REGS when we want to reload a
19974 floating-point CONST_DOUBLE to force it to be copied to memory.
19975
19976 We also don't want to reload integer values into floating-point
19977 registers if we can at all help it. In fact, this can
19978 cause reload to die, if it tries to generate a reload of CTR
19979 into a FP register and discovers it doesn't have the memory location
19980 required.
19981
19982 ??? Would it be a good idea to have reload do the converse, that is
19983 try to reload floating modes into FP registers if possible?
19984 */
19985
19986 static enum reg_class
19987 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19988 {
19989 machine_mode mode = GET_MODE (x);
19990 bool is_constant = CONSTANT_P (x);
19991
19992 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19993 reload class for it. */
19994 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19995 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19996 return NO_REGS;
19997
19998 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19999 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20000 return NO_REGS;
20001
20002 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20003 the reloading of address expressions using PLUS into floating point
20004 registers. */
20005 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20006 {
20007 if (is_constant)
20008 {
20009 /* Zero is always allowed in all VSX registers. */
20010 if (x == CONST0_RTX (mode))
20011 return rclass;
20012
20013 /* If this is a vector constant that can be formed with a few Altivec
20014 instructions, we want altivec registers. */
20015 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20016 return ALTIVEC_REGS;
20017
20018 /* If this is an integer constant that can easily be loaded into
20019 vector registers, allow it. */
20020 if (CONST_INT_P (x))
20021 {
20022 HOST_WIDE_INT value = INTVAL (x);
20023
20024 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20025 2.06 can generate it in the Altivec registers with
20026 VSPLTI<x>. */
20027 if (value == -1)
20028 {
20029 if (TARGET_P8_VECTOR)
20030 return rclass;
20031 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20032 return ALTIVEC_REGS;
20033 else
20034 return NO_REGS;
20035 }
20036
20037 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20038 a sign extend in the Altivec registers. */
20039 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20040 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20041 return ALTIVEC_REGS;
20042 }
20043
20044 /* Force constant to memory. */
20045 return NO_REGS;
20046 }
20047
20048 /* D-form addressing can easily reload the value. */
20049 if (mode_supports_vmx_dform (mode)
20050 || mode_supports_dq_form (mode))
20051 return rclass;
20052
20053 /* If this is a scalar floating point value and we don't have D-form
20054 addressing, prefer the traditional floating point registers so that we
20055 can use D-form (register+offset) addressing. */
20056 if (rclass == VSX_REGS
20057 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20058 return FLOAT_REGS;
20059
20060 /* Prefer the Altivec registers if Altivec is handling the vector
20061 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20062 loads. */
20063 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20064 || mode == V1TImode)
20065 return ALTIVEC_REGS;
20066
20067 return rclass;
20068 }
20069
20070 if (is_constant || GET_CODE (x) == PLUS)
20071 {
20072 if (reg_class_subset_p (GENERAL_REGS, rclass))
20073 return GENERAL_REGS;
20074 if (reg_class_subset_p (BASE_REGS, rclass))
20075 return BASE_REGS;
20076 return NO_REGS;
20077 }
20078
20079 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20080 return GENERAL_REGS;
20081
20082 return rclass;
20083 }
20084
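/* Editorial examples, not from the original source, of the constant
   handling above for VSX classes: zero always keeps its class;
   (const_int -1) keeps its class on ISA 2.07 (XXLORC can generate it
   anywhere) but prefers ALTIVEC_REGS before that (VSPLTISW);
   (const_int 5) prefers ALTIVEC_REGS on ISA 3.0 via XXSPLTIB; other
   constants that are not easy vector constants return NO_REGS and are
   forced to memory.  */
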
20085 /* Debug version of rs6000_preferred_reload_class. */
20086 static enum reg_class
20087 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20088 {
20089 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20090
20091 fprintf (stderr,
20092 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20093 "mode = %s, x:\n",
20094 reg_class_names[ret], reg_class_names[rclass],
20095 GET_MODE_NAME (GET_MODE (x)));
20096 debug_rtx (x);
20097
20098 return ret;
20099 }
20100
20101 /* If we are copying between FP or AltiVec registers and anything else, we need
20102 a memory location. The exception is when we are targeting ppc64 and the
20103 direct fpr/gpr move instructions are available. Also, under VSX, you
20104 can copy vector registers from the FP register set to the Altivec register
20105 set and vice versa. */
20106
20107 static bool
20108 rs6000_secondary_memory_needed (machine_mode mode,
20109 reg_class_t from_class,
20110 reg_class_t to_class)
20111 {
20112 enum rs6000_reg_type from_type, to_type;
20113 bool altivec_p = ((from_class == ALTIVEC_REGS)
20114 || (to_class == ALTIVEC_REGS));
20115
20116 /* If a simple/direct move is available, we don't need secondary memory. */
20117 from_type = reg_class_to_reg_type[(int)from_class];
20118 to_type = reg_class_to_reg_type[(int)to_class];
20119
20120 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20121 (secondary_reload_info *)0, altivec_p))
20122 return false;
20123
20124 /* If we have a floating point or vector register class, we need to use
20125 memory to transfer the data. */
20126 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20127 return true;
20128
20129 return false;
20130 }
20131
20132 /* Debug version of rs6000_secondary_memory_needed. */
20133 static bool
20134 rs6000_debug_secondary_memory_needed (machine_mode mode,
20135 reg_class_t from_class,
20136 reg_class_t to_class)
20137 {
20138 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20139
20140 fprintf (stderr,
20141 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20142 "to_class = %s, mode = %s\n",
20143 ret ? "true" : "false",
20144 reg_class_names[from_class],
20145 reg_class_names[to_class],
20146 GET_MODE_NAME (mode));
20147
20148 return ret;
20149 }
20150
20151 /* Return the register class of a scratch register needed to copy IN into
20152 or out of a register in RCLASS in MODE. If it can be done directly,
20153 NO_REGS is returned. */
20154
20155 static enum reg_class
20156 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20157 rtx in)
20158 {
20159 int regno;
20160
20161 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20162 #if TARGET_MACHO
20163 && MACHOPIC_INDIRECT
20164 #endif
20165 ))
20166 {
20167 /* We cannot copy a symbolic operand directly into anything
20168 other than BASE_REGS for TARGET_ELF. So indicate that a
20169 register from BASE_REGS is needed as an intermediate
20170 register.
20171
20172 On Darwin, pic addresses require a load from memory, which
20173 needs a base register. */
20174 if (rclass != BASE_REGS
20175 && (GET_CODE (in) == SYMBOL_REF
20176 || GET_CODE (in) == HIGH
20177 || GET_CODE (in) == LABEL_REF
20178 || GET_CODE (in) == CONST))
20179 return BASE_REGS;
20180 }
20181
20182 if (GET_CODE (in) == REG)
20183 {
20184 regno = REGNO (in);
20185 if (regno >= FIRST_PSEUDO_REGISTER)
20186 {
20187 regno = true_regnum (in);
20188 if (regno >= FIRST_PSEUDO_REGISTER)
20189 regno = -1;
20190 }
20191 }
20192 else if (GET_CODE (in) == SUBREG)
20193 {
20194 regno = true_regnum (in);
20195 if (regno >= FIRST_PSEUDO_REGISTER)
20196 regno = -1;
20197 }
20198 else
20199 regno = -1;
20200
20201 /* If we have VSX register moves, prefer moving scalar values between
20202 Altivec registers and GPR by going via an FPR (and then via memory)
20203 instead of reloading the secondary memory address for Altivec moves. */
20204 if (TARGET_VSX
20205 && GET_MODE_SIZE (mode) < 16
20206 && !mode_supports_vmx_dform (mode)
20207 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20208 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20209 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20210 && (regno >= 0 && INT_REGNO_P (regno)))))
20211 return FLOAT_REGS;
20212
20213 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20214 into anything. */
20215 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20216 || (regno >= 0 && INT_REGNO_P (regno)))
20217 return NO_REGS;
20218
20219 /* Constants, memory, and VSX registers can go into VSX registers (both the
20220 traditional floating point and the altivec registers). */
20221 if (rclass == VSX_REGS
20222 && (regno == -1 || VSX_REGNO_P (regno)))
20223 return NO_REGS;
20224
20225 /* Constants, memory, and FP registers can go into FP registers. */
20226 if ((regno == -1 || FP_REGNO_P (regno))
20227 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20228 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20229
20230 /* Memory and AltiVec registers can go into AltiVec registers. */
20231 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20232 && rclass == ALTIVEC_REGS)
20233 return NO_REGS;
20234
20235 /* We can copy among the CR registers. */
20236 if ((rclass == CR_REGS || rclass == CR0_REGS)
20237 && regno >= 0 && CR_REGNO_P (regno))
20238 return NO_REGS;
20239
20240 /* Otherwise, we need GENERAL_REGS. */
20241 return GENERAL_REGS;
20242 }
20243
20244 /* Debug version of rs6000_secondary_reload_class. */
20245 static enum reg_class
20246 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20247 machine_mode mode, rtx in)
20248 {
20249 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20250 fprintf (stderr,
20251 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20252 "mode = %s, input rtx:\n",
20253 reg_class_names[ret], reg_class_names[rclass],
20254 GET_MODE_NAME (mode));
20255 debug_rtx (in);
20256
20257 return ret;
20258 }
20259
20260 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20261
20262 static bool
20263 rs6000_can_change_mode_class (machine_mode from,
20264 machine_mode to,
20265 reg_class_t rclass)
20266 {
20267 unsigned from_size = GET_MODE_SIZE (from);
20268 unsigned to_size = GET_MODE_SIZE (to);
20269
20270 if (from_size != to_size)
20271 {
20272 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20273
20274 if (reg_classes_intersect_p (xclass, rclass))
20275 {
20276 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20277 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20278 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20279 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20280
20281 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20282 single register under VSX, because the scalar part of the register
20283 is in the upper 64 bits, not the lower 64 bits.  Types like
20284 TFmode/TDmode that take two scalar registers can overlap.  128-bit
20285 IEEE floating point can't overlap, and neither can small
20286 values. */
20287
20288 if (to_float128_vector_p && from_float128_vector_p)
20289 return true;
20290
20291 else if (to_float128_vector_p || from_float128_vector_p)
20292 return false;
20293
20294 /* TDmode in floating-mode registers must always go into a register
20295 pair with the most significant word in the even-numbered register
20296 to match ISA requirements. In little-endian mode, this does not
20297 match subreg numbering, so we cannot allow subregs. */
20298 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20299 return false;
20300
20301 if (from_size < 8 || to_size < 8)
20302 return false;
20303
20304 if (from_size == 8 && (8 * to_nregs) != to_size)
20305 return false;
20306
20307 if (to_size == 8 && (8 * from_nregs) != from_size)
20308 return false;
20309
20310 return true;
20311 }
20312 else
20313 return true;
20314 }
20315
20316 /* Since the VSX register set includes traditional floating point registers
20317 and altivec registers, just check for the size being different instead of
20318 trying to check whether the modes are vector modes.  Otherwise it won't
20319 allow, say, DF and DI to change classes.  For types like TFmode and TDmode
20320 that take two 64-bit registers, rather than a single 128-bit register, don't
20321 allow subregs of those types to other 128-bit types. */
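/* For example, a DFmode value has from_size == 8 and so num_regs == 1;
   a subreg to a mode such as TImode, which needs two FPRs, fails the
   hard_regno_nregs check below.  */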
20322 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20323 {
20324 unsigned num_regs = (from_size + 15) / 16;
20325 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20326 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20327 return false;
20328
20329 return (from_size == 8 || from_size == 16);
20330 }
20331
20332 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20333 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20334 return false;
20335
20336 return true;
20337 }
20338
20339 /* Debug version of rs6000_can_change_mode_class. */
20340 static bool
20341 rs6000_debug_can_change_mode_class (machine_mode from,
20342 machine_mode to,
20343 reg_class_t rclass)
20344 {
20345 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20346
20347 fprintf (stderr,
20348 "rs6000_can_change_mode_class, return %s, from = %s, "
20349 "to = %s, rclass = %s\n",
20350 ret ? "true" : "false",
20351 GET_MODE_NAME (from), GET_MODE_NAME (to),
20352 reg_class_names[rclass]);
20353
20354 return ret;
20355 }
20356 \f
20357 /* Return a string to do a move operation of 128 bits of data. */
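/* Returning "#" tells final that the move has no single-instruction
   form and must be handled by a splitter; see rs6000_split_128bit_ok_p
   below.  */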
20358
20359 const char *
20360 rs6000_output_move_128bit (rtx operands[])
20361 {
20362 rtx dest = operands[0];
20363 rtx src = operands[1];
20364 machine_mode mode = GET_MODE (dest);
20365 int dest_regno;
20366 int src_regno;
20367 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20368 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20369
20370 if (REG_P (dest))
20371 {
20372 dest_regno = REGNO (dest);
20373 dest_gpr_p = INT_REGNO_P (dest_regno);
20374 dest_fp_p = FP_REGNO_P (dest_regno);
20375 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20376 dest_vsx_p = dest_fp_p | dest_vmx_p;
20377 }
20378 else
20379 {
20380 dest_regno = -1;
20381 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20382 }
20383
20384 if (REG_P (src))
20385 {
20386 src_regno = REGNO (src);
20387 src_gpr_p = INT_REGNO_P (src_regno);
20388 src_fp_p = FP_REGNO_P (src_regno);
20389 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20390 src_vsx_p = src_fp_p | src_vmx_p;
20391 }
20392 else
20393 {
20394 src_regno = -1;
20395 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20396 }
20397
20398 /* Register moves. */
20399 if (dest_regno >= 0 && src_regno >= 0)
20400 {
20401 if (dest_gpr_p)
20402 {
20403 if (src_gpr_p)
20404 return "#";
20405
20406 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20407 return (WORDS_BIG_ENDIAN
20408 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20409 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20410
20411 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20412 return "#";
20413 }
20414
20415 else if (TARGET_VSX && dest_vsx_p)
20416 {
20417 if (src_vsx_p)
20418 return "xxlor %x0,%x1,%x1";
20419
20420 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20421 return (WORDS_BIG_ENDIAN
20422 ? "mtvsrdd %x0,%1,%L1"
20423 : "mtvsrdd %x0,%L1,%1");
20424
20425 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20426 return "#";
20427 }
20428
20429 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20430 return "vor %0,%1,%1";
20431
20432 else if (dest_fp_p && src_fp_p)
20433 return "#";
20434 }
20435
20436 /* Loads. */
20437 else if (dest_regno >= 0 && MEM_P (src))
20438 {
20439 if (dest_gpr_p)
20440 {
20441 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20442 return "lq %0,%1";
20443 else
20444 return "#";
20445 }
20446
20447 else if (TARGET_ALTIVEC && dest_vmx_p
20448 && altivec_indexed_or_indirect_operand (src, mode))
20449 return "lvx %0,%y1";
20450
20451 else if (TARGET_VSX && dest_vsx_p)
20452 {
20453 if (mode_supports_dq_form (mode)
20454 && quad_address_p (XEXP (src, 0), mode, true))
20455 return "lxv %x0,%1";
20456
20457 else if (TARGET_P9_VECTOR)
20458 return "lxvx %x0,%y1";
20459
20460 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20461 return "lxvw4x %x0,%y1";
20462
20463 else
20464 return "lxvd2x %x0,%y1";
20465 }
20466
20467 else if (TARGET_ALTIVEC && dest_vmx_p)
20468 return "lvx %0,%y1";
20469
20470 else if (dest_fp_p)
20471 return "#";
20472 }
20473
20474 /* Stores. */
20475 else if (src_regno >= 0 && MEM_P (dest))
20476 {
20477 if (src_gpr_p)
20478 {
20479 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20480 return "stq %1,%0";
20481 else
20482 return "#";
20483 }
20484
20485 else if (TARGET_ALTIVEC && src_vmx_p
20486 && altivec_indexed_or_indirect_operand (dest, mode))
20487 return "stvx %1,%y0";
20488
20489 else if (TARGET_VSX && src_vsx_p)
20490 {
20491 if (mode_supports_dq_form (mode)
20492 && quad_address_p (XEXP (dest, 0), mode, true))
20493 return "stxv %x1,%0";
20494
20495 else if (TARGET_P9_VECTOR)
20496 return "stxvx %x1,%y0";
20497
20498 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20499 return "stxvw4x %x1,%y0";
20500
20501 else
20502 return "stxvd2x %x1,%y0";
20503 }
20504
20505 else if (TARGET_ALTIVEC && src_vmx_p)
20506 return "stvx %1,%y0";
20507
20508 else if (src_fp_p)
20509 return "#";
20510 }
20511
20512 /* Constants. */
20513 else if (dest_regno >= 0
20514 && (GET_CODE (src) == CONST_INT
20515 || GET_CODE (src) == CONST_WIDE_INT
20516 || GET_CODE (src) == CONST_DOUBLE
20517 || GET_CODE (src) == CONST_VECTOR))
20518 {
20519 if (dest_gpr_p)
20520 return "#";
20521
20522 else if ((dest_vmx_p && TARGET_ALTIVEC)
20523 || (dest_vsx_p && TARGET_VSX))
20524 return output_vec_const_move (operands);
20525 }
20526
20527 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20528 }
20529
20530 /* Validate a 128-bit move. */
20531 bool
20532 rs6000_move_128bit_ok_p (rtx operands[])
20533 {
20534 machine_mode mode = GET_MODE (operands[0]);
20535 return (gpc_reg_operand (operands[0], mode)
20536 || gpc_reg_operand (operands[1], mode));
20537 }
20538
20539 /* Return true if a 128-bit move needs to be split. */
20540 bool
20541 rs6000_split_128bit_ok_p (rtx operands[])
20542 {
20543 if (!reload_completed)
20544 return false;
20545
20546 if (!gpr_or_gpr_p (operands[0], operands[1]))
20547 return false;
20548
20549 if (quad_load_store_p (operands[0], operands[1]))
20550 return false;
20551
20552 return true;
20553 }
20554
20555 \f
20556 /* Given a comparison operation, return the bit number in CCR to test.
20557 OP is expected to be a valid comparison.
20558
20559 SCC_P is 1 if this is for an scc.  That means that %D will have been
20560 used instead of %C, so the bits will be in different places.
20561
20562 Return -1 if OP isn't a valid comparison for some reason. */
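/* For example, an EQ test of CR2 gives base_bit == 8, so this returns
   bit 10 (base_bit + 2).  */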
20563
20564 int
20565 ccr_bit (rtx op, int scc_p)
20566 {
20567 enum rtx_code code = GET_CODE (op);
20568 machine_mode cc_mode;
20569 int cc_regnum;
20570 int base_bit;
20571 rtx reg;
20572
20573 if (!COMPARISON_P (op))
20574 return -1;
20575
20576 reg = XEXP (op, 0);
20577
20578 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20579
20580 cc_mode = GET_MODE (reg);
20581 cc_regnum = REGNO (reg);
20582 base_bit = 4 * (cc_regnum - CR0_REGNO);
20583
20584 validate_condition_mode (code, cc_mode);
20585
20586 /* When generating a sCOND operation, only positive conditions are
20587 allowed. */
20588 gcc_assert (!scc_p
20589 || code == EQ || code == GT || code == LT || code == UNORDERED
20590 || code == GTU || code == LTU);
20591
20592 switch (code)
20593 {
20594 case NE:
20595 return scc_p ? base_bit + 3 : base_bit + 2;
20596 case EQ:
20597 return base_bit + 2;
20598 case GT: case GTU: case UNLE:
20599 return base_bit + 1;
20600 case LT: case LTU: case UNGE:
20601 return base_bit;
20602 case ORDERED: case UNORDERED:
20603 return base_bit + 3;
20604
20605 case GE: case GEU:
20606 /* If scc, we will have done a cror to put the bit in the
20607 unordered position. So test that bit. For integer, this is ! LT
20608 unless this is an scc insn. */
20609 return scc_p ? base_bit + 3 : base_bit;
20610
20611 case LE: case LEU:
20612 return scc_p ? base_bit + 3 : base_bit + 1;
20613
20614 default:
20615 gcc_unreachable ();
20616 }
20617 }
20618 \f
20619 /* Return the GOT register. */
20620
20621 rtx
20622 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20623 {
20624 /* The second flow pass currently (June 1999) can't update
20625 regs_ever_live without disturbing other parts of the compiler, so
20626 update it here to make the prolog/epilogue code happy. */
20627 if (!can_create_pseudo_p ()
20628 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20629 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20630
20631 crtl->uses_pic_offset_table = 1;
20632
20633 return pic_offset_table_rtx;
20634 }
20635 \f
20636 static rs6000_stack_t stack_info;
20637
20638 /* Function to init struct machine_function.
20639 This will be called, via a pointer variable,
20640 from push_function_context. */
20641
20642 static struct machine_function *
20643 rs6000_init_machine_status (void)
20644 {
20645 stack_info.reload_completed = 0;
20646 return ggc_cleared_alloc<machine_function> ();
20647 }
20648 \f
20649 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20650
20651 /* Write out a function code label. */
20652
20653 void
20654 rs6000_output_function_entry (FILE *file, const char *fname)
20655 {
20656 if (fname[0] != '.')
20657 {
20658 switch (DEFAULT_ABI)
20659 {
20660 default:
20661 gcc_unreachable ();
20662
20663 case ABI_AIX:
20664 if (DOT_SYMBOLS)
20665 putc ('.', file);
20666 else
20667 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20668 break;
20669
20670 case ABI_ELFv2:
20671 case ABI_V4:
20672 case ABI_DARWIN:
20673 break;
20674 }
20675 }
20676
20677 RS6000_OUTPUT_BASENAME (file, fname);
20678 }
20679
20680 /* Print an operand. Recognize special options, documented below. */
20681
20682 #if TARGET_ELF
20683 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20684 only introduced by the linker, when applying the sda21
20685 relocation. */
20686 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20687 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20688 #else
20689 #define SMALL_DATA_RELOC "sda21"
20690 #define SMALL_DATA_REG 0
20691 #endif
20692
20693 void
20694 print_operand (FILE *file, rtx x, int code)
20695 {
20696 int i;
20697 unsigned HOST_WIDE_INT uval;
20698
20699 switch (code)
20700 {
20701 /* %a is output_address. */
20702
20703 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20704 output_operand. */
20705
20706 case 'D':
20707 /* Like 'J' but get to the GT bit only. */
20708 if (!REG_P (x))
20709 {
20710 output_operand_lossage ("invalid %%D value");
20711 return;
20712 }
20713
20714 /* Bit 1 is GT bit. */
20715 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20716
20717 /* Add one for shift count in rlinm for scc. */
20718 fprintf (file, "%d", i + 1);
20719 return;
20720
20721 case 'e':
20722 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20723 if (! INT_P (x))
20724 {
20725 output_operand_lossage ("invalid %%e value");
20726 return;
20727 }
20728
20729 uval = INTVAL (x);
20730 if ((uval & 0xffff) == 0 && uval != 0)
20731 putc ('s', file);
20732 return;
20733
20734 case 'E':
20735 /* X is a CR register.  Print the number of the EQ bit of the CR.  */
20736 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20737 output_operand_lossage ("invalid %%E value");
20738 else
20739 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20740 return;
20741
20742 case 'f':
20743 /* X is a CR register. Print the shift count needed to move it
20744 to the high-order four bits. */
20745 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20746 output_operand_lossage ("invalid %%f value");
20747 else
20748 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20749 return;
20750
20751 case 'F':
20752 /* Similar, but print the count for the rotate in the opposite
20753 direction. */
20754 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20755 output_operand_lossage ("invalid %%F value");
20756 else
20757 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20758 return;
20759
20760 case 'G':
20761 /* X is a constant integer. If it is negative, print "m",
20762 otherwise print "z". This is to make an aze or ame insn. */
20763 if (GET_CODE (x) != CONST_INT)
20764 output_operand_lossage ("invalid %%G value");
20765 else if (INTVAL (x) >= 0)
20766 putc ('z', file);
20767 else
20768 putc ('m', file);
20769 return;
20770
20771 case 'h':
20772 /* If constant, output low-order five bits. Otherwise, write
20773 normally. */
20774 if (INT_P (x))
20775 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20776 else
20777 print_operand (file, x, 0);
20778 return;
20779
20780 case 'H':
20781 /* If constant, output low-order six bits. Otherwise, write
20782 normally. */
20783 if (INT_P (x))
20784 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20785 else
20786 print_operand (file, x, 0);
20787 return;
20788
20789 case 'I':
20790 /* Print `i' if this is a constant, else nothing. */
20791 if (INT_P (x))
20792 putc ('i', file);
20793 return;
20794
20795 case 'j':
20796 /* Write the bit number in CCR for jump. */
20797 i = ccr_bit (x, 0);
20798 if (i == -1)
20799 output_operand_lossage ("invalid %%j code");
20800 else
20801 fprintf (file, "%d", i);
20802 return;
20803
20804 case 'J':
20805 /* Similar, but add one for shift count in rlinm for scc and pass
20806 scc flag to `ccr_bit'. */
20807 i = ccr_bit (x, 1);
20808 if (i == -1)
20809 output_operand_lossage ("invalid %%J code");
20810 else
20811 /* If we want bit 31, write a shift count of zero, not 32. */
20812 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20813 return;
20814
20815 case 'k':
20816 /* X must be a constant. Write the 1's complement of the
20817 constant. */
20818 if (! INT_P (x))
20819 output_operand_lossage ("invalid %%k value");
20820 else
20821 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20822 return;
20823
20824 case 'K':
20825 /* X must be a symbolic constant on ELF. Write an
20826 expression suitable for an 'addi' that adds in the low 16
20827 bits of the MEM. */
20828 if (GET_CODE (x) == CONST)
20829 {
20830 if (GET_CODE (XEXP (x, 0)) != PLUS
20831 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
20832 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20833 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
20834 output_operand_lossage ("invalid %%K value");
20835 }
20836 print_operand_address (file, x);
20837 fputs ("@l", file);
20838 return;
20839
20840 /* %l is output_asm_label. */
20841
20842 case 'L':
20843 /* Write second word of DImode or DFmode reference. Works on register
20844 or non-indexed memory only. */
20845 if (REG_P (x))
20846 fputs (reg_names[REGNO (x) + 1], file);
20847 else if (MEM_P (x))
20848 {
20849 machine_mode mode = GET_MODE (x);
20850 /* Handle possible auto-increment.  Since it is pre-increment and
20851 we have already done it, we can just use an offset of one word. */
20852 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20853 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20854 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20855 UNITS_PER_WORD));
20856 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20857 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20858 UNITS_PER_WORD));
20859 else
20860 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20861 UNITS_PER_WORD),
20862 0));
20863
20864 if (small_data_operand (x, GET_MODE (x)))
20865 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20866 reg_names[SMALL_DATA_REG]);
20867 }
20868 return;
20869
20870 case 'N': /* Unused */
20871 /* Write the number of elements in the vector times 4. */
20872 if (GET_CODE (x) != PARALLEL)
20873 output_operand_lossage ("invalid %%N value");
20874 else
20875 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20876 return;
20877
20878 case 'O': /* Unused */
20879 /* Similar, but subtract 1 first. */
20880 if (GET_CODE (x) != PARALLEL)
20881 output_operand_lossage ("invalid %%O value");
20882 else
20883 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20884 return;
20885
20886 case 'p':
20887 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20888 if (! INT_P (x)
20889 || INTVAL (x) < 0
20890 || (i = exact_log2 (INTVAL (x))) < 0)
20891 output_operand_lossage ("invalid %%p value");
20892 else
20893 fprintf (file, "%d", i);
20894 return;
20895
20896 case 'P':
20897 /* The operand must be an indirect memory reference. The result
20898 is the register name. */
20899 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
20900 || REGNO (XEXP (x, 0)) >= 32)
20901 output_operand_lossage ("invalid %%P value");
20902 else
20903 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20904 return;
20905
20906 case 'q':
20907 /* This outputs the logical code corresponding to a boolean
20908 expression. The expression may have one or both operands
20909 negated (if one, only the first one). For condition register
20910 logical operations, it will also treat the negated
20911 CR codes as NOTs, but not handle NOTs of them. */
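/* For example, (and (not a) b) prints "andc", while
   (and (not a) (not b)) prints "nor".  */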
20912 {
20913 const char *const *t = 0;
20914 const char *s;
20915 enum rtx_code code = GET_CODE (x);
20916 static const char * const tbl[3][3] = {
20917 { "and", "andc", "nor" },
20918 { "or", "orc", "nand" },
20919 { "xor", "eqv", "xor" } };
20920
20921 if (code == AND)
20922 t = tbl[0];
20923 else if (code == IOR)
20924 t = tbl[1];
20925 else if (code == XOR)
20926 t = tbl[2];
20927 else
20928 output_operand_lossage ("invalid %%q value");
20929
20930 if (GET_CODE (XEXP (x, 0)) != NOT)
20931 s = t[0];
20932 else
20933 {
20934 if (GET_CODE (XEXP (x, 1)) == NOT)
20935 s = t[2];
20936 else
20937 s = t[1];
20938 }
20939
20940 fputs (s, file);
20941 }
20942 return;
20943
20944 case 'Q':
20945 if (! TARGET_MFCRF)
20946 return;
20947 fputc (',', file);
20948 /* FALLTHRU */
20949
20950 case 'R':
20951 /* X is a CR register. Print the mask for `mtcrf'. */
20952 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20953 output_operand_lossage ("invalid %%R value");
20954 else
20955 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20956 return;
20957
20958 case 's':
20959 /* Low 5 bits of 32 - value */
20960 if (! INT_P (x))
20961 output_operand_lossage ("invalid %%s value");
20962 else
20963 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20964 return;
20965
20966 case 't':
20967 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20968 if (!REG_P (x) || GET_MODE (x) != CCmode)
20969 {
20970 output_operand_lossage ("invalid %%t value");
20971 return;
20972 }
20973
20974 /* Bit 3 is OV bit. */
20975 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20976
20977 /* If we want bit 31, write a shift count of zero, not 32. */
20978 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20979 return;
20980
20981 case 'T':
20982 /* Print the symbolic name of a branch target register. */
20983 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20984 x = XVECEXP (x, 0, 0);
20985 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
20986 && REGNO (x) != CTR_REGNO))
20987 output_operand_lossage ("invalid %%T value");
20988 else if (REGNO (x) == LR_REGNO)
20989 fputs ("lr", file);
20990 else
20991 fputs ("ctr", file);
20992 return;
20993
20994 case 'u':
20995 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20996 for use in unsigned operand. */
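/* For example, 0x12340000 and 0x1234 both print as 0x1234.  */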
20997 if (! INT_P (x))
20998 {
20999 output_operand_lossage ("invalid %%u value");
21000 return;
21001 }
21002
21003 uval = INTVAL (x);
21004 if ((uval & 0xffff) == 0)
21005 uval >>= 16;
21006
21007 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21008 return;
21009
21010 case 'v':
21011 /* High-order 16 bits of constant for use in signed operand. */
21012 if (! INT_P (x))
21013 output_operand_lossage ("invalid %%v value");
21014 else
21015 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21016 (INTVAL (x) >> 16) & 0xffff);
21017 return;
21018
21019 case 'U':
21020 /* Print `u' if this has an auto-increment or auto-decrement. */
21021 if (MEM_P (x)
21022 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21023 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21024 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21025 putc ('u', file);
21026 return;
21027
21028 case 'V':
21029 /* Print the trap code for this operand. */
21030 switch (GET_CODE (x))
21031 {
21032 case EQ:
21033 fputs ("eq", file); /* 4 */
21034 break;
21035 case NE:
21036 fputs ("ne", file); /* 24 */
21037 break;
21038 case LT:
21039 fputs ("lt", file); /* 16 */
21040 break;
21041 case LE:
21042 fputs ("le", file); /* 20 */
21043 break;
21044 case GT:
21045 fputs ("gt", file); /* 8 */
21046 break;
21047 case GE:
21048 fputs ("ge", file); /* 12 */
21049 break;
21050 case LTU:
21051 fputs ("llt", file); /* 2 */
21052 break;
21053 case LEU:
21054 fputs ("lle", file); /* 6 */
21055 break;
21056 case GTU:
21057 fputs ("lgt", file); /* 1 */
21058 break;
21059 case GEU:
21060 fputs ("lge", file); /* 5 */
21061 break;
21062 default:
21063 output_operand_lossage ("invalid %%V value");
21064 }
21065 break;
21066
21067 case 'w':
21068 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21069 normally. */
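/* The xor/subtract below sign-extends the low 16 bits, so for example
   0xffff prints as -1.  */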
21070 if (INT_P (x))
21071 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21072 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21073 else
21074 print_operand (file, x, 0);
21075 return;
21076
21077 case 'x':
21078 /* X is a FPR or Altivec register used in a VSX context. */
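/* FPRs map to vs0..vs31 and AltiVec registers map to vs32..vs63 in the
   unified VSX register numbering.  */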
21079 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21080 output_operand_lossage ("invalid %%x value");
21081 else
21082 {
21083 int reg = REGNO (x);
21084 int vsx_reg = (FP_REGNO_P (reg)
21085 ? reg - 32
21086 : reg - FIRST_ALTIVEC_REGNO + 32);
21087
21088 #ifdef TARGET_REGNAMES
21089 if (TARGET_REGNAMES)
21090 fprintf (file, "%%vs%d", vsx_reg);
21091 else
21092 #endif
21093 fprintf (file, "%d", vsx_reg);
21094 }
21095 return;
21096
21097 case 'X':
21098 if (MEM_P (x)
21099 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21100 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21101 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21102 putc ('x', file);
21103 return;
21104
21105 case 'Y':
21106 /* Like 'L', for the third word of TImode/PTImode.  */
21107 if (REG_P (x))
21108 fputs (reg_names[REGNO (x) + 2], file);
21109 else if (MEM_P (x))
21110 {
21111 machine_mode mode = GET_MODE (x);
21112 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21113 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21114 output_address (mode, plus_constant (Pmode,
21115 XEXP (XEXP (x, 0), 0), 8));
21116 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21117 output_address (mode, plus_constant (Pmode,
21118 XEXP (XEXP (x, 0), 0), 8));
21119 else
21120 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21121 if (small_data_operand (x, GET_MODE (x)))
21122 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21123 reg_names[SMALL_DATA_REG]);
21124 }
21125 return;
21126
21127 case 'z':
21128 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21129 x = XVECEXP (x, 0, 1);
21130 /* X is a SYMBOL_REF. Write out the name preceded by a
21131 period and without any trailing data in brackets. Used for function
21132 names. If we are configured for System V (or the embedded ABI) on
21133 the PowerPC, do not emit the period, since those systems do not use
21134 TOCs and the like. */
21135 if (!SYMBOL_REF_P (x))
21136 {
21137 output_operand_lossage ("invalid %%z value");
21138 return;
21139 }
21140
21141 /* For macho, check to see if we need a stub. */
21142 if (TARGET_MACHO)
21143 {
21144 const char *name = XSTR (x, 0);
21145 #if TARGET_MACHO
21146 if (darwin_emit_branch_islands
21147 && MACHOPIC_INDIRECT
21148 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21149 name = machopic_indirection_name (x, /*stub_p=*/true);
21150 #endif
21151 assemble_name (file, name);
21152 }
21153 else if (!DOT_SYMBOLS)
21154 assemble_name (file, XSTR (x, 0));
21155 else
21156 rs6000_output_function_entry (file, XSTR (x, 0));
21157 return;
21158
21159 case 'Z':
21160 /* Like 'L', for last word of TImode/PTImode. */
21161 if (REG_P (x))
21162 fputs (reg_names[REGNO (x) + 3], file);
21163 else if (MEM_P (x))
21164 {
21165 machine_mode mode = GET_MODE (x);
21166 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21167 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21168 output_address (mode, plus_constant (Pmode,
21169 XEXP (XEXP (x, 0), 0), 12));
21170 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21171 output_address (mode, plus_constant (Pmode,
21172 XEXP (XEXP (x, 0), 0), 12));
21173 else
21174 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21175 if (small_data_operand (x, GET_MODE (x)))
21176 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21177 reg_names[SMALL_DATA_REG]);
21178 }
21179 return;
21180
21181 /* Print AltiVec memory operand. */
21182 case 'y':
21183 {
21184 rtx tmp;
21185
21186 gcc_assert (MEM_P (x));
21187
21188 tmp = XEXP (x, 0);
21189
21190 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21191 && GET_CODE (tmp) == AND
21192 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21193 && INTVAL (XEXP (tmp, 1)) == -16)
21194 tmp = XEXP (tmp, 0);
21195 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21196 && GET_CODE (tmp) == PRE_MODIFY)
21197 tmp = XEXP (tmp, 1);
21198 if (REG_P (tmp))
21199 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21200 else
21201 {
21202 if (GET_CODE (tmp) != PLUS
21203 || !REG_P (XEXP (tmp, 0))
21204 || !REG_P (XEXP (tmp, 1)))
21205 {
21206 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21207 break;
21208 }
21209
21210 if (REGNO (XEXP (tmp, 0)) == 0)
21211 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21212 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21213 else
21214 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21215 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21216 }
21217 break;
21218 }
21219
21220 case 0:
21221 if (REG_P (x))
21222 fprintf (file, "%s", reg_names[REGNO (x)]);
21223 else if (MEM_P (x))
21224 {
21225 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21226 know the width from the mode. */
21227 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21228 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21229 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21230 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21231 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21232 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21233 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21234 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21235 else
21236 output_address (GET_MODE (x), XEXP (x, 0));
21237 }
21238 else if (toc_relative_expr_p (x, false,
21239 &tocrel_base_oac, &tocrel_offset_oac))
21240 /* This hack along with a corresponding hack in
21241 rs6000_output_addr_const_extra arranges to output addends
21242 where the assembler expects to find them, e.g.
21243 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21244 without this hack would be output as "x@toc+4". We
21245 want "x+4@toc". */
21246 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21247 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21248 output_addr_const (file, XVECEXP (x, 0, 0));
21249 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21250 output_addr_const (file, XVECEXP (x, 0, 1));
21251 else
21252 output_addr_const (file, x);
21253 return;
21254
21255 case '&':
21256 if (const char *name = get_some_local_dynamic_name ())
21257 assemble_name (file, name);
21258 else
21259 output_operand_lossage ("'%%&' used without any "
21260 "local dynamic TLS references");
21261 return;
21262
21263 default:
21264 output_operand_lossage ("invalid %%xn code");
21265 }
21266 }
21267 \f
21268 /* Print the address of an operand. */
21269
21270 void
21271 print_operand_address (FILE *file, rtx x)
21272 {
21273 if (REG_P (x))
21274 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21275 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21276 || GET_CODE (x) == LABEL_REF)
21277 {
21278 output_addr_const (file, x);
21279 if (small_data_operand (x, GET_MODE (x)))
21280 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21281 reg_names[SMALL_DATA_REG]);
21282 else
21283 gcc_assert (!TARGET_TOC);
21284 }
21285 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21286 && REG_P (XEXP (x, 1)))
21287 {
21288 if (REGNO (XEXP (x, 0)) == 0)
21289 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21290 reg_names[ REGNO (XEXP (x, 0)) ]);
21291 else
21292 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21293 reg_names[ REGNO (XEXP (x, 1)) ]);
21294 }
21295 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21296 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21297 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21298 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21299 #if TARGET_MACHO
21300 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21301 && CONSTANT_P (XEXP (x, 1)))
21302 {
21303 fprintf (file, "lo16(");
21304 output_addr_const (file, XEXP (x, 1));
21305 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21306 }
21307 #endif
21308 #if TARGET_ELF
21309 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21310 && CONSTANT_P (XEXP (x, 1)))
21311 {
21312 output_addr_const (file, XEXP (x, 1));
21313 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21314 }
21315 #endif
21316 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21317 {
21318 /* This hack along with a corresponding hack in
21319 rs6000_output_addr_const_extra arranges to output addends
21320 where the assembler expects to find them, e.g.
21321 (lo_sum (reg 9)
21322 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21323 without this hack would be output as "x@toc+8@l(9)". We
21324 want "x+8@toc@l(9)". */
21325 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21326 if (GET_CODE (x) == LO_SUM)
21327 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21328 else
21329 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21330 }
21331 else
21332 output_addr_const (file, x);
21333 }
21334 \f
21335 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21336
21337 static bool
21338 rs6000_output_addr_const_extra (FILE *file, rtx x)
21339 {
21340 if (GET_CODE (x) == UNSPEC)
21341 switch (XINT (x, 1))
21342 {
21343 case UNSPEC_TOCREL:
21344 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21345 && REG_P (XVECEXP (x, 0, 1))
21346 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21347 output_addr_const (file, XVECEXP (x, 0, 0));
21348 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21349 {
21350 if (INTVAL (tocrel_offset_oac) >= 0)
21351 fprintf (file, "+");
21352 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21353 }
21354 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21355 {
21356 putc ('-', file);
21357 assemble_name (file, toc_label_name);
21358 need_toc_init = 1;
21359 }
21360 else if (TARGET_ELF)
21361 fputs ("@toc", file);
21362 return true;
21363
21364 #if TARGET_MACHO
21365 case UNSPEC_MACHOPIC_OFFSET:
21366 output_addr_const (file, XVECEXP (x, 0, 0));
21367 putc ('-', file);
21368 machopic_output_function_base_name (file);
21369 return true;
21370 #endif
21371 }
21372 return false;
21373 }
21374 \f
21375 /* Target hook for assembling integer objects. The PowerPC version has
21376 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21377 is defined. It also needs to handle DI-mode objects on 64-bit
21378 targets. */
21379
21380 static bool
21381 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21382 {
21383 #ifdef RELOCATABLE_NEEDS_FIXUP
21384 /* Special handling for SI values. */
21385 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21386 {
21387 static int recurse = 0;
21388
21389 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21390 the .fixup section. Since the TOC section is already relocated, we
21391 don't need to mark it here. We used to skip the text section, but it
21392 should never be valid for relocated addresses to be placed in the text
21393 section. */
21394 if (DEFAULT_ABI == ABI_V4
21395 && (TARGET_RELOCATABLE || flag_pic > 1)
21396 && in_section != toc_section
21397 && !recurse
21398 && !CONST_SCALAR_INT_P (x)
21399 && CONSTANT_P (x))
21400 {
21401 char buf[256];
21402
21403 recurse = 1;
21404 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21405 fixuplabelno++;
21406 ASM_OUTPUT_LABEL (asm_out_file, buf);
21407 fprintf (asm_out_file, "\t.long\t(");
21408 output_addr_const (asm_out_file, x);
21409 fprintf (asm_out_file, ")@fixup\n");
21410 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21411 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21412 fprintf (asm_out_file, "\t.long\t");
21413 assemble_name (asm_out_file, buf);
21414 fprintf (asm_out_file, "\n\t.previous\n");
21415 recurse = 0;
21416 return true;
21417 }
21418 /* Remove initial .'s to turn a -mcall-aixdesc function
21419 address into the address of the descriptor, not the function
21420 itself. */
21421 else if (GET_CODE (x) == SYMBOL_REF
21422 && XSTR (x, 0)[0] == '.'
21423 && DEFAULT_ABI == ABI_AIX)
21424 {
21425 const char *name = XSTR (x, 0);
21426 while (*name == '.')
21427 name++;
21428
21429 fprintf (asm_out_file, "\t.long\t%s\n", name);
21430 return true;
21431 }
21432 }
21433 #endif /* RELOCATABLE_NEEDS_FIXUP */
21434 return default_assemble_integer (x, size, aligned_p);
21435 }
21436
21437 /* Return a template string for assembly to emit when making an
21438 external call. FUNOP is the call mem argument operand number. */
21439
21440 static const char *
21441 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21442 {
21443 /* -Wformat-overflow workaround, without which gcc thinks that %u
21444 might produce 10 digits. */
21445 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21446
21447 char arg[12];
21448 arg[0] = 0;
21449 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21450 {
21451 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21452 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21453 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21454 sprintf (arg, "(%%&@tlsld)");
21455 else
21456 gcc_unreachable ();
21457 }
21458
21459 /* The magic 32768 offset here corresponds to the offset of
21460 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21461 char z[11];
21462 sprintf (z, "%%z%u%s", funop,
21463 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21464 ? "+32768" : ""));
21465
21466 static char str[32]; /* 2 spare */
21467 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21468 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21469 sibcall ? "" : "\n\tnop");
21470 else if (DEFAULT_ABI == ABI_V4)
21471 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21472 flag_pic ? "@plt" : "");
21473 else
21474 gcc_unreachable ();
21475 return str;
21476 }
21477
21478 const char *
21479 rs6000_call_template (rtx *operands, unsigned int funop)
21480 {
21481 return rs6000_call_template_1 (operands, funop, false);
21482 }
21483
21484 const char *
21485 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21486 {
21487 return rs6000_call_template_1 (operands, funop, true);
21488 }
21489
21490 /* As above, for indirect calls. */
21491
21492 static const char *
21493 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21494 bool sibcall)
21495 {
21496 /* -Wformat-overflow workaround, without which gcc thinks that %u
21497 might produce 10 digits. */
21498 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21499
21500 static char str[144]; /* 1 spare */
21501 char *s = str;
21502 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21503
21504 if (DEFAULT_ABI == ABI_AIX)
21505 s += sprintf (s,
21506 "l%s 2,%%%u\n\t",
21507 ptrload, funop + 2);
21508
21509 /* We don't need the extra code to stop indirect call speculation if
21510 calling via LR. */
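/* When speculation must be blocked, the templates below emit "crset 2"
   (setting the EQ bit of CR0) followed by a hinted conditional branch
   such as "beqctrl-" instead of an unconditional one, so that the
   processor does not speculatively execute the indirect target.  */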
21511 bool speculate = (TARGET_MACHO
21512 || rs6000_speculate_indirect_jumps
21513 || (REG_P (operands[funop])
21514 && REGNO (operands[funop]) == LR_REGNO));
21515
21516 if (!TARGET_MACHO && HAVE_AS_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21517 {
21518 const char *rel64 = TARGET_64BIT ? "64" : "";
21519 char tls[29];
21520 tls[0] = 0;
21521 if (GET_CODE (operands[funop + 1]) == UNSPEC)
21522 {
21523 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21524 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21525 rel64, funop + 1);
21526 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21527 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21528 rel64);
21529 else
21530 gcc_unreachable ();
21531 }
21532
21533 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21534 && flag_pic == 2 ? "+32768" : "");
21535 if (!speculate)
21536 {
21537 s += sprintf (s,
21538 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21539 tls, rel64, funop, addend);
21540 s += sprintf (s, "crset 2\n\t");
21541 }
21542 s += sprintf (s,
21543 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21544 tls, rel64, funop, addend);
21545 }
21546 else if (!speculate)
21547 s += sprintf (s, "crset 2\n\t");
21548
21549 if (DEFAULT_ABI == ABI_AIX)
21550 {
21551 if (speculate)
21552 sprintf (s,
21553 "b%%T%ul\n\t"
21554 "l%s 2,%%%u(1)",
21555 funop, ptrload, funop + 3);
21556 else
21557 sprintf (s,
21558 "beq%%T%ul-\n\t"
21559 "l%s 2,%%%u(1)",
21560 funop, ptrload, funop + 3);
21561 }
21562 else if (DEFAULT_ABI == ABI_ELFv2)
21563 {
21564 if (speculate)
21565 sprintf (s,
21566 "b%%T%ul\n\t"
21567 "l%s 2,%%%u(1)",
21568 funop, ptrload, funop + 2);
21569 else
21570 sprintf (s,
21571 "beq%%T%ul-\n\t"
21572 "l%s 2,%%%u(1)",
21573 funop, ptrload, funop + 2);
21574 }
21575 else
21576 {
21577 if (speculate)
21578 sprintf (s,
21579 "b%%T%u%s",
21580 funop, sibcall ? "" : "l");
21581 else
21582 sprintf (s,
21583 "beq%%T%u%s-%s",
21584 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21585 }
21586 return str;
21587 }
21588
21589 const char *
21590 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21591 {
21592 return rs6000_indirect_call_template_1 (operands, funop, false);
21593 }
21594
21595 const char *
21596 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21597 {
21598 return rs6000_indirect_call_template_1 (operands, funop, true);
21599 }
21600
21601 #if HAVE_AS_PLTSEQ
21602 /* Output indirect call insns.
21603 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
21604 const char *
21605 rs6000_pltseq_template (rtx *operands, int which)
21606 {
21607 const char *rel64 = TARGET_64BIT ? "64" : "";
21608 char tls[28];
21609 tls[0] = 0;
21610 if (GET_CODE (operands[3]) == UNSPEC)
21611 {
21612 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21613 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21614 rel64);
21615 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21616 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21617 rel64);
21618 else
21619 gcc_unreachable ();
21620 }
21621
21622 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21623 static char str[96]; /* 15 spare */
21624 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
21625 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21626 && flag_pic == 2 ? "+32768" : "");
21627 switch (which)
21628 {
21629 case 0:
21630 sprintf (str,
21631 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21632 "st%s",
21633 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21634 break;
21635 case 1:
21636 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21637 sprintf (str,
21638 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21639 "lis %%0,0",
21640 tls, off, rel64);
21641 else
21642 sprintf (str,
21643 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21644 "addis %%0,%%1,0",
21645 tls, off, rel64, addend);
21646 break;
21647 case 2:
21648 sprintf (str,
21649 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21650 "l%s %%0,0(%%1)",
21651 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21652 TARGET_64BIT ? "d" : "wz");
21653 break;
21654 case 3:
21655 sprintf (str,
21656 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21657 "mtctr %%1",
21658 tls, rel64, addend);
21659 break;
21660 default:
21661 gcc_unreachable ();
21662 }
21663 return str;
21664 }
21665 #endif
21666
21667 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21668 /* Emit an assembler directive to set symbol visibility for DECL to
21669 VISIBILITY_TYPE. */
21670
21671 static void
21672 rs6000_assemble_visibility (tree decl, int vis)
21673 {
21674 if (TARGET_XCOFF)
21675 return;
21676
21677 /* Functions need to have their entry point symbol visibility set as
21678 well as their descriptor symbol visibility. */
21679 if (DEFAULT_ABI == ABI_AIX
21680 && DOT_SYMBOLS
21681 && TREE_CODE (decl) == FUNCTION_DECL)
21682 {
21683 static const char * const visibility_types[] = {
21684 NULL, "protected", "hidden", "internal"
21685 };
21686
21687 const char *name, *type;
21688
21689 name = ((* targetm.strip_name_encoding)
21690 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21691 type = visibility_types[vis];
21692
21693 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21694 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21695 }
21696 else
21697 default_assemble_visibility (decl, vis);
21698 }
21699 #endif
21700 \f
21701 enum rtx_code
21702 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21703 {
21704 /* Reversal of FP compares needs care -- an ordered compare
21705 becomes an unordered compare and vice versa. */
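/* For example, GE reverses to UNLT rather than LT, so NaN operands
   satisfy exactly one of the original and reversed conditions.  */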
21706 if (mode == CCFPmode
21707 && (!flag_finite_math_only
21708 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21709 || code == UNEQ || code == LTGT))
21710 return reverse_condition_maybe_unordered (code);
21711 else
21712 return reverse_condition (code);
21713 }
21714
21715 /* Generate a compare for CODE. Return a brand-new rtx that
21716 represents the result of the compare. */
21717
21718 static rtx
21719 rs6000_generate_compare (rtx cmp, machine_mode mode)
21720 {
21721 machine_mode comp_mode;
21722 rtx compare_result;
21723 enum rtx_code code = GET_CODE (cmp);
21724 rtx op0 = XEXP (cmp, 0);
21725 rtx op1 = XEXP (cmp, 1);
21726
21727 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21728 comp_mode = CCmode;
21729 else if (FLOAT_MODE_P (mode))
21730 comp_mode = CCFPmode;
21731 else if (code == GTU || code == LTU
21732 || code == GEU || code == LEU)
21733 comp_mode = CCUNSmode;
21734 else if ((code == EQ || code == NE)
21735 && unsigned_reg_p (op0)
21736 && (unsigned_reg_p (op1)
21737 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21738 /* These are unsigned values; perhaps there will be a later
21739 ordering compare that can be shared with this one. */
21740 comp_mode = CCUNSmode;
21741 else
21742 comp_mode = CCmode;
21743
21744 /* If we have an unsigned compare, make sure we don't have a signed value as
21745 an immediate. */
21746 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21747 && INTVAL (op1) < 0)
21748 {
21749 op0 = copy_rtx_if_shared (op0);
21750 op1 = force_reg (GET_MODE (op0), op1);
21751 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21752 }
21753
21754 /* First, the compare. */
21755 compare_result = gen_reg_rtx (comp_mode);
21756
21757 /* Handle IEEE 128-bit values in VSX registers when we do not have
21758 hardware support for the comparison. */
21759 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21760 {
21761 rtx libfunc = NULL_RTX;
21762 bool check_nan = false;
21763 rtx dest;
21764
21765 switch (code)
21766 {
21767 case EQ:
21768 case NE:
21769 libfunc = optab_libfunc (eq_optab, mode);
21770 break;
21771
21772 case GT:
21773 case GE:
21774 libfunc = optab_libfunc (ge_optab, mode);
21775 break;
21776
21777 case LT:
21778 case LE:
21779 libfunc = optab_libfunc (le_optab, mode);
21780 break;
21781
21782 case UNORDERED:
21783 case ORDERED:
21784 libfunc = optab_libfunc (unord_optab, mode);
21785 code = (code == UNORDERED) ? NE : EQ;
21786 break;
21787
21788 case UNGE:
21789 case UNGT:
21790 check_nan = true;
21791 libfunc = optab_libfunc (ge_optab, mode);
21792 code = (code == UNGE) ? GE : GT;
21793 break;
21794
21795 case UNLE:
21796 case UNLT:
21797 check_nan = true;
21798 libfunc = optab_libfunc (le_optab, mode);
21799 code = (code == UNLE) ? LE : LT;
21800 break;
21801
21802 case UNEQ:
21803 case LTGT:
21804 check_nan = true;
21805 libfunc = optab_libfunc (eq_optab, mode);
21806 code = (code == UNEQ) ? EQ : NE;
21807 break;
21808
21809 default:
21810 gcc_unreachable ();
21811 }
21812
21813 gcc_assert (libfunc);
21814
21815 if (!check_nan)
21816 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21817 SImode, op0, mode, op1, mode);
21818
21819 /* The library signals an exception for signalling NaNs, so we need to
21820 handle isgreater, etc. by first checking isordered. */
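/* Illustrative shape of the sequence emitted here (the names are not
   the actual libgcc entry points):

       dest = 1;
       if (__unord (op0, op1) != 0) goto join;
       dest = __cmp (op0, op1) <code> 0;
     join:
       compare_result = (dest != 0);  */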
21821 else
21822 {
21823 rtx ne_rtx, normal_dest, unord_dest;
21824 rtx unord_func = optab_libfunc (unord_optab, mode);
21825 rtx join_label = gen_label_rtx ();
21826 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21827 rtx unord_cmp = gen_reg_rtx (comp_mode);
21828
21830 /* Test for either value being a NaN. */
21831 gcc_assert (unord_func);
21832 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21833 SImode, op0, mode, op1, mode);
21834
21835 /* Set the result to 1 if either value is a NaN, and jump to the
21836 join label. */
21837 dest = gen_reg_rtx (SImode);
21838 emit_move_insn (dest, const1_rtx);
21839 emit_insn (gen_rtx_SET (unord_cmp,
21840 gen_rtx_COMPARE (comp_mode, unord_dest,
21841 const0_rtx)));
21842
21843 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21844 emit_jump_insn (gen_rtx_SET (pc_rtx,
21845 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21846 join_ref,
21847 pc_rtx)));
21848
21849 /* Do the normal comparison, knowing that the values are not
21850 NaNs. */
21851 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21852 SImode, op0, mode, op1, mode);
21853
21854 emit_insn (gen_cstoresi4 (dest,
21855 gen_rtx_fmt_ee (code, SImode, normal_dest,
21856 const0_rtx),
21857 normal_dest, const0_rtx));
21858
21859 /* Join NaN and non-NaN paths.  Compare dest against 0. */
21860 emit_label (join_label);
21861 code = NE;
21862 }
21863
21864 emit_insn (gen_rtx_SET (compare_result,
21865 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21866 }
21867
21868 else
21869 {
21870 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21871 CLOBBERs to match cmptf_internal2 pattern. */
21872 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21873 && FLOAT128_IBM_P (GET_MODE (op0))
21874 && TARGET_HARD_FLOAT)
21875 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21876 gen_rtvec (10,
21877 gen_rtx_SET (compare_result,
21878 gen_rtx_COMPARE (comp_mode, op0, op1)),
21879 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21880 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21881 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21882 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21883 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21884 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21885 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21886 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21887 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21888 else if (GET_CODE (op1) == UNSPEC
21889 && XINT (op1, 1) == UNSPEC_SP_TEST)
21890 {
21891 rtx op1b = XVECEXP (op1, 0, 0);
21892 comp_mode = CCEQmode;
21893 compare_result = gen_reg_rtx (CCEQmode);
21894 if (TARGET_64BIT)
21895 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21896 else
21897 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21898 }
21899 else
21900 emit_insn (gen_rtx_SET (compare_result,
21901 gen_rtx_COMPARE (comp_mode, op0, op1)));
21902 }
21903
21904 /* Some kinds of FP comparisons need an OR operation;
21905 under flag_finite_math_only we don't bother. */
21906 if (FLOAT_MODE_P (mode)
21907 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21908 && !flag_finite_math_only
21909 && (code == LE || code == GE
21910 || code == UNEQ || code == LTGT
21911 || code == UNGT || code == UNLT))
21912 {
21913 enum rtx_code or1, or2;
21914 rtx or1_rtx, or2_rtx, compare2_rtx;
21915 rtx or_result = gen_reg_rtx (CCEQmode);
21916
21917 switch (code)
21918 {
21919 case LE: or1 = LT; or2 = EQ; break;
21920 case GE: or1 = GT; or2 = EQ; break;
21921 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21922 case LTGT: or1 = LT; or2 = GT; break;
21923 case UNGT: or1 = UNORDERED; or2 = GT; break;
21924 case UNLT: or1 = UNORDERED; or2 = LT; break;
21925 default: gcc_unreachable ();
21926 }
21927 validate_condition_mode (or1, comp_mode);
21928 validate_condition_mode (or2, comp_mode);
21929 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21930 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21931 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21932 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21933 const_true_rtx);
21934 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21935
21936 compare_result = or_result;
21937 code = EQ;
21938 }
21939
21940 validate_condition_mode (code, GET_MODE (compare_result));
21941
21942 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
21943 }
21944
21945 \f
21946 /* Return the diagnostic message string if the binary operation OP is
21947 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21948
21949 static const char*
21950 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21951 const_tree type1,
21952 const_tree type2)
21953 {
21954 machine_mode mode1 = TYPE_MODE (type1);
21955 machine_mode mode2 = TYPE_MODE (type2);
21956
21957 /* For complex modes, use the inner type. */
21958 if (COMPLEX_MODE_P (mode1))
21959 mode1 = GET_MODE_INNER (mode1);
21960
21961 if (COMPLEX_MODE_P (mode2))
21962 mode2 = GET_MODE_INNER (mode2);
21963
21964 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21965 double to intermix unless -mfloat128-convert. */
21966 if (mode1 == mode2)
21967 return NULL;
21968
21969 if (!TARGET_FLOAT128_CVT)
21970 {
21971 if ((mode1 == KFmode && mode2 == IFmode)
21972 || (mode1 == IFmode && mode2 == KFmode))
21973 return N_("__float128 and __ibm128 cannot be used in the same "
21974 "expression");
21975
21976 if (TARGET_IEEEQUAD
21977 && ((mode1 == IFmode && mode2 == TFmode)
21978 || (mode1 == TFmode && mode2 == IFmode)))
21979 return N_("__ibm128 and long double cannot be used in the same "
21980 "expression");
21981
21982 if (!TARGET_IEEEQUAD
21983 && ((mode1 == KFmode && mode2 == TFmode)
21984 || (mode1 == TFmode && mode2 == KFmode)))
21985 return N_("__float128 and long double cannot be used in the same "
21986 "expression");
21987 }
21988
21989 return NULL;
21990 }
21991
21992 \f
21993 /* Expand floating point conversion to/from __float128 and __ibm128. */
21994
21995 void
21996 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21997 {
21998 machine_mode dest_mode = GET_MODE (dest);
21999 machine_mode src_mode = GET_MODE (src);
22000 convert_optab cvt = unknown_optab;
22001 bool do_move = false;
22002 rtx libfunc = NULL_RTX;
22003 rtx dest2;
22004 typedef rtx (*rtx_2func_t) (rtx, rtx);
22005 rtx_2func_t hw_convert = (rtx_2func_t)0;
22006 size_t kf_or_tf;
22007
22008 struct hw_conv_t {
22009 rtx_2func_t from_df;
22010 rtx_2func_t from_sf;
22011 rtx_2func_t from_si_sign;
22012 rtx_2func_t from_si_uns;
22013 rtx_2func_t from_di_sign;
22014 rtx_2func_t from_di_uns;
22015 rtx_2func_t to_df;
22016 rtx_2func_t to_sf;
22017 rtx_2func_t to_si_sign;
22018 rtx_2func_t to_si_uns;
22019 rtx_2func_t to_di_sign;
22020 rtx_2func_t to_di_uns;
22021 } hw_conversions[2] = {
22022 /* conversions to/from KFmode */
22023 {
22024 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22025 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22026 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22027 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22028 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22029 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22030 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22031 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22032 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22033 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22034 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22035 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22036 },
22037
22038 /* conversions to/from TFmode */
22039 {
22040 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22041 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22042 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22043 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22044 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22045 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22046 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22047 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22048 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22049 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22050 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22051 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22052 },
22053 };
22054
22055 if (dest_mode == src_mode)
22056 gcc_unreachable ();
22057
22058 /* Eliminate memory operations. */
22059 if (MEM_P (src))
22060 src = force_reg (src_mode, src);
22061
22062 if (MEM_P (dest))
22063 {
22064 rtx tmp = gen_reg_rtx (dest_mode);
22065 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22066 rs6000_emit_move (dest, tmp, dest_mode);
22067 return;
22068 }
22069
22070 /* Convert to IEEE 128-bit floating point. */
22071 if (FLOAT128_IEEE_P (dest_mode))
22072 {
22073 if (dest_mode == KFmode)
22074 kf_or_tf = 0;
22075 else if (dest_mode == TFmode)
22076 kf_or_tf = 1;
22077 else
22078 gcc_unreachable ();
22079
22080 switch (src_mode)
22081 {
22082 case E_DFmode:
22083 cvt = sext_optab;
22084 hw_convert = hw_conversions[kf_or_tf].from_df;
22085 break;
22086
22087 case E_SFmode:
22088 cvt = sext_optab;
22089 hw_convert = hw_conversions[kf_or_tf].from_sf;
22090 break;
22091
22092 case E_KFmode:
22093 case E_IFmode:
22094 case E_TFmode:
22095 if (FLOAT128_IBM_P (src_mode))
22096 cvt = sext_optab;
22097 else
22098 do_move = true;
22099 break;
22100
22101 case E_SImode:
22102 if (unsigned_p)
22103 {
22104 cvt = ufloat_optab;
22105 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22106 }
22107 else
22108 {
22109 cvt = sfloat_optab;
22110 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22111 }
22112 break;
22113
22114 case E_DImode:
22115 if (unsigned_p)
22116 {
22117 cvt = ufloat_optab;
22118 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22119 }
22120 else
22121 {
22122 cvt = sfloat_optab;
22123 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22124 }
22125 break;
22126
22127 default:
22128 gcc_unreachable ();
22129 }
22130 }
22131
22132 /* Convert from IEEE 128-bit floating point. */
22133 else if (FLOAT128_IEEE_P (src_mode))
22134 {
22135 if (src_mode == KFmode)
22136 kf_or_tf = 0;
22137 else if (src_mode == TFmode)
22138 kf_or_tf = 1;
22139 else
22140 gcc_unreachable ();
22141
22142 switch (dest_mode)
22143 {
22144 case E_DFmode:
22145 cvt = trunc_optab;
22146 hw_convert = hw_conversions[kf_or_tf].to_df;
22147 break;
22148
22149 case E_SFmode:
22150 cvt = trunc_optab;
22151 hw_convert = hw_conversions[kf_or_tf].to_sf;
22152 break;
22153
22154 case E_KFmode:
22155 case E_IFmode:
22156 case E_TFmode:
22157 if (FLOAT128_IBM_P (dest_mode))
22158 cvt = trunc_optab;
22159 else
22160 do_move = true;
22161 break;
22162
22163 case E_SImode:
22164 if (unsigned_p)
22165 {
22166 cvt = ufix_optab;
22167 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22168 }
22169 else
22170 {
22171 cvt = sfix_optab;
22172 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22173 }
22174 break;
22175
22176 case E_DImode:
22177 if (unsigned_p)
22178 {
22179 cvt = ufix_optab;
22180 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22181 }
22182 else
22183 {
22184 cvt = sfix_optab;
22185 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22186 }
22187 break;
22188
22189 default:
22190 gcc_unreachable ();
22191 }
22192 }
22193
22194 /* Both IBM format. */
22195 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22196 do_move = true;
22197
22198 else
22199 gcc_unreachable ();
22200
22201 /* Handle conversion between TFmode/KFmode/IFmode. */
22202 if (do_move)
22203 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22204
22205 /* Handle conversion if we have hardware support. */
22206 else if (TARGET_FLOAT128_HW && hw_convert)
22207 emit_insn ((hw_convert) (dest, src));
22208
22209 /* Call an external function to do the conversion. */
22210 else if (cvt != unknown_optab)
22211 {
22212 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22213 gcc_assert (libfunc != NULL_RTX);
22214
22215 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22216 src, src_mode);
22217
22218 gcc_assert (dest2 != NULL_RTX);
22219 if (!rtx_equal_p (dest, dest2))
22220 emit_move_insn (dest, dest2);
22221 }
22222
22223 else
22224 gcc_unreachable ();
22225
22226 return;
22227 }
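
/* Sketch of the three paths above: converting double to __float128
   (DFmode -> KFmode) uses gen_extenddfkf2_hw (the ISA 3.0 xscvdpqp
   instruction) when TARGET_FLOAT128_HW is set, and otherwise goes
   through the sext_optab libcall (typically __extenddfkf2 in libgcc).
   Conversions between the two 128-bit formats (KFmode <-> IFmode)
   always take the libcall path, since no single instruction does it,
   while KFmode <-> TFmode is a plain move when TFmode is IEEE.  */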
22228
22229 \f
22230 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal.  SCRATCH
22231 may be used as the destination register.  Return that register.  */
22232
22233 rtx
22234 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22235 {
22236 if (op2 == const0_rtx)
22237 return op1;
22238
22239 if (GET_CODE (scratch) == SCRATCH)
22240 scratch = gen_reg_rtx (mode);
22241
22242 if (logical_operand (op2, mode))
22243 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22244 else
22245 emit_insn (gen_rtx_SET (scratch,
22246 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22247
22248 return scratch;
22249 }
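
/* Example (a sketch): for "r = (a == 0x1234)" the constant satisfies
   logical_operand, so the XOR form emits "xori rS,rA,0x1234" and the
   caller then compares rS against zero; for constants that do not fit
   an XOR immediate, the PLUS form emits the equivalent subtract.  */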
22250
22251 void
22252 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22253 {
22254 rtx condition_rtx;
22255 machine_mode op_mode;
22256 enum rtx_code cond_code;
22257 rtx result = operands[0];
22258
22259 condition_rtx = rs6000_generate_compare (operands[1], mode);
22260 cond_code = GET_CODE (condition_rtx);
22261
22262 if (cond_code == NE
22263 || cond_code == GE || cond_code == LE
22264 || cond_code == GEU || cond_code == LEU
22265 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22266 {
22267 rtx not_result = gen_reg_rtx (CCEQmode);
22268 rtx not_op, rev_cond_rtx;
22269 machine_mode cc_mode;
22270
22271 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22272
22273 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22274 SImode, XEXP (condition_rtx, 0), const0_rtx);
22275 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22276 emit_insn (gen_rtx_SET (not_result, not_op));
22277 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22278 }
22279
22280 op_mode = GET_MODE (XEXP (operands[1], 0));
22281 if (op_mode == VOIDmode)
22282 op_mode = GET_MODE (XEXP (operands[1], 1));
22283
22284 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22285 {
22286 PUT_MODE (condition_rtx, DImode);
22287 convert_move (result, condition_rtx, 0);
22288 }
22289 else
22290 {
22291 PUT_MODE (condition_rtx, SImode);
22292 emit_insn (gen_rtx_SET (result, condition_rtx));
22293 }
22294 }
22295
22296 /* Emit a conditional branch to the label in OPERANDS[3], testing OPERANDS[0].  */
22297
22298 void
22299 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22300 {
22301 rtx condition_rtx, loc_ref;
22302
22303 condition_rtx = rs6000_generate_compare (operands[0], mode);
22304 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22305 emit_jump_insn (gen_rtx_SET (pc_rtx,
22306 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22307 loc_ref, pc_rtx)));
22308 }
22309
22310 /* Return the string to output a conditional branch to LABEL, which is
22311 the operand template of the label, or NULL if the branch is really a
22312 conditional return.
22313
22314 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22315 condition code register and its mode specifies what kind of
22316 comparison we made.
22317
22318 REVERSED is nonzero if we should reverse the sense of the comparison.
22319
22320 INSN is the insn. */
22321
22322 char *
22323 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22324 {
22325 static char string[64];
22326 enum rtx_code code = GET_CODE (op);
22327 rtx cc_reg = XEXP (op, 0);
22328 machine_mode mode = GET_MODE (cc_reg);
22329 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22330 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22331 int really_reversed = reversed ^ need_longbranch;
22332 char *s = string;
22333 const char *ccode;
22334 const char *pred;
22335 rtx note;
22336
22337 validate_condition_mode (code, mode);
22338
22339 /* Work out which way this really branches.  We could always use
22340 reverse_condition_maybe_unordered here, but reserving it for FP
22341 compares makes the resulting assembler clearer.  */
22342 if (really_reversed)
22343 {
22344 /* Reversal of FP compares takes care -- an ordered compare
22345 becomes an unordered compare and vice versa. */
22346 if (mode == CCFPmode)
22347 code = reverse_condition_maybe_unordered (code);
22348 else
22349 code = reverse_condition (code);
22350 }
22351
22352 switch (code)
22353 {
22354 /* Not all of these are actually distinct opcodes, but
22355 we distinguish them for clarity of the resulting assembler. */
22356 case NE: case LTGT:
22357 ccode = "ne"; break;
22358 case EQ: case UNEQ:
22359 ccode = "eq"; break;
22360 case GE: case GEU:
22361 ccode = "ge"; break;
22362 case GT: case GTU: case UNGT:
22363 ccode = "gt"; break;
22364 case LE: case LEU:
22365 ccode = "le"; break;
22366 case LT: case LTU: case UNLT:
22367 ccode = "lt"; break;
22368 case UNORDERED: ccode = "un"; break;
22369 case ORDERED: ccode = "nu"; break;
22370 case UNGE: ccode = "nl"; break;
22371 case UNLE: ccode = "ng"; break;
22372 default:
22373 gcc_unreachable ();
22374 }
22375
22376 /* Maybe we have a guess as to how likely the branch is. */
22377 pred = "";
22378 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22379 if (note != NULL_RTX)
22380 {
22381 /* PROB is the difference from 50%. */
22382 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22383 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22384
22385 /* Only hint for highly probable/improbable branches on newer cpus when
22386 we have real profile data, as static prediction overrides processor
22387 dynamic prediction. For older cpus we may as well always hint, but
22388 assume not taken for branches that are very close to 50% as a
22389 mispredicted taken branch is more expensive than a
22390 mispredicted not-taken branch. */
22391 if (rs6000_always_hint
22392 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22393 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22394 && br_prob_note_reliable_p (note)))
22395 {
22396 if (abs (prob) > REG_BR_PROB_BASE / 20
22397 && ((prob > 0) ^ need_longbranch))
22398 pred = "+";
22399 else
22400 pred = "-";
22401 }
22402 }
22403
22404 if (label == NULL)
22405 s += sprintf (s, "b%slr%s ", ccode, pred);
22406 else
22407 s += sprintf (s, "b%s%s ", ccode, pred);
22408
22409 /* We need to escape any '%' characters in the reg_names string.
22410 Assume they'd only be the first character.... */
22411 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22412 *s++ = '%';
22413 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22414
22415 if (label != NULL)
22416 {
22417 /* If the branch distance was too far, we may have to use an
22418 unconditional branch to go the distance. */
22419 if (need_longbranch)
22420 s += sprintf (s, ",$+8\n\tb %s", label);
22421 else
22422 s += sprintf (s, ",%s", label);
22423 }
22424
22425 return string;
22426 }
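
/* Example outputs (a sketch; the exact spelling depends on the
   assembler dialect): a likely-taken EQ branch on cr0 comes out as

	beq+ 0,.L42

   while a branch whose target is out of range is inverted and emitted
   around an unconditional branch:

	bne 0,$+8
	b .L42  */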
22427
22428 /* Return insn for VSX or Altivec comparisons. */
22429
22430 static rtx
22431 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22432 {
22433 rtx mask;
22434 machine_mode mode = GET_MODE (op0);
22435
22436 switch (code)
22437 {
22438 default:
22439 break;
22440
22441 case GE:
22442 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22443 return NULL_RTX;
22444 /* FALLTHRU */
22445
22446 case EQ:
22447 case GT:
22448 case GTU:
22449 case ORDERED:
22450 case UNORDERED:
22451 case UNEQ:
22452 case LTGT:
22453 mask = gen_reg_rtx (mode);
22454 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22455 return mask;
22456 }
22457
22458 return NULL_RTX;
22459 }
22460
22461 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22462 DMODE is expected destination mode. This is a recursive function. */
22463
22464 static rtx
22465 rs6000_emit_vector_compare (enum rtx_code rcode,
22466 rtx op0, rtx op1,
22467 machine_mode dmode)
22468 {
22469 rtx mask;
22470 bool swap_operands = false;
22471 bool try_again = false;
22472
22473 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22474 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22475
22476 /* See if the comparison works as is. */
22477 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22478 if (mask)
22479 return mask;
22480
22481 switch (rcode)
22482 {
22483 case LT:
22484 rcode = GT;
22485 swap_operands = true;
22486 try_again = true;
22487 break;
22488 case LTU:
22489 rcode = GTU;
22490 swap_operands = true;
22491 try_again = true;
22492 break;
22493 case NE:
22494 case UNLE:
22495 case UNLT:
22496 case UNGE:
22497 case UNGT:
22498 /* Invert condition and try again.
22499 e.g., A != B becomes ~(A==B). */
22500 {
22501 enum rtx_code rev_code;
22502 enum insn_code nor_code;
22503 rtx mask2;
22504
22505 rev_code = reverse_condition_maybe_unordered (rcode);
22506 if (rev_code == UNKNOWN)
22507 return NULL_RTX;
22508
22509 nor_code = optab_handler (one_cmpl_optab, dmode);
22510 if (nor_code == CODE_FOR_nothing)
22511 return NULL_RTX;
22512
22513 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22514 if (!mask2)
22515 return NULL_RTX;
22516
22517 mask = gen_reg_rtx (dmode);
22518 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22519 return mask;
22520 }
22521 break;
22522 case GE:
22523 case GEU:
22524 case LE:
22525 case LEU:
22526 /* Try GT/GTU/LT/LTU OR EQ */
22527 {
22528 rtx c_rtx, eq_rtx;
22529 enum insn_code ior_code;
22530 enum rtx_code new_code;
22531
22532 switch (rcode)
22533 {
22534 case GE:
22535 new_code = GT;
22536 break;
22537
22538 case GEU:
22539 new_code = GTU;
22540 break;
22541
22542 case LE:
22543 new_code = LT;
22544 break;
22545
22546 case LEU:
22547 new_code = LTU;
22548 break;
22549
22550 default:
22551 gcc_unreachable ();
22552 }
22553
22554 ior_code = optab_handler (ior_optab, dmode);
22555 if (ior_code == CODE_FOR_nothing)
22556 return NULL_RTX;
22557
22558 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22559 if (!c_rtx)
22560 return NULL_RTX;
22561
22562 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22563 if (!eq_rtx)
22564 return NULL_RTX;
22565
22566 mask = gen_reg_rtx (dmode);
22567 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22568 return mask;
22569 }
22570 break;
22571 default:
22572 return NULL_RTX;
22573 }
22574
22575 if (try_again)
22576 {
22577 if (swap_operands)
22578 std::swap (op0, op1);
22579
22580 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22581 if (mask)
22582 return mask;
22583 }
22584
22585 /* You only get two chances. */
22586 return NULL_RTX;
22587 }
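
/* Worked example of the recursion above: V4SI "a <= b" has no direct
   AltiVec/VSX instruction, so it is built as (a < b) | (a == b), with
   "a < b" itself handled by swapping operands into "b > a"; roughly

	vcmpgtsw vT1,vB,vA	; b > a
	vcmpequw vT2,vA,vB	; a == b
	vor	 vD,vT1,vT2

   and NE is likewise built as the one's complement of EQ.  */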
22588
22589 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22590 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22591 operands for the relation operation COND. */
22592
22593 int
22594 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22595 rtx cond, rtx cc_op0, rtx cc_op1)
22596 {
22597 machine_mode dest_mode = GET_MODE (dest);
22598 machine_mode mask_mode = GET_MODE (cc_op0);
22599 enum rtx_code rcode = GET_CODE (cond);
22600 machine_mode cc_mode = CCmode;
22601 rtx mask;
22602 rtx cond2;
22603 bool invert_move = false;
22604
22605 if (VECTOR_UNIT_NONE_P (dest_mode))
22606 return 0;
22607
22608 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22609 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22610
22611 switch (rcode)
22612 {
22613 /* Where no direct vector instruction exists, invert the condition
22614 and swap the true/false arms of the move instead.  */
22615 case NE:
22616 case UNLE:
22617 case UNLT:
22618 case UNGE:
22619 case UNGT:
22620 /* Invert condition and try again.
22621 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22622 invert_move = true;
22623 rcode = reverse_condition_maybe_unordered (rcode);
22624 if (rcode == UNKNOWN)
22625 return 0;
22626 break;
22627
22628 case GE:
22629 case LE:
22630 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22631 {
22632 /* Invert condition to avoid compound test. */
22633 invert_move = true;
22634 rcode = reverse_condition (rcode);
22635 }
22636 break;
22637
22638 case GTU:
22639 case GEU:
22640 case LTU:
22641 case LEU:
22642 /* Mark unsigned tests with CCUNSmode. */
22643 cc_mode = CCUNSmode;
22644
22645 /* Invert condition to avoid compound test if necessary. */
22646 if (rcode == GEU || rcode == LEU)
22647 {
22648 invert_move = true;
22649 rcode = reverse_condition (rcode);
22650 }
22651 break;
22652
22653 default:
22654 break;
22655 }
22656
22657 /* Get the vector mask for the given relational operations. */
22658 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22659
22660 if (!mask)
22661 return 0;
22662
22663 if (invert_move)
22664 std::swap (op_true, op_false);
22665
22666 /* The mask is -1/0; optimize when the arms are constant -1/0 vectors.  */
22667 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22668 && (GET_CODE (op_true) == CONST_VECTOR
22669 || GET_CODE (op_false) == CONST_VECTOR))
22670 {
22671 rtx constant_0 = CONST0_RTX (dest_mode);
22672 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22673
22674 if (op_true == constant_m1 && op_false == constant_0)
22675 {
22676 emit_move_insn (dest, mask);
22677 return 1;
22678 }
22679
22680 else if (op_true == constant_0 && op_false == constant_m1)
22681 {
22682 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22683 return 1;
22684 }
22685
22686 /* If we can't use the vector comparison directly, perhaps we can use
22687 the mask for the true or false fields, instead of loading up a
22688 constant. */
22689 if (op_true == constant_m1)
22690 op_true = mask;
22691
22692 if (op_false == constant_0)
22693 op_false = mask;
22694 }
22695
22696 if (!REG_P (op_true) && !SUBREG_P (op_true))
22697 op_true = force_reg (dest_mode, op_true);
22698
22699 if (!REG_P (op_false) && !SUBREG_P (op_false))
22700 op_false = force_reg (dest_mode, op_false);
22701
22702 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22703 CONST0_RTX (dest_mode));
22704 emit_insn (gen_rtx_SET (dest,
22705 gen_rtx_IF_THEN_ELSE (dest_mode,
22706 cond2,
22707 op_true,
22708 op_false)));
22709 return 1;
22710 }
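
/* The if_then_else emitted above matches the vector select patterns,
   so a V4SF "a < b ? c : d" typically becomes something like

	xvcmpgtsp vM,vB,vA	; vM = all-ones where a < b, else 0
	xxsel	  vD,vFalse,vTrue,vM

   (register names here are illustrative), with the constant -1/0
   special cases above avoiding the select entirely.  */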
22711
22712 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22713 for SF/DF scalars.  Move TRUE_COND to DEST if OP applied to the operands
22714 of the last comparison is nonzero/true, FALSE_COND if it is zero/false.
22715 Return 0 if the hardware has no such operation.  */
22716
22717 static int
22718 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22719 {
22720 enum rtx_code code = GET_CODE (op);
22721 rtx op0 = XEXP (op, 0);
22722 rtx op1 = XEXP (op, 1);
22723 machine_mode compare_mode = GET_MODE (op0);
22724 machine_mode result_mode = GET_MODE (dest);
22725 bool max_p = false;
22726
22727 if (result_mode != compare_mode)
22728 return 0;
22729
22730 if (code == GE || code == GT)
22731 max_p = true;
22732 else if (code == LE || code == LT)
22733 max_p = false;
22734 else
22735 return 0;
22736
22737 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22738 ;
22739
22740 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22741 max_p = !max_p;
22742
22743 else
22744 return 0;
22745
22746 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22747 return 1;
22748 }
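
/* For instance, DFmode "a > b ? a : b" maps to SMAX here and is
   emitted by rs6000_emit_minmax as a single xsmaxcdp, the ISA 3.0
   "type-C" maximum whose NaN/zero behaviour matches this C idiom,
   while the swapped form "a > b ? b : a" becomes xsmincdp.  */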
22749
22750 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22751 XXSEL instructions for SF/DF scalars.  Move TRUE_COND to DEST if OP applied
22752 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22753 zero/false. Return 0 if the hardware has no such operation. */
22754
22755 static int
22756 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22757 {
22758 enum rtx_code code = GET_CODE (op);
22759 rtx op0 = XEXP (op, 0);
22760 rtx op1 = XEXP (op, 1);
22761 machine_mode result_mode = GET_MODE (dest);
22762 rtx compare_rtx;
22763 rtx cmove_rtx;
22764 rtx clobber_rtx;
22765
22766 if (!can_create_pseudo_p ())
22767 return 0;
22768
22769 switch (code)
22770 {
22771 case EQ:
22772 case GE:
22773 case GT:
22774 break;
22775
22776 case NE:
22777 case LT:
22778 case LE:
22779 code = swap_condition (code);
22780 std::swap (op0, op1);
22781 break;
22782
22783 default:
22784 return 0;
22785 }
22786
22787 /* Generate: [(parallel [(set (dest)
22788 (if_then_else (op (cmp1) (cmp2))
22789 (true)
22790 (false)))
22791 (clobber (scratch))])]. */
22792
22793 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22794 cmove_rtx = gen_rtx_SET (dest,
22795 gen_rtx_IF_THEN_ELSE (result_mode,
22796 compare_rtx,
22797 true_cond,
22798 false_cond));
22799
22800 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22801 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22802 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22803
22804 return 1;
22805 }
22806
22807 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22808 operands of the last comparison is nonzero/true, FALSE_COND if it
22809 is zero/false. Return 0 if the hardware has no such operation. */
22810
22811 int
22812 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22813 {
22814 enum rtx_code code = GET_CODE (op);
22815 rtx op0 = XEXP (op, 0);
22816 rtx op1 = XEXP (op, 1);
22817 machine_mode compare_mode = GET_MODE (op0);
22818 machine_mode result_mode = GET_MODE (dest);
22819 rtx temp;
22820 bool is_against_zero;
22821
22822 /* These modes should always match. */
22823 if (GET_MODE (op1) != compare_mode
22824 /* In the isel case however, we can use a compare immediate, so
22825 op1 may be a small constant. */
22826 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22827 return 0;
22828 if (GET_MODE (true_cond) != result_mode)
22829 return 0;
22830 if (GET_MODE (false_cond) != result_mode)
22831 return 0;
22832
22833 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22834 if (TARGET_P9_MINMAX
22835 && (compare_mode == SFmode || compare_mode == DFmode)
22836 && (result_mode == SFmode || result_mode == DFmode))
22837 {
22838 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22839 return 1;
22840
22841 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22842 return 1;
22843 }
22844
22845 /* Don't allow using floating point comparisons for integer results for
22846 now. */
22847 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22848 return 0;
22849
22850 /* First, work out if the hardware can do this at all, or
22851 if it's too slow.... */
22852 if (!FLOAT_MODE_P (compare_mode))
22853 {
22854 if (TARGET_ISEL)
22855 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22856 return 0;
22857 }
22858
22859 is_against_zero = op1 == CONST0_RTX (compare_mode);
22860
22861 /* A floating-point subtract might overflow, underflow, or produce
22862 an inexact result, thus changing the floating-point flags, so it
22863 can't be generated if we care about that. It's safe if one side
22864 of the construct is zero, since then no subtract will be
22865 generated. */
22866 if (SCALAR_FLOAT_MODE_P (compare_mode)
22867 && flag_trapping_math && ! is_against_zero)
22868 return 0;
22869
22870 /* Eliminate half of the comparisons by switching operands, this
22871 makes the remaining code simpler. */
22872 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22873 || code == LTGT || code == LT || code == UNLE)
22874 {
22875 code = reverse_condition_maybe_unordered (code);
22876 temp = true_cond;
22877 true_cond = false_cond;
22878 false_cond = temp;
22879 }
22880
22881 /* UNEQ and LTGT take four instructions for a comparison with zero,
22882 so it'll probably be faster to use a branch here too.  */
22883 if (code == UNEQ && HONOR_NANS (compare_mode))
22884 return 0;
22885
22886 /* We're going to try to implement comparisons by performing
22887 a subtract, then comparing against zero. Unfortunately,
22888 Inf - Inf is NaN, which is not zero, and so if we don't
22889 know that the operand is finite and the comparison
22890 would treat EQ differently from UNORDERED, we can't do it.  */
22891 if (HONOR_INFINITIES (compare_mode)
22892 && code != GT && code != UNGE
22893 && (GET_CODE (op1) != CONST_DOUBLE
22894 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22895 /* Constructs of the form (a OP b ? a : b) are safe. */
22896 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22897 || (! rtx_equal_p (op0, true_cond)
22898 && ! rtx_equal_p (op1, true_cond))))
22899 return 0;
22900
22901 /* At this point we know we can use fsel. */
22902
22903 /* Reduce the comparison to a comparison against zero. */
22904 if (! is_against_zero)
22905 {
22906 temp = gen_reg_rtx (compare_mode);
22907 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22908 op0 = temp;
22909 op1 = CONST0_RTX (compare_mode);
22910 }
22911
22912 /* If we don't care about NaNs we can reduce some of the comparisons
22913 down to faster ones. */
22914 if (! HONOR_NANS (compare_mode))
22915 switch (code)
22916 {
22917 case GT:
22918 code = LE;
22919 temp = true_cond;
22920 true_cond = false_cond;
22921 false_cond = temp;
22922 break;
22923 case UNGE:
22924 code = GE;
22925 break;
22926 case UNEQ:
22927 code = EQ;
22928 break;
22929 default:
22930 break;
22931 }
22932
22933 /* Now, reduce everything down to a GE. */
22934 switch (code)
22935 {
22936 case GE:
22937 break;
22938
22939 case LE:
22940 temp = gen_reg_rtx (compare_mode);
22941 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22942 op0 = temp;
22943 break;
22944
22945 case ORDERED:
22946 temp = gen_reg_rtx (compare_mode);
22947 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22948 op0 = temp;
22949 break;
22950
22951 case EQ:
22952 temp = gen_reg_rtx (compare_mode);
22953 emit_insn (gen_rtx_SET (temp,
22954 gen_rtx_NEG (compare_mode,
22955 gen_rtx_ABS (compare_mode, op0))));
22956 op0 = temp;
22957 break;
22958
22959 case UNGE:
22960 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22961 temp = gen_reg_rtx (result_mode);
22962 emit_insn (gen_rtx_SET (temp,
22963 gen_rtx_IF_THEN_ELSE (result_mode,
22964 gen_rtx_GE (VOIDmode,
22965 op0, op1),
22966 true_cond, false_cond)));
22967 false_cond = true_cond;
22968 true_cond = temp;
22969
22970 temp = gen_reg_rtx (compare_mode);
22971 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22972 op0 = temp;
22973 break;
22974
22975 case GT:
22976 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22977 temp = gen_reg_rtx (result_mode);
22978 emit_insn (gen_rtx_SET (temp,
22979 gen_rtx_IF_THEN_ELSE (result_mode,
22980 gen_rtx_GE (VOIDmode,
22981 op0, op1),
22982 true_cond, false_cond)));
22983 true_cond = false_cond;
22984 false_cond = temp;
22985
22986 temp = gen_reg_rtx (compare_mode);
22987 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22988 op0 = temp;
22989 break;
22990
22991 default:
22992 gcc_unreachable ();
22993 }
22994
22995 emit_insn (gen_rtx_SET (dest,
22996 gen_rtx_IF_THEN_ELSE (result_mode,
22997 gen_rtx_GE (VOIDmode,
22998 op0, op1),
22999 true_cond, false_cond)));
23000 return 1;
23001 }
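
/* Once everything has been reduced to a GE test against zero, the
   final if_then_else matches the fsel pattern, so with -ffast-math a
   DFmode "a >= b ? x : y" can come out as roughly

	fsub	fT,fA,fB
	fsel	fD,fT,fX,fY	; fD = (fT >= 0.0) ? fX : fY  */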
23002
23003 /* Same as above, but for ints (isel). */
23004
23005 int
23006 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23007 {
23008 rtx condition_rtx, cr;
23009 machine_mode mode = GET_MODE (dest);
23010 enum rtx_code cond_code;
23011 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23012 bool signedp;
23013
23014 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23015 return 0;
23016
23017 /* We still have to do the compare, because isel doesn't do a
23018 compare; it just looks at the CRx bits set by a previous compare
23019 instruction. */
23020 condition_rtx = rs6000_generate_compare (op, mode);
23021 cond_code = GET_CODE (condition_rtx);
23022 cr = XEXP (condition_rtx, 0);
23023 signedp = GET_MODE (cr) == CCmode;
23024
23025 isel_func = (mode == SImode
23026 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23027 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23028
23029 switch (cond_code)
23030 {
23031 case LT: case GT: case LTU: case GTU: case EQ:
23032 /* isel handles these directly. */
23033 break;
23034
23035 default:
23036 /* We need to swap the sense of the comparison. */
23037 {
23038 std::swap (false_cond, true_cond);
23039 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23040 }
23041 break;
23042 }
23043
23044 false_cond = force_reg (mode, false_cond);
23045 if (true_cond != const0_rtx)
23046 true_cond = force_reg (mode, true_cond);
23047
23048 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23049
23050 return 1;
23051 }
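
/* For example, SImode "r = (a < b) ? c : d" on an isel target is one
   compare plus one conditional select, roughly

	cmpw	cr0,rA,rB
	isel	rD,rC,rE,0	; rD = CR0.LT ? rC : rE  */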
23052
23053 void
23054 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23055 {
23056 machine_mode mode = GET_MODE (op0);
23057 enum rtx_code c;
23058 rtx target;
23059
23060 /* VSX/altivec have direct min/max insns. */
23061 if ((code == SMAX || code == SMIN)
23062 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23063 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23064 {
23065 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23066 return;
23067 }
23068
23069 if (code == SMAX || code == SMIN)
23070 c = GE;
23071 else
23072 c = GEU;
23073
23074 if (code == SMAX || code == UMAX)
23075 target = emit_conditional_move (dest, c, op0, op1, mode,
23076 op0, op1, mode, 0);
23077 else
23078 target = emit_conditional_move (dest, c, op0, op1, mode,
23079 op1, op0, mode, 0);
23080 gcc_assert (target);
23081 if (target != dest)
23082 emit_move_insn (dest, target);
23083 }
23084
23085 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23086 COND is true. Mark the jump as unlikely to be taken. */
23087
23088 static void
23089 emit_unlikely_jump (rtx cond, rtx label)
23090 {
23091 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23092 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23093 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23094 }
23095
23096 /* A subroutine of the atomic operation splitters. Emit a load-locked
23097 instruction in MODE.  For QI/HImode, possibly use a pattern that includes
23098 the zero_extend operation. */
23099
23100 static void
23101 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23102 {
23103 rtx (*fn) (rtx, rtx) = NULL;
23104
23105 switch (mode)
23106 {
23107 case E_QImode:
23108 fn = gen_load_lockedqi;
23109 break;
23110 case E_HImode:
23111 fn = gen_load_lockedhi;
23112 break;
23113 case E_SImode:
23114 if (GET_MODE (mem) == QImode)
23115 fn = gen_load_lockedqi_si;
23116 else if (GET_MODE (mem) == HImode)
23117 fn = gen_load_lockedhi_si;
23118 else
23119 fn = gen_load_lockedsi;
23120 break;
23121 case E_DImode:
23122 fn = gen_load_lockeddi;
23123 break;
23124 case E_TImode:
23125 fn = gen_load_lockedti;
23126 break;
23127 default:
23128 gcc_unreachable ();
23129 }
23130 emit_insn (fn (reg, mem));
23131 }
23132
23133 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23134 instruction in MODE. */
23135
23136 static void
23137 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23138 {
23139 rtx (*fn) (rtx, rtx, rtx) = NULL;
23140
23141 switch (mode)
23142 {
23143 case E_QImode:
23144 fn = gen_store_conditionalqi;
23145 break;
23146 case E_HImode:
23147 fn = gen_store_conditionalhi;
23148 break;
23149 case E_SImode:
23150 fn = gen_store_conditionalsi;
23151 break;
23152 case E_DImode:
23153 fn = gen_store_conditionaldi;
23154 break;
23155 case E_TImode:
23156 fn = gen_store_conditionalti;
23157 break;
23158 default:
23159 gcc_unreachable ();
23160 }
23161
23162 /* Emit sync before stwcx. to address PPC405 Erratum. */
23163 if (PPC405_ERRATUM77)
23164 emit_insn (gen_hwsync ());
23165
23166 emit_insn (fn (res, mem, val));
23167 }
23168
23169 /* Expand barriers before and after a load_locked/store_cond sequence. */
23170
23171 static rtx
23172 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23173 {
23174 rtx addr = XEXP (mem, 0);
23175
23176 if (!legitimate_indirect_address_p (addr, reload_completed)
23177 && !legitimate_indexed_address_p (addr, reload_completed))
23178 {
23179 addr = force_reg (Pmode, addr);
23180 mem = replace_equiv_address_nv (mem, addr);
23181 }
23182
23183 switch (model)
23184 {
23185 case MEMMODEL_RELAXED:
23186 case MEMMODEL_CONSUME:
23187 case MEMMODEL_ACQUIRE:
23188 break;
23189 case MEMMODEL_RELEASE:
23190 case MEMMODEL_ACQ_REL:
23191 emit_insn (gen_lwsync ());
23192 break;
23193 case MEMMODEL_SEQ_CST:
23194 emit_insn (gen_hwsync ());
23195 break;
23196 default:
23197 gcc_unreachable ();
23198 }
23199 return mem;
23200 }
23201
23202 static void
23203 rs6000_post_atomic_barrier (enum memmodel model)
23204 {
23205 switch (model)
23206 {
23207 case MEMMODEL_RELAXED:
23208 case MEMMODEL_CONSUME:
23209 case MEMMODEL_RELEASE:
23210 break;
23211 case MEMMODEL_ACQUIRE:
23212 case MEMMODEL_ACQ_REL:
23213 case MEMMODEL_SEQ_CST:
23214 emit_insn (gen_isync ());
23215 break;
23216 default:
23217 gcc_unreachable ();
23218 }
23219 }
23220
23221 /* A subroutine of the various atomic expanders. For sub-word operations,
23222 we must adjust things to operate on SImode. Given the original MEM,
23223 return a new aligned MEM.  Also build and return the quantities by
23224 which to shift and mask. */
23225
23226 static rtx
23227 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23228 {
23229 rtx addr, align, shift, mask, mem;
23230 HOST_WIDE_INT shift_mask;
23231 machine_mode mode = GET_MODE (orig_mem);
23232
23233 /* For smaller modes, we have to implement this via SImode. */
23234 shift_mask = (mode == QImode ? 0x18 : 0x10);
23235
23236 addr = XEXP (orig_mem, 0);
23237 addr = force_reg (GET_MODE (addr), addr);
23238
23239 /* Aligned memory containing subword. Generate a new memory. We
23240 do not want any of the existing MEM_ATTR data, as we're now
23241 accessing memory outside the original object. */
23242 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23243 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23244 mem = gen_rtx_MEM (SImode, align);
23245 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23246 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23247 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23248
23249 /* Shift amount for subword relative to aligned word. */
23250 shift = gen_reg_rtx (SImode);
23251 addr = gen_lowpart (SImode, addr);
23252 rtx tmp = gen_reg_rtx (SImode);
23253 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23254 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23255 if (BYTES_BIG_ENDIAN)
23256 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23257 shift, 1, OPTAB_LIB_WIDEN);
23258 *pshift = shift;
23259
23260 /* Mask for insertion. */
23261 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23262 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23263 *pmask = mask;
23264
23265 return mem;
23266 }
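
/* Worked example (little-endian QImode): for a byte at address A the
   aligned word is at A & -4, the shift is (A & 3) * 8 -- computed
   above as (A << 3) & 0x18 -- and the mask is 0xff << shift.  On a
   big-endian target the shift is additionally XORed with 0x18 so that
   byte 0 lands at the most-significant end of the word.  */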
23267
23268 /* A subroutine of the various atomic expanders. For sub-word operands,
23269 combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */
23270
23271 static rtx
23272 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23273 {
23274 rtx x;
23275
23276 x = gen_reg_rtx (SImode);
23277 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23278 gen_rtx_NOT (SImode, mask),
23279 oldval)));
23280
23281 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23282
23283 return x;
23284 }
23285
23286 /* A subroutine of the various atomic expanders. For sub-word operands,
23287 extract WIDE to NARROW via SHIFT. */
23288
23289 static void
23290 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23291 {
23292 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23293 wide, 1, OPTAB_LIB_WIDEN);
23294 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23295 }
23296
23297 /* Expand an atomic compare and swap operation. */
23298
23299 void
23300 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23301 {
23302 rtx boolval, retval, mem, oldval, newval, cond;
23303 rtx label1, label2, x, mask, shift;
23304 machine_mode mode, orig_mode;
23305 enum memmodel mod_s, mod_f;
23306 bool is_weak;
23307
23308 boolval = operands[0];
23309 retval = operands[1];
23310 mem = operands[2];
23311 oldval = operands[3];
23312 newval = operands[4];
23313 is_weak = (INTVAL (operands[5]) != 0);
23314 mod_s = memmodel_base (INTVAL (operands[6]));
23315 mod_f = memmodel_base (INTVAL (operands[7]));
23316 orig_mode = mode = GET_MODE (mem);
23317
23318 mask = shift = NULL_RTX;
23319 if (mode == QImode || mode == HImode)
23320 {
23321 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23322 lwarx plus shift/mask operations.  With power8, we need to do the
23323 comparison in SImode, but the store is still done in QI/HImode. */
23324 oldval = convert_modes (SImode, mode, oldval, 1);
23325
23326 if (!TARGET_SYNC_HI_QI)
23327 {
23328 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23329
23330 /* Shift and mask OLDVAL into position within the word.  */
23331 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23332 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23333
23334 /* Shift and mask NEWVAL into position within the word. */
23335 newval = convert_modes (SImode, mode, newval, 1);
23336 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23337 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23338 }
23339
23340 /* Prepare to adjust the return value. */
23341 retval = gen_reg_rtx (SImode);
23342 mode = SImode;
23343 }
23344 else if (reg_overlap_mentioned_p (retval, oldval))
23345 oldval = copy_to_reg (oldval);
23346
23347 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23348 oldval = copy_to_mode_reg (mode, oldval);
23349
23350 if (reg_overlap_mentioned_p (retval, newval))
23351 newval = copy_to_reg (newval);
23352
23353 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23354
23355 label1 = NULL_RTX;
23356 if (!is_weak)
23357 {
23358 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23359 emit_label (XEXP (label1, 0));
23360 }
23361 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23362
23363 emit_load_locked (mode, retval, mem);
23364
23365 x = retval;
23366 if (mask)
23367 x = expand_simple_binop (SImode, AND, retval, mask,
23368 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23369
23370 cond = gen_reg_rtx (CCmode);
23371 /* If we have TImode, synthesize a comparison. */
23372 if (mode != TImode)
23373 x = gen_rtx_COMPARE (CCmode, x, oldval);
23374 else
23375 {
23376 rtx xor1_result = gen_reg_rtx (DImode);
23377 rtx xor2_result = gen_reg_rtx (DImode);
23378 rtx or_result = gen_reg_rtx (DImode);
23379 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23380 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23381 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23382 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23383
23384 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23385 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23386 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23387 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23388 }
23389
23390 emit_insn (gen_rtx_SET (cond, x));
23391
23392 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23393 emit_unlikely_jump (x, label2);
23394
23395 x = newval;
23396 if (mask)
23397 x = rs6000_mask_atomic_subword (retval, newval, mask);
23398
23399 emit_store_conditional (orig_mode, cond, mem, x);
23400
23401 if (!is_weak)
23402 {
23403 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23404 emit_unlikely_jump (x, label1);
23405 }
23406
23407 if (!is_mm_relaxed (mod_f))
23408 emit_label (XEXP (label2, 0));
23409
23410 rs6000_post_atomic_barrier (mod_s);
23411
23412 if (is_mm_relaxed (mod_f))
23413 emit_label (XEXP (label2, 0));
23414
23415 if (shift)
23416 rs6000_finish_atomic_subword (operands[1], retval, shift);
23417 else if (mode != GET_MODE (operands[1]))
23418 convert_move (operands[1], retval, 1);
23419
23420 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23421 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23422 emit_insn (gen_rtx_SET (boolval, x));
23423 }
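
/* For a word-sized strong compare-and-swap with seq_cst ordering, the
   expansion above boils down to the classic sequence (a sketch):

	hwsync
   1:	lwarx	rT,0,rM		; load-reserve
	cmpw	cr0,rT,rOld
	bne-	cr0,2f		; values differ -> fail
	stwcx.	rNew,0,rM	; store-conditional, sets CR0
	bne-	cr0,1b		; lost the reservation -> retry
   2:	isync

   leaving CR0.EQ as the success/failure flag, as noted above.  */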
23424
23425 /* Expand an atomic exchange operation. */
23426
23427 void
23428 rs6000_expand_atomic_exchange (rtx operands[])
23429 {
23430 rtx retval, mem, val, cond;
23431 machine_mode mode;
23432 enum memmodel model;
23433 rtx label, x, mask, shift;
23434
23435 retval = operands[0];
23436 mem = operands[1];
23437 val = operands[2];
23438 model = memmodel_base (INTVAL (operands[3]));
23439 mode = GET_MODE (mem);
23440
23441 mask = shift = NULL_RTX;
23442 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23443 {
23444 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23445
23446 /* Shift and mask VAL into position within the word.  */
23447 val = convert_modes (SImode, mode, val, 1);
23448 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23449 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23450
23451 /* Prepare to adjust the return value. */
23452 retval = gen_reg_rtx (SImode);
23453 mode = SImode;
23454 }
23455
23456 mem = rs6000_pre_atomic_barrier (mem, model);
23457
23458 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23459 emit_label (XEXP (label, 0));
23460
23461 emit_load_locked (mode, retval, mem);
23462
23463 x = val;
23464 if (mask)
23465 x = rs6000_mask_atomic_subword (retval, val, mask);
23466
23467 cond = gen_reg_rtx (CCmode);
23468 emit_store_conditional (mode, cond, mem, x);
23469
23470 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23471 emit_unlikely_jump (x, label);
23472
23473 rs6000_post_atomic_barrier (model);
23474
23475 if (shift)
23476 rs6000_finish_atomic_subword (operands[0], retval, shift);
23477 }
23478
23479 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23480 to perform. MEM is the memory on which to operate. VAL is the second
23481 operand of the binary operator. BEFORE and AFTER are optional locations to
23482 return the value of MEM either before or after the operation.  MODEL_RTX
23483 is a CONST_INT containing the memory model to use. */
23484
23485 void
23486 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23487 rtx orig_before, rtx orig_after, rtx model_rtx)
23488 {
23489 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23490 machine_mode mode = GET_MODE (mem);
23491 machine_mode store_mode = mode;
23492 rtx label, x, cond, mask, shift;
23493 rtx before = orig_before, after = orig_after;
23494
23495 mask = shift = NULL_RTX;
23496 /* On power8, we want to use SImode for the operation. On previous systems,
23497 use the operation in a subword and shift/mask to get the proper byte or
23498 halfword. */
23499 if (mode == QImode || mode == HImode)
23500 {
23501 if (TARGET_SYNC_HI_QI)
23502 {
23503 val = convert_modes (SImode, mode, val, 1);
23504
23505 /* Prepare to adjust the return value. */
23506 before = gen_reg_rtx (SImode);
23507 if (after)
23508 after = gen_reg_rtx (SImode);
23509 mode = SImode;
23510 }
23511 else
23512 {
23513 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23514
23515 /* Shift and mask VAL into position within the word.  */
23516 val = convert_modes (SImode, mode, val, 1);
23517 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23518 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23519
23520 switch (code)
23521 {
23522 case IOR:
23523 case XOR:
23524 /* We've already zero-extended VAL. That is sufficient to
23525 make certain that it does not affect other bits. */
23526 mask = NULL;
23527 break;
23528
23529 case AND:
23530 /* If we make certain that all of the other bits in VAL are
23531 set, that will be sufficient to not affect other bits. */
23532 x = gen_rtx_NOT (SImode, mask);
23533 x = gen_rtx_IOR (SImode, x, val);
23534 emit_insn (gen_rtx_SET (val, x));
23535 mask = NULL;
23536 break;
23537
23538 case NOT:
23539 case PLUS:
23540 case MINUS:
23541 /* These will all affect bits outside the field and need
23542 adjustment via MASK within the loop. */
23543 break;
23544
23545 default:
23546 gcc_unreachable ();
23547 }
23548
23549 /* Prepare to adjust the return value. */
23550 before = gen_reg_rtx (SImode);
23551 if (after)
23552 after = gen_reg_rtx (SImode);
23553 store_mode = mode = SImode;
23554 }
23555 }
23556
23557 mem = rs6000_pre_atomic_barrier (mem, model);
23558
23559 label = gen_label_rtx ();
23560 emit_label (label);
23561 label = gen_rtx_LABEL_REF (VOIDmode, label);
23562
23563 if (before == NULL_RTX)
23564 before = gen_reg_rtx (mode);
23565
23566 emit_load_locked (mode, before, mem);
23567
23568 if (code == NOT)
23569 {
23570 x = expand_simple_binop (mode, AND, before, val,
23571 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23572 after = expand_simple_unop (mode, NOT, x, after, 1);
23573 }
23574 else
23575 {
23576 after = expand_simple_binop (mode, code, before, val,
23577 after, 1, OPTAB_LIB_WIDEN);
23578 }
23579
23580 x = after;
23581 if (mask)
23582 {
23583 x = expand_simple_binop (SImode, AND, after, mask,
23584 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23585 x = rs6000_mask_atomic_subword (before, x, mask);
23586 }
23587 else if (store_mode != mode)
23588 x = convert_modes (store_mode, mode, x, 1);
23589
23590 cond = gen_reg_rtx (CCmode);
23591 emit_store_conditional (store_mode, cond, mem, x);
23592
23593 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23594 emit_unlikely_jump (x, label);
23595
23596 rs6000_post_atomic_barrier (model);
23597
23598 if (shift)
23599 {
23600 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23601 then do the calculations in a SImode register.  */
23602 if (orig_before)
23603 rs6000_finish_atomic_subword (orig_before, before, shift);
23604 if (orig_after)
23605 rs6000_finish_atomic_subword (orig_after, after, shift);
23606 }
23607 else if (store_mode != mode)
23608 {
23609 /* QImode/HImode on machines with lbarx/lharx where we do the native
23610 operation and then do the calculations in a SImode register.  */
23611 if (orig_before)
23612 convert_move (orig_before, before, 1);
23613 if (orig_after)
23614 convert_move (orig_after, after, 1);
23615 }
23616 else if (orig_after && after != orig_after)
23617 emit_move_insn (orig_after, after);
23618 }
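
/* For example, a full-word __atomic_fetch_add with relaxed ordering
   expands to roughly

   1:	lwarx	rB,0,rM		; rB = *mem (the "before" value)
	add	rA,rB,rV	; rA = the "after" value
	stwcx.	rA,0,rM
	bne-	cr0,1b

   and the sub-word forms wrap the same loop around the shift/mask
   machinery from rs6000_adjust_atomic_subword.  */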
23619
23620 /* Emit instructions to move SRC to DST. Called by splitters for
23621 multi-register moves. It will emit at most one instruction for
23622 each register that is accessed; that is, it won't emit li/lis pairs
23623 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23624 register. */
23625
23626 void
23627 rs6000_split_multireg_move (rtx dst, rtx src)
23628 {
23629 /* The register number of the first register being moved. */
23630 int reg;
23631 /* The mode that is to be moved. */
23632 machine_mode mode;
23633 /* The mode that the move is being done in, and its size. */
23634 machine_mode reg_mode;
23635 int reg_mode_size;
23636 /* The number of registers that will be moved. */
23637 int nregs;
23638
23639 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23640 mode = GET_MODE (dst);
23641 nregs = hard_regno_nregs (reg, mode);
23642 if (FP_REGNO_P (reg))
23643 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23644 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23645 else if (ALTIVEC_REGNO_P (reg))
23646 reg_mode = V16QImode;
23647 else
23648 reg_mode = word_mode;
23649 reg_mode_size = GET_MODE_SIZE (reg_mode);
23650
23651 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23652
23653 /* TDmode residing in FP registers is special, since the ISA requires that
23654 the lower-numbered word of a register pair is always the most significant
23655 word, even in little-endian mode. This does not match the usual subreg
23656 semantics, so we cannot use simplify_gen_subreg in those cases.  Access
23657 the appropriate constituent registers "by hand" in little-endian mode.
23658
23659 Note we do not need to check for destructive overlap here since TDmode
23660 can only reside in even/odd register pairs. */
23661 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23662 {
23663 rtx p_src, p_dst;
23664 int i;
23665
23666 for (i = 0; i < nregs; i++)
23667 {
23668 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23669 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23670 else
23671 p_src = simplify_gen_subreg (reg_mode, src, mode,
23672 i * reg_mode_size);
23673
23674 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23675 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23676 else
23677 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23678 i * reg_mode_size);
23679
23680 emit_insn (gen_rtx_SET (p_dst, p_src));
23681 }
23682
23683 return;
23684 }
23685
23686 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23687 {
23688 /* Move register range backwards, if we might have destructive
23689 overlap. */
23690 int i;
23691 for (i = nregs - 1; i >= 0; i--)
23692 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23693 i * reg_mode_size),
23694 simplify_gen_subreg (reg_mode, src, mode,
23695 i * reg_mode_size)));
23696 }
23697 else
23698 {
23699 int i;
23700 int j = -1;
23701 bool used_update = false;
23702 rtx restore_basereg = NULL_RTX;
23703
23704 if (MEM_P (src) && INT_REGNO_P (reg))
23705 {
23706 rtx breg;
23707
23708 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23709 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23710 {
23711 rtx delta_rtx;
23712 breg = XEXP (XEXP (src, 0), 0);
23713 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23714 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23715 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23716 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23717 src = replace_equiv_address (src, breg);
23718 }
23719 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23720 {
23721 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23722 {
23723 rtx basereg = XEXP (XEXP (src, 0), 0);
23724 if (TARGET_UPDATE)
23725 {
23726 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23727 emit_insn (gen_rtx_SET (ndst,
23728 gen_rtx_MEM (reg_mode,
23729 XEXP (src, 0))));
23730 used_update = true;
23731 }
23732 else
23733 emit_insn (gen_rtx_SET (basereg,
23734 XEXP (XEXP (src, 0), 1)));
23735 src = replace_equiv_address (src, basereg);
23736 }
23737 else
23738 {
23739 rtx basereg = gen_rtx_REG (Pmode, reg);
23740 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23741 src = replace_equiv_address (src, basereg);
23742 }
23743 }
23744
23745 breg = XEXP (src, 0);
23746 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23747 breg = XEXP (breg, 0);
23748
23749 /* If the base register we are using to address memory is
23750 also a destination reg, then change that register last. */
23751 if (REG_P (breg)
23752 && REGNO (breg) >= REGNO (dst)
23753 && REGNO (breg) < REGNO (dst) + nregs)
23754 j = REGNO (breg) - REGNO (dst);
23755 }
23756 else if (MEM_P (dst) && INT_REGNO_P (reg))
23757 {
23758 rtx breg;
23759
23760 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23761 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23762 {
23763 rtx delta_rtx;
23764 breg = XEXP (XEXP (dst, 0), 0);
23765 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23766 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23767 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23768
23769 /* We have to update the breg before doing the store.
23770 Use store with update, if available. */
23771
23772 if (TARGET_UPDATE)
23773 {
23774 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23775 emit_insn (TARGET_32BIT
23776 ? (TARGET_POWERPC64
23777 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23778 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23779 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23780 used_update = true;
23781 }
23782 else
23783 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23784 dst = replace_equiv_address (dst, breg);
23785 }
23786 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23787 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23788 {
23789 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23790 {
23791 rtx basereg = XEXP (XEXP (dst, 0), 0);
23792 if (TARGET_UPDATE)
23793 {
23794 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23795 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23796 XEXP (dst, 0)),
23797 nsrc));
23798 used_update = true;
23799 }
23800 else
23801 emit_insn (gen_rtx_SET (basereg,
23802 XEXP (XEXP (dst, 0), 1)));
23803 dst = replace_equiv_address (dst, basereg);
23804 }
23805 else
23806 {
23807 rtx basereg = XEXP (XEXP (dst, 0), 0);
23808 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23809 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23810 && REG_P (basereg)
23811 && REG_P (offsetreg)
23812 && REGNO (basereg) != REGNO (offsetreg));
23813 if (REGNO (basereg) == 0)
23814 {
23815 rtx tmp = offsetreg;
23816 offsetreg = basereg;
23817 basereg = tmp;
23818 }
23819 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23820 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23821 dst = replace_equiv_address (dst, basereg);
23822 }
23823 }
23824 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23825 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23826 }
23827
23828 for (i = 0; i < nregs; i++)
23829 {
23830 /* Calculate index to next subword. */
23831 ++j;
23832 if (j == nregs)
23833 j = 0;
23834
23835 /* If compiler already emitted move of first word by
23836 store with update, no need to do anything. */
23837 if (j == 0 && used_update)
23838 continue;
23839
23840 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23841 j * reg_mode_size),
23842 simplify_gen_subreg (reg_mode, src, mode,
23843 j * reg_mode_size)));
23844 }
23845 if (restore_basereg != NULL_RTX)
23846 emit_insn (restore_basereg);
23847 }
23848 }
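
/* Example (a sketch): a TImode load into r10 on a 64-bit target splits
   into "ld r10,0(rB); ld r11,8(rB)", except that when rB is itself one
   of the destination registers the loop above rotates the order (via J)
   so that the base register is loaded last.  */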
23849
23850 \f
23851 /* This page contains routines that are used to determine what the
23852 function prologue and epilogue code will do and write them out. */
23853
23854 /* Determine whether register REG really needs to be saved.  */
23855
23856 static bool
23857 save_reg_p (int reg)
23858 {
23859 /* We need to mark the PIC offset register live for the same conditions
23860 as it is set up, or otherwise it won't be saved before we clobber it. */
23861
23862 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23863 {
23864 /* When calling eh_return, we must return true for all the cases
23865 where conditional_register_usage marks the PIC offset reg
23866 call used. */
23867 if (TARGET_TOC && TARGET_MINIMAL_TOC
23868 && (crtl->calls_eh_return
23869 || df_regs_ever_live_p (reg)
23870 || !constant_pool_empty_p ()))
23871 return true;
23872
23873 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
23874 && flag_pic)
23875 return true;
23876 }
23877
23878 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23879 }
23880
23881 /* Return the first fixed-point register that is required to be
23882 saved. 32 if none. */
23883
23884 int
23885 first_reg_to_save (void)
23886 {
23887 int first_reg;
23888
23889 /* Find lowest numbered live register. */
23890 for (first_reg = 13; first_reg <= 31; first_reg++)
23891 if (save_reg_p (first_reg))
23892 break;
23893
23894 #if TARGET_MACHO
23895 if (flag_pic
23896 && crtl->uses_pic_offset_table
23897 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
23898 return RS6000_PIC_OFFSET_TABLE_REGNUM;
23899 #endif
23900
23901 return first_reg;
23902 }
23903
23904 /* Similar, for FP regs. */
23905
23906 int
23907 first_fp_reg_to_save (void)
23908 {
23909 int first_reg;
23910
23911 /* Find lowest numbered live register. */
23912 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
23913 if (save_reg_p (first_reg))
23914 break;
23915
23916 return first_reg;
23917 }
23918
23919 /* Similar, for AltiVec regs. */
23920
23921 static int
23922 first_altivec_reg_to_save (void)
23923 {
23924 int i;
23925
23926 /* Stack frame remains as is unless we are in AltiVec ABI. */
23927 if (! TARGET_ALTIVEC_ABI)
23928 return LAST_ALTIVEC_REGNO + 1;
23929
23930 /* On Darwin, the unwind routines are compiled without
23931 TARGET_ALTIVEC, and use save_world to save/restore the
23932 altivec registers when necessary. */
23933 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23934 && ! TARGET_ALTIVEC)
23935 return FIRST_ALTIVEC_REGNO + 20;
23936
23937 /* Find lowest numbered live register. */
23938 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
23939 if (save_reg_p (i))
23940 break;
23941
23942 return i;
23943 }
23944
23945 /* Return a 32-bit mask of the AltiVec registers we need to set in
23946 VRSAVE. Bit n of the return value is 1 if Vn is live, where bits
23947 are numbered from the most significant end (the MSB is bit 0). */
23948
23949 static unsigned int
23950 compute_vrsave_mask (void)
23951 {
23952 unsigned int i, mask = 0;
23953
23954 /* On Darwin, the unwind routines are compiled without
23955 TARGET_ALTIVEC, and use save_world to save/restore the
23956 call-saved altivec registers when necessary. */
23957 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23958 && ! TARGET_ALTIVEC)
23959 mask |= 0xFFF;
23960
23961 /* First, find out if we use _any_ altivec registers. */
23962 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
23963 if (df_regs_ever_live_p (i))
23964 mask |= ALTIVEC_REG_BIT (i);
23965
23966 if (mask == 0)
23967 return mask;
23968
23969 /* Next, remove the argument registers from the set. These must
23970 be in the VRSAVE mask set by the caller, so we don't need to add
23971 them in again. More importantly, the mask we compute here is
23972 used to generate CLOBBERs in the set_vrsave insn, and we do not
23973 wish the argument registers to die. */
23974 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
23975 mask &= ~ALTIVEC_REG_BIT (i);
23976
23977 /* Similarly, remove the return value from the set. */
23978 {
23979 bool yes = false;
23980 diddle_return_value (is_altivec_return_reg, &yes);
23981 if (yes)
23982 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
23983 }
23984
23985 return mask;
23986 }
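
/* Usage sketch (illustrative, assuming the usual ALTIVEC_REG_BIT
   encoding with V0 at the MSB): if only V20 and V31 are live and no
   vector arguments or return value are involved, the result is

     mask == (ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 20)
	      | ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31));

   i.e. VRSAVE bits 20 and 31, counting the MSB as bit 0.  */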
23987
23988 /* For a very restricted set of circumstances, we can cut down the
23989 size of prologues/epilogues by calling our own save/restore-the-world
23990 routines. */
23991
23992 static void
23993 compute_save_world_info (rs6000_stack_t *info)
23994 {
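/* Tentatively assume the world save routines are usable; WORLD_SAVE_P
   inspects world_save_p, so it must be set before the full test below. */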
23995 info->world_save_p = 1;
23996 info->world_save_p
23997 = (WORLD_SAVE_P (info)
23998 && DEFAULT_ABI == ABI_DARWIN
23999 && !cfun->has_nonlocal_label
24000 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24001 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24002 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24003 && info->cr_save_p);
24004
24005 /* This will not work in conjunction with sibcalls. Make sure there
24006 are none. (This check is expensive, but seldom executed.) */
24007 if (WORLD_SAVE_P (info))
24008 {
24009 rtx_insn *insn;
24010 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24011 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24012 {
24013 info->world_save_p = 0;
24014 break;
24015 }
24016 }
24017
24018 if (WORLD_SAVE_P (info))
24019 {
24020 /* Even if we're not touching VRsave, make sure there's room on the
24021 stack for it, if it looks like we're calling SAVE_WORLD, which
24022 will attempt to save it. */
24023 info->vrsave_size = 4;
24024
24025 /* If we are going to save the world, we need to save the link register too. */
24026 info->lr_save_p = 1;
24027
24028 /* "Save" the VRsave register too if we're saving the world. */
24029 if (info->vrsave_mask == 0)
24030 info->vrsave_mask = compute_vrsave_mask ();
24031
24032 /* Because the Darwin register save/restore routines only handle
24033 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24034 check. */
24035 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24036 && (info->first_altivec_reg_save
24037 >= FIRST_SAVED_ALTIVEC_REGNO));
24038 }
24039
24040 return;
24041 }
24042
24043
24044 static void
24045 is_altivec_return_reg (rtx reg, void *xyes)
24046 {
24047 bool *yes = (bool *) xyes;
24048 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24049 *yes = true;
24050 }
24051
24052 \f
24053 /* Return whether REG is a global user reg or has been specified by
24054 -ffixed-REG. We should not restore these, and so cannot use
24055 lmw or out-of-line restore functions if there are any. We also
24056 can't save them (well, emit frame notes for them), because frame
24057 unwinding during exception handling will restore saved registers. */
24058
24059 static bool
24060 fixed_reg_p (int reg)
24061 {
24062 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24063 backend sets it, overriding anything the user might have given. */
24064 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24065 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24066 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24067 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24068 return false;
24069
24070 return fixed_regs[reg];
24071 }
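
/* For example (illustrative): compiling with -ffixed-r28 makes
   fixed_reg_p (28) return true, which below forces inline GPR restores
   (and disables lmw and the out-of-line restore routines) so that r28
   is never reloaded behind the user's back.  */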
24072
24073 /* Determine the strategy for saving/restoring registers. */
24074
24075 enum {
24076 SAVE_MULTIPLE = 0x1,
24077 SAVE_INLINE_GPRS = 0x2,
24078 SAVE_INLINE_FPRS = 0x4,
24079 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24080 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24081 SAVE_INLINE_VRS = 0x20,
24082 REST_MULTIPLE = 0x100,
24083 REST_INLINE_GPRS = 0x200,
24084 REST_INLINE_FPRS = 0x400,
24085 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24086 REST_INLINE_VRS = 0x1000
24087 };
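
/* The returned strategy word is tested bitwise, e.g. (a sketch, not
   actual caller code):

     int s = rs6000_savres_strategy (info, false);
     bool out_of_line_gprs = (s & SAVE_INLINE_GPRS) == 0;
     bool use_stmw = (s & SAVE_MULTIPLE) != 0;

   Note that the *_INLINE_* bits are set when the out-of-line routines
   are *not* used.  */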
24088
24089 static int
24090 rs6000_savres_strategy (rs6000_stack_t *info,
24091 bool using_static_chain_p)
24092 {
24093 int strategy = 0;
24094
24095 /* Select between in-line and out-of-line save and restore of regs.
24096 First, all the obvious cases where we don't use out-of-line. */
24097 if (crtl->calls_eh_return
24098 || cfun->machine->ra_need_lr)
24099 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24100 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24101 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24102
24103 if (info->first_gp_reg_save == 32)
24104 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24105
24106 if (info->first_fp_reg_save == 64)
24107 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24108
24109 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24110 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24111
24112 /* Define cutoff for using out-of-line functions to save registers. */
24113 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24114 {
24115 if (!optimize_size)
24116 {
24117 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24118 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24119 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24120 }
24121 else
24122 {
24123 /* Prefer out-of-line restore if it will exit. */
24124 if (info->first_fp_reg_save > 61)
24125 strategy |= SAVE_INLINE_FPRS;
24126 if (info->first_gp_reg_save > 29)
24127 {
24128 if (info->first_fp_reg_save == 64)
24129 strategy |= SAVE_INLINE_GPRS;
24130 else
24131 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24132 }
24133 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24134 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24135 }
24136 }
24137 else if (DEFAULT_ABI == ABI_DARWIN)
24138 {
24139 if (info->first_fp_reg_save > 60)
24140 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24141 if (info->first_gp_reg_save > 29)
24142 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24143 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24144 }
24145 else
24146 {
24147 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24148 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24149 || info->first_fp_reg_save > 61)
24150 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24151 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24152 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24153 }
24154
24155 /* Don't bother to try to save things out-of-line if r11 is occupied
24156 by the static chain. It would require too much fiddling and the
24157 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24158 pointer on Darwin, and AIX uses r1 or r12. */
24159 if (using_static_chain_p
24160 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24161 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24162 | SAVE_INLINE_GPRS
24163 | SAVE_INLINE_VRS);
24164
24165 /* Don't ever restore fixed regs. That means we can't use the
24166 out-of-line register restore functions if a fixed reg is in the
24167 range of regs restored. */
24168 if (!(strategy & REST_INLINE_FPRS))
24169 for (int i = info->first_fp_reg_save; i < 64; i++)
24170 if (fixed_regs[i])
24171 {
24172 strategy |= REST_INLINE_FPRS;
24173 break;
24174 }
24175
24176 /* We can only use the out-of-line routines to restore fprs if we've
24177 saved all the registers from first_fp_reg_save in the prologue.
24178 Otherwise, we risk loading garbage. Of course, if we have saved
24179 out-of-line then we know we haven't skipped any fprs. */
24180 if ((strategy & SAVE_INLINE_FPRS)
24181 && !(strategy & REST_INLINE_FPRS))
24182 for (int i = info->first_fp_reg_save; i < 64; i++)
24183 if (!save_reg_p (i))
24184 {
24185 strategy |= REST_INLINE_FPRS;
24186 break;
24187 }
24188
24189 /* Similarly, for altivec regs. */
24190 if (!(strategy & REST_INLINE_VRS))
24191 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24192 if (fixed_regs[i])
24193 {
24194 strategy |= REST_INLINE_VRS;
24195 break;
24196 }
24197
24198 if ((strategy & SAVE_INLINE_VRS)
24199 && !(strategy & REST_INLINE_VRS))
24200 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24201 if (!save_reg_p (i))
24202 {
24203 strategy |= REST_INLINE_VRS;
24204 break;
24205 }
24206
24207 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24208 saved is an out-of-line save or restore. Set up the value for
24209 the next test (excluding out-of-line gprs). */
24210 bool lr_save_p = (info->lr_save_p
24211 || !(strategy & SAVE_INLINE_FPRS)
24212 || !(strategy & SAVE_INLINE_VRS)
24213 || !(strategy & REST_INLINE_FPRS)
24214 || !(strategy & REST_INLINE_VRS));
24215
24216 if (TARGET_MULTIPLE
24217 && !TARGET_POWERPC64
24218 && info->first_gp_reg_save < 31
24219 && !(flag_shrink_wrap
24220 && flag_shrink_wrap_separate
24221 && optimize_function_for_speed_p (cfun)))
24222 {
24223 int count = 0;
24224 for (int i = info->first_gp_reg_save; i < 32; i++)
24225 if (save_reg_p (i))
24226 count++;
24227
24228 if (count <= 1)
24229 /* Don't use store multiple if only one reg needs to be
24230 saved. This can occur for example when the ABI_V4 pic reg
24231 (r30) needs to be saved to make calls, but r31 is not
24232 used. */
24233 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24234 else
24235 {
24236 /* Prefer store multiple for saves over out-of-line
24237 routines, since the store-multiple instruction will
24238 always be smaller. */
24239 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24240
24241 /* The situation is more complicated with load multiple.
24242 We'd prefer to use the out-of-line routines for restores,
24243 since the "exit" out-of-line routines can handle the
24244 restore of LR and the frame teardown. However, it doesn't
24245 make sense to use the out-of-line routine if that is the
24246 only reason we'd need to save LR, and we can't use the
24247 "exit" out-of-line gpr restore if we have saved some
24248 fprs. In those cases it is advantageous to use load
24249 multiple when available. */
24250 if (info->first_fp_reg_save != 64 || !lr_save_p)
24251 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24252 }
24253 }
24254
24255 /* Using the "exit" out-of-line routine does not improve code size
24256 if using it would require lr to be saved and if only saving one
24257 or two gprs. */
24258 else if (!lr_save_p && info->first_gp_reg_save > 29)
24259 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24260
24261 /* Don't ever restore fixed regs. */
24262 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24263 for (int i = info->first_gp_reg_save; i < 32; i++)
24264 if (fixed_reg_p (i))
24265 {
24266 strategy |= REST_INLINE_GPRS;
24267 strategy &= ~REST_MULTIPLE;
24268 break;
24269 }
24270
24271 /* We can only use load multiple or the out-of-line routines to
24272 restore gprs if we've saved all the registers from
24273 first_gp_reg_save. Otherwise, we risk loading garbage.
24274 Of course, if we have saved out-of-line or used stmw then we know
24275 we haven't skipped any gprs. */
24276 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24277 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24278 for (int i = info->first_gp_reg_save; i < 32; i++)
24279 if (!save_reg_p (i))
24280 {
24281 strategy |= REST_INLINE_GPRS;
24282 strategy &= ~REST_MULTIPLE;
24283 break;
24284 }
24285
24286 if (TARGET_ELF && TARGET_64BIT)
24287 {
24288 if (!(strategy & SAVE_INLINE_FPRS))
24289 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24290 else if (!(strategy & SAVE_INLINE_GPRS)
24291 && info->first_fp_reg_save == 64)
24292 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24293 }
24294 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24295 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24296
24297 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24298 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24299
24300 return strategy;
24301 }
24302
24303 /* Calculate the stack information for the current function. This is
24304 complicated by having two separate calling sequences, the AIX calling
24305 sequence and the V.4 calling sequence.
24306
24307 AIX (and Darwin/Mac OS X) stack frames look like:
24308 32-bit 64-bit
24309 SP----> +---------------------------------------+
24310 | back chain to caller | 0 0
24311 +---------------------------------------+
24312 | saved CR | 4 8 (8-11)
24313 +---------------------------------------+
24314 | saved LR | 8 16
24315 +---------------------------------------+
24316 | reserved for compilers | 12 24
24317 +---------------------------------------+
24318 | reserved for binders | 16 32
24319 +---------------------------------------+
24320 | saved TOC pointer | 20 40
24321 +---------------------------------------+
24322 | Parameter save area (+padding*) (P) | 24 48
24323 +---------------------------------------+
24324 | Alloca space (A) | 24+P etc.
24325 +---------------------------------------+
24326 | Local variable space (L) | 24+P+A
24327 +---------------------------------------+
24328 | Float/int conversion temporary (X) | 24+P+A+L
24329 +---------------------------------------+
24330 | Save area for AltiVec registers (W) | 24+P+A+L+X
24331 +---------------------------------------+
24332 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24333 +---------------------------------------+
24334 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24335 +---------------------------------------+
24336 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24337 +---------------------------------------+
24338 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24339 +---------------------------------------+
24340 old SP->| back chain to caller's caller |
24341 +---------------------------------------+
24342
24343 * If the alloca area is present, the parameter save area is
24344 padded so that the former starts 16-byte aligned.
24345
24346 The required alignment for AIX configurations is two words (i.e., 8
24347 or 16 bytes).
24348
24349 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24350
24351 SP----> +---------------------------------------+
24352 | Back chain to caller | 0
24353 +---------------------------------------+
24354 | Save area for CR | 8
24355 +---------------------------------------+
24356 | Saved LR | 16
24357 +---------------------------------------+
24358 | Saved TOC pointer | 24
24359 +---------------------------------------+
24360 | Parameter save area (+padding*) (P) | 32
24361 +---------------------------------------+
24362 | Alloca space (A) | 32+P
24363 +---------------------------------------+
24364 | Local variable space (L) | 32+P+A
24365 +---------------------------------------+
24366 | Save area for AltiVec registers (W) | 32+P+A+L
24367 +---------------------------------------+
24368 | AltiVec alignment padding (Y) | 32+P+A+L+W
24369 +---------------------------------------+
24370 | Save area for GP registers (G) | 32+P+A+L+W+Y
24371 +---------------------------------------+
24372 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24373 +---------------------------------------+
24374 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24375 +---------------------------------------+
24376
24377 * If the alloca area is present, the parameter save area is
24378 padded so that the former starts 16-byte aligned.
24379
24380 V.4 stack frames look like:
24381
24382 SP----> +---------------------------------------+
24383 | back chain to caller | 0
24384 +---------------------------------------+
24385 | caller's saved LR | 4
24386 +---------------------------------------+
24387 | Parameter save area (+padding*) (P) | 8
24388 +---------------------------------------+
24389 | Alloca space (A) | 8+P
24390 +---------------------------------------+
24391 | Varargs save area (V) | 8+P+A
24392 +---------------------------------------+
24393 | Local variable space (L) | 8+P+A+V
24394 +---------------------------------------+
24395 | Float/int conversion temporary (X) | 8+P+A+V+L
24396 +---------------------------------------+
24397 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24398 +---------------------------------------+
24399 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24400 +---------------------------------------+
24401 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24402 +---------------------------------------+
24403 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24404 +---------------------------------------+
24405 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24406 +---------------------------------------+
24407 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24408 +---------------------------------------+
24409 old SP->| back chain to caller's caller |
24410 +---------------------------------------+
24411
24412 * If the alloca area is present and the required alignment is
24413 16 bytes, the parameter save area is padded so that the
24414 alloca area starts 16-byte aligned.
24415
24416 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24417 given. (But note below and in sysv4.h that we require only 8 and
24418 may round up the size of our stack frame anyway. The historical
24419 reason is early versions of powerpc-linux which didn't properly
24420 align the stack at program startup. A happy side-effect is that
24421 -mno-eabi libraries can be used with -meabi programs.)
24422
24423 The EABI configuration defaults to the V.4 layout. However,
24424 the stack alignment requirements may differ. If -mno-eabi is not
24425 given, the required stack alignment is 8 bytes; if -mno-eabi is
24426 given, the required alignment is 16 bytes. (But see V.4 comment
24427 above.) */
24428
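/* Worked example (hypothetical ELFv2 function saving r30, r31 and f31,
   reg_size == 8):

     fp_size	    = 8 * (64 - 63)	   =  8
     gp_size	    = 8 * (32 - 30)	   = 16
     fp_save_offset = -fp_size		   = -8
     gp_save_offset = fp_save_offset - 16  = -24

   i.e. the save areas sit immediately below the caller's frame, as in
   the diagrams above; see rs6000_stack_info for the real computation.  */
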
24429 #ifndef ABI_STACK_BOUNDARY
24430 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24431 #endif
24432
24433 static rs6000_stack_t *
24434 rs6000_stack_info (void)
24435 {
24436 /* We should never be called for thunks, we are not set up for that. */
24437 gcc_assert (!cfun->is_thunk);
24438
24439 rs6000_stack_t *info = &stack_info;
24440 int reg_size = TARGET_32BIT ? 4 : 8;
24441 int ehrd_size;
24442 int ehcr_size;
24443 int save_align;
24444 int first_gp;
24445 HOST_WIDE_INT non_fixed_size;
24446 bool using_static_chain_p;
24447
24448 if (reload_completed && info->reload_completed)
24449 return info;
24450
24451 memset (info, 0, sizeof (*info));
24452 info->reload_completed = reload_completed;
24453
24454 /* Select which calling sequence. */
24455 info->abi = DEFAULT_ABI;
24456
24457 /* Calculate which registers need to be saved & save area size. */
24458 info->first_gp_reg_save = first_reg_to_save ();
24459 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24460 even if it currently looks like we won't. Reload may need it to
24461 get at a constant; if so, it will have already created a constant
24462 pool entry for it. */
24463 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24464 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24465 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24466 && crtl->uses_const_pool
24467 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24468 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24469 else
24470 first_gp = info->first_gp_reg_save;
24471
24472 info->gp_size = reg_size * (32 - first_gp);
24473
24474 info->first_fp_reg_save = first_fp_reg_to_save ();
24475 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24476
24477 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24478 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24479 - info->first_altivec_reg_save);
24480
24481 /* Does this function call anything? */
24482 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24483
24484 /* Determine if we need to save the condition code registers. */
24485 if (save_reg_p (CR2_REGNO)
24486 || save_reg_p (CR3_REGNO)
24487 || save_reg_p (CR4_REGNO))
24488 {
24489 info->cr_save_p = 1;
24490 if (DEFAULT_ABI == ABI_V4)
24491 info->cr_size = reg_size;
24492 }
24493
24494 /* If the current function calls __builtin_eh_return, then we need
24495 to allocate stack space for registers that will hold data for
24496 the exception handler. */
24497 if (crtl->calls_eh_return)
24498 {
24499 unsigned int i;
24500 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24501 continue;
24502
24503 ehrd_size = i * UNITS_PER_WORD;
24504 }
24505 else
24506 ehrd_size = 0;
24507
24508 /* In the ELFv2 ABI, we also need to allocate space for separate
24509 CR field save areas if the function calls __builtin_eh_return. */
24510 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24511 {
24512 /* This hard-codes that we have three call-saved CR fields. */
24513 ehcr_size = 3 * reg_size;
24514 /* We do *not* use the regular CR save mechanism. */
24515 info->cr_save_p = 0;
24516 }
24517 else
24518 ehcr_size = 0;
24519
24520 /* Determine various sizes. */
24521 info->reg_size = reg_size;
24522 info->fixed_size = RS6000_SAVE_AREA;
24523 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24524 if (cfun->calls_alloca)
24525 info->parm_size =
24526 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24527 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24528 else
24529 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24530 TARGET_ALTIVEC ? 16 : 8);
24531 if (FRAME_GROWS_DOWNWARD)
24532 info->vars_size
24533 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24534 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24535 - (info->fixed_size + info->vars_size + info->parm_size);
24536
24537 if (TARGET_ALTIVEC_ABI)
24538 info->vrsave_mask = compute_vrsave_mask ();
24539
24540 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24541 info->vrsave_size = 4;
24542
24543 compute_save_world_info (info);
24544
24545 /* Calculate the offsets. */
24546 switch (DEFAULT_ABI)
24547 {
24548 case ABI_NONE:
24549 default:
24550 gcc_unreachable ();
24551
24552 case ABI_AIX:
24553 case ABI_ELFv2:
24554 case ABI_DARWIN:
24555 info->fp_save_offset = -info->fp_size;
24556 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24557
24558 if (TARGET_ALTIVEC_ABI)
24559 {
24560 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24561
24562 /* Align stack so vector save area is on a quadword boundary.
24563 The padding goes above the vectors. */
24564 if (info->altivec_size != 0)
24565 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24566
24567 info->altivec_save_offset = info->vrsave_save_offset
24568 - info->altivec_padding_size
24569 - info->altivec_size;
24570 gcc_assert (info->altivec_size == 0
24571 || info->altivec_save_offset % 16 == 0);
24572
24573 /* Adjust for AltiVec case. */
24574 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24575 }
24576 else
24577 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24578
24579 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24580 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24581 info->lr_save_offset = 2*reg_size;
24582 break;
24583
24584 case ABI_V4:
24585 info->fp_save_offset = -info->fp_size;
24586 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24587 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24588
24589 if (TARGET_ALTIVEC_ABI)
24590 {
24591 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24592
24593 /* Align stack so vector save area is on a quadword boundary. */
24594 if (info->altivec_size != 0)
24595 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24596
24597 info->altivec_save_offset = info->vrsave_save_offset
24598 - info->altivec_padding_size
24599 - info->altivec_size;
24600
24601 /* Adjust for AltiVec case. */
24602 info->ehrd_offset = info->altivec_save_offset;
24603 }
24604 else
24605 info->ehrd_offset = info->cr_save_offset;
24606
24607 info->ehrd_offset -= ehrd_size;
24608 info->lr_save_offset = reg_size;
24609 }
24610
24611 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24612 info->save_size = RS6000_ALIGN (info->fp_size
24613 + info->gp_size
24614 + info->altivec_size
24615 + info->altivec_padding_size
24616 + ehrd_size
24617 + ehcr_size
24618 + info->cr_size
24619 + info->vrsave_size,
24620 save_align);
24621
24622 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24623
24624 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24625 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24626
24627 /* Determine if we need to save the link register. */
24628 if (info->calls_p
24629 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24630 && crtl->profile
24631 && !TARGET_PROFILE_KERNEL)
24632 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24633 #ifdef TARGET_RELOCATABLE
24634 || (DEFAULT_ABI == ABI_V4
24635 && (TARGET_RELOCATABLE || flag_pic > 1)
24636 && !constant_pool_empty_p ())
24637 #endif
24638 || rs6000_ra_ever_killed ())
24639 info->lr_save_p = 1;
24640
24641 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24642 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24643 && call_used_regs[STATIC_CHAIN_REGNUM]);
24644 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24645
24646 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24647 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24648 || !(info->savres_strategy & SAVE_INLINE_VRS)
24649 || !(info->savres_strategy & REST_INLINE_GPRS)
24650 || !(info->savres_strategy & REST_INLINE_FPRS)
24651 || !(info->savres_strategy & REST_INLINE_VRS))
24652 info->lr_save_p = 1;
24653
24654 if (info->lr_save_p)
24655 df_set_regs_ever_live (LR_REGNO, true);
24656
24657 /* Determine if we need to allocate any stack frame:
24658
24659 For AIX we need to push the stack if a frame pointer is needed
24660 (because the stack might be dynamically adjusted), if we are
24661 debugging, if we make calls, or if the sum of fp_save, gp_save,
24662 and local variables are more than the space needed to save all
24663 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24664 + 18*8 = 288 (GPR13 reserved).
24665
24666 For V.4 we don't have the stack cushion that AIX uses, but assume
24667 that the debugger can handle stackless frames. */
24668
24669 if (info->calls_p)
24670 info->push_p = 1;
24671
24672 else if (DEFAULT_ABI == ABI_V4)
24673 info->push_p = non_fixed_size != 0;
24674
24675 else if (frame_pointer_needed)
24676 info->push_p = 1;
24677
24678 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24679 info->push_p = 1;
24680
24681 else
24682 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24683
24684 return info;
24685 }
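
/* For instance (illustrative): a 64-bit AIX leaf function whose
   non-fixed size (vars + parms + save areas) totals 256 bytes fits in
   the 288-byte region the ABI guarantees below the stack pointer, so
   push_p stays 0 and no stack adjustment is emitted.  */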
24686
24687 static void
24688 debug_stack_info (rs6000_stack_t *info)
24689 {
24690 const char *abi_string;
24691
24692 if (! info)
24693 info = rs6000_stack_info ();
24694
24695 fprintf (stderr, "\nStack information for function %s:\n",
24696 ((current_function_decl && DECL_NAME (current_function_decl))
24697 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24698 : "<unknown>"));
24699
24700 switch (info->abi)
24701 {
24702 default: abi_string = "Unknown"; break;
24703 case ABI_NONE: abi_string = "NONE"; break;
24704 case ABI_AIX: abi_string = "AIX"; break;
24705 case ABI_ELFv2: abi_string = "ELFv2"; break;
24706 case ABI_DARWIN: abi_string = "Darwin"; break;
24707 case ABI_V4: abi_string = "V.4"; break;
24708 }
24709
24710 fprintf (stderr, "\tABI = %5s\n", abi_string);
24711
24712 if (TARGET_ALTIVEC_ABI)
24713 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24714
24715 if (info->first_gp_reg_save != 32)
24716 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24717
24718 if (info->first_fp_reg_save != 64)
24719 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24720
24721 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24722 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24723 info->first_altivec_reg_save);
24724
24725 if (info->lr_save_p)
24726 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24727
24728 if (info->cr_save_p)
24729 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24730
24731 if (info->vrsave_mask)
24732 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24733
24734 if (info->push_p)
24735 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24736
24737 if (info->calls_p)
24738 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24739
24740 if (info->gp_size)
24741 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24742
24743 if (info->fp_size)
24744 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24745
24746 if (info->altivec_size)
24747 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24748 info->altivec_save_offset);
24749
24750 if (info->vrsave_size)
24751 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24752 info->vrsave_save_offset);
24753
24754 if (info->lr_save_p)
24755 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24756
24757 if (info->cr_save_p)
24758 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24759
24760 if (info->varargs_save_offset)
24761 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24762
24763 if (info->total_size)
24764 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24765 info->total_size);
24766
24767 if (info->vars_size)
24768 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24769 info->vars_size);
24770
24771 if (info->parm_size)
24772 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24773
24774 if (info->fixed_size)
24775 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24776
24777 if (info->gp_size)
24778 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24779
24780 if (info->fp_size)
24781 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24782
24783 if (info->altivec_size)
24784 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24785
24786 if (info->vrsave_size)
24787 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24788
24789 if (info->altivec_padding_size)
24790 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24791 info->altivec_padding_size);
24792
24793 if (info->cr_size)
24794 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24795
24796 if (info->save_size)
24797 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24798
24799 if (info->reg_size != 4)
24800 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24801
24802 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24803
24804 fprintf (stderr, "\n");
24805 }
24806
24807 rtx
24808 rs6000_return_addr (int count, rtx frame)
24809 {
24810 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24811 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24812 if (count != 0
24813 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24814 {
24815 cfun->machine->ra_needs_full_frame = 1;
24816
24817 if (count == 0)
24818 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24819 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24820 frame = stack_pointer_rtx;
24821 rtx prev_frame_addr = memory_address (Pmode, frame);
24822 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24823 rtx lr_save_off = plus_constant (Pmode,
24824 prev_frame, RETURN_ADDRESS_OFFSET);
24825 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24826 return gen_rtx_MEM (Pmode, lr_save_addr);
24827 }
24828
24829 cfun->machine->ra_need_lr = 1;
24830 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24831 }
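
/* Usage sketch: __builtin_return_address (0) normally yields the LR
   pseudo from get_hard_reg_initial_val; for count != 0 (or when the
   prologue trashes LR, as for PIC on V.4/Darwin) the code above loads
   the back chain at *FRAME and then the saved LR at
   RETURN_ADDRESS_OFFSET from it.  */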
24832
24833 /* Say whether a function is a candidate for sibcall handling or not. */
24834
24835 static bool
24836 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24837 {
24838 tree fntype;
24839
24840 /* The sibcall epilogue may clobber the static chain register.
24841 ??? We could work harder and avoid that, but it's probably
24842 not worth the hassle in practice. */
24843 if (CALL_EXPR_STATIC_CHAIN (exp))
24844 return false;
24845
24846 if (decl)
24847 fntype = TREE_TYPE (decl);
24848 else
24849 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24850
24851 /* We can't do it if the called function has more vector parameters
24852 than the current function; there's nowhere to put the VRsave code. */
24853 if (TARGET_ALTIVEC_ABI
24854 && TARGET_ALTIVEC_VRSAVE
24855 && !(decl && decl == current_function_decl))
24856 {
24857 function_args_iterator args_iter;
24858 tree type;
24859 int nvreg = 0;
24860
24861 /* Functions with vector parameters are required to have a
24862 prototype, so the argument type info must be available
24863 here. */
24864 FOREACH_FUNCTION_ARGS (fntype, type, args_iter)
24865 if (TREE_CODE (type) == VECTOR_TYPE
24866 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24867 nvreg++;
24868
24869 FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
24870 if (TREE_CODE (type) == VECTOR_TYPE
24871 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24872 nvreg--;
24873
24874 if (nvreg > 0)
24875 return false;
24876 }
24877
24878 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24879 functions, because the callee may have a different TOC pointer to
24880 the caller and there's no way to ensure we restore the TOC when
24881 we return. With the secure-plt SYSV ABI we can't make non-local
24882 calls when -fpic/PIC because the plt call stubs use r30. */
24883 if (DEFAULT_ABI == ABI_DARWIN
24884 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24885 && decl
24886 && !DECL_EXTERNAL (decl)
24887 && !DECL_WEAK (decl)
24888 && (*targetm.binds_local_p) (decl))
24889 || (DEFAULT_ABI == ABI_V4
24890 && (!TARGET_SECURE_PLT
24891 || !flag_pic
24892 || (decl
24893 && (*targetm.binds_local_p) (decl)))))
24894 {
24895 tree attr_list = TYPE_ATTRIBUTES (fntype);
24896
24897 if (!lookup_attribute ("longcall", attr_list)
24898 || lookup_attribute ("shortcall", attr_list))
24899 return true;
24900 }
24901
24902 return false;
24903 }
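
/* Example (illustrative): under ELFv2, a tail call to a static
   function in the same translation unit binds locally and so may
   become a sibcall (unless marked "longcall"); a call to a global
   function is rejected because the callee might run with a different
   TOC pointer.  */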
24904
24905 static int
24906 rs6000_ra_ever_killed (void)
24907 {
24908 rtx_insn *top;
24909 rtx reg;
24910 rtx_insn *insn;
24911
24912 if (cfun->is_thunk)
24913 return 0;
24914
24915 if (cfun->machine->lr_save_state)
24916 return cfun->machine->lr_save_state - 1;
24917
24918 /* regs_ever_live has LR marked as used if any sibcalls are present,
24919 but this should not force saving and restoring in the
24920 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
24921 clobbers LR, so that is inappropriate. */
24922
24923 /* Also, the prologue can generate a store into LR that
24924 doesn't really count, like this:
24925
24926 move LR->R0
24927 bcl to set PIC register
24928 move LR->R31
24929 move R0->LR
24930
24931 When we're called from the epilogue, we need to avoid counting
24932 this as a store. */
24933
24934 push_topmost_sequence ();
24935 top = get_insns ();
24936 pop_topmost_sequence ();
24937 reg = gen_rtx_REG (Pmode, LR_REGNO);
24938
24939 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
24940 {
24941 if (INSN_P (insn))
24942 {
24943 if (CALL_P (insn))
24944 {
24945 if (!SIBLING_CALL_P (insn))
24946 return 1;
24947 }
24948 else if (find_regno_note (insn, REG_INC, LR_REGNO))
24949 return 1;
24950 else if (set_of (reg, insn) != NULL_RTX
24951 && !prologue_epilogue_contains (insn))
24952 return 1;
24953 }
24954 }
24955 return 0;
24956 }
24957 \f
24958 /* Emit instructions needed to load the TOC register.
24959 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
24960 a constant pool; or for SVR4 -fpic. */
24961
24962 void
24963 rs6000_emit_load_toc_table (int fromprolog)
24964 {
24965 rtx dest;
24966 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
24967
24968 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
24969 {
24970 char buf[30];
24971 rtx lab, tmp1, tmp2, got;
24972
24973 lab = gen_label_rtx ();
24974 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
24975 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24976 if (flag_pic == 2)
24977 {
24978 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24979 need_toc_init = 1;
24980 }
24981 else
24982 got = rs6000_got_sym ();
24983 tmp1 = tmp2 = dest;
24984 if (!fromprolog)
24985 {
24986 tmp1 = gen_reg_rtx (Pmode);
24987 tmp2 = gen_reg_rtx (Pmode);
24988 }
24989 emit_insn (gen_load_toc_v4_PIC_1 (lab));
24990 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
24991 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
24992 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
24993 }
24994 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
24995 {
24996 emit_insn (gen_load_toc_v4_pic_si ());
24997 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24998 }
24999 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25000 {
25001 char buf[30];
25002 rtx temp0 = (fromprolog
25003 ? gen_rtx_REG (Pmode, 0)
25004 : gen_reg_rtx (Pmode));
25005
25006 if (fromprolog)
25007 {
25008 rtx symF, symL;
25009
25010 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25011 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25012
25013 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25014 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25015
25016 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25017 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25018 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25019 }
25020 else
25021 {
25022 rtx tocsym, lab;
25023
25024 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25025 need_toc_init = 1;
25026 lab = gen_label_rtx ();
25027 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25028 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25029 if (TARGET_LINK_STACK)
25030 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25031 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25032 }
25033 emit_insn (gen_addsi3 (dest, temp0, dest));
25034 }
25035 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25036 {
25037 /* This is for AIX code running in non-PIC ELF32. */
25038 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25039
25040 need_toc_init = 1;
25041 emit_insn (gen_elf_high (dest, realsym));
25042 emit_insn (gen_elf_low (dest, dest, realsym));
25043 }
25044 else
25045 {
25046 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25047
25048 if (TARGET_32BIT)
25049 emit_insn (gen_load_toc_aix_si (dest));
25050 else
25051 emit_insn (gen_load_toc_aix_di (dest));
25052 }
25053 }
25054
25055 /* Emit instructions to restore the link register after determining where
25056 its value has been stored. */
25057
25058 void
25059 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25060 {
25061 rs6000_stack_t *info = rs6000_stack_info ();
25062 rtx operands[2];
25063
25064 operands[0] = source;
25065 operands[1] = scratch;
25066
25067 if (info->lr_save_p)
25068 {
25069 rtx frame_rtx = stack_pointer_rtx;
25070 HOST_WIDE_INT sp_offset = 0;
25071 rtx tmp;
25072
25073 if (frame_pointer_needed
25074 || cfun->calls_alloca
25075 || info->total_size > 32767)
25076 {
25077 tmp = gen_frame_mem (Pmode, frame_rtx);
25078 emit_move_insn (operands[1], tmp);
25079 frame_rtx = operands[1];
25080 }
25081 else if (info->push_p)
25082 sp_offset = info->total_size;
25083
25084 tmp = plus_constant (Pmode, frame_rtx,
25085 info->lr_save_offset + sp_offset);
25086 tmp = gen_frame_mem (Pmode, tmp);
25087 emit_move_insn (tmp, operands[0]);
25088 }
25089 else
25090 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25091
25092 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25093 state of lr_save_p so any change from here on would be a bug. In
25094 particular, stop rs6000_ra_ever_killed from considering the SET
25095 of lr we may have added just above. */
25096 cfun->machine->lr_save_state = info->lr_save_p + 1;
25097 }
25098
25099 static GTY(()) alias_set_type set = -1;
25100
25101 alias_set_type
25102 get_TOC_alias_set (void)
25103 {
25104 if (set == -1)
25105 set = new_alias_set ();
25106 return set;
25107 }
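
/* Usage sketch: TOC-based MEMs are tagged with this alias set, e.g.

     set_mem_alias_set (mem, get_TOC_alias_set ());

   so alias analysis can assume TOC loads never conflict with ordinary
   stores.  */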
25108
25109 /* This returns nonzero if the current function uses the TOC. This is
25110 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25111 is generated by the ABI_V4 load_toc_* patterns.
25112 Return 2 instead of 1 if the load_toc_* pattern is in the function
25113 partition that doesn't start the function. */
25114 #if TARGET_ELF
25115 static int
25116 uses_TOC (void)
25117 {
25118 rtx_insn *insn;
25119 int ret = 1;
25120
25121 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25122 {
25123 if (INSN_P (insn))
25124 {
25125 rtx pat = PATTERN (insn);
25126 int i;
25127
25128 if (GET_CODE (pat) == PARALLEL)
25129 for (i = 0; i < XVECLEN (pat, 0); i++)
25130 {
25131 rtx sub = XVECEXP (pat, 0, i);
25132 if (GET_CODE (sub) == USE)
25133 {
25134 sub = XEXP (sub, 0);
25135 if (GET_CODE (sub) == UNSPEC
25136 && XINT (sub, 1) == UNSPEC_TOC)
25137 return ret;
25138 }
25139 }
25140 }
25141 else if (crtl->has_bb_partition
25142 && NOTE_P (insn)
25143 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25144 ret = 2;
25145 }
25146 return 0;
25147 }
25148 #endif
25149
25150 rtx
25151 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25152 {
25153 rtx tocrel, tocreg, hi;
25154
25155 if (TARGET_DEBUG_ADDR)
25156 {
25157 if (GET_CODE (symbol) == SYMBOL_REF)
25158 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25159 XSTR (symbol, 0));
25160 else
25161 {
25162 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25163 GET_RTX_NAME (GET_CODE (symbol)));
25164 debug_rtx (symbol);
25165 }
25166 }
25167
25168 if (!can_create_pseudo_p ())
25169 df_set_regs_ever_live (TOC_REGISTER, true);
25170
25171 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25172 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25173 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25174 return tocrel;
25175
25176 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25177 if (largetoc_reg != NULL)
25178 {
25179 emit_move_insn (largetoc_reg, hi);
25180 hi = largetoc_reg;
25181 }
25182 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25183 }
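
/* Shape of the result (illustrative): for -mcmodel=small this is the
   bare (unspec [SYMBOL tocreg] UNSPEC_TOCREL); for the medium and
   large code models it is

     (lo_sum (high (unspec ...)) (unspec ...))

   i.e. an addis/addi (or addis plus a D-form load) pair, with the HIGH
   part optionally forced into LARGETOC_REG.  */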
25184
25185 /* Issue assembly directives that create a reference to the given DWARF
25186 FRAME_TABLE_LABEL from the current function section. */
25187 void
25188 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25189 {
25190 fprintf (asm_out_file, "\t.ref %s\n",
25191 (* targetm.strip_name_encoding) (frame_table_label));
25192 }
25193 \f
25194 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25195 and the change to the stack pointer. */
25196
25197 static void
25198 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25199 {
25200 rtvec p;
25201 int i;
25202 rtx regs[3];
25203
25204 i = 0;
25205 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25206 if (hard_frame_needed)
25207 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25208 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25209 || (hard_frame_needed
25210 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25211 regs[i++] = fp;
25212
25213 p = rtvec_alloc (i);
25214 while (--i >= 0)
25215 {
25216 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25217 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25218 }
25219
25220 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25221 }
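
/* The emitted insn is roughly (illustrative, hard frame pointer in use):

     (parallel [(set (mem:BLK (reg 1)) (const_int 0))
		(set (mem:BLK (reg 31)) (const_int 0))])

   which the optimizers must treat as a store to the whole frame, so
   frame accesses cannot be scheduled across the stack pointer update.  */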
25222
25223 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25224 and set the appropriate attributes for the generated insn. Return the
25225 first insn which adjusts the stack pointer or the last insn before
25226 the stack adjustment loop.
25227
25228 SIZE_INT is used to create the CFI note for the allocation.
25229
25230 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25231 since stacks grow to lower addresses, its runtime value is -SIZE_INT.
25232
25233 ORIG_SP contains the backchain value that must be stored at *sp. */
25234
25235 static rtx_insn *
25236 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25237 {
25238 rtx_insn *insn;
25239
25240 rtx size_rtx = GEN_INT (-size_int);
25241 if (size_int > 32767)
25242 {
25243 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25244 /* Need a note here so that try_split doesn't get confused. */
25245 if (get_last_insn () == NULL_RTX)
25246 emit_note (NOTE_INSN_DELETED);
25247 insn = emit_move_insn (tmp_reg, size_rtx);
25248 try_split (PATTERN (insn), insn, 0);
25249 size_rtx = tmp_reg;
25250 }
25251
25252 if (Pmode == SImode)
25253 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25254 stack_pointer_rtx,
25255 size_rtx,
25256 orig_sp));
25257 else
25258 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25259 stack_pointer_rtx,
25260 size_rtx,
25261 orig_sp));
25262 rtx par = PATTERN (insn);
25263 gcc_assert (GET_CODE (par) == PARALLEL);
25264 rtx set = XVECEXP (par, 0, 0);
25265 gcc_assert (GET_CODE (set) == SET);
25266 rtx mem = SET_DEST (set);
25267 gcc_assert (MEM_P (mem));
25268 MEM_NOTRAP_P (mem) = 1;
25269 set_mem_alias_set (mem, get_frame_alias_set ());
25270
25271 RTX_FRAME_RELATED_P (insn) = 1;
25272 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25273 gen_rtx_SET (stack_pointer_rtx,
25274 gen_rtx_PLUS (Pmode,
25275 stack_pointer_rtx,
25276 GEN_INT (-size_int))));
25277
25278 /* Emit a blockage to ensure the allocation/probing insns are
25279 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25280 note for similar reasons. */
25281 if (flag_stack_clash_protection)
25282 {
25283 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25284 emit_insn (gen_blockage ());
25285 }
25286
25287 return insn;
25288 }
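
/* For example (illustrative, 64-bit): a 112-byte frame emits roughly

     stdu 1,-112(1)

   allocating the space and storing the backchain in one insn, so the
   allocation is implicitly probed; sizes above 32767 first load the
   negated size into r0 and use the indexed form (stdux).  */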
25289
25290 static HOST_WIDE_INT
25291 get_stack_clash_protection_probe_interval (void)
25292 {
25293 return (HOST_WIDE_INT_1U
25294 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25295 }
25296
25297 static HOST_WIDE_INT
25298 get_stack_clash_protection_guard_size (void)
25299 {
25300 return (HOST_WIDE_INT_1U
25301 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25302 }
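
/* Both parameters are exponents, e.g. (illustrative)
   --param stack-clash-protection-probe-interval=12 yields a probe
   interval of 1 << 12 == 4096 bytes.  */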
25303
25304 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25305 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25306
25307 COPY_REG, if non-null, should contain a copy of the original
25308 stack pointer at exit from this function.
25309
25310 This is subtly different from the Ada probing in that it tries hard to
25311 prevent attacks that jump the stack guard. Thus it is never allowed to
25312 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25313 space without a suitable probe. */
25314 static rtx_insn *
25315 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25316 rtx copy_reg)
25317 {
25318 rtx orig_sp = copy_reg;
25319
25320 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25321
25322 /* Round the size down to a multiple of PROBE_INTERVAL. */
25323 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25324
25325 /* If explicitly requested,
25326 or the rounded size is not the same as the original size
25327 or the rounded size is greater than one probe interval,
25328 then we will need a copy of the original stack pointer. */
25329 if (rounded_size != orig_size
25330 || rounded_size > probe_interval
25331 || copy_reg)
25332 {
25333 /* If the caller did not request a copy of the incoming stack
25334 pointer, then we use r0 to hold the copy. */
25335 if (!copy_reg)
25336 orig_sp = gen_rtx_REG (Pmode, 0);
25337 emit_move_insn (orig_sp, stack_pointer_rtx);
25338 }
25339
25340 /* There are three cases here.
25341
25342 One is a single probe which is the most common and most efficiently
25343 implemented as it does not have to have a copy of the original
25344 stack pointer if there are no residuals.
25345
25346 Second is unrolled allocation/probes which we use if there's just
25347 a few of them. It needs to save the original stack pointer into a
25348 temporary for use as a source register in the allocation/probe.
25349
25350 Last is a loop. This is the most uncommon case and least efficient. */
25351 rtx_insn *retval = NULL;
25352 if (rounded_size == probe_interval)
25353 {
25354 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25355
25356 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25357 }
25358 else if (rounded_size <= 8 * probe_interval)
25359 {
25360 /* The ABI requires using the store with update insns to allocate
25361 space and store the backchain into the stack.
25362
25363 So we save the current stack pointer into a temporary, then
25364 emit the store-with-update insns to store the saved stack pointer
25365 into the right location in each new page. */
25366 for (int i = 0; i < rounded_size; i += probe_interval)
25367 {
25368 rtx_insn *insn
25369 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25370
25371 /* Save the first stack adjustment in RETVAL. */
25372 if (i == 0)
25373 retval = insn;
25374 }
25375
25376 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25377 }
25378 else
25379 {
25380 /* Compute the ending address. */
25381 rtx end_addr
25382 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25383 rtx rs = GEN_INT (-rounded_size);
25384 rtx_insn *insn;
25385 if (add_operand (rs, Pmode))
25386 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25387 else
25388 {
25389 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25390 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25391 stack_pointer_rtx));
25392 /* Describe the effect of INSN to the CFI engine. */
25393 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25394 gen_rtx_SET (end_addr,
25395 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25396 rs)));
25397 }
25398 RTX_FRAME_RELATED_P (insn) = 1;
25399
25400 /* Emit the loop. */
25401 if (TARGET_64BIT)
25402 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25403 stack_pointer_rtx, orig_sp,
25404 end_addr));
25405 else
25406 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25407 stack_pointer_rtx, orig_sp,
25408 end_addr));
25409 RTX_FRAME_RELATED_P (retval) = 1;
25410 /* Describe the effect of INSN to the CFI engine. */
25411 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25412 gen_rtx_SET (stack_pointer_rtx, end_addr));
25413
25414 /* Emit a blockage to ensure the allocation/probing insns are
25415 not optimized, combined, removed, etc. Other cases handle this
25416 within their call to rs6000_emit_allocate_stack_1. */
25417 emit_insn (gen_blockage ());
25418
25419 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25420 }
25421
25422 if (orig_size != rounded_size)
25423 {
25424 /* Allocate (and implicitly probe) any residual space. */
25425 HOST_WIDE_INT residual = orig_size - rounded_size;
25426
25427 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25428
25429 /* If the residual was the only allocation, then we can return the
25430 allocating insn. */
25431 if (!retval)
25432 retval = insn;
25433 }
25434
25435 return retval;
25436 }
25437
25438 /* Emit the correct code for allocating stack space, as insns.
25439 If COPY_REG, make sure a copy of the old frame is left there.
25440 The generated code may use hard register 0 as a temporary. */
25441
25442 static rtx_insn *
25443 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25444 {
25445 rtx_insn *insn;
25446 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25447 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25448 rtx todec = gen_int_mode (-size, Pmode);
25449
25450 if (INTVAL (todec) != -size)
25451 {
25452 warning (0, "stack frame too large");
25453 emit_insn (gen_trap ());
25454 return 0;
25455 }
25456
25457 if (crtl->limit_stack)
25458 {
25459 if (REG_P (stack_limit_rtx)
25460 && REGNO (stack_limit_rtx) > 1
25461 && REGNO (stack_limit_rtx) <= 31)
25462 {
25463 rtx_insn *insn
25464 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25465 gcc_assert (insn);
25466 emit_insn (insn);
25467 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25468 }
25469 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25470 && TARGET_32BIT
25471 && DEFAULT_ABI == ABI_V4
25472 && !flag_pic)
25473 {
25474 rtx toload = gen_rtx_CONST (VOIDmode,
25475 gen_rtx_PLUS (Pmode,
25476 stack_limit_rtx,
25477 GEN_INT (size)));
25478
25479 emit_insn (gen_elf_high (tmp_reg, toload));
25480 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25481 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25482 const0_rtx));
25483 }
25484 else
25485 warning (0, "stack limit expression is not supported");
25486 }
25487
25488 if (flag_stack_clash_protection)
25489 {
25490 if (size < get_stack_clash_protection_guard_size ())
25491 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25492 else
25493 {
25494 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25495 copy_reg);
25496
25497 /* If we asked for a copy with an offset, then we still need to add in
25498 the offset. */
25499 if (copy_reg && copy_off)
25500 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25501 return insn;
25502 }
25503 }
25504
25505 if (copy_reg)
25506 {
25507 if (copy_off != 0)
25508 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25509 else
25510 emit_move_insn (copy_reg, stack_reg);
25511 }
25512
25513 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25514 it now and set the alias set/attributes. The above gen_*_update
25515 calls will generate a PARALLEL with the MEM set being the first
25516 operation. */
25517 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25518 return insn;
25519 }
25520
25521 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25522
25523 #if PROBE_INTERVAL > 32768
25524 #error Cannot use indexed addressing mode for stack probing
25525 #endif
25526
25527 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25528 inclusive. These are offsets from the current stack pointer. */
25529
25530 static void
25531 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25532 {
25533 /* See if we have a constant small number of probes to generate. If so,
25534 that's the easy case. */
25535 if (first + size <= 32768)
25536 {
25537 HOST_WIDE_INT i;
25538
25539 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25540 it exceeds SIZE. If only one probe is needed, this will not
25541 generate any code. Then probe at FIRST + SIZE. */
25542 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25543 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25544 -(first + i)));
25545
25546 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25547 -(first + size)));
25548 }
25549
25550 /* Otherwise, do the same as above, but in a loop. Note that we must be
25551 extra careful with variables wrapping around because we might be at
25552 the very top (or the very bottom) of the address space and we have
25553 to be able to handle this case properly; in particular, we use an
25554 equality test for the loop condition. */
25555 else
25556 {
25557 HOST_WIDE_INT rounded_size;
25558 rtx r12 = gen_rtx_REG (Pmode, 12);
25559 rtx r0 = gen_rtx_REG (Pmode, 0);
25560
25561 /* Sanity check for the addressing mode we're going to use. */
25562 gcc_assert (first <= 32768);
25563
25564 /* Step 1: round SIZE to the previous multiple of the interval. */
25565
25566 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25567
25568
25569 /* Step 2: compute initial and final value of the loop counter. */
25570
25571 /* TEST_ADDR = SP + FIRST. */
25572 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25573 -first)));
25574
25575 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25576 if (rounded_size > 32768)
25577 {
25578 emit_move_insn (r0, GEN_INT (-rounded_size));
25579 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25580 }
25581 else
25582 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25583 -rounded_size)));
25584
25585
25586 /* Step 3: the loop
25587
25588 do
25589 {
25590 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25591 probe at TEST_ADDR
25592 }
25593 while (TEST_ADDR != LAST_ADDR)
25594
25595 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25596 until it is equal to ROUNDED_SIZE. */
25597
25598 if (TARGET_64BIT)
25599 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25600 else
25601 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25602
25603
25604 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25605 that SIZE is equal to ROUNDED_SIZE. */
25606
25607 if (size != rounded_size)
25608 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25609 }
25610 }
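/* A worked example of the routine above, with hypothetical numbers and
   PROBE_INTERVAL assumed to be 4096: for FIRST == 4096 and SIZE == 10000,
   FIRST + SIZE <= 32768 so the unrolled path runs, probing at sp-8192 and
   sp-12288 in the loop and finally at sp-14096 == sp-(FIRST+SIZE).  */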
25611
25612 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25613 addresses, not offsets. */
25614
25615 static const char *
25616 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25617 {
25618 static int labelno = 0;
25619 char loop_lab[32];
25620 rtx xops[2];
25621
25622 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25623
25624 /* Loop. */
25625 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25626
25627 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25628 xops[0] = reg1;
25629 xops[1] = GEN_INT (-PROBE_INTERVAL);
25630 output_asm_insn ("addi %0,%0,%1", xops);
25631
25632 /* Probe at TEST_ADDR. */
25633 xops[1] = gen_rtx_REG (Pmode, 0);
25634 output_asm_insn ("stw %1,0(%0)", xops);
25635
25636 /* Test if TEST_ADDR == LAST_ADDR. */
25637 xops[1] = reg2;
25638 if (TARGET_64BIT)
25639 output_asm_insn ("cmpd 0,%0,%1", xops);
25640 else
25641 output_asm_insn ("cmpw 0,%0,%1", xops);
25642
25643 /* Branch. */
25644 fputs ("\tbne 0,", asm_out_file);
25645 assemble_name_raw (asm_out_file, loop_lab);
25646 fputc ('\n', asm_out_file);
25647
25648 return "";
25649 }
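/* A sketch of the 32-bit loop the routine above prints, assuming
   PROBE_INTERVAL == 4096 and the r12/r0 register choices made by
   rs6000_emit_probe_stack_range (label name illustrative):

   .LPSRL0:
	addi 12,12,-4096
	stw 0,0(12)
	cmpw 0,12,0
	bne 0,.LPSRL0  */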
25650
25651 /* This function is called when rs6000_frame_related is processing
25652 SETs within a PARALLEL, and returns whether the REGNO save ought to
25653 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25654 for out-of-line register save functions, store multiple, and the
25655 Darwin world_save. They may contain registers that don't really
25656 need saving. */
25657
25658 static bool
25659 interesting_frame_related_regno (unsigned int regno)
25660 {
25661 /* Apparent saves of r0 are actually saving LR. It doesn't make
25662 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25663 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25664 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25665 as frame related. */
25666 if (regno == 0)
25667 return true;
25668 /* If we see CR2 then we are here on a Darwin world save. Saves of
25669 CR2 signify the whole CR is being saved. This is a long-standing
25670 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25671 that CR needs to be saved. */
25672 if (regno == CR2_REGNO)
25673 return true;
25674 /* Omit frame info for any user-defined global regs. If frame info
25675 is supplied for them, frame unwinding will restore a user reg.
25676 Also omit frame info for any reg we don't need to save, as that
25677 bloats frame info and can cause problems with shrink wrapping.
25678 Since global regs won't be seen as needing to be saved, both of
25679 these conditions are covered by save_reg_p. */
25680 return save_reg_p (regno);
25681 }
25682
25683 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25684 addresses, not offsets.
25685
25686 REG2 contains the backchain that must be stored into *sp at each allocation.
25687
25688 This is subtly different from the Ada probing above in that it tries hard
25689 to prevent attacks that jump the stack guard. Thus, it is never allowed
25690 to allocate more than PROBE_INTERVAL bytes of stack space without a
25691 suitable probe. */
25692
25693 static const char *
25694 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25695 {
25696 static int labelno = 0;
25697 char loop_lab[32];
25698 rtx xops[3];
25699
25700 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25701
25702 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25703
25704 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25705
25706 /* This allocates and probes. */
25707 xops[0] = reg1;
25708 xops[1] = reg2;
25709 xops[2] = GEN_INT (-probe_interval);
25710 if (TARGET_64BIT)
25711 output_asm_insn ("stdu %1,%2(%0)", xops);
25712 else
25713 output_asm_insn ("stwu %1,%2(%0)", xops);
25714
25715 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25716 xops[0] = reg1;
25717 xops[1] = reg3;
25718 if (TARGET_64BIT)
25719 output_asm_insn ("cmpd 0,%0,%1", xops);
25720 else
25721 output_asm_insn ("cmpw 0,%0,%1", xops);
25722
25723 fputs ("\tbne 0,", asm_out_file);
25724 assemble_name_raw (asm_out_file, loop_lab);
25725 fputc ('\n', asm_out_file);
25726
25727 return "";
25728 }
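/* Likewise, a sketch of the 64-bit stack-clash loop above, assuming a
   hypothetical 4096-byte probe interval, with REG1 the stack pointer
   being decremented, REG2 the backchain value and REG3 the end address:

   .LPSRL1:
	stdu <reg2>,-4096(<reg1>)
	cmpd 0,<reg1>,<reg3>
	bne 0,.LPSRL1  */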
25729
25730 /* Wrapper around the output_probe_stack_range routines. */
25731 const char *
25732 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25733 {
25734 if (flag_stack_clash_protection)
25735 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25736 else
25737 return output_probe_stack_range_1 (reg1, reg3);
25738 }
25739
25740 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25741 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25742 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25743 deduce these equivalences by itself so it wasn't necessary to hold
25744 its hand so much. Don't be tempted to always supply d2_f_d_e with
25745 the actual cfa register, i.e. r31 when we are using a hard frame
25746 pointer. That fails when saving regs off r1, and sched moves the
25747 r31 setup past the reg saves. */
25748
25749 static rtx_insn *
25750 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25751 rtx reg2, rtx repl2)
25752 {
25753 rtx repl;
25754
25755 if (REGNO (reg) == STACK_POINTER_REGNUM)
25756 {
25757 gcc_checking_assert (val == 0);
25758 repl = NULL_RTX;
25759 }
25760 else
25761 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25762 GEN_INT (val));
25763
25764 rtx pat = PATTERN (insn);
25765 if (!repl && !reg2)
25766 {
25767 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25768 if (GET_CODE (pat) == PARALLEL)
25769 for (int i = 0; i < XVECLEN (pat, 0); i++)
25770 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25771 {
25772 rtx set = XVECEXP (pat, 0, i);
25773
25774 if (!REG_P (SET_SRC (set))
25775 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25776 RTX_FRAME_RELATED_P (set) = 1;
25777 }
25778 RTX_FRAME_RELATED_P (insn) = 1;
25779 return insn;
25780 }
25781
25782 /* We expect that 'pat' is either a SET or a PARALLEL containing
25783 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25784 are important so they all have to be marked RTX_FRAME_RELATED_P.
25785 Call simplify_replace_rtx on the SETs rather than the whole insn
25786 so as to leave the other stuff alone (for example USE of r12). */
25787
25788 set_used_flags (pat);
25789 if (GET_CODE (pat) == SET)
25790 {
25791 if (repl)
25792 pat = simplify_replace_rtx (pat, reg, repl);
25793 if (reg2)
25794 pat = simplify_replace_rtx (pat, reg2, repl2);
25795 }
25796 else if (GET_CODE (pat) == PARALLEL)
25797 {
25798 pat = shallow_copy_rtx (pat);
25799 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25800
25801 for (int i = 0; i < XVECLEN (pat, 0); i++)
25802 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25803 {
25804 rtx set = XVECEXP (pat, 0, i);
25805
25806 if (repl)
25807 set = simplify_replace_rtx (set, reg, repl);
25808 if (reg2)
25809 set = simplify_replace_rtx (set, reg2, repl2);
25810 XVECEXP (pat, 0, i) = set;
25811
25812 if (!REG_P (SET_SRC (set))
25813 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25814 RTX_FRAME_RELATED_P (set) = 1;
25815 }
25816 }
25817 else
25818 gcc_unreachable ();
25819
25820 RTX_FRAME_RELATED_P (insn) = 1;
25821 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25822
25823 return insn;
25824 }
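/* An illustration of the note built above, with hypothetical RTL: for
   REG == r11 and VAL == -144, a register save recorded in the insn as
     (set (mem (plus (reg 11) (const_int 8))) (reg 31))
   appears in the REG_FRAME_RELATED_EXPR note, after simplification, as
     (set (mem (plus (reg 1) (const_int -136))) (reg 31))
   so the unwinder sees the slot relative to the stack pointer.  */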
25825
25826 /* Returns an insn that has a vrsave set operation with the
25827 appropriate CLOBBERs. */
25828
25829 static rtx
25830 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25831 {
25832 int nclobs, i;
25833 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25834 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25835
25836 clobs[0]
25837 = gen_rtx_SET (vrsave,
25838 gen_rtx_UNSPEC_VOLATILE (SImode,
25839 gen_rtvec (2, reg, vrsave),
25840 UNSPECV_SET_VRSAVE));
25841
25842 nclobs = 1;
25843
25844 /* We need to clobber the registers in the mask so the scheduler
25845 does not move sets to VRSAVE before sets of AltiVec registers.
25846
25847 However, if the function receives nonlocal gotos, reload will set
25848 all call saved registers live. We will end up with:
25849
25850 (set (reg 999) (mem))
25851 (parallel [ (set (reg vrsave) (unspec blah))
25852 (clobber (reg 999))])
25853
25854 The clobber will cause the store into reg 999 to be dead, and
25855 flow will attempt to delete an epilogue insn. In this case, we
25856 need an unspec use/set of the register. */
25857
25858 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25859 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25860 {
25861 if (!epiloguep || call_used_regs[i])
25862 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25863 gen_rtx_REG (V4SImode, i));
25864 else
25865 {
25866 rtx reg = gen_rtx_REG (V4SImode, i);
25867
25868 clobs[nclobs++]
25869 = gen_rtx_SET (reg,
25870 gen_rtx_UNSPEC (V4SImode,
25871 gen_rtvec (1, reg), 27));
25872 }
25873 }
25874
25875 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25876
25877 for (i = 0; i < nclobs; ++i)
25878 XVECEXP (insn, 0, i) = clobs[i];
25879
25880 return insn;
25881 }
25882
25883 static rtx
25884 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25885 {
25886 rtx addr, mem;
25887
25888 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25889 mem = gen_frame_mem (GET_MODE (reg), addr);
25890 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25891 }
25892
25893 static rtx
25894 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25895 {
25896 return gen_frame_set (reg, frame_reg, offset, false);
25897 }
25898
25899 static rtx
25900 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25901 {
25902 return gen_frame_set (reg, frame_reg, offset, true);
25903 }
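/* For example (illustrative only), gen_frame_store (r31, r1, -8) on a
   64-bit target builds
     (set (mem:DI (plus:DI (reg 1) (const_int -8))) (reg:DI 31))
   and gen_frame_load builds the SET with source and destination
   swapped.  */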
25904
25905 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25906 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25907
25908 static rtx_insn *
25909 emit_frame_save (rtx frame_reg, machine_mode mode,
25910 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25911 {
25912 rtx reg;
25913
25914 /* Some cases that need register indexed addressing. */
25915 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25916 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25917
25918 reg = gen_rtx_REG (mode, regno);
25919 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25920 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25921 NULL_RTX, NULL_RTX);
25922 }
25923
25924 /* Emit an offset memory reference suitable for a frame store, while
25925 converting to a valid addressing mode. */
25926
25927 static rtx
25928 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25929 {
25930 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25931 }
25932
25933 #ifndef TARGET_FIX_AND_CONTINUE
25934 #define TARGET_FIX_AND_CONTINUE 0
25935 #endif
25936
25937 /* The first saved register is GPR 13 or 14, FPR 14, or VR 20; we need the smallest. */
25938 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25939 #define LAST_SAVRES_REGISTER 31
25940 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25941
25942 enum {
25943 SAVRES_LR = 0x1,
25944 SAVRES_SAVE = 0x2,
25945 SAVRES_REG = 0x0c,
25946 SAVRES_GPR = 0,
25947 SAVRES_FPR = 4,
25948 SAVRES_VR = 8
25949 };
25950
25951 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25952
25953 /* Temporary holding space for an out-of-line register save/restore
25954 routine name. */
25955 static char savres_routine_name[30];
25956
25957 /* Return the name for an out-of-line register save/restore routine.
25958 SEL selects the register class and LR handling; see the SAVRES_* flags. */
25959
25960 static char *
25961 rs6000_savres_routine_name (int regno, int sel)
25962 {
25963 const char *prefix = "";
25964 const char *suffix = "";
25965
25966 /* Different targets are supposed to define
25967 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25968 routine name could be defined with:
25969
25970 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25971
25972 This is a nice idea in theory, but in practice, things are
25973 complicated in several ways:
25974
25975 - ELF targets have save/restore routines for GPRs.
25976
25977 - PPC64 ELF targets have routines for save/restore of GPRs that
25978 differ in what they do with the link register, so having a set
25979 prefix doesn't work. (We only use one of the save routines at
25980 the moment, though.)
25981
25982 - PPC32 ELF targets have "exit" versions of the restore routines
25983 that restore the link register and can save some extra space.
25984 These require an extra suffix. (There are also "tail" versions
25985 of the restore routines and "GOT" versions of the save routines,
25986 but we don't generate those at present. Same problems apply,
25987 though.)
25988
25989 We deal with all this by synthesizing our own prefix/suffix and
25990 using that for the simple sprintf call shown above. */
25991 if (DEFAULT_ABI == ABI_V4)
25992 {
25993 if (TARGET_64BIT)
25994 goto aix_names;
25995
25996 if ((sel & SAVRES_REG) == SAVRES_GPR)
25997 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25998 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25999 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26000 else if ((sel & SAVRES_REG) == SAVRES_VR)
26001 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26002 else
26003 abort ();
26004
26005 if ((sel & SAVRES_LR))
26006 suffix = "_x";
26007 }
26008 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26009 {
26010 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26011 /* No out-of-line save/restore routines for GPRs on AIX. */
26012 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26013 #endif
26014
26015 aix_names:
26016 if ((sel & SAVRES_REG) == SAVRES_GPR)
26017 prefix = ((sel & SAVRES_SAVE)
26018 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26019 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26020 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26021 {
26022 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26023 if ((sel & SAVRES_LR))
26024 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26025 else
26026 #endif
26027 {
26028 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26029 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26030 }
26031 }
26032 else if ((sel & SAVRES_REG) == SAVRES_VR)
26033 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26034 else
26035 abort ();
26036 }
26037
26038 if (DEFAULT_ABI == ABI_DARWIN)
26039 {
26040 /* The Darwin approach is (slightly) different, in order to be
26041 compatible with code generated by the system toolchain. There is a
26042 single symbol for the start of the save sequence, and the code here
26043 embeds an offset into that code on the basis of the first register
26044 to be saved. */
26045 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26046 if ((sel & SAVRES_REG) == SAVRES_GPR)
26047 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26048 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26049 (regno - 13) * 4, prefix, regno);
26050 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26051 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26052 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26053 else if ((sel & SAVRES_REG) == SAVRES_VR)
26054 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26055 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26056 else
26057 abort ();
26058 }
26059 else
26060 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26061
26062 return savres_routine_name;
26063 }
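/* Two example names produced by the routine above: on 32-bit V.4,
   restoring GPRs from r29 with the "exit" variant yields
   "_restgpr_29_x"; on AIX/ELFv2, saving GPRs from r14 together with LR
   yields "_savegpr0_14".  */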
26064
26065 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26066 SEL selects the register class and LR handling; see the SAVRES_* flags. */
26067
26068 static rtx
26069 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26070 {
26071 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26072 ? info->first_gp_reg_save
26073 : (sel & SAVRES_REG) == SAVRES_FPR
26074 ? info->first_fp_reg_save - 32
26075 : (sel & SAVRES_REG) == SAVRES_VR
26076 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26077 : -1);
26078 rtx sym;
26079 int select = sel;
26080
26081 /* Don't generate bogus routine names. */
26082 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26083 && regno <= LAST_SAVRES_REGISTER
26084 && select >= 0 && select <= 12);
26085
26086 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26087
26088 if (sym == NULL)
26089 {
26090 char *name;
26091
26092 name = rs6000_savres_routine_name (regno, sel);
26093
26094 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26095 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26096 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26097 }
26098
26099 return sym;
26100 }
26101
26102 /* Emit a sequence of insns, including a stack tie if needed, for
26103 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26104 reset the stack pointer, but move the base of the frame into
26105 reg UPDT_REGNO for use by out-of-line register restore routines. */
26106
26107 static rtx
26108 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26109 unsigned updt_regno)
26110 {
26111 /* If there is nothing to do, don't do anything. */
26112 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26113 return NULL_RTX;
26114
26115 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26116
26117 /* This blockage is needed so that sched doesn't decide to move
26118 the sp change before the register restores. */
26119 if (DEFAULT_ABI == ABI_V4)
26120 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26121 GEN_INT (frame_off)));
26122
26123 /* If we are restoring registers out-of-line, we will be using the
26124 "exit" variants of the restore routines, which will reset the
26125 stack for us. But we do need to point updt_reg into the
26126 right place for those routines. */
26127 if (frame_off != 0)
26128 return emit_insn (gen_add3_insn (updt_reg_rtx,
26129 frame_reg_rtx, GEN_INT (frame_off)));
26130 else
26131 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26134 }
26135
26136 /* Return the register number used as a pointer by out-of-line
26137 save/restore functions. */
26138
26139 static inline unsigned
26140 ptr_regno_for_savres (int sel)
26141 {
26142 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26143 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26144 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26145 }
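/* The same mapping in tabular form:
     ABI_AIX/ABI_ELFv2: FPRs, or any routine touching LR -> r1; else r12.
     ABI_DARWIN:        FPRs -> r1; else r11.
     ABI_V4:            always r11.  */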
26146
26147 /* Construct a parallel rtx describing the effect of a call to an
26148 out-of-line register save/restore routine, and emit the insn
26149 or jump_insn as appropriate. */
26150
26151 static rtx_insn *
26152 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26153 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26154 machine_mode reg_mode, int sel)
26155 {
26156 int i;
26157 int offset, start_reg, end_reg, n_regs, use_reg;
26158 int reg_size = GET_MODE_SIZE (reg_mode);
26159 rtx sym;
26160 rtvec p;
26161 rtx par;
26162 rtx_insn *insn;
26163
26164 offset = 0;
26165 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26166 ? info->first_gp_reg_save
26167 : (sel & SAVRES_REG) == SAVRES_FPR
26168 ? info->first_fp_reg_save
26169 : (sel & SAVRES_REG) == SAVRES_VR
26170 ? info->first_altivec_reg_save
26171 : -1);
26172 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26173 ? 32
26174 : (sel & SAVRES_REG) == SAVRES_FPR
26175 ? 64
26176 : (sel & SAVRES_REG) == SAVRES_VR
26177 ? LAST_ALTIVEC_REGNO + 1
26178 : -1);
26179 n_regs = end_reg - start_reg;
26180 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26181 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26182 + n_regs);
26183
26184 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26185 RTVEC_ELT (p, offset++) = ret_rtx;
26186
26187 RTVEC_ELT (p, offset++)
26188 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26189
26190 sym = rs6000_savres_routine_sym (info, sel);
26191 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26192
26193 use_reg = ptr_regno_for_savres (sel);
26194 if ((sel & SAVRES_REG) == SAVRES_VR)
26195 {
26196 /* Vector regs are saved/restored using [reg+reg] addressing. */
26197 RTVEC_ELT (p, offset++)
26198 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26199 RTVEC_ELT (p, offset++)
26200 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26201 }
26202 else
26203 RTVEC_ELT (p, offset++)
26204 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26205
26206 for (i = 0; i < end_reg - start_reg; i++)
26207 RTVEC_ELT (p, i + offset)
26208 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26209 frame_reg_rtx, save_area_offset + reg_size * i,
26210 (sel & SAVRES_SAVE) != 0);
26211
26212 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26213 RTVEC_ELT (p, i + offset)
26214 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26215
26216 par = gen_rtx_PARALLEL (VOIDmode, p);
26217
26218 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26219 {
26220 insn = emit_jump_insn (par);
26221 JUMP_LABEL (insn) = ret_rtx;
26222 }
26223 else
26224 insn = emit_insn (par);
26225 return insn;
26226 }
26227
26228 /* Emit prologue code to store CR fields that need to be saved into REG. This
26229 function should only be called when moving the non-volatile CRs to REG; it
26230 is not a general-purpose routine to move the entire set of CRs to REG.
26231 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26232 volatile CRs. */
26233
26234 static void
26235 rs6000_emit_prologue_move_from_cr (rtx reg)
26236 {
26237 /* Only the ELFv2 ABI allows storing only selected fields. */
26238 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26239 {
26240 int i, cr_reg[8], count = 0;
26241
26242 /* Collect CR fields that must be saved. */
26243 for (i = 0; i < 8; i++)
26244 if (save_reg_p (CR0_REGNO + i))
26245 cr_reg[count++] = i;
26246
26247 /* If it's just a single one, use mfcrf. */
26248 if (count == 1)
26249 {
26250 rtvec p = rtvec_alloc (1);
26251 rtvec r = rtvec_alloc (2);
26252 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26253 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26254 RTVEC_ELT (p, 0)
26255 = gen_rtx_SET (reg,
26256 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26257
26258 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26259 return;
26260 }
26261
26262 /* ??? It might be better to handle count == 2 / 3 cases here
26263 as well, using logical operations to combine the values. */
26264 }
26265
26266 emit_insn (gen_prologue_movesi_from_cr (reg));
26267 }
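/* For example (illustrative): if CR2 is the only field needing to be
   preserved, the single-field path above builds the UNSPEC with mask
   1 << (7 - 2) == 0x20, selecting CR field 2, so a single mfcrf can be
   emitted instead of a full mfcr.  */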
26268
26269 /* Return whether the split-stack arg pointer (r12) is used. */
26270
26271 static bool
26272 split_stack_arg_pointer_used_p (void)
26273 {
26274 /* If the pseudo holding the arg pointer is no longer a pseudo,
26275 then the arg pointer is used. */
26276 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26277 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26278 || (REGNO (cfun->machine->split_stack_arg_pointer)
26279 < FIRST_PSEUDO_REGISTER)))
26280 return true;
26281
26282 /* Unfortunately we also need to do some code scanning, since
26283 r12 may have been substituted for the pseudo. */
26284 rtx_insn *insn;
26285 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26286 FOR_BB_INSNS (bb, insn)
26287 if (NONDEBUG_INSN_P (insn))
26288 {
26289 /* A call destroys r12. */
26290 if (CALL_P (insn))
26291 return false;
26292
26293 df_ref use;
26294 FOR_EACH_INSN_USE (use, insn)
26295 {
26296 rtx x = DF_REF_REG (use);
26297 if (REG_P (x) && REGNO (x) == 12)
26298 return true;
26299 }
26300 df_ref def;
26301 FOR_EACH_INSN_DEF (def, insn)
26302 {
26303 rtx x = DF_REF_REG (def);
26304 if (REG_P (x) && REGNO (x) == 12)
26305 return false;
26306 }
26307 }
26308 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26309 }
26310
26311 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26312
26313 static bool
26314 rs6000_global_entry_point_needed_p (void)
26315 {
26316 /* Only needed for the ELFv2 ABI. */
26317 if (DEFAULT_ABI != ABI_ELFv2)
26318 return false;
26319
26320 /* With -msingle-pic-base, we assume the whole program shares the same
26321 TOC, so no global entry point prologues are needed anywhere. */
26322 if (TARGET_SINGLE_PIC_BASE)
26323 return false;
26324
26325 /* Ensure we have a global entry point for thunks. ??? We could
26326 avoid that if the target routine doesn't need a global entry point,
26327 but we do not know whether this is the case at this point. */
26328 if (cfun->is_thunk)
26329 return true;
26330
26331 /* For regular functions, rs6000_emit_prologue sets this flag if the
26332 routine ever uses the TOC pointer. */
26333 return cfun->machine->r2_setup_needed;
26334 }
26335
26336 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26337 static sbitmap
26338 rs6000_get_separate_components (void)
26339 {
26340 rs6000_stack_t *info = rs6000_stack_info ();
26341
26342 if (WORLD_SAVE_P (info))
26343 return NULL;
26344
26345 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26346 && !(info->savres_strategy & REST_MULTIPLE));
26347
26348 /* Component 0 is the save/restore of LR (done via GPR0).
26349 Component 2 is the save of the TOC (GPR2).
26350 Components 13..31 are the save/restore of GPR13..GPR31.
26351 Components 46..63 are the save/restore of FPR14..FPR31. */
26352
26353 cfun->machine->n_components = 64;
26354
26355 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26356 bitmap_clear (components);
26357
26358 int reg_size = TARGET_32BIT ? 4 : 8;
26359 int fp_reg_size = 8;
26360
26361 /* The GPRs we need saved to the frame. */
26362 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26363 && (info->savres_strategy & REST_INLINE_GPRS))
26364 {
26365 int offset = info->gp_save_offset;
26366 if (info->push_p)
26367 offset += info->total_size;
26368
26369 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26370 {
26371 if (IN_RANGE (offset, -0x8000, 0x7fff)
26372 && save_reg_p (regno))
26373 bitmap_set_bit (components, regno);
26374
26375 offset += reg_size;
26376 }
26377 }
26378
26379 /* Don't mess with the hard frame pointer. */
26380 if (frame_pointer_needed)
26381 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26382
26383 /* Don't mess with the fixed TOC register. */
26384 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26385 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26386 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26387 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26388
26389 /* The FPRs we need saved to the frame. */
26390 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26391 && (info->savres_strategy & REST_INLINE_FPRS))
26392 {
26393 int offset = info->fp_save_offset;
26394 if (info->push_p)
26395 offset += info->total_size;
26396
26397 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26398 {
26399 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26400 bitmap_set_bit (components, regno);
26401
26402 offset += fp_reg_size;
26403 }
26404 }
26405
26406 /* Optimize LR save and restore if we can. This is component 0. Any
26407 out-of-line register save/restore routines need LR. */
26408 if (info->lr_save_p
26409 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26410 && (info->savres_strategy & SAVE_INLINE_GPRS)
26411 && (info->savres_strategy & REST_INLINE_GPRS)
26412 && (info->savres_strategy & SAVE_INLINE_FPRS)
26413 && (info->savres_strategy & REST_INLINE_FPRS)
26414 && (info->savres_strategy & SAVE_INLINE_VRS)
26415 && (info->savres_strategy & REST_INLINE_VRS))
26416 {
26417 int offset = info->lr_save_offset;
26418 if (info->push_p)
26419 offset += info->total_size;
26420 if (IN_RANGE (offset, -0x8000, 0x7fff))
26421 bitmap_set_bit (components, 0);
26422 }
26423
26424 /* Optimize saving the TOC. This is component 2. */
26425 if (cfun->machine->save_toc_in_prologue)
26426 bitmap_set_bit (components, 2);
26427
26428 return components;
26429 }
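/* Note that the GPR and FPR component numbers are simply the hard
   register numbers: GPR20 is component 20, and FPR18, being hard
   register 50, is component 50, matching the 46..63 range quoted
   above.  */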
26430
26431 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26432 static sbitmap
26433 rs6000_components_for_bb (basic_block bb)
26434 {
26435 rs6000_stack_t *info = rs6000_stack_info ();
26436
26437 bitmap in = DF_LIVE_IN (bb);
26438 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26439 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26440
26441 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26442 bitmap_clear (components);
26443
26444 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26445
26446 /* GPRs. */
26447 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26448 if (bitmap_bit_p (in, regno)
26449 || bitmap_bit_p (gen, regno)
26450 || bitmap_bit_p (kill, regno))
26451 bitmap_set_bit (components, regno);
26452
26453 /* FPRs. */
26454 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26455 if (bitmap_bit_p (in, regno)
26456 || bitmap_bit_p (gen, regno)
26457 || bitmap_bit_p (kill, regno))
26458 bitmap_set_bit (components, regno);
26459
26460 /* The link register. */
26461 if (bitmap_bit_p (in, LR_REGNO)
26462 || bitmap_bit_p (gen, LR_REGNO)
26463 || bitmap_bit_p (kill, LR_REGNO))
26464 bitmap_set_bit (components, 0);
26465
26466 /* The TOC save. */
26467 if (bitmap_bit_p (in, TOC_REGNUM)
26468 || bitmap_bit_p (gen, TOC_REGNUM)
26469 || bitmap_bit_p (kill, TOC_REGNUM))
26470 bitmap_set_bit (components, 2);
26471
26472 return components;
26473 }
26474
26475 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26476 static void
26477 rs6000_disqualify_components (sbitmap components, edge e,
26478 sbitmap edge_components, bool /*is_prologue*/)
26479 {
26480 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26481 live where we want to place that code. */
26482 if (bitmap_bit_p (edge_components, 0)
26483 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26484 {
26485 if (dump_file)
26486 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26487 "on entry to bb %d\n", e->dest->index);
26488 bitmap_clear_bit (components, 0);
26489 }
26490 }
26491
26492 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26493 static void
26494 rs6000_emit_prologue_components (sbitmap components)
26495 {
26496 rs6000_stack_t *info = rs6000_stack_info ();
26497 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26498 ? HARD_FRAME_POINTER_REGNUM
26499 : STACK_POINTER_REGNUM);
26500
26501 machine_mode reg_mode = Pmode;
26502 int reg_size = TARGET_32BIT ? 4 : 8;
26503 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26504 int fp_reg_size = 8;
26505
26506 /* Prologue for LR. */
26507 if (bitmap_bit_p (components, 0))
26508 {
26509 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26510 rtx reg = gen_rtx_REG (reg_mode, 0);
26511 rtx_insn *insn = emit_move_insn (reg, lr);
26512 RTX_FRAME_RELATED_P (insn) = 1;
26513 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26514
26515 int offset = info->lr_save_offset;
26516 if (info->push_p)
26517 offset += info->total_size;
26518
26519 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26520 RTX_FRAME_RELATED_P (insn) = 1;
26521 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26522 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26523 }
26524
26525 /* Prologue for TOC. */
26526 if (bitmap_bit_p (components, 2))
26527 {
26528 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26529 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26530 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26531 }
26532
26533 /* Prologue for the GPRs. */
26534 int offset = info->gp_save_offset;
26535 if (info->push_p)
26536 offset += info->total_size;
26537
26538 for (int i = info->first_gp_reg_save; i < 32; i++)
26539 {
26540 if (bitmap_bit_p (components, i))
26541 {
26542 rtx reg = gen_rtx_REG (reg_mode, i);
26543 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26544 RTX_FRAME_RELATED_P (insn) = 1;
26545 rtx set = copy_rtx (single_set (insn));
26546 add_reg_note (insn, REG_CFA_OFFSET, set);
26547 }
26548
26549 offset += reg_size;
26550 }
26551
26552 /* Prologue for the FPRs. */
26553 offset = info->fp_save_offset;
26554 if (info->push_p)
26555 offset += info->total_size;
26556
26557 for (int i = info->first_fp_reg_save; i < 64; i++)
26558 {
26559 if (bitmap_bit_p (components, i))
26560 {
26561 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26562 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26563 RTX_FRAME_RELATED_P (insn) = 1;
26564 rtx set = copy_rtx (single_set (insn));
26565 add_reg_note (insn, REG_CFA_OFFSET, set);
26566 }
26567
26568 offset += fp_reg_size;
26569 }
26570 }
26571
26572 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26573 static void
26574 rs6000_emit_epilogue_components (sbitmap components)
26575 {
26576 rs6000_stack_t *info = rs6000_stack_info ();
26577 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26578 ? HARD_FRAME_POINTER_REGNUM
26579 : STACK_POINTER_REGNUM);
26580
26581 machine_mode reg_mode = Pmode;
26582 int reg_size = TARGET_32BIT ? 4 : 8;
26583
26584 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26585 int fp_reg_size = 8;
26586
26587 /* Epilogue for the FPRs. */
26588 int offset = info->fp_save_offset;
26589 if (info->push_p)
26590 offset += info->total_size;
26591
26592 for (int i = info->first_fp_reg_save; i < 64; i++)
26593 {
26594 if (bitmap_bit_p (components, i))
26595 {
26596 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26597 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26598 RTX_FRAME_RELATED_P (insn) = 1;
26599 add_reg_note (insn, REG_CFA_RESTORE, reg);
26600 }
26601
26602 offset += fp_reg_size;
26603 }
26604
26605 /* Epilogue for the GPRs. */
26606 offset = info->gp_save_offset;
26607 if (info->push_p)
26608 offset += info->total_size;
26609
26610 for (int i = info->first_gp_reg_save; i < 32; i++)
26611 {
26612 if (bitmap_bit_p (components, i))
26613 {
26614 rtx reg = gen_rtx_REG (reg_mode, i);
26615 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26616 RTX_FRAME_RELATED_P (insn) = 1;
26617 add_reg_note (insn, REG_CFA_RESTORE, reg);
26618 }
26619
26620 offset += reg_size;
26621 }
26622
26623 /* Epilogue for LR. */
26624 if (bitmap_bit_p (components, 0))
26625 {
26626 int offset = info->lr_save_offset;
26627 if (info->push_p)
26628 offset += info->total_size;
26629
26630 rtx reg = gen_rtx_REG (reg_mode, 0);
26631 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26632
26633 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26634 insn = emit_move_insn (lr, reg);
26635 RTX_FRAME_RELATED_P (insn) = 1;
26636 add_reg_note (insn, REG_CFA_RESTORE, lr);
26637 }
26638 }
26639
26640 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26641 static void
26642 rs6000_set_handled_components (sbitmap components)
26643 {
26644 rs6000_stack_t *info = rs6000_stack_info ();
26645
26646 for (int i = info->first_gp_reg_save; i < 32; i++)
26647 if (bitmap_bit_p (components, i))
26648 cfun->machine->gpr_is_wrapped_separately[i] = true;
26649
26650 for (int i = info->first_fp_reg_save; i < 64; i++)
26651 if (bitmap_bit_p (components, i))
26652 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26653
26654 if (bitmap_bit_p (components, 0))
26655 cfun->machine->lr_is_wrapped_separately = true;
26656
26657 if (bitmap_bit_p (components, 2))
26658 cfun->machine->toc_is_wrapped_separately = true;
26659 }
26660
26661 /* VRSAVE is a bit vector representing which AltiVec registers
26662 are used. The OS uses this to determine which vector
26663 registers to save on a context switch. We need to save
26664 VRSAVE on the stack frame, add whatever AltiVec registers we
26665 used in this function, and do the corresponding magic in the
26666 epilogue. */
26667 static void
26668 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26669 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26670 {
26671 /* Get VRSAVE into a GPR. */
26672 rtx reg = gen_rtx_REG (SImode, save_regno);
26673 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26674 if (TARGET_MACHO)
26675 emit_insn (gen_get_vrsave_internal (reg));
26676 else
26677 emit_insn (gen_rtx_SET (reg, vrsave));
26678
26679 /* Save VRSAVE. */
26680 int offset = info->vrsave_save_offset + frame_off;
26681 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26682
26683 /* Include the registers in the mask. */
26684 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26685
26686 emit_insn (generate_set_vrsave (reg, info, 0));
26687 }
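/* A worked example for a hypothetical function using only v20 and v21:
   info->vrsave_mask is ALTIVEC_REG_BIT (20) | ALTIVEC_REG_BIT (21),
   i.e. 0x800 | 0x400 == 0xc00, so the sequence above copies VRSAVE to a
   GPR, stores that incoming value in the frame slot, ORs in 0xc00, and
   moves the result back to VRSAVE.  */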
26688
26689 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26690 called, it left the arg pointer to the old stack in r29. Otherwise, the
26691 arg pointer is the top of the current frame. */
26692 static void
26693 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26694 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26695 {
26696 cfun->machine->split_stack_argp_used = true;
26697
26698 if (sp_adjust)
26699 {
26700 rtx r12 = gen_rtx_REG (Pmode, 12);
26701 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26702 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26703 emit_insn_before (set_r12, sp_adjust);
26704 }
26705 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26706 {
26707 rtx r12 = gen_rtx_REG (Pmode, 12);
26708 if (frame_off == 0)
26709 emit_move_insn (r12, frame_reg_rtx);
26710 else
26711 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26712 }
26713
26714 if (info->push_p)
26715 {
26716 rtx r12 = gen_rtx_REG (Pmode, 12);
26717 rtx r29 = gen_rtx_REG (Pmode, 29);
26718 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26719 rtx not_more = gen_label_rtx ();
26720 rtx jump;
26721
26722 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26723 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26724 gen_rtx_LABEL_REF (VOIDmode, not_more),
26725 pc_rtx);
26726 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26727 JUMP_LABEL (jump) = not_more;
26728 LABEL_NUSES (not_more) += 1;
26729 emit_move_insn (r12, r29);
26730 emit_label (not_more);
26731 }
26732 }
26733
26734 /* Emit function prologue as insns. */
26735
26736 void
26737 rs6000_emit_prologue (void)
26738 {
26739 rs6000_stack_t *info = rs6000_stack_info ();
26740 machine_mode reg_mode = Pmode;
26741 int reg_size = TARGET_32BIT ? 4 : 8;
26742 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26743 int fp_reg_size = 8;
26744 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26745 rtx frame_reg_rtx = sp_reg_rtx;
26746 unsigned int cr_save_regno;
26747 rtx cr_save_rtx = NULL_RTX;
26748 rtx_insn *insn;
26749 int strategy;
26750 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26751 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26752 && call_used_regs[STATIC_CHAIN_REGNUM]);
26753 int using_split_stack = (flag_split_stack
26754 && (lookup_attribute ("no_split_stack",
26755 DECL_ATTRIBUTES (cfun->decl))
26756 == NULL));
26757
26758 /* Offset to top of frame for frame_reg and sp respectively. */
26759 HOST_WIDE_INT frame_off = 0;
26760 HOST_WIDE_INT sp_off = 0;
26761 /* sp_adjust is the stack adjusting instruction, tracked so that the
26762 insn setting up the split-stack arg pointer can be emitted just
26763 prior to it, when r12 is not used here for other purposes. */
26764 rtx_insn *sp_adjust = 0;
26765
26766 #if CHECKING_P
26767 /* Track and check usage of r0, r11, r12. */
26768 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26769 #define START_USE(R) do \
26770 { \
26771 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26772 reg_inuse |= 1 << (R); \
26773 } while (0)
26774 #define END_USE(R) do \
26775 { \
26776 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26777 reg_inuse &= ~(1 << (R)); \
26778 } while (0)
26779 #define NOT_INUSE(R) do \
26780 { \
26781 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26782 } while (0)
26783 #else
26784 #define START_USE(R) do {} while (0)
26785 #define END_USE(R) do {} while (0)
26786 #define NOT_INUSE(R) do {} while (0)
26787 #endif
26788
26789 if (DEFAULT_ABI == ABI_ELFv2
26790 && !TARGET_SINGLE_PIC_BASE)
26791 {
26792 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26793
26794 /* With -mminimal-toc we may generate an extra use of r2 below. */
26795 if (TARGET_TOC && TARGET_MINIMAL_TOC
26796 && !constant_pool_empty_p ())
26797 cfun->machine->r2_setup_needed = true;
26798 }
26799
26800
26801 if (flag_stack_usage_info)
26802 current_function_static_stack_size = info->total_size;
26803
26804 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26805 {
26806 HOST_WIDE_INT size = info->total_size;
26807
26808 if (crtl->is_leaf && !cfun->calls_alloca)
26809 {
26810 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26811 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26812 size - get_stack_check_protect ());
26813 }
26814 else if (size > 0)
26815 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26816 }
26817
26818 if (TARGET_FIX_AND_CONTINUE)
26819 {
26820 /* gdb on Darwin arranges to forward a function from the old
26821 address by modifying the first 5 instructions of the function
26822 to branch to the overriding function. This is necessary to
26823 permit function pointers that point to the old function to
26824 actually forward to the new function. */
26825 emit_insn (gen_nop ());
26826 emit_insn (gen_nop ());
26827 emit_insn (gen_nop ());
26828 emit_insn (gen_nop ());
26829 emit_insn (gen_nop ());
26830 }
26831
26832 /* Handle world saves specially here. */
26833 if (WORLD_SAVE_P (info))
26834 {
26835 int i, j, sz;
26836 rtx treg;
26837 rtvec p;
26838 rtx reg0;
26839
26840 /* save_world expects lr in r0. */
26841 reg0 = gen_rtx_REG (Pmode, 0);
26842 if (info->lr_save_p)
26843 {
26844 insn = emit_move_insn (reg0,
26845 gen_rtx_REG (Pmode, LR_REGNO));
26846 RTX_FRAME_RELATED_P (insn) = 1;
26847 }
26848
26849 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26850 assumptions about the offsets of various bits of the stack
26851 frame. */
26852 gcc_assert (info->gp_save_offset == -220
26853 && info->fp_save_offset == -144
26854 && info->lr_save_offset == 8
26855 && info->cr_save_offset == 4
26856 && info->push_p
26857 && info->lr_save_p
26858 && (!crtl->calls_eh_return
26859 || info->ehrd_offset == -432)
26860 && info->vrsave_save_offset == -224
26861 && info->altivec_save_offset == -416);
26862
26863 treg = gen_rtx_REG (SImode, 11);
26864 emit_move_insn (treg, GEN_INT (-info->total_size));
26865
26866 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26867 in R11. It also clobbers R12, so beware! */
26868
26869 /* Preserve CR2 for save_world prologues. */
26870 sz = 5;
26871 sz += 32 - info->first_gp_reg_save;
26872 sz += 64 - info->first_fp_reg_save;
26873 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26874 p = rtvec_alloc (sz);
26875 j = 0;
26876 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26877 gen_rtx_REG (SImode,
26878 LR_REGNO));
26879 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26880 gen_rtx_SYMBOL_REF (Pmode,
26881 "*save_world"));
26882 /* We do floats first so that the instruction pattern matches
26883 properly. */
26884 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26885 RTVEC_ELT (p, j++)
26886 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26887 info->first_fp_reg_save + i),
26888 frame_reg_rtx,
26889 info->fp_save_offset + frame_off + 8 * i);
26890 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26891 RTVEC_ELT (p, j++)
26892 = gen_frame_store (gen_rtx_REG (V4SImode,
26893 info->first_altivec_reg_save + i),
26894 frame_reg_rtx,
26895 info->altivec_save_offset + frame_off + 16 * i);
26896 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26897 RTVEC_ELT (p, j++)
26898 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26899 frame_reg_rtx,
26900 info->gp_save_offset + frame_off + reg_size * i);
26901
26902 /* CR register traditionally saved as CR2. */
26903 RTVEC_ELT (p, j++)
26904 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26905 frame_reg_rtx, info->cr_save_offset + frame_off);
26906 /* Describe the store of R0, which holds the copy of LR. */
26907 if (info->lr_save_p)
26908 RTVEC_ELT (p, j++)
26909 = gen_frame_store (reg0,
26910 frame_reg_rtx, info->lr_save_offset + frame_off);
26911 /* Explain what happens to the stack pointer. */
26912 {
26913 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26914 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26915 }
26916
26917 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26918 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26919 treg, GEN_INT (-info->total_size));
26920 sp_off = frame_off = info->total_size;
26921 }
26922
26923 strategy = info->savres_strategy;
26924
26925 /* For V.4, update stack before we do any saving and set back pointer. */
26926 if (! WORLD_SAVE_P (info)
26927 && info->push_p
26928 && (DEFAULT_ABI == ABI_V4
26929 || crtl->calls_eh_return))
26930 {
26931 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26932 || !(strategy & SAVE_INLINE_GPRS)
26933 || !(strategy & SAVE_INLINE_VRS));
26934 int ptr_regno = -1;
26935 rtx ptr_reg = NULL_RTX;
26936 int ptr_off = 0;
26937
26938 if (info->total_size < 32767)
26939 frame_off = info->total_size;
26940 else if (need_r11)
26941 ptr_regno = 11;
26942 else if (info->cr_save_p
26943 || info->lr_save_p
26944 || info->first_fp_reg_save < 64
26945 || info->first_gp_reg_save < 32
26946 || info->altivec_size != 0
26947 || info->vrsave_size != 0
26948 || crtl->calls_eh_return)
26949 ptr_regno = 12;
26950 else
26951 {
26952 /* The prologue won't be saving any regs so there is no need
26953 to set up a frame register to access any frame save area.
26954 We also won't be using frame_off anywhere below, but set
26955 the correct value anyway to protect against future
26956 changes to this function. */
26957 frame_off = info->total_size;
26958 }
26959 if (ptr_regno != -1)
26960 {
26961 /* Set up the frame offset to that needed by the first
26962 out-of-line save function. */
26963 START_USE (ptr_regno);
26964 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26965 frame_reg_rtx = ptr_reg;
26966 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26967 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26968 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26969 ptr_off = info->gp_save_offset + info->gp_size;
26970 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26971 ptr_off = info->altivec_save_offset + info->altivec_size;
26972 frame_off = -ptr_off;
26973 }
26974 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26975 ptr_reg, ptr_off);
26976 if (REGNO (frame_reg_rtx) == 12)
26977 sp_adjust = 0;
26978 sp_off = info->total_size;
26979 if (frame_reg_rtx != sp_reg_rtx)
26980 rs6000_emit_stack_tie (frame_reg_rtx, false);
26981 }
26982
26983 /* If we use the link register, get it into r0. */
26984 if (!WORLD_SAVE_P (info) && info->lr_save_p
26985 && !cfun->machine->lr_is_wrapped_separately)
26986 {
26987 rtx addr, reg, mem;
26988
26989 reg = gen_rtx_REG (Pmode, 0);
26990 START_USE (0);
26991 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26992 RTX_FRAME_RELATED_P (insn) = 1;
26993
26994 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26995 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26996 {
26997 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26998 GEN_INT (info->lr_save_offset + frame_off));
26999 mem = gen_rtx_MEM (Pmode, addr);
27000 /* This should not be of rs6000_sr_alias_set, because of
27001 __builtin_return_address. */
27002
27003 insn = emit_move_insn (mem, reg);
27004 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27005 NULL_RTX, NULL_RTX);
27006 END_USE (0);
27007 }
27008 }
27009
27010 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27011 r12 will be needed by the out-of-line GPR save. */
27012 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27013 && !(strategy & (SAVE_INLINE_GPRS
27014 | SAVE_NOINLINE_GPRS_SAVES_LR))
27015 ? 11 : 12);
27016 if (!WORLD_SAVE_P (info)
27017 && info->cr_save_p
27018 && REGNO (frame_reg_rtx) != cr_save_regno
27019 && !(using_static_chain_p && cr_save_regno == 11)
27020 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27021 {
27022 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27023 START_USE (cr_save_regno);
27024 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27025 }
27026
27027 /* Do any required saving of FPRs. If only one or two need saving, do
27028 it inline ourselves. Otherwise, call an out-of-line save function. */
27029 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27030 {
27031 int offset = info->fp_save_offset + frame_off;
27032 for (int i = info->first_fp_reg_save; i < 64; i++)
27033 {
27034 if (save_reg_p (i)
27035 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27036 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27037 sp_off - frame_off);
27038
27039 offset += fp_reg_size;
27040 }
27041 }
27042 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27043 {
27044 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27045 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27046 unsigned ptr_regno = ptr_regno_for_savres (sel);
27047 rtx ptr_reg = frame_reg_rtx;
27048
27049 if (REGNO (frame_reg_rtx) == ptr_regno)
27050 gcc_checking_assert (frame_off == 0);
27051 else
27052 {
27053 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27054 NOT_INUSE (ptr_regno);
27055 emit_insn (gen_add3_insn (ptr_reg,
27056 frame_reg_rtx, GEN_INT (frame_off)));
27057 }
27058 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27059 info->fp_save_offset,
27060 info->lr_save_offset,
27061 DFmode, sel);
27062 rs6000_frame_related (insn, ptr_reg, sp_off,
27063 NULL_RTX, NULL_RTX);
27064 if (lr)
27065 END_USE (0);
27066 }
27067
27068 /* Save GPRs. This is done as a PARALLEL if we are using
27069 the store-multiple instructions. */
27070 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27071 {
27072 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27073 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27074 unsigned ptr_regno = ptr_regno_for_savres (sel);
27075 rtx ptr_reg = frame_reg_rtx;
27076 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27077 int end_save = info->gp_save_offset + info->gp_size;
27078 int ptr_off;
27079
27080 if (ptr_regno == 12)
27081 sp_adjust = 0;
27082 if (!ptr_set_up)
27083 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27084
27085 /* Need to adjust r11 (r12) if we saved any FPRs. */
27086 if (end_save + frame_off != 0)
27087 {
27088 rtx offset = GEN_INT (end_save + frame_off);
27089
27090 if (ptr_set_up)
27091 frame_off = -end_save;
27092 else
27093 NOT_INUSE (ptr_regno);
27094 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27095 }
27096 else if (!ptr_set_up)
27097 {
27098 NOT_INUSE (ptr_regno);
27099 emit_move_insn (ptr_reg, frame_reg_rtx);
27100 }
27101 ptr_off = -end_save;
27102 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27103 info->gp_save_offset + ptr_off,
27104 info->lr_save_offset + ptr_off,
27105 reg_mode, sel);
27106 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27107 NULL_RTX, NULL_RTX);
27108 if (lr)
27109 END_USE (0);
27110 }
27111 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27112 {
27113 rtvec p;
27114 int i;
27115 p = rtvec_alloc (32 - info->first_gp_reg_save);
27116 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27117 RTVEC_ELT (p, i)
27118 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27119 frame_reg_rtx,
27120 info->gp_save_offset + frame_off + reg_size * i);
27121 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27122 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27123 NULL_RTX, NULL_RTX);
27124 }
27125 else if (!WORLD_SAVE_P (info))
27126 {
27127 int offset = info->gp_save_offset + frame_off;
27128 for (int i = info->first_gp_reg_save; i < 32; i++)
27129 {
27130 if (save_reg_p (i)
27131 && !cfun->machine->gpr_is_wrapped_separately[i])
27132 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27133 sp_off - frame_off);
27134
27135 offset += reg_size;
27136 }
27137 }
27138
27139 if (crtl->calls_eh_return)
27140 {
27141 unsigned int i;
27142 rtvec p;
27143
27144 for (i = 0; ; ++i)
27145 {
27146 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27147 if (regno == INVALID_REGNUM)
27148 break;
27149 }
27150
27151 p = rtvec_alloc (i);
27152
27153 for (i = 0; ; ++i)
27154 {
27155 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27156 if (regno == INVALID_REGNUM)
27157 break;
27158
27159 rtx set
27160 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27161 sp_reg_rtx,
27162 info->ehrd_offset + sp_off + reg_size * (int) i);
27163 RTVEC_ELT (p, i) = set;
27164 RTX_FRAME_RELATED_P (set) = 1;
27165 }
27166
27167 insn = emit_insn (gen_blockage ());
27168 RTX_FRAME_RELATED_P (insn) = 1;
27169 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27170 }
27171
27172 /* In AIX ABI we need to make sure r2 is really saved. */
27173 if (TARGET_AIX && crtl->calls_eh_return)
27174 {
27175 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27176 rtx join_insn, note;
27177 rtx_insn *save_insn;
27178 long toc_restore_insn;
27179
27180 tmp_reg = gen_rtx_REG (Pmode, 11);
27181 tmp_reg_si = gen_rtx_REG (SImode, 11);
27182 if (using_static_chain_p)
27183 {
27184 START_USE (0);
27185 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27186 }
27187 else
27188 START_USE (11);
27189 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27190 /* Peek at the instruction to which this function returns. If it's
27191 restoring r2, then we know we've already saved r2. We can't
27192 unconditionally save r2 because the value we have will already
27193 be updated if we arrived at this function via a plt call or
27194 toc adjusting stub. */
27195 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27196 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27197 + RS6000_TOC_SAVE_SLOT);
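/* For example (illustrative): on 64-bit with a TOC save slot of 24 this
   computes 0xE8410018, i.e. "ld 2,24(1)"; on 32-bit with a slot of 20 it
   computes 0x80410014, i.e. "lwz 2,20(1)".  */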
27198 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27199 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27200 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27201 validate_condition_mode (EQ, CCUNSmode);
27202 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27203 emit_insn (gen_rtx_SET (compare_result,
27204 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27205 toc_save_done = gen_label_rtx ();
27206 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27207 gen_rtx_EQ (VOIDmode, compare_result,
27208 const0_rtx),
27209 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27210 pc_rtx);
27211 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27212 JUMP_LABEL (jump) = toc_save_done;
27213 LABEL_NUSES (toc_save_done) += 1;
27214
27215 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27216 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27217 sp_off - frame_off);
27218
27219 emit_label (toc_save_done);
27220
27221 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27222 have a CFG that has different saves along different paths.
27223 Move the note to a dummy blockage insn, which describes that
27224 R2 is unconditionally saved after the label. */
27225 /* ??? An alternate representation might be a special insn pattern
27226 containing both the branch and the store. That might give the
27227 code that minimizes the number of DW_CFA_advance opcodes more
27228 freedom in placing the annotations. */
27229 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27230 if (note)
27231 remove_note (save_insn, note);
27232 else
27233 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27234 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27235 RTX_FRAME_RELATED_P (save_insn) = 0;
27236
27237 join_insn = emit_insn (gen_blockage ());
27238 REG_NOTES (join_insn) = note;
27239 RTX_FRAME_RELATED_P (join_insn) = 1;
27240
27241 if (using_static_chain_p)
27242 {
27243 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27244 END_USE (0);
27245 }
27246 else
27247 END_USE (11);
27248 }
27249
27250 /* Save CR if we use any that must be preserved. */
27251 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27252 {
27253 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27254 GEN_INT (info->cr_save_offset + frame_off));
27255 rtx mem = gen_frame_mem (SImode, addr);
27256
27257 /* If we didn't copy cr before, do so now using r0. */
27258 if (cr_save_rtx == NULL_RTX)
27259 {
27260 START_USE (0);
27261 cr_save_rtx = gen_rtx_REG (SImode, 0);
27262 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27263 }
27264
27265 /* Saving CR requires a two-instruction sequence: one instruction
27266 to move the CR to a general-purpose register, and a second
27267 instruction that stores the GPR to memory.
27268
27269 We do not emit any DWARF CFI records for the first of these,
27270 because we cannot properly represent the fact that CR is saved in
27271 a register. One reason is that we cannot express that multiple
27272 CR fields are saved; another reason is that on 64-bit, the size
27273 of the CR register in DWARF (4 bytes) differs from the size of
27274 a general-purpose register.
27275
27276 This means if any intervening instruction were to clobber one of
27277 the call-saved CR fields, we'd have incorrect CFI. To prevent
27278 this from happening, we mark the store to memory as a use of
27279 those CR fields, which prevents any such instruction from being
27280 scheduled in between the two instructions. */
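/* The emitted sequence is roughly (a sketch, assuming r0 is the
scratch GPR chosen above):

    mfcr 0               ; move all CR fields to r0
    stw  0,offset(frame) ; store, in a PARALLEL with a
                         ; (use (reg CRn)) for each saved field

The USEs are RTL-level scheduling fences only; they emit no code. */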
27281 rtx crsave_v[9];
27282 int n_crsave = 0;
27283 int i;
27284
27285 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27286 for (i = 0; i < 8; i++)
27287 if (save_reg_p (CR0_REGNO + i))
27288 crsave_v[n_crsave++]
27289 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27290
27291 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27292 gen_rtvec_v (n_crsave, crsave_v)));
27293 END_USE (REGNO (cr_save_rtx));
27294
27295 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27296 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27297 so we need to construct a frame expression manually. */
27298 RTX_FRAME_RELATED_P (insn) = 1;
27299
27300 /* Update address to be stack-pointer relative, like
27301 rs6000_frame_related would do. */
27302 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27303 GEN_INT (info->cr_save_offset + sp_off));
27304 mem = gen_frame_mem (SImode, addr);
27305
27306 if (DEFAULT_ABI == ABI_ELFv2)
27307 {
27308 /* In the ELFv2 ABI we generate separate CFI records for each
27309 CR field that was actually saved. They all point to the
27310 same 32-bit stack slot. */
27311 rtx crframe[8];
27312 int n_crframe = 0;
27313
27314 for (i = 0; i < 8; i++)
27315 if (save_reg_p (CR0_REGNO + i))
27316 {
27317 crframe[n_crframe]
27318 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27319
27320 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27321 n_crframe++;
27322 }
27323
27324 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27325 gen_rtx_PARALLEL (VOIDmode,
27326 gen_rtvec_v (n_crframe, crframe)));
27327 }
27328 else
27329 {
27330 /* In other ABIs, by convention, we use a single CR regnum to
27331 represent the fact that all call-saved CR fields are saved.
27332 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27333 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27334 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27335 }
27336 }
27337
27338 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27339 *separate* slots if the routine calls __builtin_eh_return, so
27340 that they can be independently restored by the unwinder. */
27341 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27342 {
27343 int i, cr_off = info->ehcr_offset;
27344 rtx crsave;
27345
27346 /* ??? We might get better performance by using multiple mfocrf
27347 instructions. */
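/* (A sketch of the idea: "mfocrf rT,FXM" copies a single CR field
into the corresponding nibble of rT, so one mfocrf per saved field
could replace the full mfcr; whether that actually wins here is
unverified, hence the ??? above.) */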
27348 crsave = gen_rtx_REG (SImode, 0);
27349 emit_insn (gen_prologue_movesi_from_cr (crsave));
27350
27351 for (i = 0; i < 8; i++)
27352 if (!call_used_regs[CR0_REGNO + i])
27353 {
27354 rtvec p = rtvec_alloc (2);
27355 RTVEC_ELT (p, 0)
27356 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27357 RTVEC_ELT (p, 1)
27358 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27359
27360 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27361
27362 RTX_FRAME_RELATED_P (insn) = 1;
27363 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27364 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27365 sp_reg_rtx, cr_off + sp_off));
27366
27367 cr_off += reg_size;
27368 }
27369 }
27370
27371 /* If we are emitting stack probes, but allocate no stack, then
27372 just note that in the dump file. */
27373 if (flag_stack_clash_protection
27374 && dump_file
27375 && !info->push_p)
27376 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27377
27378 /* Update stack and set back pointer unless this is V.4,
27379 for which it was done previously. */
27380 if (!WORLD_SAVE_P (info) && info->push_p
27381 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27382 {
27383 rtx ptr_reg = NULL;
27384 int ptr_off = 0;
27385
27386 /* If saving altivec regs we need to be able to address all save
27387 locations using a 16-bit offset. */
27388 if ((strategy & SAVE_INLINE_VRS) == 0
27389 || (info->altivec_size != 0
27390 && (info->altivec_save_offset + info->altivec_size - 16
27391 + info->total_size - frame_off) > 32767)
27392 || (info->vrsave_size != 0
27393 && (info->vrsave_save_offset
27394 + info->total_size - frame_off) > 32767))
27395 {
27396 int sel = SAVRES_SAVE | SAVRES_VR;
27397 unsigned ptr_regno = ptr_regno_for_savres (sel);
27398
27399 if (using_static_chain_p
27400 && ptr_regno == STATIC_CHAIN_REGNUM)
27401 ptr_regno = 12;
27402 if (REGNO (frame_reg_rtx) != ptr_regno)
27403 START_USE (ptr_regno);
27404 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27405 frame_reg_rtx = ptr_reg;
27406 ptr_off = info->altivec_save_offset + info->altivec_size;
27407 frame_off = -ptr_off;
27408 }
27409 else if (REGNO (frame_reg_rtx) == 1)
27410 frame_off = info->total_size;
27411 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27412 ptr_reg, ptr_off);
27413 if (REGNO (frame_reg_rtx) == 12)
27414 sp_adjust = 0;
27415 sp_off = info->total_size;
27416 if (frame_reg_rtx != sp_reg_rtx)
27417 rs6000_emit_stack_tie (frame_reg_rtx, false);
27418 }
27419
27420 /* Set frame pointer, if needed. */
27421 if (frame_pointer_needed)
27422 {
27423 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27424 sp_reg_rtx);
27425 RTX_FRAME_RELATED_P (insn) = 1;
27426 }
27427
27428 /* Save AltiVec registers if needed. Save here because the red zone does
27429 not always include AltiVec registers. */
27430 if (!WORLD_SAVE_P (info)
27431 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27432 {
27433 int end_save = info->altivec_save_offset + info->altivec_size;
27434 int ptr_off;
27435 /* Oddly, the vector save/restore functions point r0 at the end
27436 of the save area, then use r11 or r12 to load offsets for
27437 [reg+reg] addressing. */
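/* Schematically (a sketch of that out-of-line convention, not code
emitted at this point):

    r0 = frame_reg + end_save  ; r0 points past the VR save area
    rS = -16, -32, ...         ; offsets loaded into r11 or r12
    stvx vN,r0,rS              ; one [reg+reg] store per vector reg */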
27438 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27439 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27440 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27441
27442 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27443 NOT_INUSE (0);
27444 if (scratch_regno == 12)
27445 sp_adjust = 0;
27446 if (end_save + frame_off != 0)
27447 {
27448 rtx offset = GEN_INT (end_save + frame_off);
27449
27450 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27451 }
27452 else
27453 emit_move_insn (ptr_reg, frame_reg_rtx);
27454
27455 ptr_off = -end_save;
27456 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27457 info->altivec_save_offset + ptr_off,
27458 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27459 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27460 NULL_RTX, NULL_RTX);
27461 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27462 {
27463 /* The oddity mentioned above clobbered our frame reg. */
27464 emit_move_insn (frame_reg_rtx, ptr_reg);
27465 frame_off = ptr_off;
27466 }
27467 }
27468 else if (!WORLD_SAVE_P (info)
27469 && info->altivec_size != 0)
27470 {
27471 int i;
27472
27473 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27474 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27475 {
27476 rtx areg, savereg, mem;
27477 HOST_WIDE_INT offset;
27478
27479 offset = (info->altivec_save_offset + frame_off
27480 + 16 * (i - info->first_altivec_reg_save));
27481
27482 savereg = gen_rtx_REG (V4SImode, i);
27483
27484 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27485 {
27486 mem = gen_frame_mem (V4SImode,
27487 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27488 GEN_INT (offset)));
27489 insn = emit_insn (gen_rtx_SET (mem, savereg));
27490 areg = NULL_RTX;
27491 }
27492 else
27493 {
27494 NOT_INUSE (0);
27495 areg = gen_rtx_REG (Pmode, 0);
27496 emit_move_insn (areg, GEN_INT (offset));
27497
27498 /* AltiVec addressing mode is [reg+reg]. */
27499 mem = gen_frame_mem (V4SImode,
27500 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27501
27502 /* Rather than emitting a generic move, force use of the stvx
27503 instruction, which we always want on ISA 2.07 (power8) systems.
27504 In particular we don't want xxpermdi/stxvd2x for little
27505 endian. */
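/* (Background, hedged: on little-endian, stxvd2x stores the two
doublewords swapped, so a correct V4SImode store would need an
extra xxpermdi, while stvx stores the 16-byte slot directly.) */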
27506 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27507 }
27508
27509 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27510 areg, GEN_INT (offset));
27511 }
27512 }
27513
27514 /* VRSAVE is a bit vector representing which AltiVec registers
27515 are used. The OS uses this to determine which vector
27516 registers to save on a context switch. We need to save
27517 VRSAVE on the stack frame, set the bits for whatever AltiVec
27518 registers we used in this function, and do the corresponding magic in the
27519 epilogue. */
27520
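/* What emit_vrsave_prologue produces is roughly (a sketch; the exact
sequence lives in that helper, not here):

    mfvrsave rS            ; rS is the save_regno chosen below
    stw  rS,offset(frame)  ; preserve the caller's VRSAVE
    oris/ori rS,rS,mask    ; set bits for the AltiVec regs we use
    mtvrsave rS  */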
27521 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27522 {
27523 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27524 be using r12 as frame_reg_rtx and r11 as the static chain
27525 pointer for nested functions. */
27526 int save_regno = 12;
27527 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27528 && !using_static_chain_p)
27529 save_regno = 11;
27530 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27531 {
27532 save_regno = 11;
27533 if (using_static_chain_p)
27534 save_regno = 0;
27535 }
27536 NOT_INUSE (save_regno);
27537
27538 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27539 }
27540
27541 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27542 if (!TARGET_SINGLE_PIC_BASE
27543 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27544 && !constant_pool_empty_p ())
27545 || (DEFAULT_ABI == ABI_V4
27546 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27547 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27548 {
27549 /* If emit_load_toc_table will use the link register, we need to save
27550 it. We use R12 for this purpose because emit_load_toc_table
27551 can use register 0. This allows us to use a plain 'blr' to return
27552 from the procedure more often. */
27553 int save_LR_around_toc_setup = (TARGET_ELF
27554 && DEFAULT_ABI == ABI_V4
27555 && flag_pic
27556 && ! info->lr_save_p
27557 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27558 if (save_LR_around_toc_setup)
27559 {
27560 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27561 rtx tmp = gen_rtx_REG (Pmode, 12);
27562
27563 sp_adjust = 0;
27564 insn = emit_move_insn (tmp, lr);
27565 RTX_FRAME_RELATED_P (insn) = 1;
27566
27567 rs6000_emit_load_toc_table (TRUE);
27568
27569 insn = emit_move_insn (lr, tmp);
27570 add_reg_note (insn, REG_CFA_RESTORE, lr);
27571 RTX_FRAME_RELATED_P (insn) = 1;
27572 }
27573 else
27574 rs6000_emit_load_toc_table (TRUE);
27575 }
27576
27577 #if TARGET_MACHO
27578 if (!TARGET_SINGLE_PIC_BASE
27579 && DEFAULT_ABI == ABI_DARWIN
27580 && flag_pic && crtl->uses_pic_offset_table)
27581 {
27582 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27583 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27584
27585 /* Save and restore LR locally around this call (in R0). */
27586 if (!info->lr_save_p)
27587 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27588
27589 emit_insn (gen_load_macho_picbase (src));
27590
27591 emit_move_insn (gen_rtx_REG (Pmode,
27592 RS6000_PIC_OFFSET_TABLE_REGNUM),
27593 lr);
27594
27595 if (!info->lr_save_p)
27596 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27597 }
27598 #endif
27599
27600 /* If we need to, save the TOC register after doing the stack setup.
27601 Do not emit eh frame info for this save. The unwinder wants info,
27602 conceptually attached to instructions in this function, about
27603 register values in the caller of this function. This R2 may have
27604 already been changed from the value in the caller.
27605 We don't attempt to write accurate DWARF EH frame info for R2
27606 because code emitted by gcc for a (non-pointer) function call
27607 doesn't save and restore R2. Instead, R2 is managed out-of-line
27608 by a linker generated plt call stub when the function resides in
27609 a shared library. This behavior is costly to describe in DWARF,
27610 both in terms of the size of DWARF info and the time taken in the
27611 unwinder to interpret it. R2 changes, apart from the
27612 calls_eh_return case earlier in this function, are handled by
27613 linux-unwind.h frob_update_context. */
27614 if (rs6000_save_toc_in_prologue_p ()
27615 && !cfun->machine->toc_is_wrapped_separately)
27616 {
27617 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27618 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27619 }
27620
27621 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27622 if (using_split_stack && split_stack_arg_pointer_used_p ())
27623 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27624 }
27625
27626 /* Output .extern statements for the save/restore routines we use. */
27627
27628 static void
27629 rs6000_output_savres_externs (FILE *file)
27630 {
27631 rs6000_stack_t *info = rs6000_stack_info ();
27632
27633 if (TARGET_DEBUG_STACK)
27634 debug_stack_info (info);
27635
27636 /* Write .extern for any function we will call to save and restore
27637 fp values. */
27638 if (info->first_fp_reg_save < 64
27639 && !TARGET_MACHO
27640 && !TARGET_ELF)
27641 {
27642 char *name;
27643 int regno = info->first_fp_reg_save - 32;
27644
27645 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27646 {
27647 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27648 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27649 name = rs6000_savres_routine_name (regno, sel);
27650 fprintf (file, "\t.extern %s\n", name);
27651 }
27652 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27653 {
27654 bool lr = (info->savres_strategy
27655 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27656 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27657 name = rs6000_savres_routine_name (regno, sel);
27658 fprintf (file, "\t.extern %s\n", name);
27659 }
27660 }
27661 }
27662
27663 /* Write function prologue. */
27664
27665 static void
27666 rs6000_output_function_prologue (FILE *file)
27667 {
27668 if (!cfun->is_thunk)
27669 rs6000_output_savres_externs (file);
27670
27671 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27672 immediately after the global entry point label. */
27673 if (rs6000_global_entry_point_needed_p ())
27674 {
27675 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27676
27677 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27678
27679 if (TARGET_CMODEL != CMODEL_LARGE)
27680 {
27681 /* In the small and medium code models, we assume the TOC is less
27682 than 2 GB away from the text section, so it can be computed via
27683 the following two-instruction sequence. */
27684 char buf[256];
27685
27686 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27687 fprintf (file, "0:\taddis 2,12,.TOC.-");
27688 assemble_name (file, buf);
27689 fprintf (file, "@ha\n");
27690 fprintf (file, "\taddi 2,2,.TOC.-");
27691 assemble_name (file, buf);
27692 fprintf (file, "@l\n");
27693 }
27694 else
27695 {
27696 /* In the large code model, we allow arbitrary offsets between the
27697 TOC and the text section, so we have to load the offset from
27698 memory. The data field is emitted directly before the global
27699 entry point in rs6000_elf_declare_function_name. */
27700 char buf[256];
27701
27702 #ifdef HAVE_AS_ENTRY_MARKERS
27703 /* If supported by the linker, emit a marker relocation. If the
27704 total code size of the final executable or shared library
27705 happens to fit into 2 GB after all, the linker will replace
27706 this code sequence with the sequence for the small or medium
27707 code model. */
27708 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27709 #endif
27710 fprintf (file, "\tld 2,");
27711 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27712 assemble_name (file, buf);
27713 fprintf (file, "-");
27714 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27715 assemble_name (file, buf);
27716 fprintf (file, "(12)\n");
27717 fprintf (file, "\tadd 2,2,12\n");
27718 }
27719
27720 fputs ("\t.localentry\t", file);
27721 assemble_name (file, name);
27722 fputs (",.-", file);
27723 assemble_name (file, name);
27724 fputs ("\n", file);
27725 }
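/* Putting it together, the small/medium-model global entry sequence
reads (a sketch; label names follow the LCF pattern above):

    .LCF0:
        addis 2,12,.TOC.-.LCF0@ha
        addi  2,2,.TOC.-.LCF0@l
        .localentry name,.-name

which relies on r12 holding the global entry address, as the ELFv2
ABI requires at the global entry point. */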
27726
27727 /* Output -mprofile-kernel code. This needs to be done here instead of
27728 in output_function_profile since it must go after the ELFv2 ABI
27729 local entry point. */
27730 if (TARGET_PROFILE_KERNEL && crtl->profile)
27731 {
27732 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27733 gcc_assert (!TARGET_32BIT);
27734
27735 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27736
27737 /* In the ELFv2 ABI we have no compiler stack word. It must be
27738 the responsibility of _mcount to preserve the static chain
27739 register if required. */
27740 if (DEFAULT_ABI != ABI_ELFv2
27741 && cfun->static_chain_decl != NULL)
27742 {
27743 asm_fprintf (file, "\tstd %s,24(%s)\n",
27744 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27745 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27746 asm_fprintf (file, "\tld %s,24(%s)\n",
27747 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27748 }
27749 else
27750 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27751 }
27752
27753 rs6000_pic_labelno++;
27754 }
27755
27756 /* -mprofile-kernel code calls mcount before the function prologue,
27757 so a profiled leaf function should stay a leaf function. */
27758 static bool
27759 rs6000_keep_leaf_when_profiled ()
27760 {
27761 return TARGET_PROFILE_KERNEL;
27762 }
27763
27764 /* Non-zero if vmx regs are restored before the frame pop, zero if
27765 we restore after the pop when possible. */
27766 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27767
27768 /* Restoring cr is a two-step process: loading a reg from the frame
27769 save, then moving the reg to cr. For ABI_V4 we must let the
27770 unwinder know that the stack location is no longer valid at or
27771 before the stack deallocation, but we can't emit a cfa_restore for
27772 cr at the stack deallocation like we do for other registers.
27773 The trouble is that it is possible for the move to cr to be
27774 scheduled after the stack deallocation. So say exactly where cr
27775 is located on each of the two insns. */
27776
27777 static rtx
27778 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27779 {
27780 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27781 rtx reg = gen_rtx_REG (SImode, regno);
27782 rtx_insn *insn = emit_move_insn (reg, mem);
27783
27784 if (!exit_func && DEFAULT_ABI == ABI_V4)
27785 {
27786 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27787 rtx set = gen_rtx_SET (reg, cr);
27788
27789 add_reg_note (insn, REG_CFA_REGISTER, set);
27790 RTX_FRAME_RELATED_P (insn) = 1;
27791 }
27792 return reg;
27793 }
27794
27795 /* Reload CR from REG. */
27796
27797 static void
27798 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27799 {
27800 int count = 0;
27801 int i;
27802
27803 if (using_mfcr_multiple)
27804 {
27805 for (i = 0; i < 8; i++)
27806 if (save_reg_p (CR0_REGNO + i))
27807 count++;
27808 gcc_assert (count);
27809 }
27810
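/* Illustrative: when count > 1 the PARALLEL built below can match a
single mtcrf with a multi-field mask, e.g. "mtcrf 0x38,rS" restores
CR2..CR4; the (1 << (7-i)) constants are the per-field FXM mask
bits, numbered from the left as in the mtcrf operand. */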
27811 if (using_mfcr_multiple && count > 1)
27812 {
27813 rtx_insn *insn;
27814 rtvec p;
27815 int ndx;
27816
27817 p = rtvec_alloc (count);
27818
27819 ndx = 0;
27820 for (i = 0; i < 8; i++)
27821 if (save_reg_p (CR0_REGNO + i))
27822 {
27823 rtvec r = rtvec_alloc (2);
27824 RTVEC_ELT (r, 0) = reg;
27825 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27826 RTVEC_ELT (p, ndx) =
27827 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27828 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27829 ndx++;
27830 }
27831 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27832 gcc_assert (ndx == count);
27833
27834 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27835 CR field separately. */
27836 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27837 {
27838 for (i = 0; i < 8; i++)
27839 if (save_reg_p (CR0_REGNO + i))
27840 add_reg_note (insn, REG_CFA_RESTORE,
27841 gen_rtx_REG (SImode, CR0_REGNO + i));
27842
27843 RTX_FRAME_RELATED_P (insn) = 1;
27844 }
27845 }
27846 else
27847 for (i = 0; i < 8; i++)
27848 if (save_reg_p (CR0_REGNO + i))
27849 {
27850 rtx insn = emit_insn (gen_movsi_to_cr_one
27851 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27852
27853 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27854 CR field separately, attached to the insn that in fact
27855 restores this particular CR field. */
27856 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27857 {
27858 add_reg_note (insn, REG_CFA_RESTORE,
27859 gen_rtx_REG (SImode, CR0_REGNO + i));
27860
27861 RTX_FRAME_RELATED_P (insn) = 1;
27862 }
27863 }
27864
27865 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27866 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27867 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27868 {
27869 rtx_insn *insn = get_last_insn ();
27870 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27871
27872 add_reg_note (insn, REG_CFA_RESTORE, cr);
27873 RTX_FRAME_RELATED_P (insn) = 1;
27874 }
27875 }
27876
27877 /* Like cr, the move to lr instruction can be scheduled after the
27878 stack deallocation, but unlike cr, its stack frame save is still
27879 valid. So we only need to emit the cfa_restore on the correct
27880 instruction. */
27881
27882 static void
27883 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27884 {
27885 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27886 rtx reg = gen_rtx_REG (Pmode, regno);
27887
27888 emit_move_insn (reg, mem);
27889 }
27890
27891 static void
27892 restore_saved_lr (int regno, bool exit_func)
27893 {
27894 rtx reg = gen_rtx_REG (Pmode, regno);
27895 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27896 rtx_insn *insn = emit_move_insn (lr, reg);
27897
27898 if (!exit_func && flag_shrink_wrap)
27899 {
27900 add_reg_note (insn, REG_CFA_RESTORE, lr);
27901 RTX_FRAME_RELATED_P (insn) = 1;
27902 }
27903 }
27904
27905 static rtx
27906 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27907 {
27908 if (DEFAULT_ABI == ABI_ELFv2)
27909 {
27910 int i;
27911 for (i = 0; i < 8; i++)
27912 if (save_reg_p (CR0_REGNO + i))
27913 {
27914 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27915 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27916 cfa_restores);
27917 }
27918 }
27919 else if (info->cr_save_p)
27920 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27921 gen_rtx_REG (SImode, CR2_REGNO),
27922 cfa_restores);
27923
27924 if (info->lr_save_p)
27925 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27926 gen_rtx_REG (Pmode, LR_REGNO),
27927 cfa_restores);
27928 return cfa_restores;
27929 }
27930
27931 /* Return true if OFFSET from the stack pointer can be clobbered by
27932 signals. V.4 doesn't have any stack cushion; the AIX ABIs keep 220
27933 or 288 bytes below the stack pointer that are not clobbered by signals. */
27934
27935 static inline bool
27936 offset_below_red_zone_p (HOST_WIDE_INT offset)
27937 {
27938 return offset < (DEFAULT_ABI == ABI_V4
27939 ? 0
27940 : TARGET_32BIT ? -220 : -288);
27941 }
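/* For example (using the 64-bit 288-byte cushion above), a save slot
at offset -300 from the incoming stack pointer lies below the red
zone, so it must be reloaded before the frame is popped. */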
27942
27943 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27944
27945 static void
27946 emit_cfa_restores (rtx cfa_restores)
27947 {
27948 rtx_insn *insn = get_last_insn ();
27949 rtx *loc = &REG_NOTES (insn);
27950
27951 while (*loc)
27952 loc = &XEXP (*loc, 1);
27953 *loc = cfa_restores;
27954 RTX_FRAME_RELATED_P (insn) = 1;
27955 }
27956
27957 /* Emit function epilogue as insns. */
27958
27959 void
27960 rs6000_emit_epilogue (int sibcall)
27961 {
27962 rs6000_stack_t *info;
27963 int restoring_GPRs_inline;
27964 int restoring_FPRs_inline;
27965 int using_load_multiple;
27966 int using_mtcr_multiple;
27967 int use_backchain_to_restore_sp;
27968 int restore_lr;
27969 int strategy;
27970 HOST_WIDE_INT frame_off = 0;
27971 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27972 rtx frame_reg_rtx = sp_reg_rtx;
27973 rtx cfa_restores = NULL_RTX;
27974 rtx insn;
27975 rtx cr_save_reg = NULL_RTX;
27976 machine_mode reg_mode = Pmode;
27977 int reg_size = TARGET_32BIT ? 4 : 8;
27978 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
27979 int fp_reg_size = 8;
27980 int i;
27981 bool exit_func;
27982 unsigned ptr_regno;
27983
27984 info = rs6000_stack_info ();
27985
27986 strategy = info->savres_strategy;
27987 using_load_multiple = strategy & REST_MULTIPLE;
27988 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27989 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27990 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
27991 || rs6000_tune == PROCESSOR_PPC603
27992 || rs6000_tune == PROCESSOR_PPC750
27993 || optimize_size);
27994 /* Restore via the backchain when we have a large frame, since this
27995 is more efficient than an addis, addi pair. The second condition
27996 here will not trigger at the moment; we don't actually need a
27997 frame pointer for alloca, but the generic parts of the compiler
27998 give us one anyway. */
27999 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28000 ? info->lr_save_offset
28001 : 0) > 32767
28002 || (cfun->calls_alloca
28003 && !frame_pointer_needed));
28004 restore_lr = (info->lr_save_p
28005 && (restoring_FPRs_inline
28006 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28007 && (restoring_GPRs_inline
28008 || info->first_fp_reg_save < 64)
28009 && !cfun->machine->lr_is_wrapped_separately);
28010
28011
28012 if (WORLD_SAVE_P (info))
28013 {
28014 int i, j;
28015 char rname[30];
28016 const char *alloc_rname;
28017 rtvec p;
28018
28019 /* eh_rest_world_r10 will return to the location saved in the LR
28020 stack slot (which is not likely to be our caller).
28021 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28022 rest_world is similar, except any R10 parameter is ignored.
28023 The exception-handling stuff that was here in 2.95 is no
28024 longer necessary. */
28025
28026 p = rtvec_alloc (9
28027 + 32 - info->first_gp_reg_save
28028 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28029 + 63 + 1 - info->first_fp_reg_save);
28030
28031 strcpy (rname, ((crtl->calls_eh_return) ?
28032 "*eh_rest_world_r10" : "*rest_world"));
28033 alloc_rname = ggc_strdup (rname);
28034
28035 j = 0;
28036 RTVEC_ELT (p, j++) = ret_rtx;
28037 RTVEC_ELT (p, j++)
28038 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28039 /* The instruction pattern requires a clobber here;
28040 it is shared with the restVEC helper. */
28041 RTVEC_ELT (p, j++)
28042 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
28043
28044 {
28045 /* CR register traditionally saved as CR2. */
28046 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28047 RTVEC_ELT (p, j++)
28048 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28049 if (flag_shrink_wrap)
28050 {
28051 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28052 gen_rtx_REG (Pmode, LR_REGNO),
28053 cfa_restores);
28054 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28055 }
28056 }
28057
28058 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28059 {
28060 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28061 RTVEC_ELT (p, j++)
28062 = gen_frame_load (reg,
28063 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28064 if (flag_shrink_wrap
28065 && save_reg_p (info->first_gp_reg_save + i))
28066 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28067 }
28068 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28069 {
28070 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28071 RTVEC_ELT (p, j++)
28072 = gen_frame_load (reg,
28073 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28074 if (flag_shrink_wrap
28075 && save_reg_p (info->first_altivec_reg_save + i))
28076 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28077 }
28078 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28079 {
28080 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
28081 info->first_fp_reg_save + i);
28082 RTVEC_ELT (p, j++)
28083 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28084 if (flag_shrink_wrap
28085 && save_reg_p (info->first_fp_reg_save + i))
28086 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28087 }
28088 RTVEC_ELT (p, j++)
28089 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
28090 RTVEC_ELT (p, j++)
28091 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
28092 RTVEC_ELT (p, j++)
28093 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
28094 RTVEC_ELT (p, j++)
28095 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
28096 RTVEC_ELT (p, j++)
28097 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28098 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28099
28100 if (flag_shrink_wrap)
28101 {
28102 REG_NOTES (insn) = cfa_restores;
28103 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28104 RTX_FRAME_RELATED_P (insn) = 1;
28105 }
28106 return;
28107 }
28108
28109 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28110 if (info->push_p)
28111 frame_off = info->total_size;
28112
28113 /* Restore AltiVec registers if we must do so before adjusting the
28114 stack. */
28115 if (info->altivec_size != 0
28116 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28117 || (DEFAULT_ABI != ABI_V4
28118 && offset_below_red_zone_p (info->altivec_save_offset))))
28119 {
28120 int i;
28121 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28122
28123 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28124 if (use_backchain_to_restore_sp)
28125 {
28126 int frame_regno = 11;
28127
28128 if ((strategy & REST_INLINE_VRS) == 0)
28129 {
28130 /* Of r11 and r12, select the one not clobbered by an
28131 out-of-line restore function for the frame register. */
28132 frame_regno = 11 + 12 - scratch_regno;
28133 }
28134 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28135 emit_move_insn (frame_reg_rtx,
28136 gen_rtx_MEM (Pmode, sp_reg_rtx));
28137 frame_off = 0;
28138 }
28139 else if (frame_pointer_needed)
28140 frame_reg_rtx = hard_frame_pointer_rtx;
28141
28142 if ((strategy & REST_INLINE_VRS) == 0)
28143 {
28144 int end_save = info->altivec_save_offset + info->altivec_size;
28145 int ptr_off;
28146 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28147 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28148
28149 if (end_save + frame_off != 0)
28150 {
28151 rtx offset = GEN_INT (end_save + frame_off);
28152
28153 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28154 }
28155 else
28156 emit_move_insn (ptr_reg, frame_reg_rtx);
28157
28158 ptr_off = -end_save;
28159 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28160 info->altivec_save_offset + ptr_off,
28161 0, V4SImode, SAVRES_VR);
28162 }
28163 else
28164 {
28165 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28166 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28167 {
28168 rtx addr, areg, mem, insn;
28169 rtx reg = gen_rtx_REG (V4SImode, i);
28170 HOST_WIDE_INT offset
28171 = (info->altivec_save_offset + frame_off
28172 + 16 * (i - info->first_altivec_reg_save));
28173
28174 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28175 {
28176 mem = gen_frame_mem (V4SImode,
28177 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28178 GEN_INT (offset)));
28179 insn = gen_rtx_SET (reg, mem);
28180 }
28181 else
28182 {
28183 areg = gen_rtx_REG (Pmode, 0);
28184 emit_move_insn (areg, GEN_INT (offset));
28185
28186 /* AltiVec addressing mode is [reg+reg]. */
28187 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28188 mem = gen_frame_mem (V4SImode, addr);
28189
28190 /* Rather than emitting a generic move, force use of the
28191 lvx instruction, which we always want. In particular we
28192 don't want lxvd2x/xxpermdi for little endian. */
28193 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28194 }
28195
28196 (void) emit_insn (insn);
28197 }
28198 }
28199
28200 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28201 if (((strategy & REST_INLINE_VRS) == 0
28202 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28203 && (flag_shrink_wrap
28204 || (offset_below_red_zone_p
28205 (info->altivec_save_offset
28206 + 16 * (i - info->first_altivec_reg_save))))
28207 && save_reg_p (i))
28208 {
28209 rtx reg = gen_rtx_REG (V4SImode, i);
28210 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28211 }
28212 }
28213
28214 /* Restore VRSAVE if we must do so before adjusting the stack. */
28215 if (info->vrsave_size != 0
28216 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28217 || (DEFAULT_ABI != ABI_V4
28218 && offset_below_red_zone_p (info->vrsave_save_offset))))
28219 {
28220 rtx reg;
28221
28222 if (frame_reg_rtx == sp_reg_rtx)
28223 {
28224 if (use_backchain_to_restore_sp)
28225 {
28226 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28227 emit_move_insn (frame_reg_rtx,
28228 gen_rtx_MEM (Pmode, sp_reg_rtx));
28229 frame_off = 0;
28230 }
28231 else if (frame_pointer_needed)
28232 frame_reg_rtx = hard_frame_pointer_rtx;
28233 }
28234
28235 reg = gen_rtx_REG (SImode, 12);
28236 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28237 info->vrsave_save_offset + frame_off));
28238
28239 emit_insn (generate_set_vrsave (reg, info, 1));
28240 }
28241
28242 insn = NULL_RTX;
28243 /* If we have a large stack frame, restore the old stack pointer
28244 using the backchain. */
28245 if (use_backchain_to_restore_sp)
28246 {
28247 if (frame_reg_rtx == sp_reg_rtx)
28248 {
28249 /* Under V.4, don't reset the stack pointer until after we're done
28250 loading the saved registers. */
28251 if (DEFAULT_ABI == ABI_V4)
28252 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28253
28254 insn = emit_move_insn (frame_reg_rtx,
28255 gen_rtx_MEM (Pmode, sp_reg_rtx));
28256 frame_off = 0;
28257 }
28258 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28259 && DEFAULT_ABI == ABI_V4)
28260 /* frame_reg_rtx has been set up by the altivec restore. */
28261 ;
28262 else
28263 {
28264 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28265 frame_reg_rtx = sp_reg_rtx;
28266 }
28267 }
28268 /* If we have a frame pointer, we can restore the old stack pointer
28269 from it. */
28270 else if (frame_pointer_needed)
28271 {
28272 frame_reg_rtx = sp_reg_rtx;
28273 if (DEFAULT_ABI == ABI_V4)
28274 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28275 /* Prevent reordering memory accesses against stack pointer restore. */
28276 else if (cfun->calls_alloca
28277 || offset_below_red_zone_p (-info->total_size))
28278 rs6000_emit_stack_tie (frame_reg_rtx, true);
28279
28280 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28281 GEN_INT (info->total_size)));
28282 frame_off = 0;
28283 }
28284 else if (info->push_p
28285 && DEFAULT_ABI != ABI_V4
28286 && !crtl->calls_eh_return)
28287 {
28288 /* Prevent reordering memory accesses against stack pointer restore. */
28289 if (cfun->calls_alloca
28290 || offset_below_red_zone_p (-info->total_size))
28291 rs6000_emit_stack_tie (frame_reg_rtx, false);
28292 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28293 GEN_INT (info->total_size)));
28294 frame_off = 0;
28295 }
28296 if (insn && frame_reg_rtx == sp_reg_rtx)
28297 {
28298 if (cfa_restores)
28299 {
28300 REG_NOTES (insn) = cfa_restores;
28301 cfa_restores = NULL_RTX;
28302 }
28303 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28304 RTX_FRAME_RELATED_P (insn) = 1;
28305 }
28306
28307 /* Restore AltiVec registers if we have not done so already. */
28308 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28309 && info->altivec_size != 0
28310 && (DEFAULT_ABI == ABI_V4
28311 || !offset_below_red_zone_p (info->altivec_save_offset)))
28312 {
28313 int i;
28314
28315 if ((strategy & REST_INLINE_VRS) == 0)
28316 {
28317 int end_save = info->altivec_save_offset + info->altivec_size;
28318 int ptr_off;
28319 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28320 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28321 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28322
28323 if (end_save + frame_off != 0)
28324 {
28325 rtx offset = GEN_INT (end_save + frame_off);
28326
28327 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28328 }
28329 else
28330 emit_move_insn (ptr_reg, frame_reg_rtx);
28331
28332 ptr_off = -end_save;
28333 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28334 info->altivec_save_offset + ptr_off,
28335 0, V4SImode, SAVRES_VR);
28336 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28337 {
28338 /* Frame reg was clobbered by out-of-line save. Restore it
28339 from ptr_reg, and if we are calling an out-of-line gpr or
28340 fpr restore, set up the correct pointer and offset. */
28341 unsigned newptr_regno = 1;
28342 if (!restoring_GPRs_inline)
28343 {
28344 bool lr = info->gp_save_offset + info->gp_size == 0;
28345 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28346 newptr_regno = ptr_regno_for_savres (sel);
28347 end_save = info->gp_save_offset + info->gp_size;
28348 }
28349 else if (!restoring_FPRs_inline)
28350 {
28351 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28352 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28353 newptr_regno = ptr_regno_for_savres (sel);
28354 end_save = info->fp_save_offset + info->fp_size;
28355 }
28356
28357 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28358 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28359
28360 if (end_save + ptr_off != 0)
28361 {
28362 rtx offset = GEN_INT (end_save + ptr_off);
28363
28364 frame_off = -end_save;
28365 if (TARGET_32BIT)
28366 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28367 ptr_reg, offset));
28368 else
28369 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28370 ptr_reg, offset));
28371 }
28372 else
28373 {
28374 frame_off = ptr_off;
28375 emit_move_insn (frame_reg_rtx, ptr_reg);
28376 }
28377 }
28378 }
28379 else
28380 {
28381 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28382 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28383 {
28384 rtx addr, areg, mem, insn;
28385 rtx reg = gen_rtx_REG (V4SImode, i);
28386 HOST_WIDE_INT offset
28387 = (info->altivec_save_offset + frame_off
28388 + 16 * (i - info->first_altivec_reg_save));
28389
28390 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28391 {
28392 mem = gen_frame_mem (V4SImode,
28393 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28394 GEN_INT (offset)));
28395 insn = gen_rtx_SET (reg, mem);
28396 }
28397 else
28398 {
28399 areg = gen_rtx_REG (Pmode, 0);
28400 emit_move_insn (areg, GEN_INT (offset));
28401
28402 /* AltiVec addressing mode is [reg+reg]. */
28403 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28404 mem = gen_frame_mem (V4SImode, addr);
28405
28406 /* Rather than emitting a generic move, force use of the
28407 lvx instruction, which we always want. In particular we
28408 don't want lxvd2x/xxpermdi for little endian. */
28409 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28410 }
28411
28412 (void) emit_insn (insn);
28413 }
28414 }
28415
28416 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28417 if (((strategy & REST_INLINE_VRS) == 0
28418 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28419 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28420 && save_reg_p (i))
28421 {
28422 rtx reg = gen_rtx_REG (V4SImode, i);
28423 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28424 }
28425 }
28426
28427 /* Restore VRSAVE if we have not done so already. */
28428 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28429 && info->vrsave_size != 0
28430 && (DEFAULT_ABI == ABI_V4
28431 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28432 {
28433 rtx reg;
28434
28435 reg = gen_rtx_REG (SImode, 12);
28436 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28437 info->vrsave_save_offset + frame_off));
28438
28439 emit_insn (generate_set_vrsave (reg, info, 1));
28440 }
28441
28442 /* If we exit by an out-of-line restore function on ABI_V4 then that
28443 function will deallocate the stack, so we don't need to worry
28444 about the unwinder restoring cr from an invalid stack frame
28445 location. */
28446 exit_func = (!restoring_FPRs_inline
28447 || (!restoring_GPRs_inline
28448 && info->first_fp_reg_save == 64));
28449
28450 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28451 *separate* slots if the routine calls __builtin_eh_return, so
28452 that they can be independently restored by the unwinder. */
28453 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28454 {
28455 int i, cr_off = info->ehcr_offset;
28456
28457 for (i = 0; i < 8; i++)
28458 if (!call_used_regs[CR0_REGNO + i])
28459 {
28460 rtx reg = gen_rtx_REG (SImode, 0);
28461 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28462 cr_off + frame_off));
28463
28464 insn = emit_insn (gen_movsi_to_cr_one
28465 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28466
28467 if (!exit_func && flag_shrink_wrap)
28468 {
28469 add_reg_note (insn, REG_CFA_RESTORE,
28470 gen_rtx_REG (SImode, CR0_REGNO + i));
28471
28472 RTX_FRAME_RELATED_P (insn) = 1;
28473 }
28474
28475 cr_off += reg_size;
28476 }
28477 }
28478
28479 /* Get the old lr if we saved it. If we are restoring registers
28480 out-of-line, then the out-of-line routines can do this for us. */
28481 if (restore_lr && restoring_GPRs_inline)
28482 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28483
28484 /* Get the old cr if we saved it. */
28485 if (info->cr_save_p)
28486 {
28487 unsigned cr_save_regno = 12;
28488
28489 if (!restoring_GPRs_inline)
28490 {
28491 /* Ensure we don't use the register used by the out-of-line
28492 gpr register restore below. */
28493 bool lr = info->gp_save_offset + info->gp_size == 0;
28494 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28495 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28496
28497 if (gpr_ptr_regno == 12)
28498 cr_save_regno = 11;
28499 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28500 }
28501 else if (REGNO (frame_reg_rtx) == 12)
28502 cr_save_regno = 11;
28503
28504 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28505 info->cr_save_offset + frame_off,
28506 exit_func);
28507 }
28508
28509 /* Set LR here to try to overlap restores below. */
28510 if (restore_lr && restoring_GPRs_inline)
28511 restore_saved_lr (0, exit_func);
28512
28513 /* Load exception handler data registers, if needed. */
28514 if (crtl->calls_eh_return)
28515 {
28516 unsigned int i, regno;
28517
28518 if (TARGET_AIX)
28519 {
28520 rtx reg = gen_rtx_REG (reg_mode, 2);
28521 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28522 frame_off + RS6000_TOC_SAVE_SLOT));
28523 }
28524
28525 for (i = 0; ; ++i)
28526 {
28527 rtx mem;
28528
28529 regno = EH_RETURN_DATA_REGNO (i);
28530 if (regno == INVALID_REGNUM)
28531 break;
28532
28533 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28534 info->ehrd_offset + frame_off
28535 + reg_size * (int) i);
28536
28537 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28538 }
28539 }
28540
28541 /* Restore GPRs. This is done as a PARALLEL if we are using
28542 the load-multiple instructions. */
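/* Illustratively, with -m32 and first_gp_reg_save == 13 the PARALLEL
built below can match a single load-multiple instruction:

    lmw 13,offset(frame)   ; reload r13..r31 in one go

(a sketch; this strategy is only used for 32-bit code, where a word
reload restores the full register). */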
28543 if (!restoring_GPRs_inline)
28544 {
28545 /* We are jumping to an out-of-line function. */
28546 rtx ptr_reg;
28547 int end_save = info->gp_save_offset + info->gp_size;
28548 bool can_use_exit = end_save == 0;
28549 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28550 int ptr_off;
28551
28552 /* Emit stack reset code if we need it. */
28553 ptr_regno = ptr_regno_for_savres (sel);
28554 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28555 if (can_use_exit)
28556 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28557 else if (end_save + frame_off != 0)
28558 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28559 GEN_INT (end_save + frame_off)));
28560 else if (REGNO (frame_reg_rtx) != ptr_regno)
28561 emit_move_insn (ptr_reg, frame_reg_rtx);
28562 if (REGNO (frame_reg_rtx) == ptr_regno)
28563 frame_off = -end_save;
28564
28565 if (can_use_exit && info->cr_save_p)
28566 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28567
28568 ptr_off = -end_save;
28569 rs6000_emit_savres_rtx (info, ptr_reg,
28570 info->gp_save_offset + ptr_off,
28571 info->lr_save_offset + ptr_off,
28572 reg_mode, sel);
28573 }
28574 else if (using_load_multiple)
28575 {
28576 rtvec p;
28577 p = rtvec_alloc (32 - info->first_gp_reg_save);
28578 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28579 RTVEC_ELT (p, i)
28580 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28581 frame_reg_rtx,
28582 info->gp_save_offset + frame_off + reg_size * i);
28583 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28584 }
28585 else
28586 {
28587 int offset = info->gp_save_offset + frame_off;
28588 for (i = info->first_gp_reg_save; i < 32; i++)
28589 {
28590 if (save_reg_p (i)
28591 && !cfun->machine->gpr_is_wrapped_separately[i])
28592 {
28593 rtx reg = gen_rtx_REG (reg_mode, i);
28594 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28595 }
28596
28597 offset += reg_size;
28598 }
28599 }
28600
28601 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28602 {
28603 /* If the frame pointer was used then we can't delay emitting
28604 a REG_CFA_DEF_CFA note. This must happen on the insn that
28605 restores the frame pointer, r31. We may have already emitted
28606 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28607 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28608 be harmless if emitted. */
28609 if (frame_pointer_needed)
28610 {
28611 insn = get_last_insn ();
28612 add_reg_note (insn, REG_CFA_DEF_CFA,
28613 plus_constant (Pmode, frame_reg_rtx, frame_off));
28614 RTX_FRAME_RELATED_P (insn) = 1;
28615 }
28616
28617 /* Set up cfa_restores. We always need these when
28618 shrink-wrapping. If not shrink-wrapping then we only need
28619 the cfa_restore when the stack location is no longer valid.
28620 The cfa_restores must be emitted on or before the insn that
28621 invalidates the stack, and of course must not be emitted
28622 before the insn that actually does the restore. The latter
28623 is why it is a bad idea to emit the cfa_restores as a group
28624 on the last instruction here that actually does a restore:
28625 that insn may be reordered with respect to others doing
28626 restores. */
28627 if (flag_shrink_wrap
28628 && !restoring_GPRs_inline
28629 && info->first_fp_reg_save == 64)
28630 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28631
28632 for (i = info->first_gp_reg_save; i < 32; i++)
28633 if (save_reg_p (i)
28634 && !cfun->machine->gpr_is_wrapped_separately[i])
28635 {
28636 rtx reg = gen_rtx_REG (reg_mode, i);
28637 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28638 }
28639 }
28640
28641 if (!restoring_GPRs_inline
28642 && info->first_fp_reg_save == 64)
28643 {
28644 /* We are jumping to an out-of-line function. */
28645 if (cfa_restores)
28646 emit_cfa_restores (cfa_restores);
28647 return;
28648 }
28649
28650 if (restore_lr && !restoring_GPRs_inline)
28651 {
28652 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28653 restore_saved_lr (0, exit_func);
28654 }
28655
28656 /* Restore fpr's if we need to do it without calling a function. */
28657 if (restoring_FPRs_inline)
28658 {
28659 int offset = info->fp_save_offset + frame_off;
28660 for (i = info->first_fp_reg_save; i < 64; i++)
28661 {
28662 if (save_reg_p (i)
28663 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28664 {
28665 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28666 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28667 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28668 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28669 cfa_restores);
28670 }
28671
28672 offset += fp_reg_size;
28673 }
28674 }
28675
28676 /* If we saved cr, restore it here. Just those that were used. */
28677 if (info->cr_save_p)
28678 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28679
28680 /* If this is V.4, unwind the stack pointer after all of the loads
28681 have been done, or set up r11 if we are restoring fp out of line. */
28682 ptr_regno = 1;
28683 if (!restoring_FPRs_inline)
28684 {
28685 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28686 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28687 ptr_regno = ptr_regno_for_savres (sel);
28688 }
28689
28690 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28691 if (REGNO (frame_reg_rtx) == ptr_regno)
28692 frame_off = 0;
28693
28694 if (insn && restoring_FPRs_inline)
28695 {
28696 if (cfa_restores)
28697 {
28698 REG_NOTES (insn) = cfa_restores;
28699 cfa_restores = NULL_RTX;
28700 }
28701 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28702 RTX_FRAME_RELATED_P (insn) = 1;
28703 }
28704
28705 if (crtl->calls_eh_return)
28706 {
28707 rtx sa = EH_RETURN_STACKADJ_RTX;
28708 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28709 }
28710
28711 if (!sibcall && restoring_FPRs_inline)
28712 {
28713 if (cfa_restores)
28714 {
28715 /* We can't hang the cfa_restores off a simple return,
28716 since the shrink-wrap code sometimes uses an existing
28717 return. This means there might be a path from
28718 pre-prologue code to this return, and dwarf2cfi code
28719 wants the eh_frame unwinder state to be the same on
28720 all paths to any point. So we need to emit the
28721 cfa_restores before the return. For -m64 we really
28722 don't need epilogue cfa_restores at all, except for
28723 this irritating dwarf2cfi-with-shrink-wrap
28724 requirement; the stack red-zone means eh_frame info
28725 from the prologue telling the unwinder to restore
28726 from the stack is perfectly good right to the end of
28727 the function. */
28728 emit_insn (gen_blockage ());
28729 emit_cfa_restores (cfa_restores);
28730 cfa_restores = NULL_RTX;
28731 }
28732
28733 emit_jump_insn (targetm.gen_simple_return ());
28734 }
28735
28736 if (!sibcall && !restoring_FPRs_inline)
28737 {
28738 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28739 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28740 int elt = 0;
28741 RTVEC_ELT (p, elt++) = ret_rtx;
28742 if (lr)
28743 RTVEC_ELT (p, elt++)
28744 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28745
28746 /* We have to restore more than two FP registers, so branch to the
28747 restore function. It will return to our caller. */
28748 int i;
28749 int reg;
28750 rtx sym;
28751
28752 if (flag_shrink_wrap)
28753 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28754
28755 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28756 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28757 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28758 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28759
28760 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28761 {
28762 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28763
28764 RTVEC_ELT (p, elt++)
28765 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28766 if (flag_shrink_wrap
28767 && save_reg_p (info->first_fp_reg_save + i))
28768 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28769 }
28770
28771 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28772 }
28773
28774 if (cfa_restores)
28775 {
28776 if (sibcall)
28777 /* Ensure the cfa_restores are hung off an insn that won't
28778 be reordered above other restores. */
28779 emit_insn (gen_blockage ());
28780
28781 emit_cfa_restores (cfa_restores);
28782 }
28783 }
28784
28785 /* Write function epilogue. */
28786
28787 static void
28788 rs6000_output_function_epilogue (FILE *file)
28789 {
28790 #if TARGET_MACHO
28791 macho_branch_islands ();
28792
28793 {
28794 rtx_insn *insn = get_last_insn ();
28795 rtx_insn *deleted_debug_label = NULL;
28796
28797 /* Mach-O doesn't support labels at the end of objects, so if
28798 it looks like we might want one, take special action.
28799
28800 First, collect any sequence of deleted debug labels. */
28801 while (insn
28802 && NOTE_P (insn)
28803 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28804 {
28805 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28806 notes; instead set their CODE_LABEL_NUMBER to -1,
28807 otherwise there would be code generation differences
28808 between -g and -g0. */
28809 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28810 deleted_debug_label = insn;
28811 insn = PREV_INSN (insn);
28812 }
28813
28814 /* Second, if we have:
28815 label:
28816 barrier
28817 then this needs to be detected, so skip past the barrier. */
28818
28819 if (insn && BARRIER_P (insn))
28820 insn = PREV_INSN (insn);
28821
28822 /* Up to now we've only seen notes or barriers. */
28823 if (insn)
28824 {
28825 if (LABEL_P (insn)
28826 || (NOTE_P (insn)
28827 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28828 /* Trailing label: <barrier>. */
28829 fputs ("\tnop\n", file);
28830 else
28831 {
28832 /* Lastly, see if we have a completely empty function body. */
28833 while (insn && ! INSN_P (insn))
28834 insn = PREV_INSN (insn);
28835 /* If we don't find any insns, we've got an empty function body;
28836 i.e. completely empty, without a return or branch. This is
28837 taken as the case where a function body has been removed
28838 because it contains an inline __builtin_unreachable(). GCC
28839 states that reaching __builtin_unreachable() means UB so we're
28840 not obliged to do anything special; however, we want
28841 non-zero-sized function bodies. To meet this, and help the
28842 user out, let's trap the case. */
28843 if (insn == NULL)
28844 fputs ("\ttrap\n", file);
28845 }
28846 }
28847 else if (deleted_debug_label)
28848 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28849 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28850 CODE_LABEL_NUMBER (insn) = -1;
28851 }
28852 #endif
28853
28854 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28855 on its format.
28856
28857 We don't output a traceback table if -finhibit-size-directive was
28858 used. The documentation for -finhibit-size-directive reads
28859 ``don't output a @code{.size} assembler directive, or anything
28860 else that would cause trouble if the function is split in the
28861 middle, and the two halves are placed at locations far apart in
28862 memory.'' The traceback table has this property, since it
28863 includes the offset from the start of the function to the
28864 traceback table itself.
28865
28866 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28867 different traceback table. */
28868 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28869 && ! flag_inhibit_size_directive
28870 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28871 {
28872 const char *fname = NULL;
28873 const char *language_string = lang_hooks.name;
28874 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28875 int i;
28876 int optional_tbtab;
28877 rs6000_stack_t *info = rs6000_stack_info ();
28878
28879 if (rs6000_traceback == traceback_full)
28880 optional_tbtab = 1;
28881 else if (rs6000_traceback == traceback_part)
28882 optional_tbtab = 0;
28883 else
28884 optional_tbtab = !optimize_size && !TARGET_ELF;
28885
28886 if (optional_tbtab)
28887 {
28888 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28889 while (*fname == '.') /* V.4 encodes . in the name */
28890 fname++;
28891
28892 /* Need label immediately before tbtab, so we can compute
28893 its offset from the function start. */
28894 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28895 ASM_OUTPUT_LABEL (file, fname);
28896 }
28897
28898 /* The .tbtab pseudo-op can only be used for the first eight
28899 expressions, since it can't handle the possibly variable
28900 length fields that follow. However, if you omit the optional
28901 fields, the assembler outputs zeros for all optional fields
28902 anyway, giving each variable length field its minimum length
28903 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28904 pseudo-op at all. */
28905
28906 /* An all-zero word flags the start of the tbtab, for debuggers
28907 that have to find it by searching forward from the entry
28908 point or from the current pc. */
28909 fputs ("\t.long 0\n", file);
28910
28911 /* Tbtab format type. Use format type 0. */
28912 fputs ("\t.byte 0,", file);
28913
28914 /* Language type. Unfortunately, there does not seem to be any
28915 official way to discover the language being compiled, so we
28916 use language_string.
28917 C is 0. Fortran is 1. Ada is 3. C++ is 9.
28918 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28919 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
28920 numbers either, so for now use 0. */
28921 if (lang_GNU_C ()
28922 || ! strcmp (language_string, "GNU GIMPLE")
28923 || ! strcmp (language_string, "GNU Go")
28924 || ! strcmp (language_string, "GNU D")
28925 || ! strcmp (language_string, "libgccjit"))
28926 i = 0;
28927 else if (! strcmp (language_string, "GNU F77")
28928 || lang_GNU_Fortran ())
28929 i = 1;
28930 else if (! strcmp (language_string, "GNU Ada"))
28931 i = 3;
28932 else if (lang_GNU_CXX ()
28933 || ! strcmp (language_string, "GNU Objective-C++"))
28934 i = 9;
28935 else if (! strcmp (language_string, "GNU Java"))
28936 i = 13;
28937 else if (! strcmp (language_string, "GNU Objective-C"))
28938 i = 14;
28939 else
28940 gcc_unreachable ();
28941 fprintf (file, "%d,", i);
28942
28943 /* 8 single bit fields: global linkage (not set for C extern linkage,
28944 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28945 from start of procedure stored in tbtab, internal function, function
28946 has controlled storage, function has no toc, function uses fp,
28947 function logs/aborts fp operations. */
28948 /* Assume that fp operations are used if any fp reg must be saved. */
28949 fprintf (file, "%d,",
28950 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28951
28952 /* 6 bitfields: function is interrupt handler, name present in
28953 proc table, function calls alloca, on condition directives
28954 (controls stack walks, 3 bits), saves condition reg, saves
28955 link reg. */
28956 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28957 set up as a frame pointer, even when there is no alloca call. */
28958 fprintf (file, "%d,",
28959 ((optional_tbtab << 6)
28960 | ((optional_tbtab & frame_pointer_needed) << 5)
28961 | (info->cr_save_p << 1)
28962 | (info->lr_save_p)));
28963
28964 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28965 (6 bits). */
28966 fprintf (file, "%d,",
28967 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28968
28969 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28970 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28971
28972 if (optional_tbtab)
28973 {
28974 /* Compute the parameter info from the function decl argument
28975 list. */
28976 tree decl;
28977 int next_parm_info_bit = 31;
28978
28979 for (decl = DECL_ARGUMENTS (current_function_decl);
28980 decl; decl = DECL_CHAIN (decl))
28981 {
28982 rtx parameter = DECL_INCOMING_RTL (decl);
28983 machine_mode mode = GET_MODE (parameter);
28984
28985 if (GET_CODE (parameter) == REG)
28986 {
28987 if (SCALAR_FLOAT_MODE_P (mode))
28988 {
28989 int bits;
28990
28991 float_parms++;
28992
28993 switch (mode)
28994 {
28995 case E_SFmode:
28996 case E_SDmode:
28997 bits = 0x2;
28998 break;
28999
29000 case E_DFmode:
29001 case E_DDmode:
29002 case E_TFmode:
29003 case E_TDmode:
29004 case E_IFmode:
29005 case E_KFmode:
29006 bits = 0x3;
29007 break;
29008
29009 default:
29010 gcc_unreachable ();
29011 }
29012
29013 /* If only one bit will fit, don't or in this entry. */
29014 if (next_parm_info_bit > 0)
29015 parm_info |= (bits << (next_parm_info_bit - 1));
29016 next_parm_info_bit -= 2;
29017 }
29018 else
29019 {
29020 fixed_parms += ((GET_MODE_SIZE (mode)
29021 + (UNITS_PER_WORD - 1))
29022 / UNITS_PER_WORD);
29023 next_parm_info_bit -= 1;
29024 }
29025 }
29026 }
29027 }
29028
29029 /* Number of fixed point parameters. */
29030 /* This is actually the number of words of fixed point parameters;
29031 an 8-byte struct counts as 2, so the maximum value is 8. */
29032 fprintf (file, "%d,", fixed_parms);
29033
29034 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29035 all on stack. */
29036 /* This is actually the number of fp registers that hold parameters;
29037 and thus the maximum value is 13. */
29038 /* Set parameters on stack bit if parameters are not in their original
29039 registers, regardless of whether they are on the stack? Xlc
29040 seems to set the bit when not optimizing. */
29041 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29042
29043 if (optional_tbtab)
29044 {
29045 /* Optional fields follow. Some are variable length. */
29046
29047 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29048 float, 11 double float. */
29049 /* There is an entry for each parameter in a register, in the order
29050 that they occur in the parameter list. Any intervening arguments
29051 on the stack are ignored. If the list overflows a long (max
29052 possible length 34 bits) then completely leave off all elements
29053 that don't fit. */
29054 /* Only emit this long if there was at least one parameter. */
29055 if (fixed_parms || float_parms)
29056 fprintf (file, "\t.long %d\n", parm_info);
29057
29058 /* Offset from start of code to tb table. */
29059 fputs ("\t.long ", file);
29060 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29061 RS6000_OUTPUT_BASENAME (file, fname);
29062 putc ('-', file);
29063 rs6000_output_function_entry (file, fname);
29064 putc ('\n', file);
29065
29066 /* Interrupt handler mask. */
29067 /* Omit this long, since we never set the interrupt handler bit
29068 above. */
29069
29070 /* Number of CTL (controlled storage) anchors. */
29071 /* Omit this long, since the has_ctl bit is never set above. */
29072
29073 /* Displacement into stack of each CTL anchor. */
29074 /* Omit this list of longs, because there are no CTL anchors. */
29075
29076 /* Length of function name. */
29077 if (*fname == '*')
29078 ++fname;
29079 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29080
29081 /* Function name. */
29082 assemble_string (fname, strlen (fname));
29083
29084 /* Register for alloca automatic storage; this is always reg 31.
29085 Only emit this if the alloca bit was set above. */
29086 if (frame_pointer_needed)
29087 fputs ("\t.byte 31\n", file);
29088
29089 fputs ("\t.align 2\n", file);
29090 }
29091 }
29092
29093 /* Arrange to define .LCTOC1 label, if not already done. */
29094 if (need_toc_init)
29095 {
29096 need_toc_init = 0;
29097 if (!toc_initialized)
29098 {
29099 switch_to_section (toc_section);
29100 switch_to_section (current_function_section ());
29101 }
29102 }
29103 }
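
/* As an illustration of the parm_info packing above (a standalone
   sketch, not part of the build; the names below are invented for
   illustration): for a function whose register parameters are
   (int, double, float), the loop yields fixed_parms = 1 and
   float_parms = 2, and fills the word from bit 31 downward:
   bit 31 = 0 (fixed), bits 30-29 = 11 (double float),
   bits 28-27 = 10 (single float).  */
#if 0
#include <stdio.h>

int
main (void)
{
  int parm_info = 0;
  int next_parm_info_bit = 31;

  /* int parameter: one bit, left as 0.  */
  next_parm_info_bit -= 1;
  /* double parameter: two bits, 0x3.  */
  parm_info |= 0x3 << (next_parm_info_bit - 1);
  next_parm_info_bit -= 2;
  /* float parameter: two bits, 0x2.  */
  parm_info |= 0x2 << (next_parm_info_bit - 1);
  next_parm_info_bit -= 2;

  printf ("0x%08x\n", (unsigned) parm_info);	/* Prints 0x70000000.  */
  return 0;
}
#endif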
29104
29105 /* -fsplit-stack support. */
29106
29107 /* A SYMBOL_REF for __morestack. */
29108 static GTY(()) rtx morestack_ref;
29109
29110 static rtx
29111 gen_add3_const (rtx rt, rtx ra, long c)
29112 {
29113 if (TARGET_64BIT)
29114 return gen_adddi3 (rt, ra, GEN_INT (c));
29115 else
29116 return gen_addsi3 (rt, ra, GEN_INT (c));
29117 }
29118
29119 /* Emit -fsplit-stack prologue, which goes before the regular function
29120 prologue (at local entry point in the case of ELFv2). */
29121
29122 void
29123 rs6000_expand_split_stack_prologue (void)
29124 {
29125 rs6000_stack_t *info = rs6000_stack_info ();
29126 unsigned HOST_WIDE_INT allocate;
29127 long alloc_hi, alloc_lo;
29128 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29129 rtx_insn *insn;
29130
29131 gcc_assert (flag_split_stack && reload_completed);
29132
29133 if (!info->push_p)
29134 return;
29135
29136 if (global_regs[29])
29137 {
29138 error ("%qs uses register r29", "-fsplit-stack");
29139 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29140 "conflicts with %qD", global_regs_decl[29]);
29141 }
29142
29143 allocate = info->total_size;
29144 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29145 {
29146 sorry ("stack frame larger than 2G is not supported for -fsplit-stack");
29147 return;
29148 }
29149 if (morestack_ref == NULL_RTX)
29150 {
29151 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29152 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29153 | SYMBOL_FLAG_FUNCTION);
29154 }
29155
29156 r0 = gen_rtx_REG (Pmode, 0);
29157 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29158 r12 = gen_rtx_REG (Pmode, 12);
29159 emit_insn (gen_load_split_stack_limit (r0));
29160 /* Always emit two insns here to calculate the requested stack,
29161 so that the linker can edit them when adjusting size for calling
29162 non-split-stack code. */
29163 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29164 alloc_lo = -allocate - alloc_hi;
29165 if (alloc_hi != 0)
29166 {
29167 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29168 if (alloc_lo != 0)
29169 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29170 else
29171 emit_insn (gen_nop ());
29172 }
29173 else
29174 {
29175 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29176 emit_insn (gen_nop ());
29177 }
29178
29179 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29180 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29181 ok_label = gen_label_rtx ();
29182 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29183 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29184 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29185 pc_rtx);
29186 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29187 JUMP_LABEL (insn) = ok_label;
29188 /* Mark the jump as very likely to be taken. */
29189 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29190
29191 lr = gen_rtx_REG (Pmode, LR_REGNO);
29192 insn = emit_move_insn (r0, lr);
29193 RTX_FRAME_RELATED_P (insn) = 1;
29194 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29195 RTX_FRAME_RELATED_P (insn) = 1;
29196
29197 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29198 const0_rtx, const0_rtx));
29199 call_fusage = NULL_RTX;
29200 use_reg (&call_fusage, r12);
29201 /* Say the call uses r0, even though it doesn't, to stop regrename
29202 from twiddling with the insns saving lr, trashing args for cfun.
29203 The insns restoring lr are similarly protected by making
29204 split_stack_return use r0. */
29205 use_reg (&call_fusage, r0);
29206 add_function_usage_to (insn, call_fusage);
29207 /* Indicate that this function can't jump to non-local gotos. */
29208 make_reg_eh_region_note_nothrow_nononlocal (insn);
29209 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29210 insn = emit_move_insn (lr, r0);
29211 add_reg_note (insn, REG_CFA_RESTORE, lr);
29212 RTX_FRAME_RELATED_P (insn) = 1;
29213 emit_insn (gen_split_stack_return ());
29214
29215 emit_label (ok_label);
29216 LABEL_NUSES (ok_label) = 1;
29217 }
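
/* A worked example of the hi/lo split above (illustration only):
   for allocate = 0x12345,
     alloc_hi = (-0x12345 + 0x8000) & ~0xffffL  = -0x10000
     alloc_lo = -0x12345 - (-0x10000)           = -0x2345
   so alloc_hi + alloc_lo == -allocate, alloc_hi is a multiple of
   0x10000 (suitable for a single addis), and alloc_lo fits in the
   signed 16-bit addi field.  The +0x8000 rounds the high part so
   that alloc_lo always lands in [-0x8000, 0x7fff].  */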
29218
29219 /* Return the internal arg pointer used for function incoming
29220 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29221 to copy it to a pseudo in order for it to be preserved over calls
29222 and suchlike. We'd really like to use a pseudo here for the
29223 internal arg pointer but data-flow analysis is not prepared to
29224 accept pseudos as live at the beginning of a function. */
29225
29226 static rtx
29227 rs6000_internal_arg_pointer (void)
29228 {
29229 if (flag_split_stack
29230 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29231 == NULL))
29233 {
29234 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29235 {
29236 rtx pat;
29237
29238 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29239 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29240
29241 /* Put the pseudo initialization right after the note at the
29242 beginning of the function. */
29243 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29244 gen_rtx_REG (Pmode, 12));
29245 push_topmost_sequence ();
29246 emit_insn_after (pat, get_insns ());
29247 pop_topmost_sequence ();
29248 }
29249 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29250 FIRST_PARM_OFFSET (current_function_decl));
29251 return copy_to_reg (ret);
29252 }
29253 return virtual_incoming_args_rtx;
29254 }
29255
29256 /* We may have to tell the dataflow pass that the split stack prologue
29257 is initializing a register. */
29258
29259 static void
29260 rs6000_live_on_entry (bitmap regs)
29261 {
29262 if (flag_split_stack)
29263 bitmap_set_bit (regs, 12);
29264 }
29265
29266 /* Emit -fsplit-stack dynamic stack allocation space check. */
29267
29268 void
29269 rs6000_split_stack_space_check (rtx size, rtx label)
29270 {
29271 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29272 rtx limit = gen_reg_rtx (Pmode);
29273 rtx requested = gen_reg_rtx (Pmode);
29274 rtx cmp = gen_reg_rtx (CCUNSmode);
29275 rtx jump;
29276
29277 emit_insn (gen_load_split_stack_limit (limit));
29278 if (CONST_INT_P (size))
29279 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29280 else
29281 {
29282 size = force_reg (Pmode, size);
29283 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29284 }
29285 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29286 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29287 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29288 gen_rtx_LABEL_REF (VOIDmode, label),
29289 pc_rtx);
29290 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29291 JUMP_LABEL (jump) = label;
29292 }
29293 \f
29294 /* A C compound statement that outputs the assembler code for a thunk
29295 function, used to implement C++ virtual function calls with
29296 multiple inheritance. The thunk acts as a wrapper around a virtual
29297 function, adjusting the implicit object parameter before handing
29298 control off to the real function.
29299
29300 First, emit code to add the integer DELTA to the location that
29301 contains the incoming first argument. Assume that this argument
29302 contains a pointer, and is the one used to pass the `this' pointer
29303 in C++. This is the incoming argument *before* the function
29304 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29305 values of all other incoming arguments.
29306
29307 After the addition, emit code to jump to FUNCTION, which is a
29308 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29309 not touch the return address. Hence returning from FUNCTION will
29310 return to whoever called the current `thunk'.
29311
29312 The effect must be as if FUNCTION had been called directly with the
29313 adjusted first argument. This macro is responsible for emitting
29314 all of the code for a thunk function; output_function_prologue()
29315 and output_function_epilogue() are not invoked.
29316
29317 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29318 been extracted from it.) It might possibly be useful on some
29319 targets, but probably not.
29320
29321 If you do not define this macro, the target-independent code in the
29322 C++ frontend will generate a less efficient heavyweight thunk that
29323 calls FUNCTION instead of jumping to it. The generic approach does
29324 not support varargs. */
29325
29326 static void
29327 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29328 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29329 tree function)
29330 {
29331 rtx this_rtx, funexp;
29332 rtx_insn *insn;
29333
29334 reload_completed = 1;
29335 epilogue_completed = 1;
29336
29337 /* Mark the end of the (empty) prologue. */
29338 emit_note (NOTE_INSN_PROLOGUE_END);
29339
29340 /* Find the "this" pointer. If the function returns a structure,
29341 the structure return pointer is in r3. */
29342 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29343 this_rtx = gen_rtx_REG (Pmode, 4);
29344 else
29345 this_rtx = gen_rtx_REG (Pmode, 3);
29346
29347 /* Apply the constant offset, if required. */
29348 if (delta)
29349 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29350
29351 /* Apply the offset from the vtable, if required. */
29352 if (vcall_offset)
29353 {
29354 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29355 rtx tmp = gen_rtx_REG (Pmode, 12);
29356
29357 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29358 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29359 {
29360 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29361 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29362 }
29363 else
29364 {
29365 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29366
29367 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29368 }
29369 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29370 }
29371
29372 /* Generate a tail call to the target function. */
29373 if (!TREE_USED (function))
29374 {
29375 assemble_external (function);
29376 TREE_USED (function) = 1;
29377 }
29378 funexp = XEXP (DECL_RTL (function), 0);
29379 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29380
29381 #if TARGET_MACHO
29382 if (MACHOPIC_INDIRECT)
29383 funexp = machopic_indirect_call_target (funexp);
29384 #endif
29385
29386 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29387 generate sibcall RTL explicitly. */
29388 insn = emit_call_insn (
29389 gen_rtx_PARALLEL (VOIDmode,
29390 gen_rtvec (3,
29391 gen_rtx_CALL (VOIDmode,
29392 funexp, const0_rtx),
29393 gen_rtx_USE (VOIDmode, const0_rtx),
29394 simple_return_rtx)));
29395 SIBLING_CALL_P (insn) = 1;
29396 emit_barrier ();
29397
29398 /* Run just enough of rest_of_compilation to get the insns emitted.
29399 There's not really enough bulk here to make other passes such as
29400 instruction scheduling worth while. Note that use_thunk calls
29401 assemble_start_function and assemble_end_function. */
29402 insn = get_insns ();
29403 shorten_branches (insn);
29404 final_start_function (insn, file, 1);
29405 final (insn, file, 1);
29406 final_end_function ();
29407
29408 reload_completed = 0;
29409 epilogue_completed = 0;
29410 }
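
/* The pointer adjustment the thunk performs corresponds to this C
   sketch (an illustration of the semantics only, not code GCC runs;
   adjust_this is an invented name):  */
#if 0
#include <stddef.h>

static void *
adjust_this (void *this_ptr, ptrdiff_t delta, ptrdiff_t vcall_offset)
{
  /* Constant adjustment known at compile time.  */
  this_ptr = (char *) this_ptr + delta;

  /* Further adjustment loaded from the object's vtable.  */
  if (vcall_offset != 0)
    {
      char *vtbl = *(char **) this_ptr;
      this_ptr = (char *) this_ptr + *(ptrdiff_t *) (vtbl + vcall_offset);
    }
  return this_ptr;
}
#endif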
29411 \f
29412 /* A quick summary of the various types of 'constant-pool tables'
29413 under PowerPC:
29414
29415    Target      Flags            Name              One table per
29416    AIX         (none)           AIX TOC           object file
29417    AIX         -mfull-toc       AIX TOC           object file
29418    AIX         -mminimal-toc    AIX minimal TOC   translation unit
29419    SVR4/EABI   (none)           SVR4 SDATA        object file
29420    SVR4/EABI   -fpic            SVR4 pic          object file
29421    SVR4/EABI   -fPIC            SVR4 PIC          translation unit
29422    SVR4/EABI   -mrelocatable    EABI TOC          function
29423    SVR4/EABI   -maix            AIX TOC           object file
29424    SVR4/EABI   -maix -mminimal-toc
29425                                 AIX minimal TOC   translation unit
29426
29427    Name              Reg.   Set by   entries   contains:
29428                                      made by   addrs?   fp?      sum?
29429
29430    AIX TOC           2      crt0     as        Y        option   option
29431    AIX minimal TOC   30     prolog   gcc       Y        Y        option
29432    SVR4 SDATA        13     crt0     gcc       N        Y        N
29433    SVR4 pic          30     prolog   ld        Y        not yet  N
29434    SVR4 PIC          30     prolog   gcc       Y        option   option
29435    EABI TOC          30     prolog   gcc       Y        option   option
29436
29437 */
29438
29439 /* Hash functions for the hash table. */
29440
29441 static unsigned
29442 rs6000_hash_constant (rtx k)
29443 {
29444 enum rtx_code code = GET_CODE (k);
29445 machine_mode mode = GET_MODE (k);
29446 unsigned result = (code << 3) ^ mode;
29447 const char *format;
29448 int flen, fidx;
29449
29450 format = GET_RTX_FORMAT (code);
29451 flen = strlen (format);
29452 fidx = 0;
29453
29454 switch (code)
29455 {
29456 case LABEL_REF:
29457 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29458
29459 case CONST_WIDE_INT:
29460 {
29461 int i;
29462 flen = CONST_WIDE_INT_NUNITS (k);
29463 for (i = 0; i < flen; i++)
29464 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29465 return result;
29466 }
29467
29468 case CONST_DOUBLE:
29469 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29470
29471 case CODE_LABEL:
29472 fidx = 3;
29473 break;
29474
29475 default:
29476 break;
29477 }
29478
29479 for (; fidx < flen; fidx++)
29480 switch (format[fidx])
29481 {
29482 case 's':
29483 {
29484 unsigned i, len;
29485 const char *str = XSTR (k, fidx);
29486 len = strlen (str);
29487 result = result * 613 + len;
29488 for (i = 0; i < len; i++)
29489 result = result * 613 + (unsigned) str[i];
29490 break;
29491 }
29492 case 'u':
29493 case 'e':
29494 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29495 break;
29496 case 'i':
29497 case 'n':
29498 result = result * 613 + (unsigned) XINT (k, fidx);
29499 break;
29500 case 'w':
29501 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29502 result = result * 613 + (unsigned) XWINT (k, fidx);
29503 else
29504 {
29505 size_t i;
29506 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29507 result = result * 613 + (unsigned) (XWINT (k, fidx)
29508 >> CHAR_BIT * i);
29509 }
29510 break;
29511 case '0':
29512 break;
29513 default:
29514 gcc_unreachable ();
29515 }
29516
29517 return result;
29518 }
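
/* The scheme above is a plain multiplicative hash: each component is
   folded in as result = result * K + component, with K = 613 for data
   and K = 1231 for structural links.  A minimal standalone version of
   the string ('s') case, for illustration only (hash_string_613 is an
   invented name):  */
#if 0
#include <string.h>

static unsigned
hash_string_613 (unsigned result, const char *str)
{
  size_t i, len = strlen (str);

  result = result * 613 + (unsigned) len;
  for (i = 0; i < len; i++)
    result = result * 613 + (unsigned) str[i];
  return result;
}
#endif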
29519
29520 hashval_t
29521 toc_hasher::hash (toc_hash_struct *thc)
29522 {
29523 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29524 }
29525
29526 /* Compare H1 and H2 for equivalence. */
29527
29528 bool
29529 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29530 {
29531 rtx r1 = h1->key;
29532 rtx r2 = h2->key;
29533
29534 if (h1->key_mode != h2->key_mode)
29535 return 0;
29536
29537 return rtx_equal_p (r1, r2);
29538 }
29539
29540 /* These are the names given by the C++ front-end to vtables, and
29541 vtable-like objects. Ideally, this logic should not be here;
29542 instead, there should be some programmatic way of inquiring as
29543 to whether or not an object is a vtable. Note that the macro
body below tests the variable `name' in the caller's scope rather
than its NAME argument. */
29544
29545 #define VTABLE_NAME_P(NAME) \
29546 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29547 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29548 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29549 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29550 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
29551
29552 #ifdef NO_DOLLAR_IN_LABEL
29553 /* Return a GGC-allocated character string translating dollar signs in
29554 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29555
29556 const char *
29557 rs6000_xcoff_strip_dollar (const char *name)
29558 {
29559 char *strip, *p;
29560 const char *q;
29561 size_t len;
29562
29563 q = (const char *) strchr (name, '$');
29564
29565 if (q == 0 || q == name)
29566 return name;
29567
29568 len = strlen (name);
29569 strip = XALLOCAVEC (char, len + 1);
29570 strcpy (strip, name);
29571 p = strip + (q - name);
29572 while (p)
29573 {
29574 *p = '_';
29575 p = strchr (p + 1, '$');
29576 }
29577
29578 return ggc_alloc_string (strip, len);
29579 }
29580 #endif
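
/* For example (illustrative): rs6000_xcoff_strip_dollar ("foo$bar$baz")
   returns "foo_bar_baz", while a name containing no '$', or one whose
   first character is '$', is returned unchanged.  */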
29581
29582 void
29583 rs6000_output_symbol_ref (FILE *file, rtx x)
29584 {
29585 const char *name = XSTR (x, 0);
29586
29587 /* Currently C++ toc references to vtables can be emitted before it
29588 is decided whether the vtable is public or private. If this is
29589 the case, then the linker will eventually complain that there is
29590 a reference to an unknown section. Thus, for vtables only,
29591 we emit the TOC reference to reference the identifier and not the
29592 symbol. */
29593 if (VTABLE_NAME_P (name))
29594 {
29595 RS6000_OUTPUT_BASENAME (file, name);
29596 }
29597 else
29598 assemble_name (file, name);
29599 }
29600
29601 /* Output a TOC entry. We derive the entry name from what is being
29602 written. */
29603
29604 void
29605 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29606 {
29607 char buf[256];
29608 const char *name = buf;
29609 rtx base = x;
29610 HOST_WIDE_INT offset = 0;
29611
29612 gcc_assert (!TARGET_NO_TOC);
29613
29614 /* When the linker won't eliminate them, don't output duplicate
29615 TOC entries (this happens on AIX if there is any kind of TOC,
29616 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29617 CODE_LABELs. */
29618 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29619 {
29620 struct toc_hash_struct *h;
29621
29622 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29623 time because GGC is not initialized at that point. */
29624 if (toc_hash_table == NULL)
29625 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29626
29627 h = ggc_alloc<toc_hash_struct> ();
29628 h->key = x;
29629 h->key_mode = mode;
29630 h->labelno = labelno;
29631
29632 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29633 if (*found == NULL)
29634 *found = h;
29635 else /* This is indeed a duplicate.
29636 Set this label equal to that label. */
29637 {
29638 fputs ("\t.set ", file);
29639 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29640 fprintf (file, "%d,", labelno);
29641 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29642 fprintf (file, "%d\n", ((*found)->labelno));
29643
29644 #ifdef HAVE_AS_TLS
29645 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29646 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29647 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29648 {
29649 fputs ("\t.set ", file);
29650 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29651 fprintf (file, "%d,", labelno);
29652 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29653 fprintf (file, "%d\n", ((*found)->labelno));
29654 }
29655 #endif
29656 return;
29657 }
29658 }
29659
29660 /* If we're going to put a double constant in the TOC, make sure it's
29661 aligned properly when strict alignment is on. */
29662 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29663 && STRICT_ALIGNMENT
29664 && GET_MODE_BITSIZE (mode) >= 64
29665 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29666 ASM_OUTPUT_ALIGN (file, 3);
29668
29669 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29670
29671 /* Handle FP constants specially. Note that if we have a minimal
29672 TOC, things we put here aren't actually in the TOC, so we can allow
29673 FP constants. */
29674 if (CONST_DOUBLE_P (x)
29675 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29676 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29677 {
29678 long k[4];
29679
29680 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29681 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29682 else
29683 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29684
29685 if (TARGET_64BIT)
29686 {
29687 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29688 fputs (DOUBLE_INT_ASM_OP, file);
29689 else
29690 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29691 k[0] & 0xffffffff, k[1] & 0xffffffff,
29692 k[2] & 0xffffffff, k[3] & 0xffffffff);
29693 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29694 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29695 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29696 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29697 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29698 return;
29699 }
29700 else
29701 {
29702 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29703 fputs ("\t.long ", file);
29704 else
29705 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29706 k[0] & 0xffffffff, k[1] & 0xffffffff,
29707 k[2] & 0xffffffff, k[3] & 0xffffffff);
29708 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29709 k[0] & 0xffffffff, k[1] & 0xffffffff,
29710 k[2] & 0xffffffff, k[3] & 0xffffffff);
29711 return;
29712 }
29713 }
29714 else if (CONST_DOUBLE_P (x)
29715 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29716 {
29717 long k[2];
29718
29719 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29720 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29721 else
29722 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29723
29724 if (TARGET_64BIT)
29725 {
29726 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29727 fputs (DOUBLE_INT_ASM_OP, file);
29728 else
29729 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29730 k[0] & 0xffffffff, k[1] & 0xffffffff);
29731 fprintf (file, "0x%lx%08lx\n",
29732 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29733 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29734 return;
29735 }
29736 else
29737 {
29738 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29739 fputs ("\t.long ", file);
29740 else
29741 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29742 k[0] & 0xffffffff, k[1] & 0xffffffff);
29743 fprintf (file, "0x%lx,0x%lx\n",
29744 k[0] & 0xffffffff, k[1] & 0xffffffff);
29745 return;
29746 }
29747 }
29748 else if (CONST_DOUBLE_P (x)
29749 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29750 {
29751 long l;
29752
29753 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29754 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29755 else
29756 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29757
29758 if (TARGET_64BIT)
29759 {
29760 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29761 fputs (DOUBLE_INT_ASM_OP, file);
29762 else
29763 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29764 if (WORDS_BIG_ENDIAN)
29765 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29766 else
29767 fprintf (file, "0x%lx\n", l & 0xffffffff);
29768 return;
29769 }
29770 else
29771 {
29772 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29773 fputs ("\t.long ", file);
29774 else
29775 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29776 fprintf (file, "0x%lx\n", l & 0xffffffff);
29777 return;
29778 }
29779 }
29780 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29781 {
29782 unsigned HOST_WIDE_INT low;
29783 HOST_WIDE_INT high;
29784
29785 low = INTVAL (x) & 0xffffffff;
29786 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29787
29788 /* TOC entries are always Pmode-sized, so when big-endian
29789 smaller integer constants in the TOC need to be padded.
29790 (This is still a win over putting the constants in
29791 a separate constant pool, because then we'd have
29792 to have both a TOC entry _and_ the actual constant.)
29793
29794 For a 32-bit target, CONST_INT values are loaded and shifted
29795 entirely within `low' and can be stored in one TOC entry. */
29796
29797 /* It would be easy to make this work, but it doesn't now. */
29798 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29799
29800 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29801 {
29802 low |= high << 32;
29803 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29804 high = (HOST_WIDE_INT) low >> 32;
29805 low &= 0xffffffff;
29806 }
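
/* A worked example of the padding above (illustration): an SImode
   constant 0x12345678 in a 64-bit big-endian TOC starts as
   low = 0x12345678, high = 0.  Shifting left by
   POINTER_SIZE - GET_MODE_BITSIZE (mode) = 32 bits gives
   high = 0x12345678, low = 0, so the doubleword emitted below is
   0x1234567800000000 and the value sits in the high-order word,
   where a 32-bit load from the TOC entry's address expects it.  */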
29807
29808 if (TARGET_64BIT)
29809 {
29810 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29811 fputs (DOUBLE_INT_ASM_OP, file);
29812 else
29813 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29814 (long) high & 0xffffffff, (long) low & 0xffffffff);
29815 fprintf (file, "0x%lx%08lx\n",
29816 (long) high & 0xffffffff, (long) low & 0xffffffff);
29817 return;
29818 }
29819 else
29820 {
29821 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29822 {
29823 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29824 fputs ("\t.long ", file);
29825 else
29826 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29827 (long) high & 0xffffffff, (long) low & 0xffffffff);
29828 fprintf (file, "0x%lx,0x%lx\n",
29829 (long) high & 0xffffffff, (long) low & 0xffffffff);
29830 }
29831 else
29832 {
29833 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29834 fputs ("\t.long ", file);
29835 else
29836 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29837 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29838 }
29839 return;
29840 }
29841 }
29842
29843 if (GET_CODE (x) == CONST)
29844 {
29845 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29846 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29847
29848 base = XEXP (XEXP (x, 0), 0);
29849 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29850 }
29851
29852 switch (GET_CODE (base))
29853 {
29854 case SYMBOL_REF:
29855 name = XSTR (base, 0);
29856 break;
29857
29858 case LABEL_REF:
29859 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29860 CODE_LABEL_NUMBER (XEXP (base, 0)));
29861 break;
29862
29863 case CODE_LABEL:
29864 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29865 break;
29866
29867 default:
29868 gcc_unreachable ();
29869 }
29870
29871 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29872 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29873 else
29874 {
29875 fputs ("\t.tc ", file);
29876 RS6000_OUTPUT_BASENAME (file, name);
29877
29878 if (offset < 0)
29879 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29880 else if (offset)
29881 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29882
29883 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29884 after other TOC symbols, reducing overflow of small TOC access
29885 to [TC] symbols. */
29886 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29887 ? "[TE]," : "[TC],", file);
29888 }
29889
29890 /* Currently C++ toc references to vtables can be emitted before it
29891 is decided whether the vtable is public or private. If this is
29892 the case, then the linker will eventually complain that there is
29893 a TOC reference to an unknown section. Thus, for vtables only,
29894 we emit the TOC reference to reference the symbol and not the
29895 section. */
29896 if (VTABLE_NAME_P (name))
29897 {
29898 RS6000_OUTPUT_BASENAME (file, name);
29899 if (offset < 0)
29900 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29901 else if (offset > 0)
29902 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29903 }
29904 else
29905 output_addr_const (file, x);
29906
29907 #if HAVE_AS_TLS
29908 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29909 {
29910 switch (SYMBOL_REF_TLS_MODEL (base))
29911 {
29912 case 0:
29913 break;
29914 case TLS_MODEL_LOCAL_EXEC:
29915 fputs ("@le", file);
29916 break;
29917 case TLS_MODEL_INITIAL_EXEC:
29918 fputs ("@ie", file);
29919 break;
29920 /* Use global-dynamic for local-dynamic. */
29921 case TLS_MODEL_GLOBAL_DYNAMIC:
29922 case TLS_MODEL_LOCAL_DYNAMIC:
29923 putc ('\n', file);
29924 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29925 fputs ("\t.tc .", file);
29926 RS6000_OUTPUT_BASENAME (file, name);
29927 fputs ("[TC],", file);
29928 output_addr_const (file, x);
29929 fputs ("@m", file);
29930 break;
29931 default:
29932 gcc_unreachable ();
29933 }
29934 }
29935 #endif
29936
29937 putc ('\n', file);
29938 }
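
/* For example (illustrative): if label 7 would duplicate the entry
   already recorded for label 3, the duplicate branch above emits

   .set LC..7,LC..3

   (the exact spelling of the prefix comes from
   ASM_OUTPUT_INTERNAL_LABEL_PREFIX) instead of a second TOC entry,
   so both labels name a single slot.  */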
29939 \f
29940 /* Output an assembler pseudo-op to write an ASCII string of N characters
29941 starting at P to FILE.
29942
29943 On the RS/6000, we have to do this using the .byte operation and
29944 write out special characters outside the quoted string.
29945 Also, the assembler is broken; very long strings are truncated,
29946 so we must artificially break them up early. */
29947
29948 void
29949 output_ascii (FILE *file, const char *p, int n)
29950 {
29951 char c;
29952 int i, count_string;
29953 const char *for_string = "\t.byte \"";
29954 const char *for_decimal = "\t.byte ";
29955 const char *to_close = NULL;
29956
29957 count_string = 0;
29958 for (i = 0; i < n; i++)
29959 {
29960 c = *p++;
29961 if (c >= ' ' && c < 0177)
29962 {
29963 if (for_string)
29964 fputs (for_string, file);
29965 putc (c, file);
29966
29967 /* Write two quotes to get one. */
29968 if (c == '"')
29969 {
29970 putc (c, file);
29971 ++count_string;
29972 }
29973
29974 for_string = NULL;
29975 for_decimal = "\"\n\t.byte ";
29976 to_close = "\"\n";
29977 ++count_string;
29978
29979 if (count_string >= 512)
29980 {
29981 fputs (to_close, file);
29982
29983 for_string = "\t.byte \"";
29984 for_decimal = "\t.byte ";
29985 to_close = NULL;
29986 count_string = 0;
29987 }
29988 }
29989 else
29990 {
29991 if (for_decimal)
29992 fputs (for_decimal, file);
29993 fprintf (file, "%d", c);
29994
29995 for_string = "\n\t.byte \"";
29996 for_decimal = ", ";
29997 to_close = "\n";
29998 count_string = 0;
29999 }
30000 }
30001
30002 /* Now close the string if we have written one. Then end the line. */
30003 if (to_close)
30004 fputs (to_close, file);
30005 }
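
/* For example (illustrative), output_ascii (file, "hi\"\n", 4) emits

   .byte "hi"""
   .byte 10

   printable characters are collected into a quoted .byte string (a
   doubled quote stands for a literal '"'), while the non-printable
   newline is written as a decimal byte on a fresh directive.  */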
30006 \f
30007 /* Generate a unique section name for FILENAME for a section type
30008 represented by SECTION_DESC. Output goes into BUF.
30009
30010 SECTION_DESC can be any string, as long as it is different for each
30011 possible section type.
30012
30013 We name the section in the same manner as xlc. The name begins with an
30014 underscore followed by the filename (after stripping any leading directory
30015 names) with the last period replaced by the string SECTION_DESC. If
30016 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30017 the name. */
30018
30019 void
30020 rs6000_gen_section_name (char **buf, const char *filename,
30021 const char *section_desc)
30022 {
30023 const char *q, *after_last_slash, *last_period = 0;
30024 char *p;
30025 int len;
30026
30027 after_last_slash = filename;
30028 for (q = filename; *q; q++)
30029 {
30030 if (*q == '/')
30031 after_last_slash = q + 1;
30032 else if (*q == '.')
30033 last_period = q;
30034 }
30035
30036 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30037 *buf = (char *) xmalloc (len);
30038
30039 p = *buf;
30040 *p++ = '_';
30041
30042 for (q = after_last_slash; *q; q++)
30043 {
30044 if (q == last_period)
30045 {
30046 strcpy (p, section_desc);
30047 p += strlen (section_desc);
30048 break;
30049 }
30050
30051 else if (ISALNUM (*q))
30052 *p++ = *q;
30053 }
30054
30055 if (last_period == 0)
30056 strcpy (p, section_desc);
30057 else
30058 *p = '\0';
30059 }
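
/* For example (illustrative): FILENAME "src/foo.c" with SECTION_DESC
   "bss" produces "_foobss"; the directory part is stripped, '_' is
   prepended, and the last period and everything after it are replaced
   by SECTION_DESC.  A FILENAME with no period, say "README", gives
   "_READMEbss".  */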
30060 \f
30061 /* Emit profile function. */
30062
30063 void
30064 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30065 {
30066 /* Non-standard profiling for kernels, which just saves LR then calls
30067 _mcount without worrying about arg saves. The idea is to change
30068 the function prologue as little as possible as it isn't easy to
30069 account for arg save/restore code added just for _mcount. */
30070 if (TARGET_PROFILE_KERNEL)
30071 return;
30072
30073 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30074 {
30075 #ifndef NO_PROFILE_COUNTERS
30076 # define NO_PROFILE_COUNTERS 0
30077 #endif
30078 if (NO_PROFILE_COUNTERS)
30079 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30080 LCT_NORMAL, VOIDmode);
30081 else
30082 {
30083 char buf[30];
30084 const char *label_name;
30085 rtx fun;
30086
30087 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30088 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30089 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30090
30091 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30092 LCT_NORMAL, VOIDmode, fun, Pmode);
30093 }
30094 }
30095 else if (DEFAULT_ABI == ABI_DARWIN)
30096 {
30097 const char *mcount_name = RS6000_MCOUNT;
30098 int caller_addr_regno = LR_REGNO;
30099
30100 /* Be conservative and always set this, at least for now. */
30101 crtl->uses_pic_offset_table = 1;
30102
30103 #if TARGET_MACHO
30104 /* For PIC code, set up a stub and collect the caller's address
30105 from r0, which is where the prologue puts it. */
30106 if (MACHOPIC_INDIRECT
30107 && crtl->uses_pic_offset_table)
30108 caller_addr_regno = 0;
30109 #endif
30110 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30111 LCT_NORMAL, VOIDmode,
30112 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30113 }
30114 }
30115
30116 /* Write function profiler code. */
30117
30118 void
30119 output_function_profiler (FILE *file, int labelno)
30120 {
30121 char buf[100];
30122
30123 switch (DEFAULT_ABI)
30124 {
30125 default:
30126 gcc_unreachable ();
30127
30128 case ABI_V4:
30129 if (!TARGET_32BIT)
30130 {
30131 warning (0, "no profiling of 64-bit code for this ABI");
30132 return;
30133 }
30134 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30135 fprintf (file, "\tmflr %s\n", reg_names[0]);
30136 if (NO_PROFILE_COUNTERS)
30137 {
30138 asm_fprintf (file, "\tstw %s,4(%s)\n",
30139 reg_names[0], reg_names[1]);
30140 }
30141 else if (TARGET_SECURE_PLT && flag_pic)
30142 {
30143 if (TARGET_LINK_STACK)
30144 {
30145 char name[32];
30146 get_ppc476_thunk_name (name);
30147 asm_fprintf (file, "\tbl %s\n", name);
30148 }
30149 else
30150 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30151 asm_fprintf (file, "\tstw %s,4(%s)\n",
30152 reg_names[0], reg_names[1]);
30153 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30154 asm_fprintf (file, "\taddis %s,%s,",
30155 reg_names[12], reg_names[12]);
30156 assemble_name (file, buf);
30157 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30158 assemble_name (file, buf);
30159 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30160 }
30161 else if (flag_pic == 1)
30162 {
30163 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30164 asm_fprintf (file, "\tstw %s,4(%s)\n",
30165 reg_names[0], reg_names[1]);
30166 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30167 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30168 assemble_name (file, buf);
30169 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30170 }
30171 else if (flag_pic > 1)
30172 {
30173 asm_fprintf (file, "\tstw %s,4(%s)\n",
30174 reg_names[0], reg_names[1]);
30175 /* Now, we need to get the address of the label. */
30176 if (TARGET_LINK_STACK)
30177 {
30178 char name[32];
30179 get_ppc476_thunk_name (name);
30180 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30181 assemble_name (file, buf);
30182 fputs ("-.\n1:", file);
30183 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30184 asm_fprintf (file, "\taddi %s,%s,4\n",
30185 reg_names[11], reg_names[11]);
30186 }
30187 else
30188 {
30189 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30190 assemble_name (file, buf);
30191 fputs ("-.\n1:", file);
30192 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30193 }
30194 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30195 reg_names[0], reg_names[11]);
30196 asm_fprintf (file, "\tadd %s,%s,%s\n",
30197 reg_names[0], reg_names[0], reg_names[11]);
30198 }
30199 else
30200 {
30201 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30202 assemble_name (file, buf);
30203 fputs ("@ha\n", file);
30204 asm_fprintf (file, "\tstw %s,4(%s)\n",
30205 reg_names[0], reg_names[1]);
30206 asm_fprintf (file, "\tla %s,", reg_names[0]);
30207 assemble_name (file, buf);
30208 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30209 }
30210
30211 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30212 fprintf (file, "\tbl %s%s\n",
30213 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30214 break;
30215
30216 case ABI_AIX:
30217 case ABI_ELFv2:
30218 case ABI_DARWIN:
30219 /* Don't do anything, done in output_profile_hook (). */
30220 break;
30221 }
30222 }
30223
30224 \f
30225
30226 /* The following variable value is the last issued insn. */
30227
30228 static rtx_insn *last_scheduled_insn;
30229
30230 /* The following variable helps to balance issuing of load and
30231 store instructions. */
30232
30233 static int load_store_pendulum;
30234
30235 /* The following variable helps pair divide insns during scheduling. */
30236 static int divide_cnt;
30237 /* The following variable helps pair and alternate vector and vector load
30238 insns during scheduling. */
30239 static int vec_pairing;
30240
30241
30242 /* Power4 load update and store update instructions are cracked into a
30243 load or store and an integer insn which are executed in the same cycle.
30244 Branches have their own dispatch slot which does not count against the
30245 GCC issue rate, but it changes the program flow so there are no other
30246 instructions to issue in this cycle. */
30247
30248 static int
30249 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30250 {
30251 last_scheduled_insn = insn;
30252 if (GET_CODE (PATTERN (insn)) == USE
30253 || GET_CODE (PATTERN (insn)) == CLOBBER)
30254 {
30255 cached_can_issue_more = more;
30256 return cached_can_issue_more;
30257 }
30258
30259 if (insn_terminates_group_p (insn, current_group))
30260 {
30261 cached_can_issue_more = 0;
30262 return cached_can_issue_more;
30263 }
30264
30265 /* If the insn has no reservation but we get here anyway, assume it does not consume an issue slot. */
30266 if (recog_memoized (insn) < 0)
30267 return more;
30268
30269 if (rs6000_sched_groups)
30270 {
30271 if (is_microcoded_insn (insn))
30272 cached_can_issue_more = 0;
30273 else if (is_cracked_insn (insn))
30274 cached_can_issue_more = more > 2 ? more - 2 : 0;
30275 else
30276 cached_can_issue_more = more - 1;
30277
30278 return cached_can_issue_more;
30279 }
30280
30281 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30282 return 0;
30283
30284 cached_can_issue_more = more - 1;
30285 return cached_can_issue_more;
30286 }
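
/* For example (illustrative): on a sched-groups target with MORE = 4
   issue slots remaining, a microcoded insn ends the group (0 is
   returned), a cracked insn consumes two slots (2 is returned), and
   an ordinary insn consumes one (3 is returned).  */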
30287
30288 static int
30289 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30290 {
30291 int r = rs6000_variable_issue_1 (insn, more);
30292 if (verbose)
30293 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30294 return r;
30295 }
30296
30297 /* Adjust the cost of a scheduling dependency. Return the new cost of
30298 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30299
30300 static int
30301 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30302 unsigned int)
30303 {
30304 enum attr_type attr_type;
30305
30306 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30307 return cost;
30308
30309 switch (dep_type)
30310 {
30311 case REG_DEP_TRUE:
30312 {
30313 /* Data dependency; DEP_INSN writes a register that INSN reads
30314 some cycles later. */
30315
30316 /* Separate a load from a narrower, dependent store. */
30317 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30318 && GET_CODE (PATTERN (insn)) == SET
30319 && GET_CODE (PATTERN (dep_insn)) == SET
30320 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30321 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30322 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30323 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30324 return cost + 14;
30325
30326 attr_type = get_attr_type (insn);
30327
30328 switch (attr_type)
30329 {
30330 case TYPE_JMPREG:
30331 /* Tell the first scheduling pass about the latency between
30332 a mtctr and bctr (and mtlr and br/blr). The first
30333 scheduling pass will not know about this latency since
30334 the mtctr instruction, which has the latency associated
30335 to it, will be generated by reload. */
30336 return 4;
30337 case TYPE_BRANCH:
30338 /* Leave some extra cycles between a compare and its
30339 dependent branch, to inhibit expensive mispredicts. */
30340 if ((rs6000_tune == PROCESSOR_PPC603
30341 || rs6000_tune == PROCESSOR_PPC604
30342 || rs6000_tune == PROCESSOR_PPC604e
30343 || rs6000_tune == PROCESSOR_PPC620
30344 || rs6000_tune == PROCESSOR_PPC630
30345 || rs6000_tune == PROCESSOR_PPC750
30346 || rs6000_tune == PROCESSOR_PPC7400
30347 || rs6000_tune == PROCESSOR_PPC7450
30348 || rs6000_tune == PROCESSOR_PPCE5500
30349 || rs6000_tune == PROCESSOR_PPCE6500
30350 || rs6000_tune == PROCESSOR_POWER4
30351 || rs6000_tune == PROCESSOR_POWER5
30352 || rs6000_tune == PROCESSOR_POWER7
30353 || rs6000_tune == PROCESSOR_POWER8
30354 || rs6000_tune == PROCESSOR_POWER9
30355 || rs6000_tune == PROCESSOR_CELL)
30356 && recog_memoized (dep_insn)
30357 && (INSN_CODE (dep_insn) >= 0))
30359 switch (get_attr_type (dep_insn))
30360 {
30361 case TYPE_CMP:
30362 case TYPE_FPCOMPARE:
30363 case TYPE_CR_LOGICAL:
30364 return cost + 2;
30365 case TYPE_EXTS:
30366 case TYPE_MUL:
30367 if (get_attr_dot (dep_insn) == DOT_YES)
30368 return cost + 2;
30369 else
30370 break;
30371 case TYPE_SHIFT:
30372 if (get_attr_dot (dep_insn) == DOT_YES
30373 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30374 return cost + 2;
30375 else
30376 break;
30377 default:
30378 break;
30379 }
30380 break;
30381
30382 case TYPE_STORE:
30383 case TYPE_FPSTORE:
30384 if ((rs6000_tune == PROCESSOR_POWER6)
30385 && recog_memoized (dep_insn)
30386 && (INSN_CODE (dep_insn) >= 0))
30387 {
30389 if (GET_CODE (PATTERN (insn)) != SET)
30390 /* If this happens, we have to extend this to schedule
30391 optimally. Return default for now. */
30392 return cost;
30393
30394 /* Adjust the cost for the case where the value written
30395 by a fixed point operation is used as the address
30396 gen value on a store. */
30397 switch (get_attr_type (dep_insn))
30398 {
30399 case TYPE_LOAD:
30400 case TYPE_CNTLZ:
30401 {
30402 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30403 return get_attr_sign_extend (dep_insn)
30404 == SIGN_EXTEND_YES ? 6 : 4;
30405 break;
30406 }
30407 case TYPE_SHIFT:
30408 {
30409 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30410 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30411 6 : 3;
30412 break;
30413 }
30414 case TYPE_INTEGER:
30415 case TYPE_ADD:
30416 case TYPE_LOGICAL:
30417 case TYPE_EXTS:
30418 case TYPE_INSERT:
30419 {
30420 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30421 return 3;
30422 break;
30423 }
30424 case TYPE_STORE:
30425 case TYPE_FPLOAD:
30426 case TYPE_FPSTORE:
30427 {
30428 if (get_attr_update (dep_insn) == UPDATE_YES
30429 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30430 return 3;
30431 break;
30432 }
30433 case TYPE_MUL:
30434 {
30435 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30436 return 17;
30437 break;
30438 }
30439 case TYPE_DIV:
30440 {
30441 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30442 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30443 break;
30444 }
30445 default:
30446 break;
30447 }
30448 }
30449 break;
30450
30451 case TYPE_LOAD:
30452 if ((rs6000_tune == PROCESSOR_POWER6)
30453 && recog_memoized (dep_insn)
30454 && (INSN_CODE (dep_insn) >= 0))
30455 {
30457 /* Adjust the cost for the case where the value written
30458 by a fixed point instruction is used within the address
30459 gen portion of a subsequent load(u)(x). */
30460 switch (get_attr_type (dep_insn))
30461 {
30462 case TYPE_LOAD:
30463 case TYPE_CNTLZ:
30464 {
30465 if (set_to_load_agen (dep_insn, insn))
30466 return get_attr_sign_extend (dep_insn)
30467 == SIGN_EXTEND_YES ? 6 : 4;
30468 break;
30469 }
30470 case TYPE_SHIFT:
30471 {
30472 if (set_to_load_agen (dep_insn, insn))
30473 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30474 6 : 3;
30475 break;
30476 }
30477 case TYPE_INTEGER:
30478 case TYPE_ADD:
30479 case TYPE_LOGICAL:
30480 case TYPE_EXTS:
30481 case TYPE_INSERT:
30482 {
30483 if (set_to_load_agen (dep_insn, insn))
30484 return 3;
30485 break;
30486 }
30487 case TYPE_STORE:
30488 case TYPE_FPLOAD:
30489 case TYPE_FPSTORE:
30490 {
30491 if (get_attr_update (dep_insn) == UPDATE_YES
30492 && set_to_load_agen (dep_insn, insn))
30493 return 3;
30494 break;
30495 }
30496 case TYPE_MUL:
30497 {
30498 if (set_to_load_agen (dep_insn, insn))
30499 return 17;
30500 break;
30501 }
30502 case TYPE_DIV:
30503 {
30504 if (set_to_load_agen (dep_insn, insn))
30505 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30506 break;
30507 }
30508 default:
30509 break;
30510 }
30511 }
30512 break;
30513
30514 case TYPE_FPLOAD:
30515 if ((rs6000_tune == PROCESSOR_POWER6)
30516 && get_attr_update (insn) == UPDATE_NO
30517 && recog_memoized (dep_insn)
30518 && (INSN_CODE (dep_insn) >= 0)
30519 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30520 return 2;
30521
30522 default:
30523 break;
30524 }
30525
30526 /* Fall out to return default cost. */
30527 }
30528 break;
30529
30530 case REG_DEP_OUTPUT:
30531 /* Output dependency; DEP_INSN writes a register that INSN writes some
30532 cycles later. */
30533 if ((rs6000_tune == PROCESSOR_POWER6)
30534 && recog_memoized (dep_insn)
30535 && (INSN_CODE (dep_insn) >= 0))
30536 {
30537 attr_type = get_attr_type (insn);
30538
30539 switch (attr_type)
30540 {
30541 case TYPE_FP:
30542 case TYPE_FPSIMPLE:
30543 if (get_attr_type (dep_insn) == TYPE_FP
30544 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30545 return 1;
30546 break;
30547 case TYPE_FPLOAD:
30548 if (get_attr_update (insn) == UPDATE_NO
30549 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30550 return 2;
30551 break;
30552 default:
30553 break;
30554 }
30555 }
30556 /* Fall through, no cost for output dependency. */
30557 /* FALLTHRU */
30558
30559 case REG_DEP_ANTI:
30560 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30561 cycles later. */
30562 return 0;
30563
30564 default:
30565 gcc_unreachable ();
30566 }
30567
30568 return cost;
30569 }
30570
30571 /* Debug version of rs6000_adjust_cost. */
30572
30573 static int
30574 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30575 int cost, unsigned int dw)
30576 {
30577 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30578
30579 if (ret != cost)
30580 {
30581 const char *dep;
30582
30583 switch (dep_type)
30584 {
30585 default: dep = "unknown dependency"; break;
30586 case REG_DEP_TRUE: dep = "data dependency"; break;
30587 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30588 case REG_DEP_ANTI: dep = "anti dependency"; break;
30589 }
30590
30591 fprintf (stderr,
30592 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30593 "%s, insn:\n", ret, cost, dep);
30594
30595 debug_rtx (insn);
30596 }
30597
30598 return ret;
30599 }
30600
30601 /* Return true if INSN is microcoded.
30602 Return false otherwise. */
30603
30604 static bool
30605 is_microcoded_insn (rtx_insn *insn)
30606 {
30607 if (!insn || !NONDEBUG_INSN_P (insn)
30608 || GET_CODE (PATTERN (insn)) == USE
30609 || GET_CODE (PATTERN (insn)) == CLOBBER)
30610 return false;
30611
30612 if (rs6000_tune == PROCESSOR_CELL)
30613 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30614
30615 if (rs6000_sched_groups
30616 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30617 {
30618 enum attr_type type = get_attr_type (insn);
30619 if ((type == TYPE_LOAD
30620 && get_attr_update (insn) == UPDATE_YES
30621 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30622 || ((type == TYPE_LOAD || type == TYPE_STORE)
30623 && get_attr_update (insn) == UPDATE_YES
30624 && get_attr_indexed (insn) == INDEXED_YES)
30625 || type == TYPE_MFCR)
30626 return true;
30627 }
30628
30629 return false;
30630 }
30631
30632 /* The function returns true if INSN is cracked into 2 instructions
30633 by the processor (and therefore occupies 2 issue slots). */
30634
30635 static bool
30636 is_cracked_insn (rtx_insn *insn)
30637 {
30638 if (!insn || !NONDEBUG_INSN_P (insn)
30639 || GET_CODE (PATTERN (insn)) == USE
30640 || GET_CODE (PATTERN (insn)) == CLOBBER)
30641 return false;
30642
30643 if (rs6000_sched_groups
30644 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30645 {
30646 enum attr_type type = get_attr_type (insn);
30647 if ((type == TYPE_LOAD
30648 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30649 && get_attr_update (insn) == UPDATE_NO)
30650 || (type == TYPE_LOAD
30651 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30652 && get_attr_update (insn) == UPDATE_YES
30653 && get_attr_indexed (insn) == INDEXED_NO)
30654 || (type == TYPE_STORE
30655 && get_attr_update (insn) == UPDATE_YES
30656 && get_attr_indexed (insn) == INDEXED_NO)
30657 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30658 && get_attr_update (insn) == UPDATE_YES)
30659 || (type == TYPE_CR_LOGICAL
30660 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30661 || (type == TYPE_EXTS
30662 && get_attr_dot (insn) == DOT_YES)
30663 || (type == TYPE_SHIFT
30664 && get_attr_dot (insn) == DOT_YES
30665 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30666 || (type == TYPE_MUL
30667 && get_attr_dot (insn) == DOT_YES)
30668 || type == TYPE_DIV
30669 || (type == TYPE_INSERT
30670 && get_attr_size (insn) == SIZE_32))
30671 return true;
30672 }
30673
30674 return false;
30675 }
30676
30677 /* The function returns true if INSN can be issued only from
30678 the branch slot. */
30679
30680 static bool
30681 is_branch_slot_insn (rtx_insn *insn)
30682 {
30683 if (!insn || !NONDEBUG_INSN_P (insn)
30684 || GET_CODE (PATTERN (insn)) == USE
30685 || GET_CODE (PATTERN (insn)) == CLOBBER)
30686 return false;
30687
30688 if (rs6000_sched_groups)
30689 {
30690 enum attr_type type = get_attr_type (insn);
30691 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30692 return true;
30693 return false;
30694 }
30695
30696 return false;
30697 }
30698
30699 /* Return true if OUT_INSN sets a value that is used in the address
30700 generation computation of IN_INSN. */
30701 static bool
30702 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30703 {
30704 rtx out_set, in_set;
30705
30706 /* For performance reasons, only handle the simple case where
30707 both insns are a single_set. */
30708 out_set = single_set (out_insn);
30709 if (out_set)
30710 {
30711 in_set = single_set (in_insn);
30712 if (in_set)
30713 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30714 }
30715
30716 return false;
30717 }
30718
30719 /* Try to determine base/offset/size parts of the given MEM.
30720 Return true if successful, false if all the values couldn't
30721 be determined.
30722
30723 This function only looks for REG or REG+CONST address forms.
30724 REG+REG address form will return false. */
30725
30726 static bool
30727 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30728 HOST_WIDE_INT *size)
30729 {
30730 rtx addr_rtx;
30731 if (MEM_SIZE_KNOWN_P (mem))
30732 *size = MEM_SIZE (mem);
30733 else
30734 return false;
30735
30736 addr_rtx = XEXP (mem, 0);
30737 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30738 addr_rtx = XEXP (addr_rtx, 1);
30739
30740 *offset = 0;
30741 while (GET_CODE (addr_rtx) == PLUS
30742 && CONST_INT_P (XEXP (addr_rtx, 1)))
30743 {
30744 *offset += INTVAL (XEXP (addr_rtx, 1));
30745 addr_rtx = XEXP (addr_rtx, 0);
30746 }
30747 if (!REG_P (addr_rtx))
30748 return false;
30749
30750 *base = addr_rtx;
30751 return true;
30752 }
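
/* Illustrative example: given (mem:DI (plus:DI (reg R) (const_int 8)))
   with a known MEM_SIZE of 8, get_memref_parts sets *BASE to R,
   *OFFSET to 8 and *SIZE to 8; a REG+REG address such as
   (mem:DI (plus:DI (reg R1) (reg R2))) makes it return false.  */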
30753
30754 /* Return true if the target storage location of MEM1 is adjacent
30755 to the target storage location of MEM2. */
30757
30758 static bool
30759 adjacent_mem_locations (rtx mem1, rtx mem2)
30760 {
30761 rtx reg1, reg2;
30762 HOST_WIDE_INT off1, size1, off2, size2;
30763
30764 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30765 && get_memref_parts (mem2, &reg2, &off2, &size2))
30766 return ((REGNO (reg1) == REGNO (reg2))
30767 && ((off1 + size1 == off2)
30768 || (off2 + size2 == off1)));
30769
30770 return false;
30771 }
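
/* For example, two 8-byte accesses at offsets 0 and 8 from the same
   base register are adjacent (0 + 8 == 8); accesses at offsets 0 and
   16 are not.  */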
30772
30773 /* This function returns true if it can be determined that the two MEM
30774 locations overlap by at least 1 byte based on base reg/offset/size. */
30775
30776 static bool
30777 mem_locations_overlap (rtx mem1, rtx mem2)
30778 {
30779 rtx reg1, reg2;
30780 HOST_WIDE_INT off1, size1, off2, size2;
30781
30782 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30783 && get_memref_parts (mem2, &reg2, &off2, &size2))
30784 return ((REGNO (reg1) == REGNO (reg2))
30785 && (((off1 <= off2) && (off1 + size1 > off2))
30786 || ((off2 <= off1) && (off2 + size2 > off1))));
30787
30788 return false;
30789 }
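
/* For example, an 8-byte access at offset 0 and a 4-byte access at
   offset 4 from the same base register overlap, since 0 <= 4 and
   0 + 8 > 4; 8-byte accesses at offsets 0 and 8 do not.  */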
30790
30791 /* Adjust the integer scheduling priority of INSN and return the new
30792 priority. Increase the priority to execute INSN earlier; reduce it
30793 to execute INSN later. */
30796
30797 static int
30798 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30799 {
30800 rtx load_mem, str_mem;
30801 /* On machines (like the 750) which have asymmetric integer units,
30802 where one integer unit can do multiply and divides and the other
30803 can't, reduce the priority of multiply/divide so it is scheduled
30804 before other integer operations. */
30805
30806 #if 0
30807 if (! INSN_P (insn))
30808 return priority;
30809
30810 if (GET_CODE (PATTERN (insn)) == USE)
30811 return priority;
30812
30813 switch (rs6000_tune) {
30814 case PROCESSOR_PPC750:
30815 switch (get_attr_type (insn))
30816 {
30817 default:
30818 break;
30819
30820 case TYPE_MUL:
30821 case TYPE_DIV:
30822 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30823 priority, priority);
30824 if (priority >= 0 && priority < 0x01000000)
30825 priority >>= 3;
30826 break;
30827 }
30828 }
30829 #endif
30830
30831 if (insn_must_be_first_in_group (insn)
30832 && reload_completed
30833 && current_sched_info->sched_max_insns_priority
30834 && rs6000_sched_restricted_insns_priority)
30835 {
30836
30837 /* Prioritize insns that can be dispatched only in the first
30838 dispatch slot. */
30839 if (rs6000_sched_restricted_insns_priority == 1)
30840 /* Attach highest priority to insn. This means that in
30841 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30842 precede 'priority' (critical path) considerations. */
30843 return current_sched_info->sched_max_insns_priority;
30844 else if (rs6000_sched_restricted_insns_priority == 2)
30845 /* Increase priority of insn by a minimal amount. This means that in
30846 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30847 considerations precede dispatch-slot restriction considerations. */
30848 return (priority + 1);
30849 }
30850
30851 if (rs6000_tune == PROCESSOR_POWER6
30852 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30853 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30854 /* Attach highest priority to insn if the scheduler has just issued two
30855 stores and this instruction is a load, or two loads and this instruction
30856 is a store. Power6 wants loads and stores scheduled alternately
30857 when possible */
30858 return current_sched_info->sched_max_insns_priority;
30859
30860 return priority;
30861 }
30862
30863 /* Return true if the instruction is nonpipelined on the Cell. */
30864 static bool
30865 is_nonpipeline_insn (rtx_insn *insn)
30866 {
30867 enum attr_type type;
30868 if (!insn || !NONDEBUG_INSN_P (insn)
30869 || GET_CODE (PATTERN (insn)) == USE
30870 || GET_CODE (PATTERN (insn)) == CLOBBER)
30871 return false;
30872
30873 type = get_attr_type (insn);
30874 if (type == TYPE_MUL
30875 || type == TYPE_DIV
30876 || type == TYPE_SDIV
30877 || type == TYPE_DDIV
30878 || type == TYPE_SSQRT
30879 || type == TYPE_DSQRT
30880 || type == TYPE_MFCR
30881 || type == TYPE_MFCRF
30882 || type == TYPE_MFJMPR)
30883 {
30884 return true;
30885 }
30886 return false;
30887 }
30888
30889
30890 /* Return how many instructions the machine can issue per cycle. */
30891
30892 static int
30893 rs6000_issue_rate (void)
30894 {
30895 /* Unless scheduling for register pressure, use issue rate of 1 for
30896 first scheduling pass to decrease degradation. */
30897 if (!reload_completed && !flag_sched_pressure)
30898 return 1;
30899
30900 switch (rs6000_tune) {
30901 case PROCESSOR_RS64A:
30902 case PROCESSOR_PPC601: /* ? */
30903 case PROCESSOR_PPC7450:
30904 return 3;
30905 case PROCESSOR_PPC440:
30906 case PROCESSOR_PPC603:
30907 case PROCESSOR_PPC750:
30908 case PROCESSOR_PPC7400:
30909 case PROCESSOR_PPC8540:
30910 case PROCESSOR_PPC8548:
30911 case PROCESSOR_CELL:
30912 case PROCESSOR_PPCE300C2:
30913 case PROCESSOR_PPCE300C3:
30914 case PROCESSOR_PPCE500MC:
30915 case PROCESSOR_PPCE500MC64:
30916 case PROCESSOR_PPCE5500:
30917 case PROCESSOR_PPCE6500:
30918 case PROCESSOR_TITAN:
30919 return 2;
30920 case PROCESSOR_PPC476:
30921 case PROCESSOR_PPC604:
30922 case PROCESSOR_PPC604e:
30923 case PROCESSOR_PPC620:
30924 case PROCESSOR_PPC630:
30925 return 4;
30926 case PROCESSOR_POWER4:
30927 case PROCESSOR_POWER5:
30928 case PROCESSOR_POWER6:
30929 case PROCESSOR_POWER7:
30930 return 5;
30931 case PROCESSOR_POWER8:
30932 return 7;
30933 case PROCESSOR_POWER9:
30934 return 6;
30935 default:
30936 return 1;
30937 }
30938 }
30939
30940 /* Return how many instructions to look ahead for better insn
30941 scheduling. */
30942
30943 static int
30944 rs6000_use_sched_lookahead (void)
30945 {
30946 switch (rs6000_tune)
30947 {
30948 case PROCESSOR_PPC8540:
30949 case PROCESSOR_PPC8548:
30950 return 4;
30951
30952 case PROCESSOR_CELL:
30953 return (reload_completed ? 8 : 0);
30954
30955 default:
30956 return 0;
30957 }
30958 }
30959
30960 /* We are choosing an insn from the ready queue. Return zero if INSN
30961 can be chosen. */
30962 static int
30963 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30964 {
30965 if (ready_index == 0)
30966 return 0;
30967
30968 if (rs6000_tune != PROCESSOR_CELL)
30969 return 0;
30970
30971 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30972
30973 if (!reload_completed
30974 || is_nonpipeline_insn (insn)
30975 || is_microcoded_insn (insn))
30976 return 1;
30977
30978 return 0;
30979 }
30980
30981 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30982 and return true. */
30983
30984 static bool
30985 find_mem_ref (rtx pat, rtx *mem_ref)
30986 {
30987 const char * fmt;
30988 int i, j;
30989
30990 /* stack_tie does not produce any real memory traffic. */
30991 if (tie_operand (pat, VOIDmode))
30992 return false;
30993
30994 if (GET_CODE (pat) == MEM)
30995 {
30996 *mem_ref = pat;
30997 return true;
30998 }
30999
31000 /* Recursively process the pattern. */
31001 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31002
31003 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31004 {
31005 if (fmt[i] == 'e')
31006 {
31007 if (find_mem_ref (XEXP (pat, i), mem_ref))
31008 return true;
31009 }
31010 else if (fmt[i] == 'E')
31011 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31012 {
31013 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31014 return true;
31015 }
31016 }
31017
31018 return false;
31019 }
31020
31021 /* Determine if PAT is a PATTERN of a load insn. */
31022
31023 static bool
31024 is_load_insn1 (rtx pat, rtx *load_mem)
31025 {
31026 if (!pat)
31027 return false;
31028
31029 if (GET_CODE (pat) == SET)
31030 return find_mem_ref (SET_SRC (pat), load_mem);
31031
31032 if (GET_CODE (pat) == PARALLEL)
31033 {
31034 int i;
31035
31036 for (i = 0; i < XVECLEN (pat, 0); i++)
31037 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31038 return true;
31039 }
31040
31041 return false;
31042 }
31043
31044 /* Determine if INSN loads from memory. */
31045
31046 static bool
31047 is_load_insn (rtx insn, rtx *load_mem)
31048 {
31049 if (!insn || !INSN_P (insn))
31050 return false;
31051
31052 if (CALL_P (insn))
31053 return false;
31054
31055 return is_load_insn1 (PATTERN (insn), load_mem);
31056 }
31057
31058 /* Determine if PAT is a PATTERN of a store insn. */
31059
31060 static bool
31061 is_store_insn1 (rtx pat, rtx *str_mem)
31062 {
31063 if (!pat)
31064 return false;
31065
31066 if (GET_CODE (pat) == SET)
31067 return find_mem_ref (SET_DEST (pat), str_mem);
31068
31069 if (GET_CODE (pat) == PARALLEL)
31070 {
31071 int i;
31072
31073 for (i = 0; i < XVECLEN (pat, 0); i++)
31074 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31075 return true;
31076 }
31077
31078 return false;
31079 }
31080
31081 /* Determine if INSN stores to memory. */
31082
31083 static bool
31084 is_store_insn (rtx insn, rtx *str_mem)
31085 {
31086 if (!insn || !INSN_P (insn))
31087 return false;
31088
31089 return is_store_insn1 (PATTERN (insn), str_mem);
31090 }
31091
31092 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31093
31094 static bool
31095 is_power9_pairable_vec_type (enum attr_type type)
31096 {
31097 switch (type)
31098 {
31099 case TYPE_VECSIMPLE:
31100 case TYPE_VECCOMPLEX:
31101 case TYPE_VECDIV:
31102 case TYPE_VECCMP:
31103 case TYPE_VECPERM:
31104 case TYPE_VECFLOAT:
31105 case TYPE_VECFDIV:
31106 case TYPE_VECDOUBLE:
31107 return true;
31108 default:
31109 break;
31110 }
31111 return false;
31112 }
31113
31114 /* Returns whether the dependence between INSN and NEXT is considered
31115 costly by the given target. */
31116
31117 static bool
31118 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31119 {
31120 rtx insn;
31121 rtx next;
31122 rtx load_mem, str_mem;
31123
31124 /* If the flag is not enabled, no dependence is considered costly;
31125 allow all dependent insns in the same group.
31126 This is the most aggressive option. */
31127 if (rs6000_sched_costly_dep == no_dep_costly)
31128 return false;
31129
31130 /* If the flag is set to 1, a dependence is always considered costly;
31131 do not allow dependent instructions in the same group.
31132 This is the most conservative option. */
31133 if (rs6000_sched_costly_dep == all_deps_costly)
31134 return true;
31135
31136 insn = DEP_PRO (dep);
31137 next = DEP_CON (dep);
31138
31139 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31140 && is_load_insn (next, &load_mem)
31141 && is_store_insn (insn, &str_mem))
31142 /* Prevent load after store in the same group. */
31143 return true;
31144
31145 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31146 && is_load_insn (next, &load_mem)
31147 && is_store_insn (insn, &str_mem)
31148 && DEP_TYPE (dep) == REG_DEP_TRUE
31149 && mem_locations_overlap(str_mem, load_mem))
31150 /* Prevent load after store in the same group if it is a true
31151 dependence. */
31152 return true;
31153
31154 /* The flag is set to X; dependences with latency >= X are considered costly,
31155 and will not be scheduled in the same group. */
31156 if (rs6000_sched_costly_dep <= max_dep_latency
31157 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31158 return true;
31159
31160 return false;
31161 }
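
/* For example, with -msched-costly-dep=3 (so rs6000_sched_costly_dep
   is 3), a dependence of cost 4 at distance 0 satisfies 4 - 0 >= 3 and
   is considered costly, while a dependence of cost 2 is not.  */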
31162
31163 /* Return the next insn after INSN that is found before TAIL is reached,
31164 skipping any "non-active" insns - insns that will not actually occupy
31165 an issue slot. Return NULL_RTX if such an insn is not found. */
31166
31167 static rtx_insn *
31168 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31169 {
31170 if (insn == NULL_RTX || insn == tail)
31171 return NULL;
31172
31173 while (1)
31174 {
31175 insn = NEXT_INSN (insn);
31176 if (insn == NULL_RTX || insn == tail)
31177 return NULL;
31178
31179 if (CALL_P (insn)
31180 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31181 || (NONJUMP_INSN_P (insn)
31182 && GET_CODE (PATTERN (insn)) != USE
31183 && GET_CODE (PATTERN (insn)) != CLOBBER
31184 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31185 break;
31186 }
31187 return insn;
31188 }
31189
31190 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31191
31192 static int
31193 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31194 {
31195 int pos;
31196 int i;
31197 rtx_insn *tmp;
31198 enum attr_type type, type2;
31199
31200 type = get_attr_type (last_scheduled_insn);
31201
31202 /* Try to issue fixed point divides back-to-back in pairs so they will be
31203 routed to separate execution units and execute in parallel. */
31204 if (type == TYPE_DIV && divide_cnt == 0)
31205 {
31206 /* First divide has been scheduled. */
31207 divide_cnt = 1;
31208
31209 /* Scan the ready list looking for another divide, if found move it
31210 to the end of the list so it is chosen next. */
31211 pos = lastpos;
31212 while (pos >= 0)
31213 {
31214 if (recog_memoized (ready[pos]) >= 0
31215 && get_attr_type (ready[pos]) == TYPE_DIV)
31216 {
31217 tmp = ready[pos];
31218 for (i = pos; i < lastpos; i++)
31219 ready[i] = ready[i + 1];
31220 ready[lastpos] = tmp;
31221 break;
31222 }
31223 pos--;
31224 }
31225 }
31226 else
31227 {
31228 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31229 divide_cnt = 0;
31230
31231 /* The best dispatch throughput for vector and vector load insns can be
31232 achieved by interleaving a vector and vector load such that they'll
31233 dispatch to the same superslice. If this pairing cannot be achieved
31234 then it is best to pair vector insns together and vector load insns
31235 together.
31236
31237 To aid in this pairing, vec_pairing maintains the current state with
31238 the following values:
31239
31240 0 : Initial state, no vecload/vector pairing has been started.
31241
31242 1 : A vecload or vector insn has been issued and a candidate for
31243 pairing has been found and moved to the end of the ready
31244 list. */
31245 if (type == TYPE_VECLOAD)
31246 {
31247 /* Issued a vecload. */
31248 if (vec_pairing == 0)
31249 {
31250 int vecload_pos = -1;
31251 /* We issued a single vecload, look for a vector insn to pair it
31252 with. If one isn't found, try to pair another vecload. */
31253 pos = lastpos;
31254 while (pos >= 0)
31255 {
31256 if (recog_memoized (ready[pos]) >= 0)
31257 {
31258 type2 = get_attr_type (ready[pos]);
31259 if (is_power9_pairable_vec_type (type2))
31260 {
31261 /* Found a vector insn to pair with, move it to the
31262 end of the ready list so it is scheduled next. */
31263 tmp = ready[pos];
31264 for (i = pos; i < lastpos; i++)
31265 ready[i] = ready[i + 1];
31266 ready[lastpos] = tmp;
31267 vec_pairing = 1;
31268 return cached_can_issue_more;
31269 }
31270 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31271 /* Remember position of first vecload seen. */
31272 vecload_pos = pos;
31273 }
31274 pos--;
31275 }
31276 if (vecload_pos >= 0)
31277 {
31278 /* Didn't find a vector to pair with but did find a vecload,
31279 move it to the end of the ready list. */
31280 tmp = ready[vecload_pos];
31281 for (i = vecload_pos; i < lastpos; i++)
31282 ready[i] = ready[i + 1];
31283 ready[lastpos] = tmp;
31284 vec_pairing = 1;
31285 return cached_can_issue_more;
31286 }
31287 }
31288 }
31289 else if (is_power9_pairable_vec_type (type))
31290 {
31291 /* Issued a vector operation. */
31292 if (vec_pairing == 0)
31293 {
31294 int vec_pos = -1;
31295 /* We issued a single vector insn, look for a vecload to pair it
31296 with. If one isn't found, try to pair another vector. */
31297 pos = lastpos;
31298 while (pos >= 0)
31299 {
31300 if (recog_memoized (ready[pos]) >= 0)
31301 {
31302 type2 = get_attr_type (ready[pos]);
31303 if (type2 == TYPE_VECLOAD)
31304 {
31305 /* Found a vecload insn to pair with, move it to the
31306 end of the ready list so it is scheduled next. */
31307 tmp = ready[pos];
31308 for (i = pos; i < lastpos; i++)
31309 ready[i] = ready[i + 1];
31310 ready[lastpos] = tmp;
31311 vec_pairing = 1;
31312 return cached_can_issue_more;
31313 }
31314 else if (is_power9_pairable_vec_type (type2)
31315 && vec_pos == -1)
31316 /* Remember position of first vector insn seen. */
31317 vec_pos = pos;
31318 }
31319 pos--;
31320 }
31321 if (vec_pos >= 0)
31322 {
31323 /* Didn't find a vecload to pair with but did find a vector
31324 insn, move it to the end of the ready list. */
31325 tmp = ready[vec_pos];
31326 for (i = vec_pos; i < lastpos; i++)
31327 ready[i] = ready[i + 1];
31328 ready[lastpos] = tmp;
31329 vec_pairing = 1;
31330 return cached_can_issue_more;
31331 }
31332 }
31333 }
31334
31335 /* We've either finished a vec/vecload pair, couldn't find an insn to
31336 continue the current pair, or the last insn had nothing to do with
31337 pairing. In any case, reset the state. */
31338 vec_pairing = 0;
31339 }
31340
31341 return cached_can_issue_more;
31342 }
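
/* For example, if a TYPE_VECLOAD insn has just been scheduled and the
   ready list still holds a TYPE_VECPERM insn, the code above moves the
   VECPERM to the tail of the ready list so the vecload/vector pair can
   dispatch to the same superslice.  */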
31343
31344 /* We are about to begin issuing insns for this clock cycle. */
31345
31346 static int
31347 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31348 rtx_insn **ready ATTRIBUTE_UNUSED,
31349 int *pn_ready ATTRIBUTE_UNUSED,
31350 int clock_var ATTRIBUTE_UNUSED)
31351 {
31352 int n_ready = *pn_ready;
31353
31354 if (sched_verbose)
31355 fprintf (dump, "// rs6000_sched_reorder :\n");
31356
31357 /* Reorder the ready list if the insn at the tail of the ready list
31358 (the next one to issue) is a nonpipelined insn. */
31359 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31360 {
31361 if (is_nonpipeline_insn (ready[n_ready - 1])
31362 && (recog_memoized (ready[n_ready - 2]) > 0))
31363 /* Swap the two insns at the tail of the ready list. */
31364 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31365 }
31366
31367 if (rs6000_tune == PROCESSOR_POWER6)
31368 load_store_pendulum = 0;
31369
31370 return rs6000_issue_rate ();
31371 }
31372
31373 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31374
31375 static int
31376 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31377 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31378 {
31379 if (sched_verbose)
31380 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31381
31382 /* For Power6, we need to handle some special cases to try and keep the
31383 store queue from overflowing and triggering expensive flushes.
31384
31385 This code monitors how load and store instructions are being issued
31386 and skews the ready list one way or the other to increase the likelihood
31387 that a desired instruction is issued at the proper time.
31388
31389 A couple of things are done. First, we maintain a "load_store_pendulum"
31390 to track the current state of load/store issue.
31391
31392 - If the pendulum is at zero, then no loads or stores have been
31393 issued in the current cycle so we do nothing.
31394
31395 - If the pendulum is 1, then a single load has been issued in this
31396 cycle and we attempt to locate another load in the ready list to
31397 issue with it.
31398
31399 - If the pendulum is -2, then two stores have already been
31400 issued in this cycle, so we increase the priority of the first load
31401 in the ready list to increase its likelihood of being chosen first
31402 in the next cycle.
31403
31404 - If the pendulum is -1, then a single store has been issued in this
31405 cycle and we attempt to locate another store in the ready list to
31406 issue with it, preferring a store to an adjacent memory location to
31407 facilitate store pairing in the store queue.
31408
31409 - If the pendulum is 2, then two loads have already been
31410 issued in this cycle, so we increase the priority of the first store
31411 in the ready list to increase its likelihood of being chosen first
31412 in the next cycle.
31413
31414 - If the pendulum < -2 or > 2, then do nothing.
31415
31416 Note: This code covers the most common scenarios. There exist
31417 non-load/store instructions which make use of the LSU and which
31418 would need to be accounted for to strictly model the behavior
31419 of the machine. Those instructions are currently unaccounted
31420 for to help minimize compile time overhead of this code.
31421 */
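  /* Illustrative walk-through: after two stores issue in one cycle the
     pendulum reaches -2, so the first load found on the ready list gets
     its priority bumped, making it more likely to be chosen first in
     the next cycle.  */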
31422 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31423 {
31424 int pos;
31425 int i;
31426 rtx_insn *tmp;
31427 rtx load_mem, str_mem;
31428
31429 if (is_store_insn (last_scheduled_insn, &str_mem))
31430 /* Issuing a store, swing the load_store_pendulum to the left */
31431 load_store_pendulum--;
31432 else if (is_load_insn (last_scheduled_insn, &load_mem))
31433 /* Issuing a load, swing the load_store_pendulum to the right */
31434 load_store_pendulum++;
31435 else
31436 return cached_can_issue_more;
31437
31438 /* If the pendulum is balanced, or there is only one instruction on
31439 the ready list, then all is well, so return. */
31440 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31441 return cached_can_issue_more;
31442
31443 if (load_store_pendulum == 1)
31444 {
31445 /* A load has been issued in this cycle. Scan the ready list
31446 for another load to issue with it */
31447 pos = *pn_ready - 1;
31448
31449 while (pos >= 0)
31450 {
31451 if (is_load_insn (ready[pos], &load_mem))
31452 {
31453 /* Found a load. Move it to the head of the ready list,
31454 and adjust its priority so that it is more likely to
31455 stay there. */
31456 tmp = ready[pos];
31457 for (i = pos; i < *pn_ready - 1; i++)
31458 ready[i] = ready[i + 1];
31459 ready[*pn_ready - 1] = tmp;
31460
31461 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31462 INSN_PRIORITY (tmp)++;
31463 break;
31464 }
31465 pos--;
31466 }
31467 }
31468 else if (load_store_pendulum == -2)
31469 {
31470 /* Two stores have been issued in this cycle. Increase the
31471 priority of the first load in the ready list to favor it for
31472 issuing in the next cycle. */
31473 pos = *pn_ready - 1;
31474
31475 while (pos >= 0)
31476 {
31477 if (is_load_insn (ready[pos], &load_mem)
31478 && !sel_sched_p ()
31479 && INSN_PRIORITY_KNOWN (ready[pos]))
31480 {
31481 INSN_PRIORITY (ready[pos])++;
31482
31483 /* Adjust the pendulum to account for the fact that a load
31484 was found and increased in priority. This is to prevent
31485 increasing the priority of multiple loads */
31486 load_store_pendulum--;
31487
31488 break;
31489 }
31490 pos--;
31491 }
31492 }
31493 else if (load_store_pendulum == -1)
31494 {
31495 /* A store has been issued in this cycle. Scan the ready list for
31496 another store to issue with it, preferring a store to an adjacent
31497 memory location */
31498 int first_store_pos = -1;
31499
31500 pos = *pn_ready - 1;
31501
31502 while (pos >= 0)
31503 {
31504 if (is_store_insn (ready[pos], &str_mem))
31505 {
31506 rtx str_mem2;
31507 /* Maintain the index of the first store found on the
31508 list */
31509 if (first_store_pos == -1)
31510 first_store_pos = pos;
31511
31512 if (is_store_insn (last_scheduled_insn, &str_mem2)
31513 && adjacent_mem_locations (str_mem, str_mem2))
31514 {
31515 /* Found an adjacent store. Move it to the head of the
31516 ready list, and adjust its priority so that it is
31517 more likely to stay there. */
31518 tmp = ready[pos];
31519 for (i = pos; i < *pn_ready - 1; i++)
31520 ready[i] = ready[i + 1];
31521 ready[*pn_ready - 1] = tmp;
31522
31523 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31524 INSN_PRIORITY (tmp)++;
31525
31526 first_store_pos = -1;
31527
31528 break;
31529 }
31530 }
31531 pos--;
31532 }
31533
31534 if (first_store_pos >= 0)
31535 {
31536 /* An adjacent store wasn't found, but a non-adjacent store was,
31537 so move the non-adjacent store to the front of the ready
31538 list, and adjust its priority so that it is more likely to
31539 stay there. */
31540 tmp = ready[first_store_pos];
31541 for (i = first_store_pos; i < *pn_ready - 1; i++)
31542 ready[i] = ready[i + 1];
31543 ready[*pn_ready - 1] = tmp;
31544 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31545 INSN_PRIORITY (tmp)++;
31546 }
31547 }
31548 else if (load_store_pendulum == 2)
31549 {
31550 /* Two loads have been issued in this cycle. Increase the priority
31551 of the first store in the ready list to favor it for issuing in
31552 the next cycle. */
31553 pos = *pn_ready - 1;
31554
31555 while (pos >= 0)
31556 {
31557 if (is_store_insn (ready[pos], &str_mem)
31558 && !sel_sched_p ()
31559 && INSN_PRIORITY_KNOWN (ready[pos]))
31560 {
31561 INSN_PRIORITY (ready[pos])++;
31562
31563 /* Adjust the pendulum to account for the fact that a store
31564 was found and increased in priority. This is to prevent
31565 increasing the priority of multiple stores */
31566 load_store_pendulum++;
31567
31568 break;
31569 }
31570 pos--;
31571 }
31572 }
31573 }
31574
31575 /* Do Power9 dependent reordering if necessary. */
31576 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31577 && recog_memoized (last_scheduled_insn) >= 0)
31578 return power9_sched_reorder2 (ready, *pn_ready - 1);
31579
31580 return cached_can_issue_more;
31581 }
31582
31583 /* Return whether the presence of INSN causes a dispatch group termination
31584 of group WHICH_GROUP.
31585
31586 If WHICH_GROUP == current_group, this function will return true if INSN
31587 causes the termination of the current group (i.e., the dispatch group to
31588 which INSN belongs). This means that INSN will be the last insn in the
31589 group it belongs to.
31590
31591 If WHICH_GROUP == previous_group, this function will return true if INSN
31592 causes the termination of the previous group (i.e., the dispatch group
31593 that precedes the group to which INSN belongs). This means that INSN
31594 will be the first insn in the group it belongs to. */
31595
31596 static bool
31597 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31598 {
31599 bool first, last;
31600
31601 if (! insn)
31602 return false;
31603
31604 first = insn_must_be_first_in_group (insn);
31605 last = insn_must_be_last_in_group (insn);
31606
31607 if (first && last)
31608 return true;
31609
31610 if (which_group == current_group)
31611 return last;
31612 else if (which_group == previous_group)
31613 return first;
31614
31615 return false;
31616 }
31617
31618
31619 static bool
31620 insn_must_be_first_in_group (rtx_insn *insn)
31621 {
31622 enum attr_type type;
31623
31624 if (!insn
31625 || NOTE_P (insn)
31626 || DEBUG_INSN_P (insn)
31627 || GET_CODE (PATTERN (insn)) == USE
31628 || GET_CODE (PATTERN (insn)) == CLOBBER)
31629 return false;
31630
31631 switch (rs6000_tune)
31632 {
31633 case PROCESSOR_POWER5:
31634 if (is_cracked_insn (insn))
31635 return true;
31636 /* FALLTHRU */
31637 case PROCESSOR_POWER4:
31638 if (is_microcoded_insn (insn))
31639 return true;
31640
31641 if (!rs6000_sched_groups)
31642 return false;
31643
31644 type = get_attr_type (insn);
31645
31646 switch (type)
31647 {
31648 case TYPE_MFCR:
31649 case TYPE_MFCRF:
31650 case TYPE_MTCR:
31651 case TYPE_CR_LOGICAL:
31652 case TYPE_MTJMPR:
31653 case TYPE_MFJMPR:
31654 case TYPE_DIV:
31655 case TYPE_LOAD_L:
31656 case TYPE_STORE_C:
31657 case TYPE_ISYNC:
31658 case TYPE_SYNC:
31659 return true;
31660 default:
31661 break;
31662 }
31663 break;
31664 case PROCESSOR_POWER6:
31665 type = get_attr_type (insn);
31666
31667 switch (type)
31668 {
31669 case TYPE_EXTS:
31670 case TYPE_CNTLZ:
31671 case TYPE_TRAP:
31672 case TYPE_MUL:
31673 case TYPE_INSERT:
31674 case TYPE_FPCOMPARE:
31675 case TYPE_MFCR:
31676 case TYPE_MTCR:
31677 case TYPE_MFJMPR:
31678 case TYPE_MTJMPR:
31679 case TYPE_ISYNC:
31680 case TYPE_SYNC:
31681 case TYPE_LOAD_L:
31682 case TYPE_STORE_C:
31683 return true;
31684 case TYPE_SHIFT:
31685 if (get_attr_dot (insn) == DOT_NO
31686 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31687 return true;
31688 else
31689 break;
31690 case TYPE_DIV:
31691 if (get_attr_size (insn) == SIZE_32)
31692 return true;
31693 else
31694 break;
31695 case TYPE_LOAD:
31696 case TYPE_STORE:
31697 case TYPE_FPLOAD:
31698 case TYPE_FPSTORE:
31699 if (get_attr_update (insn) == UPDATE_YES)
31700 return true;
31701 else
31702 break;
31703 default:
31704 break;
31705 }
31706 break;
31707 case PROCESSOR_POWER7:
31708 type = get_attr_type (insn);
31709
31710 switch (type)
31711 {
31712 case TYPE_CR_LOGICAL:
31713 case TYPE_MFCR:
31714 case TYPE_MFCRF:
31715 case TYPE_MTCR:
31716 case TYPE_DIV:
31717 case TYPE_ISYNC:
31718 case TYPE_LOAD_L:
31719 case TYPE_STORE_C:
31720 case TYPE_MFJMPR:
31721 case TYPE_MTJMPR:
31722 return true;
31723 case TYPE_MUL:
31724 case TYPE_SHIFT:
31725 case TYPE_EXTS:
31726 if (get_attr_dot (insn) == DOT_YES)
31727 return true;
31728 else
31729 break;
31730 case TYPE_LOAD:
31731 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31732 || get_attr_update (insn) == UPDATE_YES)
31733 return true;
31734 else
31735 break;
31736 case TYPE_STORE:
31737 case TYPE_FPLOAD:
31738 case TYPE_FPSTORE:
31739 if (get_attr_update (insn) == UPDATE_YES)
31740 return true;
31741 else
31742 break;
31743 default:
31744 break;
31745 }
31746 break;
31747 case PROCESSOR_POWER8:
31748 type = get_attr_type (insn);
31749
31750 switch (type)
31751 {
31752 case TYPE_CR_LOGICAL:
31753 case TYPE_MFCR:
31754 case TYPE_MFCRF:
31755 case TYPE_MTCR:
31756 case TYPE_SYNC:
31757 case TYPE_ISYNC:
31758 case TYPE_LOAD_L:
31759 case TYPE_STORE_C:
31760 case TYPE_VECSTORE:
31761 case TYPE_MFJMPR:
31762 case TYPE_MTJMPR:
31763 return true;
31764 case TYPE_SHIFT:
31765 case TYPE_EXTS:
31766 case TYPE_MUL:
31767 if (get_attr_dot (insn) == DOT_YES)
31768 return true;
31769 else
31770 break;
31771 case TYPE_LOAD:
31772 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31773 || get_attr_update (insn) == UPDATE_YES)
31774 return true;
31775 else
31776 break;
31777 case TYPE_STORE:
31778 if (get_attr_update (insn) == UPDATE_YES
31779 && get_attr_indexed (insn) == INDEXED_YES)
31780 return true;
31781 else
31782 break;
31783 default:
31784 break;
31785 }
31786 break;
31787 default:
31788 break;
31789 }
31790
31791 return false;
31792 }
31793
31794 static bool
31795 insn_must_be_last_in_group (rtx_insn *insn)
31796 {
31797 enum attr_type type;
31798
31799 if (!insn
31800 || NOTE_P (insn)
31801 || DEBUG_INSN_P (insn)
31802 || GET_CODE (PATTERN (insn)) == USE
31803 || GET_CODE (PATTERN (insn)) == CLOBBER)
31804 return false;
31805
31806 switch (rs6000_tune) {
31807 case PROCESSOR_POWER4:
31808 case PROCESSOR_POWER5:
31809 if (is_microcoded_insn (insn))
31810 return true;
31811
31812 if (is_branch_slot_insn (insn))
31813 return true;
31814
31815 break;
31816 case PROCESSOR_POWER6:
31817 type = get_attr_type (insn);
31818
31819 switch (type)
31820 {
31821 case TYPE_EXTS:
31822 case TYPE_CNTLZ:
31823 case TYPE_TRAP:
31824 case TYPE_MUL:
31825 case TYPE_FPCOMPARE:
31826 case TYPE_MFCR:
31827 case TYPE_MTCR:
31828 case TYPE_MFJMPR:
31829 case TYPE_MTJMPR:
31830 case TYPE_ISYNC:
31831 case TYPE_SYNC:
31832 case TYPE_LOAD_L:
31833 case TYPE_STORE_C:
31834 return true;
31835 case TYPE_SHIFT:
31836 if (get_attr_dot (insn) == DOT_NO
31837 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31838 return true;
31839 else
31840 break;
31841 case TYPE_DIV:
31842 if (get_attr_size (insn) == SIZE_32)
31843 return true;
31844 else
31845 break;
31846 default:
31847 break;
31848 }
31849 break;
31850 case PROCESSOR_POWER7:
31851 type = get_attr_type (insn);
31852
31853 switch (type)
31854 {
31855 case TYPE_ISYNC:
31856 case TYPE_SYNC:
31857 case TYPE_LOAD_L:
31858 case TYPE_STORE_C:
31859 return true;
31860 case TYPE_LOAD:
31861 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31862 && get_attr_update (insn) == UPDATE_YES)
31863 return true;
31864 else
31865 break;
31866 case TYPE_STORE:
31867 if (get_attr_update (insn) == UPDATE_YES
31868 && get_attr_indexed (insn) == INDEXED_YES)
31869 return true;
31870 else
31871 break;
31872 default:
31873 break;
31874 }
31875 break;
31876 case PROCESSOR_POWER8:
31877 type = get_attr_type (insn);
31878
31879 switch (type)
31880 {
31881 case TYPE_MFCR:
31882 case TYPE_MTCR:
31883 case TYPE_ISYNC:
31884 case TYPE_SYNC:
31885 case TYPE_LOAD_L:
31886 case TYPE_STORE_C:
31887 return true;
31888 case TYPE_LOAD:
31889 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31890 && get_attr_update (insn) == UPDATE_YES)
31891 return true;
31892 else
31893 break;
31894 case TYPE_STORE:
31895 if (get_attr_update (insn) == UPDATE_YES
31896 && get_attr_indexed (insn) == INDEXED_YES)
31897 return true;
31898 else
31899 break;
31900 default:
31901 break;
31902 }
31903 break;
31904 default:
31905 break;
31906 }
31907
31908 return false;
31909 }
31910
31911 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31912 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31913
31914 static bool
31915 is_costly_group (rtx *group_insns, rtx next_insn)
31916 {
31917 int i;
31918 int issue_rate = rs6000_issue_rate ();
31919
31920 for (i = 0; i < issue_rate; i++)
31921 {
31922 sd_iterator_def sd_it;
31923 dep_t dep;
31924 rtx insn = group_insns[i];
31925
31926 if (!insn)
31927 continue;
31928
31929 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31930 {
31931 rtx next = DEP_CON (dep);
31932
31933 if (next == next_insn
31934 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31935 return true;
31936 }
31937 }
31938
31939 return false;
31940 }
31941
31942 /* Utility of the function redefine_groups.
31943 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31944 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31945 to keep it "far" (in a separate group) from GROUP_INSNS, following
31946 one of the following schemes, depending on the value of the flag
31947 -minsert-sched-nops=X:
31948 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31949 in order to force NEXT_INSN into a separate group.
31950 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31951 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31952 insertion (has a group just ended, how many vacant issue slots remain in the
31953 last group, and how many dispatch groups were encountered so far). */
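
/* For example (illustrative), under scheme (2) with
   -minsert-sched-nops=2, exactly two nops are emitted before NEXT_INSN,
   and GROUP_END and GROUP_COUNT are updated as the nops consume issue
   slots.  */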
31954
31955 static int
31956 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31957 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31958 int *group_count)
31959 {
31960 rtx nop;
31961 bool force;
31962 int issue_rate = rs6000_issue_rate ();
31963 bool end = *group_end;
31964 int i;
31965
31966 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31967 return can_issue_more;
31968
31969 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31970 return can_issue_more;
31971
31972 force = is_costly_group (group_insns, next_insn);
31973 if (!force)
31974 return can_issue_more;
31975
31976 if (sched_verbose > 6)
31977 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
31978 *group_count, can_issue_more);
31979
31980 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31981 {
31982 if (*group_end)
31983 can_issue_more = 0;
31984
31985 /* Since only a branch can be issued in the last issue_slot, it is
31986 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31987 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31988 in this case the last nop will start a new group and the branch
31989 will be forced to the new group. */
31990 if (can_issue_more && !is_branch_slot_insn (next_insn))
31991 can_issue_more--;
31992
31993 /* Do we have a special group ending nop? */
31994 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
31995 || rs6000_tune == PROCESSOR_POWER8)
31996 {
31997 nop = gen_group_ending_nop ();
31998 emit_insn_before (nop, next_insn);
31999 can_issue_more = 0;
32000 }
32001 else
32002 while (can_issue_more > 0)
32003 {
32004 nop = gen_nop ();
32005 emit_insn_before (nop, next_insn);
32006 can_issue_more--;
32007 }
32008
32009 *group_end = true;
32010 return 0;
32011 }
32012
32013 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32014 {
32015 int n_nops = rs6000_sched_insert_nops;
32016
32017 /* Nops can't be issued from the branch slot, so the effective
32018 issue_rate for nops is 'issue_rate - 1'. */
32019 if (can_issue_more == 0)
32020 can_issue_more = issue_rate;
32021 can_issue_more--;
32022 if (can_issue_more == 0)
32023 {
32024 can_issue_more = issue_rate - 1;
32025 (*group_count)++;
32026 end = true;
32027 for (i = 0; i < issue_rate; i++)
32028 {
32029 group_insns[i] = 0;
32030 }
32031 }
32032
32033 while (n_nops > 0)
32034 {
32035 nop = gen_nop ();
32036 emit_insn_before (nop, next_insn);
32037 if (can_issue_more == issue_rate - 1) /* new group begins */
32038 end = false;
32039 can_issue_more--;
32040 if (can_issue_more == 0)
32041 {
32042 can_issue_more = issue_rate - 1;
32043 (*group_count)++;
32044 end = true;
32045 for (i = 0; i < issue_rate; i++)
32046 {
32047 group_insns[i] = 0;
32048 }
32049 }
32050 n_nops--;
32051 }
32052
32053 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32054 can_issue_more++;
32055
32056 /* Is next_insn going to start a new group? */
32057 *group_end
32058 = (end
32059 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32060 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32061 || (can_issue_more < issue_rate &&
32062 insn_terminates_group_p (next_insn, previous_group)));
32063 if (*group_end && end)
32064 (*group_count)--;
32065
32066 if (sched_verbose > 6)
32067 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32068 *group_count, can_issue_more);
32069 return can_issue_more;
32070 }
32071
32072 return can_issue_more;
32073 }
32074
32075 /* This function tries to synchronize the dispatch groups that the compiler
32076 "sees" with the dispatch groups that the processor dispatcher is expected
32077 to form in practice. It does so by forcing the estimated processor
32078 grouping on the compiler (as opposed to the function 'pad_groups', which
32079 tries to force the scheduler's grouping on the processor).
32080
32081 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32082 examines the (estimated) dispatch groups that will be formed by the processor
32083 dispatcher. It marks these group boundaries to reflect the estimated
32084 processor grouping, overriding the grouping that the scheduler had marked.
32085 Depending on the value of the flag '-minsert-sched-nops' this function can
32086 force certain insns into separate groups or force a certain distance between
32087 them by inserting nops, for example, if there exists a "costly dependence"
32088 between the insns.
32089
32090 The function estimates the group boundaries that the processor will form as
32091 follows: It keeps track of how many vacant issue slots are available after
32092 each insn. A subsequent insn will start a new group if one of the following
32093 4 cases applies:
32094 - no more vacant issue slots remain in the current dispatch group.
32095 - only the last issue slot, which is the branch slot, is vacant, but the next
32096 insn is not a branch.
32097 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32098 which means that a cracked insn (which occupies two issue slots) can't be
32099 issued in this group.
32100 - less than 'issue_rate' slots are vacant, and the next insn always needs to
32101 start a new group. */
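
/* The estimated boundaries are recorded by setting TImode on the first
   insn of each estimated group (and clearing it otherwise); pad_groups
   later tests GET_MODE to find these boundaries.  */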
32102
32103 static int
32104 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32105 rtx_insn *tail)
32106 {
32107 rtx_insn *insn, *next_insn;
32108 int issue_rate;
32109 int can_issue_more;
32110 int slot, i;
32111 bool group_end;
32112 int group_count = 0;
32113 rtx *group_insns;
32114
32115 /* Initialize. */
32116 issue_rate = rs6000_issue_rate ();
32117 group_insns = XALLOCAVEC (rtx, issue_rate);
32118 for (i = 0; i < issue_rate; i++)
32119 {
32120 group_insns[i] = 0;
32121 }
32122 can_issue_more = issue_rate;
32123 slot = 0;
32124 insn = get_next_active_insn (prev_head_insn, tail);
32125 group_end = false;
32126
32127 while (insn != NULL_RTX)
32128 {
32129 slot = (issue_rate - can_issue_more);
32130 group_insns[slot] = insn;
32131 can_issue_more =
32132 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32133 if (insn_terminates_group_p (insn, current_group))
32134 can_issue_more = 0;
32135
32136 next_insn = get_next_active_insn (insn, tail);
32137 if (next_insn == NULL_RTX)
32138 return group_count + 1;
32139
32140 /* Is next_insn going to start a new group? */
32141 group_end
32142 = (can_issue_more == 0
32143 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32144 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32145 || (can_issue_more < issue_rate &&
32146 insn_terminates_group_p (next_insn, previous_group)));
32147
32148 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32149 next_insn, &group_end, can_issue_more,
32150 &group_count);
32151
32152 if (group_end)
32153 {
32154 group_count++;
32155 can_issue_more = 0;
32156 for (i = 0; i < issue_rate; i++)
32157 {
32158 group_insns[i] = 0;
32159 }
32160 }
32161
32162 if (GET_MODE (next_insn) == TImode && can_issue_more)
32163 PUT_MODE (next_insn, VOIDmode);
32164 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32165 PUT_MODE (next_insn, TImode);
32166
32167 insn = next_insn;
32168 if (can_issue_more == 0)
32169 can_issue_more = issue_rate;
32170 } /* while */
32171
32172 return group_count;
32173 }
32174
32175 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32176 dispatch group boundaries that the scheduler had marked. Pad with nops
32177 any dispatch groups which have vacant issue slots, in order to force the
32178 scheduler's grouping on the processor dispatcher. The function
32179 returns the number of dispatch groups found. */
32180
32181 static int
32182 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32183 rtx_insn *tail)
32184 {
32185 rtx_insn *insn, *next_insn;
32186 rtx nop;
32187 int issue_rate;
32188 int can_issue_more;
32189 int group_end;
32190 int group_count = 0;
32191
32192 /* Initialize issue_rate. */
32193 issue_rate = rs6000_issue_rate ();
32194 can_issue_more = issue_rate;
32195
32196 insn = get_next_active_insn (prev_head_insn, tail);
32197 next_insn = get_next_active_insn (insn, tail);
32198
32199 while (insn != NULL_RTX)
32200 {
32201 can_issue_more =
32202 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32203
32204 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32205
32206 if (next_insn == NULL_RTX)
32207 break;
32208
32209 if (group_end)
32210 {
32211 /* If the scheduler had marked group termination at this location
32212 (between insn and next_insn), and neither insn nor next_insn will
32213 force group termination, pad the group with nops to force group
32214 termination. */
32215 if (can_issue_more
32216 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32217 && !insn_terminates_group_p (insn, current_group)
32218 && !insn_terminates_group_p (next_insn, previous_group))
32219 {
32220 if (!is_branch_slot_insn (next_insn))
32221 can_issue_more--;
32222
32223 while (can_issue_more)
32224 {
32225 nop = gen_nop ();
32226 emit_insn_before (nop, next_insn);
32227 can_issue_more--;
32228 }
32229 }
32230
32231 can_issue_more = issue_rate;
32232 group_count++;
32233 }
32234
32235 insn = next_insn;
32236 next_insn = get_next_active_insn (insn, tail);
32237 }
32238
32239 return group_count;
32240 }
32241
32242 /* We're beginning a new block. Initialize data structures as necessary. */
32243
32244 static void
32245 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32246 int sched_verbose ATTRIBUTE_UNUSED,
32247 int max_ready ATTRIBUTE_UNUSED)
32248 {
32249 last_scheduled_insn = NULL;
32250 load_store_pendulum = 0;
32251 divide_cnt = 0;
32252 vec_pairing = 0;
32253 }
32254
32255 /* The following function is called at the end of scheduling BB.
32256 After reload, it inserts nops to enforce insn group bundling. */
32257
32258 static void
32259 rs6000_sched_finish (FILE *dump, int sched_verbose)
32260 {
32261 int n_groups;
32262
32263 if (sched_verbose)
32264 fprintf (dump, "=== Finishing schedule.\n");
32265
32266 if (reload_completed && rs6000_sched_groups)
32267 {
32268 /* Do not run sched_finish hook when selective scheduling enabled. */
32269 if (sel_sched_p ())
32270 return;
32271
32272 if (rs6000_sched_insert_nops == sched_finish_none)
32273 return;
32274
32275 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32276 n_groups = pad_groups (dump, sched_verbose,
32277 current_sched_info->prev_head,
32278 current_sched_info->next_tail);
32279 else
32280 n_groups = redefine_groups (dump, sched_verbose,
32281 current_sched_info->prev_head,
32282 current_sched_info->next_tail);
32283
32284 if (sched_verbose >= 6)
32285 {
32286 fprintf (dump, "ngroups = %d\n", n_groups);
32287 print_rtl (dump, current_sched_info->prev_head);
32288 fprintf (dump, "Done finish_sched\n");
32289 }
32290 }
32291 }
32292
32293 struct rs6000_sched_context
32294 {
32295 short cached_can_issue_more;
32296 rtx_insn *last_scheduled_insn;
32297 int load_store_pendulum;
32298 int divide_cnt;
32299 int vec_pairing;
32300 };
32301
32302 typedef struct rs6000_sched_context rs6000_sched_context_def;
32303 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32304
32305 /* Allocate storage for a new scheduling context. */
32306 static void *
32307 rs6000_alloc_sched_context (void)
32308 {
32309 return xmalloc (sizeof (rs6000_sched_context_def));
32310 }
32311
32312 /* If CLEAN_P is true, initialize _SC with clean data; otherwise
32313 initialize it from the global context. */
32314 static void
32315 rs6000_init_sched_context (void *_sc, bool clean_p)
32316 {
32317 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32318
32319 if (clean_p)
32320 {
32321 sc->cached_can_issue_more = 0;
32322 sc->last_scheduled_insn = NULL;
32323 sc->load_store_pendulum = 0;
32324 sc->divide_cnt = 0;
32325 sc->vec_pairing = 0;
32326 }
32327 else
32328 {
32329 sc->cached_can_issue_more = cached_can_issue_more;
32330 sc->last_scheduled_insn = last_scheduled_insn;
32331 sc->load_store_pendulum = load_store_pendulum;
32332 sc->divide_cnt = divide_cnt;
32333 sc->vec_pairing = vec_pairing;
32334 }
32335 }
32336
32337 /* Sets the global scheduling context to the one pointed to by _SC. */
32338 static void
32339 rs6000_set_sched_context (void *_sc)
32340 {
32341 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32342
32343 gcc_assert (sc != NULL);
32344
32345 cached_can_issue_more = sc->cached_can_issue_more;
32346 last_scheduled_insn = sc->last_scheduled_insn;
32347 load_store_pendulum = sc->load_store_pendulum;
32348 divide_cnt = sc->divide_cnt;
32349 vec_pairing = sc->vec_pairing;
32350 }
32351
32352 /* Free _SC. */
32353 static void
32354 rs6000_free_sched_context (void *_sc)
32355 {
32356 gcc_assert (_sc != NULL);
32357
32358 free (_sc);
32359 }
32360
32361 static bool
32362 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32363 {
32364 switch (get_attr_type (insn))
32365 {
32366 case TYPE_DIV:
32367 case TYPE_SDIV:
32368 case TYPE_DDIV:
32369 case TYPE_VECDIV:
32370 case TYPE_SSQRT:
32371 case TYPE_DSQRT:
32372 return false;
32373
32374 default:
32375 return true;
32376 }
32377 }
32378 \f
32379 /* Length in units of the trampoline for entering a nested function. */
32380
32381 int
32382 rs6000_trampoline_size (void)
32383 {
32384 int ret = 0;
32385
32386 switch (DEFAULT_ABI)
32387 {
32388 default:
32389 gcc_unreachable ();
32390
32391 case ABI_AIX:
32392 ret = (TARGET_32BIT) ? 12 : 24;
32393 break;
32394
32395 case ABI_ELFv2:
32396 gcc_assert (!TARGET_32BIT);
32397 ret = 32;
32398 break;
32399
32400 case ABI_DARWIN:
32401 case ABI_V4:
32402 ret = (TARGET_32BIT) ? 40 : 48;
32403 break;
32404 }
32405
32406 return ret;
32407 }
32408
32409 /* Emit RTL insns to initialize the variable parts of a trampoline.
32410 FNADDR is an RTX for the address of the function's pure code.
32411 CXT is an RTX for the static chain value for the function. */
32412
32413 static void
32414 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32415 {
32416 int regsize = (TARGET_32BIT) ? 4 : 8;
32417 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32418 rtx ctx_reg = force_reg (Pmode, cxt);
32419 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32420
32421 switch (DEFAULT_ABI)
32422 {
32423 default:
32424 gcc_unreachable ();
32425
32426 /* Under AIX, just build the 3-word function descriptor. */
32427 case ABI_AIX:
32428 {
32429 rtx fnmem, fn_reg, toc_reg;
32430
32431 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32432 error ("you cannot take the address of a nested function if you use "
32433 "the %qs option", "-mno-pointers-to-nested-functions");
32434
32435 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32436 fn_reg = gen_reg_rtx (Pmode);
32437 toc_reg = gen_reg_rtx (Pmode);
32438
32439 /* Macro to shorten the code expansions below. */
32440 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32441
32442 m_tramp = replace_equiv_address (m_tramp, addr);
32443
32444 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32445 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32446 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32447 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32448 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32449
32450 # undef MEM_PLUS
32451 }
32452 break;
32453
32454 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32455 case ABI_ELFv2:
32456 case ABI_DARWIN:
32457 case ABI_V4:
32458 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32459 LCT_NORMAL, VOIDmode,
32460 addr, Pmode,
32461 GEN_INT (rs6000_trampoline_size ()), SImode,
32462 fnaddr, Pmode,
32463 ctx_reg, Pmode);
32464 break;
32465 }
32466 }
32467
32468 \f
32469 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32470 identifier as an argument, so the front end shouldn't look it up. */
32471
32472 static bool
32473 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32474 {
32475 return is_attribute_p ("altivec", attr_id);
32476 }
32477
32478 /* Handle the "altivec" attribute. The attribute may have
32479 arguments as follows:
32480
32481 __attribute__((altivec(vector__)))
32482 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32483 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32484
32485 and may appear more than once (e.g., 'vector bool char') in a
32486 given declaration. */
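
/* For example, the source-level type "vector unsigned int" reaches this
   handler as "unsigned int" carrying __attribute__((altivec(vector__)))
   and is rewritten below to the unsigned V4SI vector type.  */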
32487
32488 static tree
32489 rs6000_handle_altivec_attribute (tree *node,
32490 tree name ATTRIBUTE_UNUSED,
32491 tree args,
32492 int flags ATTRIBUTE_UNUSED,
32493 bool *no_add_attrs)
32494 {
32495 tree type = *node, result = NULL_TREE;
32496 machine_mode mode;
32497 int unsigned_p;
32498 char altivec_type
32499 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32500 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32501 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32502 : '?');
32503
32504 while (POINTER_TYPE_P (type)
32505 || TREE_CODE (type) == FUNCTION_TYPE
32506 || TREE_CODE (type) == METHOD_TYPE
32507 || TREE_CODE (type) == ARRAY_TYPE)
32508 type = TREE_TYPE (type);
32509
32510 mode = TYPE_MODE (type);
32511
32512 /* Check for invalid AltiVec type qualifiers. */
32513 if (type == long_double_type_node)
32514 error ("use of %<long double%> in AltiVec types is invalid");
32515 else if (type == boolean_type_node)
32516 error ("use of boolean types in AltiVec types is invalid");
32517 else if (TREE_CODE (type) == COMPLEX_TYPE)
32518 error ("use of %<complex%> in AltiVec types is invalid");
32519 else if (DECIMAL_FLOAT_MODE_P (mode))
32520 error ("use of decimal floating point types in AltiVec types is invalid");
32521 else if (!TARGET_VSX)
32522 {
32523 if (type == long_unsigned_type_node || type == long_integer_type_node)
32524 {
32525 if (TARGET_64BIT)
32526 error ("use of %<long%> in AltiVec types is invalid for "
32527 "64-bit code without %qs", "-mvsx");
32528 else if (rs6000_warn_altivec_long)
32529 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32530 "use %<int%>");
32531 }
32532 else if (type == long_long_unsigned_type_node
32533 || type == long_long_integer_type_node)
32534 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32535 "-mvsx");
32536 else if (type == double_type_node)
32537 error ("use of %<double%> in AltiVec types is invalid without %qs",
32538 "-mvsx");
32539 }
32540
32541 switch (altivec_type)
32542 {
32543 case 'v':
32544 unsigned_p = TYPE_UNSIGNED (type);
32545 switch (mode)
32546 {
32547 case E_TImode:
32548 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32549 break;
32550 case E_DImode:
32551 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32552 break;
32553 case E_SImode:
32554 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32555 break;
32556 case E_HImode:
32557 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32558 break;
32559 case E_QImode:
32560 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32561 break;
32562 case E_SFmode: result = V4SF_type_node; break;
32563 case E_DFmode: result = V2DF_type_node; break;
32564 /* If the user says 'vector int bool', we may be handed the 'bool'
32565 attribute _before_ the 'vector' attribute, and so select the
32566 proper type in the 'b' case below. */
32567 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32568 case E_V2DImode: case E_V2DFmode:
32569 result = type;
32570 default: break;
32571 }
32572 break;
32573 case 'b':
32574 switch (mode)
32575 {
32576 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32577 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32578 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32579 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32580 default: break;
32581 }
32582 break;
32583 case 'p':
32584 switch (mode)
32585 {
32586 case E_V8HImode: result = pixel_V8HI_type_node;
32587 default: break;
32588 }
32589 default: break;
32590 }
32591
32592 /* Propagate qualifiers attached to the element type
32593 onto the vector type. */
32594 if (result && result != type && TYPE_QUALS (type))
32595 result = build_qualified_type (result, TYPE_QUALS (type));
32596
32597 *no_add_attrs = true; /* No need to hang on to the attribute. */
32598
32599 if (result)
32600 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32601
32602 return NULL_TREE;
32603 }
32604
32605 /* AltiVec defines five built-in scalar types that serve as vector
32606 elements; we must teach the compiler how to mangle them. The 128-bit
32607 floating point mangling is target-specific as well. */
32608
32609 static const char *
32610 rs6000_mangle_type (const_tree type)
32611 {
32612 type = TYPE_MAIN_VARIANT (type);
32613
32614 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32615 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32616 return NULL;
32617
32618 if (type == bool_char_type_node) return "U6__boolc";
32619 if (type == bool_short_type_node) return "U6__bools";
32620 if (type == pixel_type_node) return "u7__pixel";
32621 if (type == bool_int_type_node) return "U6__booli";
32622 if (type == bool_long_long_type_node) return "U6__boolx";
32623
32624 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32625 return "g";
32626 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32627 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32628
32629 /* For all other types, use the default mangling. */
32630 return NULL;
32631 }
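
/* A worked example (hedged; the surrounding mangling follows the
   Itanium C++ ABI): with the IBM long double format in effect,
   "void f (long double)" mangles as _Z1fg via the "g" case above,
   rather than the default _Z1fe.  */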
32632
32633 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32634 struct attribute_spec.handler. */
32635
32636 static tree
32637 rs6000_handle_longcall_attribute (tree *node, tree name,
32638 tree args ATTRIBUTE_UNUSED,
32639 int flags ATTRIBUTE_UNUSED,
32640 bool *no_add_attrs)
32641 {
32642 if (TREE_CODE (*node) != FUNCTION_TYPE
32643 && TREE_CODE (*node) != FIELD_DECL
32644 && TREE_CODE (*node) != TYPE_DECL)
32645 {
32646 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32647 name);
32648 *no_add_attrs = true;
32649 }
32650
32651 return NULL_TREE;
32652 }
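
/* Typical use of the attribute validated above (a sketch; far_away is
   a hypothetical function):

     extern void far_away (void) __attribute__ ((longcall));

   Calls to far_away are then made through a pointer, so they can reach
   targets beyond the range of a relative branch.  */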
32653
32654 /* Set longcall attributes on all functions declared when
32655 rs6000_default_long_calls is true. */
32656 static void
32657 rs6000_set_default_type_attributes (tree type)
32658 {
32659 if (rs6000_default_long_calls
32660 && (TREE_CODE (type) == FUNCTION_TYPE
32661 || TREE_CODE (type) == METHOD_TYPE))
32662 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32663 NULL_TREE,
32664 TYPE_ATTRIBUTES (type));
32665
32666 #if TARGET_MACHO
32667 darwin_set_default_type_attributes (type);
32668 #endif
32669 }
32670
32671 /* Return a reference suitable for calling a function with the
32672 longcall attribute. */
32673
32674 static rtx
32675 rs6000_longcall_ref (rtx call_ref, rtx arg)
32676 {
  /* System V prepends a '.' to the internal name, so skip any leading dots.  */
32678 const char *call_name = XSTR (call_ref, 0);
32679 if (*call_name == '.')
32680 {
32681 while (*call_name == '.')
32682 call_name++;
32683
32684 tree node = get_identifier (call_name);
32685 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32686 }
32687
32688 if (HAVE_AS_PLTSEQ
32689 && TARGET_TLS_MARKERS
32690 && (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4))
32691 {
32692 rtx base = const0_rtx;
32693 int regno;
32694 if (DEFAULT_ABI == ABI_ELFv2)
32695 {
32696 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32697 regno = 12;
32698 }
32699 else
32700 {
32701 if (flag_pic)
32702 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32703 regno = 11;
32704 }
32705 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32706 may be used by a function global entry point. For SysV4, r11
32707 is used by __glink_PLTresolve lazy resolver entry. */
32708 rtx reg = gen_rtx_REG (Pmode, regno);
32709 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32710 UNSPEC_PLT16_HA);
32711 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32712 UNSPEC_PLT16_LO);
32713 emit_insn (gen_rtx_SET (reg, hi));
32714 emit_insn (gen_rtx_SET (reg, lo));
32715 return reg;
32716 }
32717
32718 return force_reg (Pmode, call_ref);
32719 }
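
/* Sketch of what the PLTSEQ path above builds for ELFv2 (the exact
   mnemonics and relocation spellings are assumptions that depend on
   the assembler's PLTSEQ support): an "addis 12,2,func@plt16_ha"
   high part followed by a matching func@plt16_lo low part into the
   same register.  The returned register (r12 for ELFv2, r11 for
   SysV4) then carries the call target, as required by the linker's
   PLT stubs.  */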
32720 \f
32721 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32722 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32723 #endif
32724
32725 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32726 struct attribute_spec.handler. */
32727 static tree
32728 rs6000_handle_struct_attribute (tree *node, tree name,
32729 tree args ATTRIBUTE_UNUSED,
32730 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32731 {
32732 tree *type = NULL;
32733 if (DECL_P (*node))
32734 {
32735 if (TREE_CODE (*node) == TYPE_DECL)
32736 type = &TREE_TYPE (*node);
32737 }
32738 else
32739 type = node;
32740
32741 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32742 || TREE_CODE (*type) == UNION_TYPE)))
32743 {
32744 warning (OPT_Wattributes, "%qE attribute ignored", name);
32745 *no_add_attrs = true;
32746 }
32747
32748 else if ((is_attribute_p ("ms_struct", name)
32749 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32750 || ((is_attribute_p ("gcc_struct", name)
32751 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32752 {
32753 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32754 name);
32755 *no_add_attrs = true;
32756 }
32757
32758 return NULL_TREE;
32759 }
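
/* Example of the attributes handled above (a sketch; S is a
   hypothetical type):

     struct __attribute__ ((ms_struct)) S { char c; int i : 4; };

   lays S out with Microsoft bit-field rules; gcc_struct requests the
   native layout, and combining the two is rejected as incompatible.  */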
32760
32761 static bool
32762 rs6000_ms_bitfield_layout_p (const_tree record_type)
32763 {
  return ((TARGET_USE_MS_BITFIELD_LAYOUT
	   && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
	  || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
32767 }
32768 \f
32769 #ifdef USING_ELFOS_H
32770
32771 /* A get_unnamed_section callback, used for switching to toc_section. */
32772
32773 static void
32774 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32775 {
32776 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32777 && TARGET_MINIMAL_TOC)
32778 {
32779 if (!toc_initialized)
32780 {
32781 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32782 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32783 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32784 fprintf (asm_out_file, "\t.tc ");
32785 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32786 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32787 fprintf (asm_out_file, "\n");
32788
32789 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32790 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32791 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32792 fprintf (asm_out_file, " = .+32768\n");
32793 toc_initialized = 1;
32794 }
32795 else
32796 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32797 }
32798 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32799 {
32800 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32801 if (!toc_initialized)
32802 {
32803 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32804 toc_initialized = 1;
32805 }
32806 }
32807 else
32808 {
32809 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32810 if (!toc_initialized)
32811 {
32812 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32813 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32814 fprintf (asm_out_file, " = .+32768\n");
32815 toc_initialized = 1;
32816 }
32817 }
32818 }
32819
32820 /* Implement TARGET_ASM_INIT_SECTIONS. */
32821
32822 static void
32823 rs6000_elf_asm_init_sections (void)
32824 {
32825 toc_section
32826 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32827
32828 sdata2_section
32829 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32830 SDATA2_SECTION_ASM_OP);
32831 }
32832
32833 /* Implement TARGET_SELECT_RTX_SECTION. */
32834
32835 static section *
32836 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32837 unsigned HOST_WIDE_INT align)
32838 {
32839 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32840 return toc_section;
32841 else
32842 return default_elf_select_rtx_section (mode, x, align);
32843 }
32844 \f
32845 /* For a SYMBOL_REF, set generic flags and then perform some
32846 target-specific processing.
32847
32848 When the AIX ABI is requested on a non-AIX system, replace the
32849 function name with the real name (with a leading .) rather than the
32850 function descriptor name. This saves a lot of overriding code to
32851 read the prefixes. */
32852
32853 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32854 static void
32855 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32856 {
32857 default_encode_section_info (decl, rtl, first);
32858
32859 if (first
32860 && TREE_CODE (decl) == FUNCTION_DECL
32861 && !TARGET_AIX
32862 && DEFAULT_ABI == ABI_AIX)
32863 {
32864 rtx sym_ref = XEXP (rtl, 0);
32865 size_t len = strlen (XSTR (sym_ref, 0));
32866 char *str = XALLOCAVEC (char, len + 2);
32867 str[0] = '.';
32868 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32869 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32870 }
32871 }
32872
32873 static inline bool
32874 compare_section_name (const char *section, const char *templ)
32875 {
32876 int len;
32877
32878 len = strlen (templ);
32879 return (strncmp (section, templ, len) == 0
32880 && (section[len] == 0 || section[len] == '.'));
32881 }
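
/* E.g. compare_section_name (".sdata.foo", ".sdata") and
   compare_section_name (".sdata", ".sdata") are true, but
   compare_section_name (".sdata2", ".sdata") is false, since the
   character after the matched prefix is neither NUL nor '.'.  */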
32882
32883 bool
32884 rs6000_elf_in_small_data_p (const_tree decl)
32885 {
32886 if (rs6000_sdata == SDATA_NONE)
32887 return false;
32888
32889 /* We want to merge strings, so we never consider them small data. */
32890 if (TREE_CODE (decl) == STRING_CST)
32891 return false;
32892
32893 /* Functions are never in the small data area. */
32894 if (TREE_CODE (decl) == FUNCTION_DECL)
32895 return false;
32896
32897 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32898 {
32899 const char *section = DECL_SECTION_NAME (decl);
32900 if (compare_section_name (section, ".sdata")
32901 || compare_section_name (section, ".sdata2")
32902 || compare_section_name (section, ".gnu.linkonce.s")
32903 || compare_section_name (section, ".sbss")
32904 || compare_section_name (section, ".sbss2")
32905 || compare_section_name (section, ".gnu.linkonce.sb")
32906 || strcmp (section, ".PPC.EMB.sdata0") == 0
32907 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32908 return true;
32909 }
32910 else
32911 {
32912 /* If we are told not to put readonly data in sdata, then don't. */
32913 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
32914 && !rs6000_readonly_in_sdata)
32915 return false;
32916
32917 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32918
32919 if (size > 0
32920 && size <= g_switch_value
32921 /* If it's not public, and we're not going to reference it there,
32922 there's no need to put it in the small data section. */
32923 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32924 return true;
32925 }
32926
32927 return false;
32928 }
32929
32930 #endif /* USING_ELFOS_H */
32931 \f
32932 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32933
32934 static bool
32935 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32936 {
32937 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32938 }
32939
32940 /* Do not place thread-local symbols refs in the object blocks. */
32941
32942 static bool
32943 rs6000_use_blocks_for_decl_p (const_tree decl)
32944 {
32945 return !DECL_THREAD_LOCAL_P (decl);
32946 }
32947 \f
32948 /* Return a REG that occurs in ADDR with coefficient 1.
32949 ADDR can be effectively incremented by incrementing REG.
32950
32951 r0 is special and we must not select it as an address
32952 register by this routine since our caller will try to
32953 increment the returned register via an "la" instruction. */
32954
32955 rtx
32956 find_addr_reg (rtx addr)
32957 {
32958 while (GET_CODE (addr) == PLUS)
32959 {
32960 if (GET_CODE (XEXP (addr, 0)) == REG
32961 && REGNO (XEXP (addr, 0)) != 0)
32962 addr = XEXP (addr, 0);
32963 else if (GET_CODE (XEXP (addr, 1)) == REG
32964 && REGNO (XEXP (addr, 1)) != 0)
32965 addr = XEXP (addr, 1);
32966 else if (CONSTANT_P (XEXP (addr, 0)))
32967 addr = XEXP (addr, 1);
32968 else if (CONSTANT_P (XEXP (addr, 1)))
32969 addr = XEXP (addr, 0);
32970 else
32971 gcc_unreachable ();
32972 }
32973 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32974 return addr;
32975 }
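
/* E.g. for (plus (reg 9) (const_int 16)) this returns (reg 9), and
   for (plus (plus (reg 0) (reg 9)) (const_int 16)) it also returns
   (reg 9), because r0 must never be chosen.  */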
32976
32977 void
32978 rs6000_fatal_bad_address (rtx op)
32979 {
32980 fatal_insn ("bad address", op);
32981 }
32982
32983 #if TARGET_MACHO
32984
32985 typedef struct branch_island_d {
32986 tree function_name;
32987 tree label_name;
32988 int line_number;
32989 } branch_island;
32990
32991
32992 static vec<branch_island, va_gc> *branch_islands;
32993
32994 /* Remember to generate a branch island for far calls to the given
32995 function. */
32996
32997 static void
32998 add_compiler_branch_island (tree label_name, tree function_name,
32999 int line_number)
33000 {
33001 branch_island bi = {function_name, label_name, line_number};
33002 vec_safe_push (branch_islands, bi);
33003 }
33004
33005 /* Generate far-jump branch islands for everything recorded in
33006 branch_islands. Invoked immediately after the last instruction of
33007 the epilogue has been emitted; the branch islands must be appended
33008 to, and contiguous with, the function body. Mach-O stubs are
33009 generated in machopic_output_stub(). */
33010
33011 static void
33012 macho_branch_islands (void)
33013 {
33014 char tmp_buf[512];
33015
33016 while (!vec_safe_is_empty (branch_islands))
33017 {
33018 branch_island *bi = &branch_islands->last ();
33019 const char *label = IDENTIFIER_POINTER (bi->label_name);
33020 const char *name = IDENTIFIER_POINTER (bi->function_name);
33021 char name_buf[512];
33022 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33023 if (name[0] == '*' || name[0] == '&')
33024 strcpy (name_buf, name+1);
33025 else
33026 {
33027 name_buf[0] = '_';
33028 strcpy (name_buf+1, name);
33029 }
33030 strcpy (tmp_buf, "\n");
33031 strcat (tmp_buf, label);
33032 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33033 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33034 dbxout_stabd (N_SLINE, bi->line_number);
33035 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33036 if (flag_pic)
33037 {
33038 if (TARGET_LINK_STACK)
33039 {
33040 char name[32];
33041 get_ppc476_thunk_name (name);
33042 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33043 strcat (tmp_buf, name);
33044 strcat (tmp_buf, "\n");
33045 strcat (tmp_buf, label);
33046 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33047 }
33048 else
33049 {
33050 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33051 strcat (tmp_buf, label);
33052 strcat (tmp_buf, "_pic\n");
33053 strcat (tmp_buf, label);
33054 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33055 }
33056
33057 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33058 strcat (tmp_buf, name_buf);
33059 strcat (tmp_buf, " - ");
33060 strcat (tmp_buf, label);
33061 strcat (tmp_buf, "_pic)\n");
33062
33063 strcat (tmp_buf, "\tmtlr r0\n");
33064
33065 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33066 strcat (tmp_buf, name_buf);
33067 strcat (tmp_buf, " - ");
33068 strcat (tmp_buf, label);
33069 strcat (tmp_buf, "_pic)\n");
33070
33071 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33072 }
33073 else
33074 {
33075 strcat (tmp_buf, ":\nlis r12,hi16(");
33076 strcat (tmp_buf, name_buf);
33077 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33078 strcat (tmp_buf, name_buf);
33079 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33080 }
33081 output_asm_insn (tmp_buf, 0);
33082 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33083 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33084 dbxout_stabd (N_SLINE, bi->line_number);
33085 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33086 branch_islands->pop ();
33087 }
33088 }
33089
/* NO_PREVIOUS_DEF checks the list of branch islands for a previous
   definition of FUNCTION_NAME, returning nonzero if there is none.  */
33092
33093 static int
33094 no_previous_def (tree function_name)
33095 {
33096 branch_island *bi;
33097 unsigned ix;
33098
33099 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33100 if (function_name == bi->function_name)
33101 return 0;
33102 return 1;
33103 }
33104
33105 /* GET_PREV_LABEL gets the label name from the previous definition of
33106 the function. */
33107
33108 static tree
33109 get_prev_label (tree function_name)
33110 {
33111 branch_island *bi;
33112 unsigned ix;
33113
33114 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33115 if (function_name == bi->function_name)
33116 return bi->label_name;
33117 return NULL_TREE;
33118 }
33119
/* INSN is either a function call or a millicode call.

   CALL_DEST is the routine we are calling.  */
33124
33125 char *
33126 macho_call_template (rtx_insn *insn, rtx *operands, int dest_operand_number,
33127 int cookie_operand_number)
33128 {
33129 static char buf[256];
33130 if (darwin_emit_branch_islands
33131 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
33132 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
33133 {
33134 tree labelname;
33135 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
33136
33137 if (no_previous_def (funname))
33138 {
33139 rtx label_rtx = gen_label_rtx ();
33140 char *label_buf, temp_buf[256];
33141 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
33142 CODE_LABEL_NUMBER (label_rtx));
33143 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
33144 labelname = get_identifier (label_buf);
33145 add_compiler_branch_island (labelname, funname, insn_line (insn));
33146 }
33147 else
33148 labelname = get_prev_label (funname);
33149
33150 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33151 instruction will reach 'foo', otherwise link as 'bl L42'".
33152 "L42" should be a 'branch island', that will do a far jump to
33153 'foo'. Branch islands are generated in
33154 macho_branch_islands(). */
33155 sprintf (buf, "jbsr %%z%d,%.246s",
33156 dest_operand_number, IDENTIFIER_POINTER (labelname));
33157 }
33158 else
33159 sprintf (buf, "bl %%z%d", dest_operand_number);
33160 return buf;
33161 }
33162
33163 /* Generate PIC and indirect symbol stubs. */
33164
33165 void
33166 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33167 {
33168 unsigned int length;
33169 char *symbol_name, *lazy_ptr_name;
33170 char *local_label_0;
33171 static int label = 0;
33172
33173 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33174 symb = (*targetm.strip_name_encoding) (symb);
33175
33176
33177 length = strlen (symb);
33178 symbol_name = XALLOCAVEC (char, length + 32);
33179 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33180
33181 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33182 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33183
33184 if (flag_pic == 2)
33185 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33186 else
33187 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33188
33189 if (flag_pic == 2)
33190 {
33191 fprintf (file, "\t.align 5\n");
33192
33193 fprintf (file, "%s:\n", stub);
33194 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33195
33196 label++;
33197 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33198 sprintf (local_label_0, "\"L%011d$spb\"", label);
33199
33200 fprintf (file, "\tmflr r0\n");
33201 if (TARGET_LINK_STACK)
33202 {
33203 char name[32];
33204 get_ppc476_thunk_name (name);
33205 fprintf (file, "\tbl %s\n", name);
33206 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33207 }
33208 else
33209 {
33210 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33211 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33212 }
33213 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33214 lazy_ptr_name, local_label_0);
33215 fprintf (file, "\tmtlr r0\n");
33216 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33217 (TARGET_64BIT ? "ldu" : "lwzu"),
33218 lazy_ptr_name, local_label_0);
33219 fprintf (file, "\tmtctr r12\n");
33220 fprintf (file, "\tbctr\n");
33221 }
33222 else
33223 {
33224 fprintf (file, "\t.align 4\n");
33225
33226 fprintf (file, "%s:\n", stub);
33227 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33228
33229 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33230 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33231 (TARGET_64BIT ? "ldu" : "lwzu"),
33232 lazy_ptr_name);
33233 fprintf (file, "\tmtctr r12\n");
33234 fprintf (file, "\tbctr\n");
33235 }
33236
33237 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33238 fprintf (file, "%s:\n", lazy_ptr_name);
33239 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33240 fprintf (file, "%sdyld_stub_binding_helper\n",
33241 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33242 }
33243
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if
   nonzero, otherwise we allocate register(s) as necessary.  */
33248
33249 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
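/* SMALL_INT is true iff X fits in a signed 16-bit immediate: the
   0x8000 bias maps [-0x8000, 0x7fff] onto [0, 0xffff], so for example
   0x7fff and -0x8000 pass while 0x8000 does not.  */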
33250
33251 rtx
33252 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33253 rtx reg)
33254 {
33255 rtx base, offset;
33256
33257 if (reg == NULL && !reload_completed)
33258 reg = gen_reg_rtx (Pmode);
33259
33260 if (GET_CODE (orig) == CONST)
33261 {
33262 rtx reg_temp;
33263
33264 if (GET_CODE (XEXP (orig, 0)) == PLUS
33265 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33266 return orig;
33267
33268 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33269
33270 /* Use a different reg for the intermediate value, as
33271 it will be marked UNCHANGING. */
33272 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33273 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33274 Pmode, reg_temp);
33275 offset =
33276 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33277 Pmode, reg);
33278
33279 if (GET_CODE (offset) == CONST_INT)
33280 {
33281 if (SMALL_INT (offset))
33282 return plus_constant (Pmode, base, INTVAL (offset));
33283 else if (!reload_completed)
33284 offset = force_reg (Pmode, offset);
33285 else
33286 {
33287 rtx mem = force_const_mem (Pmode, orig);
33288 return machopic_legitimize_pic_address (mem, Pmode, reg);
33289 }
33290 }
33291 return gen_rtx_PLUS (Pmode, base, offset);
33292 }
33293
33294 /* Fall back on generic machopic code. */
33295 return machopic_legitimize_pic_address (orig, mode, reg);
33296 }
33297
33298 /* Output a .machine directive for the Darwin assembler, and call
33299 the generic start_file routine. */
33300
33301 static void
33302 rs6000_darwin_file_start (void)
33303 {
33304 static const struct
33305 {
33306 const char *arg;
33307 const char *name;
33308 HOST_WIDE_INT if_set;
33309 } mapping[] = {
33310 { "ppc64", "ppc64", MASK_64BIT },
33311 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33312 { "power4", "ppc970", 0 },
33313 { "G5", "ppc970", 0 },
33314 { "7450", "ppc7450", 0 },
33315 { "7400", "ppc7400", MASK_ALTIVEC },
33316 { "G4", "ppc7400", 0 },
33317 { "750", "ppc750", 0 },
33318 { "740", "ppc750", 0 },
33319 { "G3", "ppc750", 0 },
33320 { "604e", "ppc604e", 0 },
33321 { "604", "ppc604", 0 },
33322 { "603e", "ppc603", 0 },
33323 { "603", "ppc603", 0 },
33324 { "601", "ppc601", 0 },
33325 { NULL, "ppc", 0 } };
33326 const char *cpu_id = "";
33327 size_t i;
33328
33329 rs6000_file_start ();
33330 darwin_file_start ();
33331
33332 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33333
33334 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33335 cpu_id = rs6000_default_cpu;
33336
33337 if (global_options_set.x_rs6000_cpu_index)
33338 cpu_id = processor_target_table[rs6000_cpu_index].name;
33339
33340 /* Look through the mapping array. Pick the first name that either
33341 matches the argument, has a bit set in IF_SET that is also set
33342 in the target flags, or has a NULL name. */
33343
33344 i = 0;
33345 while (mapping[i].arg != NULL
33346 && strcmp (mapping[i].arg, cpu_id) != 0
33347 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33348 i++;
33349
33350 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33351 }
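
/* E.g. -mcpu=G5 matches the "G5" entry above and emits
   ".machine ppc970"; with no match and no relevant ISA flag set, the
   terminating NULL entry yields ".machine ppc".  */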
33352
33353 #endif /* TARGET_MACHO */
33354
33355 #if TARGET_ELF
33356 static int
33357 rs6000_elf_reloc_rw_mask (void)
33358 {
33359 if (flag_pic)
33360 return 3;
33361 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33362 return 2;
33363 else
33364 return 0;
33365 }
33366
33367 /* Record an element in the table of global constructors. SYMBOL is
33368 a SYMBOL_REF of the function to be called; PRIORITY is a number
33369 between 0 and MAX_INIT_PRIORITY.
33370
33371 This differs from default_named_section_asm_out_constructor in
33372 that we have special handling for -mrelocatable. */
33373
33374 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33375 static void
33376 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33377 {
33378 const char *section = ".ctors";
33379 char buf[18];
33380
33381 if (priority != DEFAULT_INIT_PRIORITY)
33382 {
33383 sprintf (buf, ".ctors.%.5u",
33384 /* Invert the numbering so the linker puts us in the proper
33385 order; constructors are run from right to left, and the
33386 linker sorts in increasing order. */
33387 MAX_INIT_PRIORITY - priority);
33388 section = buf;
33389 }
33390
33391 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33392 assemble_align (POINTER_SIZE);
33393
33394 if (DEFAULT_ABI == ABI_V4
33395 && (TARGET_RELOCATABLE || flag_pic > 1))
33396 {
33397 fputs ("\t.long (", asm_out_file);
33398 output_addr_const (asm_out_file, symbol);
33399 fputs (")@fixup\n", asm_out_file);
33400 }
33401 else
33402 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33403 }
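
/* Worked example of the inversion above, assuming the usual
   MAX_INIT_PRIORITY of 65535: priority 65535 lands in ".ctors.00000"
   and priority 101 in ".ctors.65434", so the linker's increasing sort
   places higher-priority constructors where they run first.  */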
33404
33405 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33406 static void
33407 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33408 {
33409 const char *section = ".dtors";
33410 char buf[18];
33411
33412 if (priority != DEFAULT_INIT_PRIORITY)
33413 {
33414 sprintf (buf, ".dtors.%.5u",
33415 /* Invert the numbering so the linker puts us in the proper
33416 order; constructors are run from right to left, and the
33417 linker sorts in increasing order. */
33418 MAX_INIT_PRIORITY - priority);
33419 section = buf;
33420 }
33421
33422 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33423 assemble_align (POINTER_SIZE);
33424
33425 if (DEFAULT_ABI == ABI_V4
33426 && (TARGET_RELOCATABLE || flag_pic > 1))
33427 {
33428 fputs ("\t.long (", asm_out_file);
33429 output_addr_const (asm_out_file, symbol);
33430 fputs (")@fixup\n", asm_out_file);
33431 }
33432 else
33433 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33434 }
33435
33436 void
33437 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33438 {
33439 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33440 {
33441 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33442 ASM_OUTPUT_LABEL (file, name);
33443 fputs (DOUBLE_INT_ASM_OP, file);
33444 rs6000_output_function_entry (file, name);
33445 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33446 if (DOT_SYMBOLS)
33447 {
33448 fputs ("\t.size\t", file);
33449 assemble_name (file, name);
33450 fputs (",24\n\t.type\t.", file);
33451 assemble_name (file, name);
33452 fputs (",@function\n", file);
33453 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33454 {
33455 fputs ("\t.globl\t.", file);
33456 assemble_name (file, name);
33457 putc ('\n', file);
33458 }
33459 }
33460 else
33461 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33462 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33463 rs6000_output_function_entry (file, name);
33464 fputs (":\n", file);
33465 return;
33466 }
33467
33468 int uses_toc;
33469 if (DEFAULT_ABI == ABI_V4
33470 && (TARGET_RELOCATABLE || flag_pic > 1)
33471 && !TARGET_SECURE_PLT
33472 && (!constant_pool_empty_p () || crtl->profile)
33473 && (uses_toc = uses_TOC ()))
33474 {
33475 char buf[256];
33476
33477 if (uses_toc == 2)
33478 switch_to_other_text_partition ();
33479 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33480
33481 fprintf (file, "\t.long ");
33482 assemble_name (file, toc_label_name);
33483 need_toc_init = 1;
33484 putc ('-', file);
33485 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33486 assemble_name (file, buf);
33487 putc ('\n', file);
33488 if (uses_toc == 2)
33489 switch_to_other_text_partition ();
33490 }
33491
33492 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33493 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33494
33495 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33496 {
33497 char buf[256];
33498
33499 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33500
33501 fprintf (file, "\t.quad .TOC.-");
33502 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33503 assemble_name (file, buf);
33504 putc ('\n', file);
33505 }
33506
33507 if (DEFAULT_ABI == ABI_AIX)
33508 {
33509 const char *desc_name, *orig_name;
33510
33511 orig_name = (*targetm.strip_name_encoding) (name);
33512 desc_name = orig_name;
33513 while (*desc_name == '.')
33514 desc_name++;
33515
33516 if (TREE_PUBLIC (decl))
33517 fprintf (file, "\t.globl %s\n", desc_name);
33518
33519 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33520 fprintf (file, "%s:\n", desc_name);
33521 fprintf (file, "\t.long %s\n", orig_name);
33522 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33523 fputs ("\t.long 0\n", file);
33524 fprintf (file, "\t.previous\n");
33525 }
33526 ASM_OUTPUT_LABEL (file, name);
33527 }
33528
33529 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33530 static void
33531 rs6000_elf_file_end (void)
33532 {
33533 #ifdef HAVE_AS_GNU_ATTRIBUTE
33534 /* ??? The value emitted depends on options active at file end.
33535 Assume anyone using #pragma or attributes that might change
33536 options knows what they are doing. */
33537 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33538 && rs6000_passes_float)
33539 {
33540 int fp;
33541
33542 if (TARGET_HARD_FLOAT)
33543 fp = 1;
33544 else
33545 fp = 2;
33546 if (rs6000_passes_long_double)
33547 {
33548 if (!TARGET_LONG_DOUBLE_128)
33549 fp |= 2 * 4;
33550 else if (TARGET_IEEEQUAD)
33551 fp |= 3 * 4;
33552 else
33553 fp |= 1 * 4;
33554 }
33555 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33556 }
33557 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33558 {
33559 if (rs6000_passes_vector)
33560 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33561 (TARGET_ALTIVEC_ABI ? 2 : 1));
33562 if (rs6000_returns_struct)
33563 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33564 aix_struct_return ? 2 : 1);
33565 }
33566 #endif
33567 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33568 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33569 file_end_indicate_exec_stack ();
33570 #endif
33571
33572 if (flag_split_stack)
33573 file_end_indicate_split_stack ();
33574
33575 if (cpu_builtin_p)
33576 {
33577 /* We have expanded a CPU builtin, so we need to emit a reference to
33578 the special symbol that LIBC uses to declare it supports the
33579 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33580 switch_to_section (data_section);
33581 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33582 fprintf (asm_out_file, "\t%s %s\n",
33583 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33584 }
33585 }
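
/* Worked example of the FP attribute above: hard float together with
   128-bit IBM long double gives fp = 1 | (1 * 4) = 5, emitted as
   ".gnu_attribute 4, 5".  */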
33586 #endif
33587
33588 #if TARGET_XCOFF
33589
33590 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33591 #define HAVE_XCOFF_DWARF_EXTRAS 0
33592 #endif
33593
33594 static enum unwind_info_type
33595 rs6000_xcoff_debug_unwind_info (void)
33596 {
33597 return UI_NONE;
33598 }
33599
33600 static void
33601 rs6000_xcoff_asm_output_anchor (rtx symbol)
33602 {
33603 char buffer[100];
33604
33605 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33606 SYMBOL_REF_BLOCK_OFFSET (symbol));
33607 fprintf (asm_out_file, "%s", SET_ASM_OP);
33608 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33609 fprintf (asm_out_file, ",");
33610 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33611 fprintf (asm_out_file, "\n");
33612 }
33613
33614 static void
33615 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33616 {
33617 fputs (GLOBAL_ASM_OP, stream);
33618 RS6000_OUTPUT_BASENAME (stream, name);
33619 putc ('\n', stream);
33620 }
33621
33622 /* A get_unnamed_decl callback, used for read-only sections. PTR
33623 points to the section string variable. */
33624
33625 static void
33626 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33627 {
33628 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33629 *(const char *const *) directive,
33630 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33631 }
33632
33633 /* Likewise for read-write sections. */
33634
33635 static void
33636 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33637 {
33638 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33639 *(const char *const *) directive,
33640 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33641 }
33642
33643 static void
33644 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33645 {
33646 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33647 *(const char *const *) directive,
33648 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33649 }
33650
33651 /* A get_unnamed_section callback, used for switching to toc_section. */
33652
33653 static void
33654 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33655 {
33656 if (TARGET_MINIMAL_TOC)
33657 {
      /* toc_section is always selected at least once from
	 rs6000_xcoff_file_start, so this is guaranteed to be defined
	 exactly once in each file.  */
33661 if (!toc_initialized)
33662 {
33663 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33664 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33665 toc_initialized = 1;
33666 }
33667 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33668 (TARGET_32BIT ? "" : ",3"));
33669 }
33670 else
33671 fputs ("\t.toc\n", asm_out_file);
33672 }
33673
33674 /* Implement TARGET_ASM_INIT_SECTIONS. */
33675
33676 static void
33677 rs6000_xcoff_asm_init_sections (void)
33678 {
33679 read_only_data_section
33680 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33681 &xcoff_read_only_section_name);
33682
33683 private_data_section
33684 = get_unnamed_section (SECTION_WRITE,
33685 rs6000_xcoff_output_readwrite_section_asm_op,
33686 &xcoff_private_data_section_name);
33687
33688 tls_data_section
33689 = get_unnamed_section (SECTION_TLS,
33690 rs6000_xcoff_output_tls_section_asm_op,
33691 &xcoff_tls_data_section_name);
33692
33693 tls_private_data_section
33694 = get_unnamed_section (SECTION_TLS,
33695 rs6000_xcoff_output_tls_section_asm_op,
33696 &xcoff_private_data_section_name);
33697
33698 read_only_private_data_section
33699 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33700 &xcoff_private_data_section_name);
33701
33702 toc_section
33703 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33704
33705 readonly_data_section = read_only_data_section;
33706 }
33707
33708 static int
33709 rs6000_xcoff_reloc_rw_mask (void)
33710 {
33711 return 3;
33712 }
33713
33714 static void
33715 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33716 tree decl ATTRIBUTE_UNUSED)
33717 {
33718 int smclass;
33719 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33720
33721 if (flags & SECTION_EXCLUDE)
33722 smclass = 4;
33723 else if (flags & SECTION_DEBUG)
33724 {
33725 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33726 return;
33727 }
33728 else if (flags & SECTION_CODE)
33729 smclass = 0;
33730 else if (flags & SECTION_TLS)
33731 smclass = 3;
33732 else if (flags & SECTION_WRITE)
33733 smclass = 2;
33734 else
33735 smclass = 1;
33736
33737 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33738 (flags & SECTION_CODE) ? "." : "",
33739 name, suffix[smclass], flags & SECTION_ENTSIZE);
33740 }
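
/* For instance (a sketch of the output above; "mydata" is a
   hypothetical section), a writable named section whose
   section_type_flags encode a log2 alignment of 3 is emitted as

	.csect mydata[RW],3

   while a code section additionally gets the leading '.'.  */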
33741
33742 #define IN_NAMED_SECTION(DECL) \
33743 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33744 && DECL_SECTION_NAME (DECL) != NULL)
33745
33746 static section *
33747 rs6000_xcoff_select_section (tree decl, int reloc,
33748 unsigned HOST_WIDE_INT align)
33749 {
33750 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33751 named section. */
33752 if (align > BIGGEST_ALIGNMENT)
33753 {
33754 resolve_unique_section (decl, reloc, true);
33755 if (IN_NAMED_SECTION (decl))
33756 return get_named_section (decl, NULL, reloc);
33757 }
33758
33759 if (decl_readonly_section (decl, reloc))
33760 {
33761 if (TREE_PUBLIC (decl))
33762 return read_only_data_section;
33763 else
33764 return read_only_private_data_section;
33765 }
33766 else
33767 {
33768 #if HAVE_AS_TLS
33769 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33770 {
33771 if (TREE_PUBLIC (decl))
33772 return tls_data_section;
33773 else if (bss_initializer_p (decl))
33774 {
33775 /* Convert to COMMON to emit in BSS. */
33776 DECL_COMMON (decl) = 1;
33777 return tls_comm_section;
33778 }
33779 else
33780 return tls_private_data_section;
33781 }
33782 else
33783 #endif
33784 if (TREE_PUBLIC (decl))
33785 return data_section;
33786 else
33787 return private_data_section;
33788 }
33789 }
33790
33791 static void
33792 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33793 {
33794 const char *name;
33795
33796 /* Use select_section for private data and uninitialized data with
33797 alignment <= BIGGEST_ALIGNMENT. */
33798 if (!TREE_PUBLIC (decl)
33799 || DECL_COMMON (decl)
33800 || (DECL_INITIAL (decl) == NULL_TREE
33801 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33802 || DECL_INITIAL (decl) == error_mark_node
33803 || (flag_zero_initialized_in_bss
33804 && initializer_zerop (DECL_INITIAL (decl))))
33805 return;
33806
33807 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33808 name = (*targetm.strip_name_encoding) (name);
33809 set_decl_section_name (decl, name);
33810 }
33811
33812 /* Select section for constant in constant pool.
33813
33814 On RS/6000, all constants are in the private read-only data area.
33815 However, if this is being placed in the TOC it must be output as a
33816 toc entry. */
33817
33818 static section *
33819 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33820 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33821 {
33822 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33823 return toc_section;
33824 else
33825 return read_only_private_data_section;
33826 }
33827
33828 /* Remove any trailing [DS] or the like from the symbol name. */
33829
33830 static const char *
33831 rs6000_xcoff_strip_name_encoding (const char *name)
33832 {
33833 size_t len;
33834 if (*name == '*')
33835 name++;
33836 len = strlen (name);
33837 if (name[len - 1] == ']')
33838 return ggc_alloc_string (name, len - 4);
33839 else
33840 return name;
33841 }
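
/* E.g. "*foo[DS]" and "foo[DS]" both strip to "foo".  The len - 4
   assumes the trailing mapping class is always two characters plus
   brackets, as in [DS], [RW] or [TC].  */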
33842
33843 /* Section attributes. AIX is always PIC. */
33844
33845 static unsigned int
33846 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33847 {
33848 unsigned int align;
33849 unsigned int flags = default_section_type_flags (decl, name, reloc);
33850
33851 /* Align to at least UNIT size. */
33852 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33853 align = MIN_UNITS_PER_WORD;
33854 else
33855 /* Increase alignment of large objects if not already stricter. */
33856 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33857 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33858 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33859
33860 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33861 }
33862
33863 /* Output at beginning of assembler file.
33864
33865 Initialize the section names for the RS/6000 at this point.
33866
33867 Specify filename, including full path, to assembler.
33868
33869 We want to go into the TOC section so at least one .toc will be emitted.
33870 Also, in order to output proper .bs/.es pairs, we need at least one static
33871 [RW] section emitted.
33872
33873 Finally, declare mcount when profiling to make the assembler happy. */
33874
33875 static void
33876 rs6000_xcoff_file_start (void)
33877 {
33878 rs6000_gen_section_name (&xcoff_bss_section_name,
33879 main_input_filename, ".bss_");
33880 rs6000_gen_section_name (&xcoff_private_data_section_name,
33881 main_input_filename, ".rw_");
33882 rs6000_gen_section_name (&xcoff_read_only_section_name,
33883 main_input_filename, ".ro_");
33884 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33885 main_input_filename, ".tls_");
33886 rs6000_gen_section_name (&xcoff_tbss_section_name,
33887 main_input_filename, ".tbss_[UL]");
33888
33889 fputs ("\t.file\t", asm_out_file);
33890 output_quoted_string (asm_out_file, main_input_filename);
33891 fputc ('\n', asm_out_file);
33892 if (write_symbols != NO_DEBUG)
33893 switch_to_section (private_data_section);
33894 switch_to_section (toc_section);
33895 switch_to_section (text_section);
33896 if (profile_flag)
33897 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33898 rs6000_file_start ();
33899 }
33900
33901 /* Output at end of assembler file.
33902 On the RS/6000, referencing data should automatically pull in text. */
33903
33904 static void
33905 rs6000_xcoff_file_end (void)
33906 {
33907 switch_to_section (text_section);
33908 fputs ("_section_.text:\n", asm_out_file);
33909 switch_to_section (data_section);
33910 fputs (TARGET_32BIT
33911 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33912 asm_out_file);
33913 }
33914
33915 struct declare_alias_data
33916 {
33917 FILE *file;
33918 bool function_descriptor;
33919 };
33920
/* Declare alias N.  A helper function for call_for_symbol_and_aliases.  */
33922
33923 static bool
33924 rs6000_declare_alias (struct symtab_node *n, void *d)
33925 {
33926 struct declare_alias_data *data = (struct declare_alias_data *)d;
33927 /* Main symbol is output specially, because varasm machinery does part of
33928 the job for us - we do not need to declare .globl/lglobs and such. */
33929 if (!n->alias || n->weakref)
33930 return false;
33931
33932 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33933 return false;
33934
  /* Prevent assemble_alias from trying to use the .set pseudo-op,
     which does not behave as the middle-end expects.  */
33937 TREE_ASM_WRITTEN (n->decl) = true;
33938
33939 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33940 char *buffer = (char *) alloca (strlen (name) + 2);
33941 char *p;
33942 int dollar_inside = 0;
33943
33944 strcpy (buffer, name);
33945 p = strchr (buffer, '$');
  while (p)
    {
      *p = '_';
      dollar_inside++;
      p = strchr (p + 1, '$');
    }
33951 if (TREE_PUBLIC (n->decl))
33952 {
33953 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33954 {
	  if (dollar_inside)
	    {
	      if (data->function_descriptor)
		fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
	      fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
	    }
33960 if (data->function_descriptor)
33961 {
33962 fputs ("\t.globl .", data->file);
33963 RS6000_OUTPUT_BASENAME (data->file, buffer);
33964 putc ('\n', data->file);
33965 }
33966 fputs ("\t.globl ", data->file);
33967 RS6000_OUTPUT_BASENAME (data->file, buffer);
33968 putc ('\n', data->file);
33969 }
33970 #ifdef ASM_WEAKEN_DECL
33971 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33972 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33973 #endif
33974 }
33975 else
33976 {
33977 if (dollar_inside)
33978 {
33979 if (data->function_descriptor)
	    fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
	  fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33982 }
33983 if (data->function_descriptor)
33984 {
33985 fputs ("\t.lglobl .", data->file);
33986 RS6000_OUTPUT_BASENAME (data->file, buffer);
33987 putc ('\n', data->file);
33988 }
33989 fputs ("\t.lglobl ", data->file);
33990 RS6000_OUTPUT_BASENAME (data->file, buffer);
33991 putc ('\n', data->file);
33992 }
33993 if (data->function_descriptor)
33994 fputs (".", data->file);
33995 RS6000_OUTPUT_BASENAME (data->file, buffer);
33996 fputs (":\n", data->file);
33997 return false;
33998 }
33999
34000
34001 #ifdef HAVE_GAS_HIDDEN
34002 /* Helper function to calculate visibility of a DECL
34003 and return the value as a const string. */
34004
34005 static const char *
34006 rs6000_xcoff_visibility (tree decl)
34007 {
34008 static const char * const visibility_types[] = {
34009 "", ",protected", ",hidden", ",internal"
34010 };
34011
34012 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34013 return visibility_types[vis];
34014 }
34015 #endif
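
/* E.g. a decl marked __attribute__ ((visibility ("hidden"))) makes
   rs6000_xcoff_visibility return ",hidden", so a globalized symbol
   comes out roughly as ".globl foo,hidden".  */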
34016
34017
34018 /* This macro produces the initial definition of a function name.
34019 On the RS/6000, we need to place an extra '.' in the function name and
34020 output the function descriptor.
34021 Dollar signs are converted to underscores.
34022
34023 The csect for the function will have already been created when
34024 text_section was selected. We do have to go back to that csect, however.
34025
34026 The third and fourth parameters to the .function pseudo-op (16 and 044)
34027 are placeholders which no longer have any use.
34028
   Because the AIX assembler's .set command has unexpected semantics, we
   output all aliases as alternative labels in front of the definition.  */
34031
34032 void
34033 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34034 {
34035 char *buffer = (char *) alloca (strlen (name) + 1);
34036 char *p;
34037 int dollar_inside = 0;
34038 struct declare_alias_data data = {file, false};
34039
34040 strcpy (buffer, name);
34041 p = strchr (buffer, '$');
  while (p)
    {
      *p = '_';
      dollar_inside++;
      p = strchr (p + 1, '$');
    }
34047 if (TREE_PUBLIC (decl))
34048 {
34049 if (!RS6000_WEAK || !DECL_WEAK (decl))
34050 {
	  if (dollar_inside)
	    {
	      fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
	      fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
	    }
34055 fputs ("\t.globl .", file);
34056 RS6000_OUTPUT_BASENAME (file, buffer);
34057 #ifdef HAVE_GAS_HIDDEN
34058 fputs (rs6000_xcoff_visibility (decl), file);
34059 #endif
34060 putc ('\n', file);
34061 }
34062 }
34063 else
34064 {
      if (dollar_inside)
	{
	  fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
	  fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
	}
34069 fputs ("\t.lglobl .", file);
34070 RS6000_OUTPUT_BASENAME (file, buffer);
34071 putc ('\n', file);
34072 }
34073 fputs ("\t.csect ", file);
34074 RS6000_OUTPUT_BASENAME (file, buffer);
34075 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34076 RS6000_OUTPUT_BASENAME (file, buffer);
34077 fputs (":\n", file);
34078 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34079 &data, true);
34080 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34081 RS6000_OUTPUT_BASENAME (file, buffer);
34082 fputs (", TOC[tc0], 0\n", file);
34083 in_section = NULL;
34084 switch_to_section (function_section (decl));
34085 putc ('.', file);
34086 RS6000_OUTPUT_BASENAME (file, buffer);
34087 fputs (":\n", file);
34088 data.function_descriptor = true;
34089 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34090 &data, true);
34091 if (!DECL_IGNORED_P (decl))
34092 {
34093 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34094 xcoffout_declare_function (file, decl, buffer);
34095 else if (write_symbols == DWARF2_DEBUG)
34096 {
34097 name = (*targetm.strip_name_encoding) (name);
34098 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34099 }
34100 }
34101 return;
34102 }
34103
34104
34105 /* Output assembly language to globalize a symbol from a DECL,
34106 possibly with visibility. */
34107
34108 void
34109 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34110 {
34111 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34112 fputs (GLOBAL_ASM_OP, stream);
34113 RS6000_OUTPUT_BASENAME (stream, name);
34114 #ifdef HAVE_GAS_HIDDEN
34115 fputs (rs6000_xcoff_visibility (decl), stream);
34116 #endif
34117 putc ('\n', stream);
34118 }
34119
34120 /* Output assembly language to define a symbol as COMMON from a DECL,
34121 possibly with visibility. */
34122
34123 void
34124 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34125 tree decl ATTRIBUTE_UNUSED,
34126 const char *name,
34127 unsigned HOST_WIDE_INT size,
34128 unsigned HOST_WIDE_INT align)
34129 {
34130 unsigned HOST_WIDE_INT align2 = 2;
34131
34132 if (align > 32)
34133 align2 = floor_log2 (align / BITS_PER_UNIT);
34134 else if (size > 4)
34135 align2 = 3;
34136
34137 fputs (COMMON_ASM_OP, stream);
34138 RS6000_OUTPUT_BASENAME (stream, name);
34139
34140 fprintf (stream,
34141 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34142 size, align2);
34143
34144 #ifdef HAVE_GAS_HIDDEN
34145 if (decl != NULL)
34146 fputs (rs6000_xcoff_visibility (decl), stream);
34147 #endif
34148 putc ('\n', stream);
34149 }
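
/* Example of the directive built above (a sketch; "buf" is a
   hypothetical symbol): a 16-byte common symbol with 64-bit alignment
   has align2 = floor_log2 (8) = 3 and is emitted on AIX roughly as
   ".comm buf,16,3".  */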
34150
/* This macro produces the initial definition of an object (variable) name.
   Because the AIX assembler's .set command has unexpected semantics, we
   output all aliases as alternative labels in front of the definition.  */
34154
34155 void
34156 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34157 {
34158 struct declare_alias_data data = {file, false};
34159 RS6000_OUTPUT_BASENAME (file, name);
34160 fputs (":\n", file);
34161 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34162 &data, true);
34163 }
34164
/* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'.  */
34166
34167 void
34168 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34169 {
34170 fputs (integer_asm_op (size, FALSE), file);
34171 assemble_name (file, label);
34172 fputs ("-$", file);
34173 }
34174
34175 /* Output a symbol offset relative to the dbase for the current object.
34176 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34177 signed offsets.
34178
34179 __gcc_unwind_dbase is embedded in all executables/libraries through
34180 libgcc/config/rs6000/crtdbase.S. */
34181
34182 void
34183 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34184 {
34185 fputs (integer_asm_op (size, FALSE), file);
34186 assemble_name (file, label);
34187 fputs("-__gcc_unwind_dbase", file);
34188 }
34189
34190 #ifdef HAVE_AS_TLS
34191 static void
34192 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34193 {
34194 rtx symbol;
34195 int flags;
34196 const char *symname;
34197
34198 default_encode_section_info (decl, rtl, first);
34199
34200 /* Careful not to prod global register variables. */
34201 if (!MEM_P (rtl))
34202 return;
34203 symbol = XEXP (rtl, 0);
34204 if (GET_CODE (symbol) != SYMBOL_REF)
34205 return;
34206
34207 flags = SYMBOL_REF_FLAGS (symbol);
34208
34209 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34210 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34211
34212 SYMBOL_REF_FLAGS (symbol) = flags;
34213
34214 /* Append mapping class to extern decls. */
34215 symname = XSTR (symbol, 0);
34216 if (decl /* sync condition with assemble_external () */
34217 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34218 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34219 || TREE_CODE (decl) == FUNCTION_DECL)
34220 && symname[strlen (symname) - 1] != ']')
34221 {
34222 char *newname = (char *) alloca (strlen (symname) + 5);
34223 strcpy (newname, symname);
34224 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34225 ? "[DS]" : "[UA]"));
34226 XSTR (symbol, 0) = ggc_strdup (newname);
34227 }
34228 }
34229 #endif /* HAVE_AS_TLS */
34230 #endif /* TARGET_XCOFF */
34231
34232 void
34233 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34234 const char *name, const char *val)
34235 {
34236 fputs ("\t.weak\t", stream);
34237 RS6000_OUTPUT_BASENAME (stream, name);
34238 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34239 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34240 {
34241 if (TARGET_XCOFF)
34242 fputs ("[DS]", stream);
34243 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34244 if (TARGET_XCOFF)
34245 fputs (rs6000_xcoff_visibility (decl), stream);
34246 #endif
34247 fputs ("\n\t.weak\t.", stream);
34248 RS6000_OUTPUT_BASENAME (stream, name);
34249 }
34250 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34251 if (TARGET_XCOFF)
34252 fputs (rs6000_xcoff_visibility (decl), stream);
34253 #endif
34254 fputc ('\n', stream);
34255 if (val)
34256 {
34257 #ifdef ASM_OUTPUT_DEF
34258 ASM_OUTPUT_DEF (stream, name, val);
34259 #endif
34260 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34261 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34262 {
34263 fputs ("\t.set\t.", stream);
34264 RS6000_OUTPUT_BASENAME (stream, name);
34265 fputs (",.", stream);
34266 RS6000_OUTPUT_BASENAME (stream, val);
34267 fputc ('\n', stream);
34268 }
34269 }
34270 }
34271
34272
34273 /* Return true if INSN should not be copied. */
34274
34275 static bool
34276 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34277 {
  return (recog_memoized (insn) >= 0
	  && get_attr_cannot_copy (insn));
34280 }
34281
34282 /* Compute a (partial) cost for rtx X. Return true if the complete
34283 cost has been computed, and false if subexpressions should be
34284 scanned. In either case, *TOTAL contains the cost result. */
34285
34286 static bool
34287 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34288 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34289 {
34290 int code = GET_CODE (x);
34291
34292 switch (code)
34293 {
34294 /* On the RS/6000, if it is valid in the insn, it is free. */
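    /* E.g. the 42 in (plus (reg) (const_int 42)) satisfies the "I"
       constraint (signed 16 bits) and costs nothing, while a full
       32-bit constant in a SET falls through below and costs one insn,
       since it needs an lis/ori pair (a sketch of the reasoning, not
       an exhaustive rule).  */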
34295 case CONST_INT:
34296 if (((outer_code == SET
34297 || outer_code == PLUS
34298 || outer_code == MINUS)
34299 && (satisfies_constraint_I (x)
34300 || satisfies_constraint_L (x)))
34301 || (outer_code == AND
34302 && (satisfies_constraint_K (x)
34303 || (mode == SImode
34304 ? satisfies_constraint_L (x)
34305 : satisfies_constraint_J (x))))
34306 || ((outer_code == IOR || outer_code == XOR)
34307 && (satisfies_constraint_K (x)
34308 || (mode == SImode
34309 ? satisfies_constraint_L (x)
34310 : satisfies_constraint_J (x))))
34311 || outer_code == ASHIFT
34312 || outer_code == ASHIFTRT
34313 || outer_code == LSHIFTRT
34314 || outer_code == ROTATE
34315 || outer_code == ROTATERT
34316 || outer_code == ZERO_EXTRACT
34317 || (outer_code == MULT
34318 && satisfies_constraint_I (x))
34319 || ((outer_code == DIV || outer_code == UDIV
34320 || outer_code == MOD || outer_code == UMOD)
34321 && exact_log2 (INTVAL (x)) >= 0)
34322 || (outer_code == COMPARE
34323 && (satisfies_constraint_I (x)
34324 || satisfies_constraint_K (x)))
34325 || ((outer_code == EQ || outer_code == NE)
34326 && (satisfies_constraint_I (x)
34327 || satisfies_constraint_K (x)
34328 || (mode == SImode
34329 ? satisfies_constraint_L (x)
34330 : satisfies_constraint_J (x))))
34331 || (outer_code == GTU
34332 && satisfies_constraint_I (x))
34333 || (outer_code == LTU
34334 && satisfies_constraint_P (x)))
34335 {
34336 *total = 0;
34337 return true;
34338 }
34339 else if ((outer_code == PLUS
34340 && reg_or_add_cint_operand (x, VOIDmode))
34341 || (outer_code == MINUS
34342 && reg_or_sub_cint_operand (x, VOIDmode))
34343 || ((outer_code == SET
34344 || outer_code == IOR
34345 || outer_code == XOR)
34346 && (INTVAL (x)
34347 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34348 {
34349 *total = COSTS_N_INSNS (1);
34350 return true;
34351 }
34352 /* FALLTHRU */
34353
34354 case CONST_DOUBLE:
34355 case CONST_WIDE_INT:
34356 case CONST:
34357 case HIGH:
34358 case SYMBOL_REF:
34359 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34360 return true;
34361
34362 case MEM:
      /* When optimizing for size, MEM should be slightly more expensive
	 than generating the address, e.g., (plus (reg) (const)).
	 L1 cache latency is about two instructions.  */
34366 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34367 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34368 *total += COSTS_N_INSNS (100);
34369 return true;
34370
34371 case LABEL_REF:
34372 *total = 0;
34373 return true;
34374
34375 case PLUS:
34376 case MINUS:
34377 if (FLOAT_MODE_P (mode))
34378 *total = rs6000_cost->fp;
34379 else
34380 *total = COSTS_N_INSNS (1);
34381 return false;
34382
34383 case MULT:
34384 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34385 && satisfies_constraint_I (XEXP (x, 1)))
34386 {
34387 if (INTVAL (XEXP (x, 1)) >= -256
34388 && INTVAL (XEXP (x, 1)) <= 255)
34389 *total = rs6000_cost->mulsi_const9;
34390 else
34391 *total = rs6000_cost->mulsi_const;
34392 }
34393 else if (mode == SFmode)
34394 *total = rs6000_cost->fp;
34395 else if (FLOAT_MODE_P (mode))
34396 *total = rs6000_cost->dmul;
34397 else if (mode == DImode)
34398 *total = rs6000_cost->muldi;
34399 else
34400 *total = rs6000_cost->mulsi;
34401 return false;
34402
34403 case FMA:
34404 if (mode == SFmode)
34405 *total = rs6000_cost->fp;
34406 else
34407 *total = rs6000_cost->dmul;
34408 break;
34409
34410 case DIV:
34411 case MOD:
34412 if (FLOAT_MODE_P (mode))
34413 {
34414 *total = mode == DFmode ? rs6000_cost->ddiv
34415 : rs6000_cost->sdiv;
34416 return false;
34417 }
34418 /* FALLTHRU */
34419
34420 case UDIV:
34421 case UMOD:
34422 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34423 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34424 {
34425 if (code == DIV || code == MOD)
34426 /* Shift, addze */
34427 *total = COSTS_N_INSNS (2);
34428 else
34429 /* Shift */
34430 *total = COSTS_N_INSNS (1);
34431 }
34432 else
34433 {
34434 if (GET_MODE (XEXP (x, 1)) == DImode)
34435 *total = rs6000_cost->divdi;
34436 else
34437 *total = rs6000_cost->divsi;
34438 }
34439 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34440 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34441 *total += COSTS_N_INSNS (2);
34442 return false;
34443
34444 case CTZ:
34445 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34446 return false;
34447
34448 case FFS:
34449 *total = COSTS_N_INSNS (4);
34450 return false;
34451
34452 case POPCOUNT:
34453 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34454 return false;
34455
34456 case PARITY:
34457 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34458 return false;
34459
34460 case NOT:
34461 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34462 *total = 0;
34463 else
34464 *total = COSTS_N_INSNS (1);
34465 return false;
34466
34467 case AND:
34468 if (CONST_INT_P (XEXP (x, 1)))
34469 {
34470 rtx left = XEXP (x, 0);
34471 rtx_code left_code = GET_CODE (left);
34472
34473 /* rotate-and-mask: 1 insn. */
34474 if ((left_code == ROTATE
34475 || left_code == ASHIFT
34476 || left_code == LSHIFTRT)
34477 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34478 {
34479 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34480 if (!CONST_INT_P (XEXP (left, 1)))
34481 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34482 *total += COSTS_N_INSNS (1);
34483 return true;
34484 }
34485
34486 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34487 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34488 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34489 || (val & 0xffff) == val
34490 || (val & 0xffff0000) == val
34491 || ((val & 0xffff) == 0 && mode == SImode))
34492 {
34493 *total = rtx_cost (left, mode, AND, 0, speed);
34494 *total += COSTS_N_INSNS (1);
34495 return true;
34496 }
34497
34498 /* 2 insns. */
34499 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34500 {
34501 *total = rtx_cost (left, mode, AND, 0, speed);
34502 *total += COSTS_N_INSNS (2);
34503 return true;
34504 }
34505 }
34506
34507 *total = COSTS_N_INSNS (1);
34508 return false;
34509
34510 case IOR:
34511 /* FIXME */
34512 *total = COSTS_N_INSNS (1);
34513 return true;
34514
34515 case CLZ:
34516 case XOR:
34517 case ZERO_EXTRACT:
34518 *total = COSTS_N_INSNS (1);
34519 return false;
34520
34521 case ASHIFT:
34522       /* The EXTSWSLI instruction combines a sign extend and a shift.  Don't
34523 	 count the sign extend and the shift as separate insns.  */
34524 if (TARGET_EXTSWSLI && mode == DImode
34525 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34526 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34527 {
34528 *total = 0;
34529 return false;
34530 }
34531 /* fall through */
34532
34533 case ASHIFTRT:
34534 case LSHIFTRT:
34535 case ROTATE:
34536 case ROTATERT:
34537 /* Handle mul_highpart. */
34538 if (outer_code == TRUNCATE
34539 && GET_CODE (XEXP (x, 0)) == MULT)
34540 {
34541 if (mode == DImode)
34542 *total = rs6000_cost->muldi;
34543 else
34544 *total = rs6000_cost->mulsi;
34545 return true;
34546 }
34547 else if (outer_code == AND)
34548 *total = 0;
34549 else
34550 *total = COSTS_N_INSNS (1);
34551 return false;
34552
34553 case SIGN_EXTEND:
34554 case ZERO_EXTEND:
34555 if (GET_CODE (XEXP (x, 0)) == MEM)
34556 *total = 0;
34557 else
34558 *total = COSTS_N_INSNS (1);
34559 return false;
34560
34561 case COMPARE:
34562 case NEG:
34563 case ABS:
34564 if (!FLOAT_MODE_P (mode))
34565 {
34566 *total = COSTS_N_INSNS (1);
34567 return false;
34568 }
34569 /* FALLTHRU */
34570
34571 case FLOAT:
34572 case UNSIGNED_FLOAT:
34573 case FIX:
34574 case UNSIGNED_FIX:
34575 case FLOAT_TRUNCATE:
34576 *total = rs6000_cost->fp;
34577 return false;
34578
34579 case FLOAT_EXTEND:
34580 if (mode == DFmode)
34581 *total = rs6000_cost->sfdf_convert;
34582 else
34583 *total = rs6000_cost->fp;
34584 return false;
34585
34586 case UNSPEC:
34587 switch (XINT (x, 1))
34588 {
34589 case UNSPEC_FRSP:
34590 *total = rs6000_cost->fp;
34591 return true;
34592
34593 default:
34594 break;
34595 }
34596 break;
34597
34598 case CALL:
34599 case IF_THEN_ELSE:
34600 if (!speed)
34601 {
34602 *total = COSTS_N_INSNS (1);
34603 return true;
34604 }
34605 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34606 {
34607 *total = rs6000_cost->fp;
34608 return false;
34609 }
34610 break;
34611
34612 case NE:
34613 case EQ:
34614 case GTU:
34615 case LTU:
34616       /* The carry bit requires mode == Pmode.
34617 	 The NEG or PLUS has already been counted, so only add one.  */
34618 if (mode == Pmode
34619 && (outer_code == NEG || outer_code == PLUS))
34620 {
34621 *total = COSTS_N_INSNS (1);
34622 return true;
34623 }
34624 /* FALLTHRU */
34625
34626 case GT:
34627 case LT:
34628 case UNORDERED:
34629 if (outer_code == SET)
34630 {
34631 if (XEXP (x, 1) == const0_rtx)
34632 {
34633 *total = COSTS_N_INSNS (2);
34634 return true;
34635 }
34636 else
34637 {
34638 *total = COSTS_N_INSNS (3);
34639 return false;
34640 }
34641 }
34642 /* CC COMPARE. */
34643 if (outer_code == COMPARE)
34644 {
34645 *total = 0;
34646 return true;
34647 }
34648 break;
34649
34650 default:
34651 break;
34652 }
34653
34654 return false;
34655 }
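
/* A worked example of the CONST_INT handling above, illustrative only:
   in (plus (reg) (const_int 12)) the constant satisfies constraint "I"
   (a signed 16-bit immediate), so it folds into a single addi and is
   costed as free, whereas (set (reg) (const_int 0x12345678)) fails the
   first test but passes the 32-bit check in the second branch and is
   charged COSTS_N_INSNS (1) for the lis/ori materialization.  */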
34656
34657 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */
34658
34659 static bool
34660 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34661 int opno, int *total, bool speed)
34662 {
34663 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34664
34665 fprintf (stderr,
34666 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34667 "opno = %d, total = %d, speed = %s, x:\n",
34668 ret ? "complete" : "scan inner",
34669 GET_MODE_NAME (mode),
34670 GET_RTX_NAME (outer_code),
34671 opno,
34672 *total,
34673 speed ? "true" : "false");
34674
34675 debug_rtx (x);
34676
34677 return ret;
34678 }
34679
34680 static int
34681 rs6000_insn_cost (rtx_insn *insn, bool speed)
34682 {
34683 if (recog_memoized (insn) < 0)
34684 return 0;
34685
34686 if (!speed)
34687 return get_attr_length (insn);
34688
34689 int cost = get_attr_cost (insn);
34690 if (cost > 0)
34691 return cost;
34692
34693 int n = get_attr_length (insn) / 4;
34694 enum attr_type type = get_attr_type (insn);
34695
34696 switch (type)
34697 {
34698 case TYPE_LOAD:
34699 case TYPE_FPLOAD:
34700 case TYPE_VECLOAD:
34701 cost = COSTS_N_INSNS (n + 1);
34702 break;
34703
34704 case TYPE_MUL:
34705 switch (get_attr_size (insn))
34706 {
34707 case SIZE_8:
34708 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34709 break;
34710 case SIZE_16:
34711 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34712 break;
34713 case SIZE_32:
34714 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34715 break;
34716 case SIZE_64:
34717 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34718 break;
34719 default:
34720 gcc_unreachable ();
34721 }
34722 break;
34723 case TYPE_DIV:
34724 switch (get_attr_size (insn))
34725 {
34726 case SIZE_32:
34727 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34728 break;
34729 case SIZE_64:
34730 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34731 break;
34732 default:
34733 gcc_unreachable ();
34734 }
34735 break;
34736
34737 case TYPE_FP:
34738 cost = n * rs6000_cost->fp;
34739 break;
34740 case TYPE_DMUL:
34741 cost = n * rs6000_cost->dmul;
34742 break;
34743 case TYPE_SDIV:
34744 cost = n * rs6000_cost->sdiv;
34745 break;
34746 case TYPE_DDIV:
34747 cost = n * rs6000_cost->ddiv;
34748 break;
34749
34750 case TYPE_SYNC:
34751 case TYPE_LOAD_L:
34752 case TYPE_MFCR:
34753 case TYPE_MFCRF:
34754 cost = COSTS_N_INSNS (n + 2);
34755 break;
34756
34757 default:
34758 cost = COSTS_N_INSNS (n);
34759 }
34760
34761 return cost;
34762 }
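
/* A worked example, illustrative only: for a recognized multiply insn
   of length 8 (so n = 2) with size attribute SIZE_32, the switch above
   yields COSTS_N_INSNS (2 - 1) + rs6000_cost->mulsi, i.e. one generic
   insn for the extra word plus the tuned SImode multiply latency.  */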
34763
34764 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34765
34766 static int
34767 rs6000_debug_address_cost (rtx x, machine_mode mode,
34768 addr_space_t as, bool speed)
34769 {
34770 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34771
34772 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34773 ret, speed ? "true" : "false");
34774 debug_rtx (x);
34775
34776 return ret;
34777 }
34778
34779
34780 /* A C expression returning the cost of moving data from a register of class
34781 CLASS1 to one of CLASS2. */
34782
34783 static int
34784 rs6000_register_move_cost (machine_mode mode,
34785 reg_class_t from, reg_class_t to)
34786 {
34787 int ret;
34788
34789 if (TARGET_DEBUG_COST)
34790 dbg_cost_ctrl++;
34791
34792 /* Moves from/to GENERAL_REGS. */
34793 if (reg_classes_intersect_p (to, GENERAL_REGS)
34794 || reg_classes_intersect_p (from, GENERAL_REGS))
34795 {
34796 reg_class_t rclass = from;
34797
34798 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34799 rclass = to;
34800
34801 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34802 ret = (rs6000_memory_move_cost (mode, rclass, false)
34803 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34804
34805 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34806 shift. */
34807 else if (rclass == CR_REGS)
34808 ret = 4;
34809
34810 /* For those processors that have slow LR/CTR moves, make them more
34811 	 expensive than memory in order to bias spills to memory.  */
34812 else if ((rs6000_tune == PROCESSOR_POWER6
34813 || rs6000_tune == PROCESSOR_POWER7
34814 || rs6000_tune == PROCESSOR_POWER8
34815 || rs6000_tune == PROCESSOR_POWER9)
34816 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34817 ret = 6 * hard_regno_nregs (0, mode);
34818
34819 else
34820 /* A move will cost one instruction per GPR moved. */
34821 ret = 2 * hard_regno_nregs (0, mode);
34822 }
34823
34824 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34825 else if (VECTOR_MEM_VSX_P (mode)
34826 && reg_classes_intersect_p (to, VSX_REGS)
34827 && reg_classes_intersect_p (from, VSX_REGS))
34828 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34829
34830 /* Moving between two similar registers is just one instruction. */
34831 else if (reg_classes_intersect_p (to, from))
34832 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34833
34834 /* Everything else has to go through GENERAL_REGS. */
34835 else
34836 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34837 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34838
34839 if (TARGET_DEBUG_COST)
34840 {
34841 if (dbg_cost_ctrl == 1)
34842 fprintf (stderr,
34843 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34844 ret, GET_MODE_NAME (mode), reg_class_names[from],
34845 reg_class_names[to]);
34846 dbg_cost_ctrl--;
34847 }
34848
34849 return ret;
34850 }
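
/* A worked example, illustrative only: a DFmode move between
   GENERAL_REGS and FLOAT_REGS takes the first branch above with
   rclass == FLOAT_REGS, so its cost is the sum of the two
   rs6000_memory_move_cost calls, modelling a store plus a reload
   through memory rather than a register-to-register copy.  */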
34851
34852 /* A C expression returning the cost of moving data of MODE from a register to
34853 or from memory. */
34854
34855 static int
34856 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34857 bool in ATTRIBUTE_UNUSED)
34858 {
34859 int ret;
34860
34861 if (TARGET_DEBUG_COST)
34862 dbg_cost_ctrl++;
34863
34864 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34865 ret = 4 * hard_regno_nregs (0, mode);
34866 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34867 || reg_classes_intersect_p (rclass, VSX_REGS)))
34868 ret = 4 * hard_regno_nregs (32, mode);
34869 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34870 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34871 else
34872 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34873
34874 if (TARGET_DEBUG_COST)
34875 {
34876 if (dbg_cost_ctrl == 1)
34877 fprintf (stderr,
34878 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34879 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34880 dbg_cost_ctrl--;
34881 }
34882
34883 return ret;
34884 }
34885
34886 /* Returns a code for a target-specific builtin that implements
34887    the reciprocal of the function, or NULL_TREE if not available.  */
34888
34889 static tree
34890 rs6000_builtin_reciprocal (tree fndecl)
34891 {
34892 switch (DECL_FUNCTION_CODE (fndecl))
34893 {
34894 case VSX_BUILTIN_XVSQRTDP:
34895 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34896 return NULL_TREE;
34897
34898 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34899
34900 case VSX_BUILTIN_XVSQRTSP:
34901 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34902 return NULL_TREE;
34903
34904 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34905
34906 default:
34907 return NULL_TREE;
34908 }
34909 }
34910
34911 /* Load up a constant. If the mode is a vector mode, splat the value across
34912 all of the vector elements. */
34913
34914 static rtx
34915 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34916 {
34917 rtx reg;
34918
34919 if (mode == SFmode || mode == DFmode)
34920 {
34921 rtx d = const_double_from_real_value (dconst, mode);
34922 reg = force_reg (mode, d);
34923 }
34924 else if (mode == V4SFmode)
34925 {
34926 rtx d = const_double_from_real_value (dconst, SFmode);
34927 rtvec v = gen_rtvec (4, d, d, d, d);
34928 reg = gen_reg_rtx (mode);
34929 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34930 }
34931 else if (mode == V2DFmode)
34932 {
34933 rtx d = const_double_from_real_value (dconst, DFmode);
34934 rtvec v = gen_rtvec (2, d, d);
34935 reg = gen_reg_rtx (mode);
34936 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34937 }
34938 else
34939 gcc_unreachable ();
34940
34941 return reg;
34942 }
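
/* A usage sketch, illustrative only: rs6000_emit_swdiv below calls
   rs6000_load_constant_and_splat (mode, dconst1), which for V2DFmode
   builds the splat constant { 1.0, 1.0 } through
   rs6000_expand_vector_init rather than a scalar force_reg.  */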
34943
34944 /* Generate an FMA instruction. */
34945
34946 static void
34947 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34948 {
34949 machine_mode mode = GET_MODE (target);
34950 rtx dst;
34951
34952 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34953 gcc_assert (dst != NULL);
34954
34955 if (dst != target)
34956 emit_move_insn (target, dst);
34957 }
34958
34959 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34960
34961 static void
34962 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34963 {
34964 machine_mode mode = GET_MODE (dst);
34965 rtx r;
34966
34967 /* This is a tad more complicated, since the fnma_optab is for
34968 a different expression: fma(-m1, m2, a), which is the same
34969 thing except in the case of signed zeros.
34970
34971      Fortunately we know that if FMA is supported, then FNMSUB is
34972      also supported in the ISA.  Just expand it directly.  */
34973
34974 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34975
34976 r = gen_rtx_NEG (mode, a);
34977 r = gen_rtx_FMA (mode, m1, m2, r);
34978 r = gen_rtx_NEG (mode, r);
34979 emit_insn (gen_rtx_SET (dst, r));
34980 }
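
/* Spelled out, the RTL above computes dst = -(m1 * m2 + (-a)), i.e.
   a - m1 * m2 with a single rounding, which is exactly the PowerPC
   fnmsub instruction; the fnma_optab form fma(-m1, m2, a) would differ
   only when the exact result is a signed zero, as noted above.  */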
34981
34982 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34983 add a reg_note saying that this was a division. Support both scalar and
34984 vector divide. Assumes no trapping math and finite arguments. */
34985
34986 void
34987 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34988 {
34989 machine_mode mode = GET_MODE (dst);
34990 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34991 int i;
34992
34993 /* Low precision estimates guarantee 5 bits of accuracy. High
34994 precision estimates guarantee 14 bits of accuracy. SFmode
34995 requires 23 bits of accuracy. DFmode requires 52 bits of
34996 accuracy. Each pass at least doubles the accuracy, leading
34997 to the following. */
34998 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34999 if (mode == DFmode || mode == V2DFmode)
35000 passes++;
35001
35002 enum insn_code code = optab_handler (smul_optab, mode);
35003 insn_gen_fn gen_mul = GEN_FCN (code);
35004
35005 gcc_assert (code != CODE_FOR_nothing);
35006
35007 one = rs6000_load_constant_and_splat (mode, dconst1);
35008
35009 /* x0 = 1./d estimate */
35010 x0 = gen_reg_rtx (mode);
35011 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35012 UNSPEC_FRES)));
35013
35014 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35015 if (passes > 1) {
35016
35017 /* e0 = 1. - d * x0 */
35018 e0 = gen_reg_rtx (mode);
35019 rs6000_emit_nmsub (e0, d, x0, one);
35020
35021 /* x1 = x0 + e0 * x0 */
35022 x1 = gen_reg_rtx (mode);
35023 rs6000_emit_madd (x1, e0, x0, x0);
35024
35025 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35026 ++i, xprev = xnext, eprev = enext) {
35027
35028 /* enext = eprev * eprev */
35029 enext = gen_reg_rtx (mode);
35030 emit_insn (gen_mul (enext, eprev, eprev));
35031
35032 /* xnext = xprev + enext * xprev */
35033 xnext = gen_reg_rtx (mode);
35034 rs6000_emit_madd (xnext, enext, xprev, xprev);
35035 }
35036
35037 } else
35038 xprev = x0;
35039
35040 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35041
35042 /* u = n * xprev */
35043 u = gen_reg_rtx (mode);
35044 emit_insn (gen_mul (u, n, xprev));
35045
35046 /* v = n - (d * u) */
35047 v = gen_reg_rtx (mode);
35048 rs6000_emit_nmsub (v, d, u, n);
35049
35050 /* dst = (v * xprev) + u */
35051 rs6000_emit_madd (dst, v, xprev, u);
35052
35053 if (note_p)
35054 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
35055 }
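
/* A worked check of the pass counts above, illustrative only: with
   TARGET_RECIP_PRECISION the 14-bit estimate doubles to 28 >= 23 bits
   in one pass for SFmode, and to 56 >= 52 bits in two for DFmode;
   without it the 5-bit estimate needs three doublings (to 40 bits)
   for SFmode and four (to 80 bits) for DFmode.  Below, a scalar
   sketch of the emitted sequence; fre_estimate is a hypothetical
   stand-in for the fres/fre hardware estimate.  */
#if 0
static double
swdiv_sketch (double n, double d)
{
  double x = fre_estimate (d);	/* low-precision 1/d estimate  */
  double e = 1.0 - d * x;	/* rs6000_emit_nmsub  */
  x = x + e * x;		/* rs6000_emit_madd: x *= (2 - d*x)  */
  /* ... e = e * e; x = x + e * x; repeated for remaining passes ...  */
  double u = n * x;		/* quotient estimate  */
  double v = n - d * u;		/* residual, again via fnmsub  */
  return v * x + u;		/* corrected quotient  */
}
#endif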
35056
35057 /* Goldschmidt's Algorithm for single/double-precision floating point
35058 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35059
35060 void
35061 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35062 {
35063 machine_mode mode = GET_MODE (src);
35064 rtx e = gen_reg_rtx (mode);
35065 rtx g = gen_reg_rtx (mode);
35066 rtx h = gen_reg_rtx (mode);
35067
35068 /* Low precision estimates guarantee 5 bits of accuracy. High
35069 precision estimates guarantee 14 bits of accuracy. SFmode
35070 requires 23 bits of accuracy. DFmode requires 52 bits of
35071 accuracy. Each pass at least doubles the accuracy, leading
35072 to the following. */
35073 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35074 if (mode == DFmode || mode == V2DFmode)
35075 passes++;
35076
35077 int i;
35078 rtx mhalf;
35079 enum insn_code code = optab_handler (smul_optab, mode);
35080 insn_gen_fn gen_mul = GEN_FCN (code);
35081
35082 gcc_assert (code != CODE_FOR_nothing);
35083
35084 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35085
35086 /* e = rsqrt estimate */
35087 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35088 UNSPEC_RSQRT)));
35089
35090   /* If src == 0.0, filter out the infinite estimate to avoid a NaN for sqrt(0.0).  */
35091 if (!recip)
35092 {
35093 rtx zero = force_reg (mode, CONST0_RTX (mode));
35094
35095 if (mode == SFmode)
35096 {
35097 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35098 e, zero, mode, 0);
35099 if (target != e)
35100 emit_move_insn (e, target);
35101 }
35102 else
35103 {
35104 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35105 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35106 }
35107 }
35108
35109 /* g = sqrt estimate. */
35110 emit_insn (gen_mul (g, e, src));
35111 /* h = 1/(2*sqrt) estimate. */
35112 emit_insn (gen_mul (h, e, mhalf));
35113
35114 if (recip)
35115 {
35116 if (passes == 1)
35117 {
35118 rtx t = gen_reg_rtx (mode);
35119 rs6000_emit_nmsub (t, g, h, mhalf);
35120 /* Apply correction directly to 1/rsqrt estimate. */
35121 rs6000_emit_madd (dst, e, t, e);
35122 }
35123 else
35124 {
35125 for (i = 0; i < passes; i++)
35126 {
35127 rtx t1 = gen_reg_rtx (mode);
35128 rtx g1 = gen_reg_rtx (mode);
35129 rtx h1 = gen_reg_rtx (mode);
35130
35131 rs6000_emit_nmsub (t1, g, h, mhalf);
35132 rs6000_emit_madd (g1, g, t1, g);
35133 rs6000_emit_madd (h1, h, t1, h);
35134
35135 g = g1;
35136 h = h1;
35137 }
35138 /* Multiply by 2 for 1/rsqrt. */
35139 emit_insn (gen_add3_insn (dst, h, h));
35140 }
35141 }
35142 else
35143 {
35144 rtx t = gen_reg_rtx (mode);
35145 rs6000_emit_nmsub (t, g, h, mhalf);
35146 rs6000_emit_madd (dst, g, t, g);
35147 }
35148
35149 return;
35150 }
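
/* A scalar sketch of the Goldschmidt iteration above, illustrative
   only; rsqrt_estimate is a hypothetical stand-in for the frsqrte
   hardware estimate, and the passes parameter for the count computed
   above.  At convergence g*h == 0.5, so the residual t goes to zero.  */
#if 0
static double
swsqrt_sketch (double src, int passes)
{
  double e = rsqrt_estimate (src);	/* ~ 1/sqrt(src)  */
  double g = e * src;			/* ~ sqrt(src)  */
  double h = e * 0.5;			/* ~ 1/(2*sqrt(src))  */
  for (int i = 0; i < passes; i++)
    {
      double t = 0.5 - g * h;		/* residual, via fnmsub  */
      g = g + g * t;			/* refine sqrt estimate  */
      h = h + h * t;			/* refine 1/(2*sqrt) estimate  */
    }
  return g;				/* the recip variant returns h + h  */
}
#endif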
35151
35152 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35153 (Power7) targets. DST is the target, and SRC is the argument operand. */
35154
35155 void
35156 rs6000_emit_popcount (rtx dst, rtx src)
35157 {
35158 machine_mode mode = GET_MODE (dst);
35159 rtx tmp1, tmp2;
35160
35161 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35162 if (TARGET_POPCNTD)
35163 {
35164 if (mode == SImode)
35165 emit_insn (gen_popcntdsi2 (dst, src));
35166 else
35167 emit_insn (gen_popcntddi2 (dst, src));
35168 return;
35169 }
35170
35171 tmp1 = gen_reg_rtx (mode);
35172
35173 if (mode == SImode)
35174 {
35175 emit_insn (gen_popcntbsi2 (tmp1, src));
35176 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35177 NULL_RTX, 0);
35178 tmp2 = force_reg (SImode, tmp2);
35179 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35180 }
35181 else
35182 {
35183 emit_insn (gen_popcntbdi2 (tmp1, src));
35184 tmp2 = expand_mult (DImode, tmp1,
35185 GEN_INT ((HOST_WIDE_INT)
35186 0x01010101 << 32 | 0x01010101),
35187 NULL_RTX, 0);
35188 tmp2 = force_reg (DImode, tmp2);
35189 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35190 }
35191 }
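
/* A worked example of the SImode fallback above, illustrative only:
   for src = 0x0F00F000, popcntb gives tmp1 = 0x04000400 (a per-byte
   bit count in each byte).  Multiplying by 0x01010101 sums the four
   byte counts into the most significant byte:
   0x04000400 * 0x01010101 == 0x08040400 (mod 2^32), and the final
   shift right by 24 extracts 8, the population count of src.  */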
35192
35193
35194 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35195 target, and SRC is the argument operand. */
35196
35197 void
35198 rs6000_emit_parity (rtx dst, rtx src)
35199 {
35200 machine_mode mode = GET_MODE (dst);
35201 rtx tmp;
35202
35203 tmp = gen_reg_rtx (mode);
35204
35205 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35206 if (TARGET_CMPB)
35207 {
35208 if (mode == SImode)
35209 {
35210 emit_insn (gen_popcntbsi2 (tmp, src));
35211 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35212 }
35213 else
35214 {
35215 emit_insn (gen_popcntbdi2 (tmp, src));
35216 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35217 }
35218 return;
35219 }
35220
35221 if (mode == SImode)
35222 {
35223 /* Is mult+shift >= shift+xor+shift+xor? */
35224 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35225 {
35226 rtx tmp1, tmp2, tmp3, tmp4;
35227
35228 tmp1 = gen_reg_rtx (SImode);
35229 emit_insn (gen_popcntbsi2 (tmp1, src));
35230
35231 tmp2 = gen_reg_rtx (SImode);
35232 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35233 tmp3 = gen_reg_rtx (SImode);
35234 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35235
35236 tmp4 = gen_reg_rtx (SImode);
35237 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35238 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35239 }
35240 else
35241 rs6000_emit_popcount (tmp, src);
35242 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35243 }
35244 else
35245 {
35246 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35247 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35248 {
35249 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35250
35251 tmp1 = gen_reg_rtx (DImode);
35252 emit_insn (gen_popcntbdi2 (tmp1, src));
35253
35254 tmp2 = gen_reg_rtx (DImode);
35255 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35256 tmp3 = gen_reg_rtx (DImode);
35257 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35258
35259 tmp4 = gen_reg_rtx (DImode);
35260 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35261 tmp5 = gen_reg_rtx (DImode);
35262 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35263
35264 tmp6 = gen_reg_rtx (DImode);
35265 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35266 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35267 }
35268 else
35269 rs6000_emit_popcount (tmp, src);
35270 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35271 }
35272 }
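
/* A worked note on the fallback above, illustrative only: parity is
   invariant under xor-folding, so after popcntb leaves per-byte counts
   in tmp1, xoring the word with itself shifted right by 16 and then by
   8 accumulates the parity of all the byte counts into the low byte,
   and the final AND with 1 extracts it.  The cost test picks this
   shift/xor sequence only when the multiply route would cost at least
   as much.  */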
35273
35274 /* Expand an Altivec constant permutation for little endian mode.
35275 OP0 and OP1 are the input vectors and TARGET is the output vector.
35276 SEL specifies the constant permutation vector.
35277
35278 There are two issues: First, the two input operands must be
35279 swapped so that together they form a double-wide array in LE
35280 order. Second, the vperm instruction has surprising behavior
35281 in LE mode: it interprets the elements of the source vectors
35282 in BE mode ("left to right") and interprets the elements of
35283 the destination vector in LE mode ("right to left"). To
35284 correct for this, we must subtract each element of the permute
35285 control vector from 31.
35286
35287 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35288 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35289 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35290 serve as the permute control vector. Then, in BE mode,
35291
35292 vperm 9,10,11,12
35293
35294 places the desired result in vr9. However, in LE mode the
35295 vector contents will be
35296
35297 vr10 = 00000003 00000002 00000001 00000000
35298 vr11 = 00000007 00000006 00000005 00000004
35299
35300 The result of the vperm using the same permute control vector is
35301
35302 vr9 = 05000000 07000000 01000000 03000000
35303
35304 That is, the leftmost 4 bytes of vr10 are interpreted as the
35305 source for the rightmost 4 bytes of vr9, and so on.
35306
35307 If we change the permute control vector to
35308
35309      vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35310
35311 and issue
35312
35313 vperm 9,11,10,12
35314
35315 we get the desired
35316
35317 vr9 = 00000006 00000004 00000002 00000000. */
35318
35319 static void
35320 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35321 const vec_perm_indices &sel)
35322 {
35323 unsigned int i;
35324 rtx perm[16];
35325 rtx constv, unspec;
35326
35327 /* Unpack and adjust the constant selector. */
35328 for (i = 0; i < 16; ++i)
35329 {
35330 unsigned int elt = 31 - (sel[i] & 31);
35331 perm[i] = GEN_INT (elt);
35332 }
35333
35334 /* Expand to a permute, swapping the inputs and using the
35335 adjusted selector. */
35336 if (!REG_P (op0))
35337 op0 = force_reg (V16QImode, op0);
35338 if (!REG_P (op1))
35339 op1 = force_reg (V16QImode, op1);
35340
35341 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35342 constv = force_reg (V16QImode, constv);
35343 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35344 UNSPEC_VPERM);
35345 if (!REG_P (target))
35346 {
35347 rtx tmp = gen_reg_rtx (V16QImode);
35348 emit_move_insn (tmp, unspec);
35349 unspec = tmp;
35350 }
35351
35352 emit_move_insn (target, unspec);
35353 }
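
/* A minimal check of the adjustment above, illustrative only: for the
   even-word extract in the comment preceding this function, applying
   elt = 31 - (sel[i] & 31) maps {0,1,2,3,8,...,27} to
   {31,30,29,28,23,...,4}, and the operands are passed to the UNSPEC
   in swapped order (op1 before op0), matching the vperm 9,11,10,12
   form shown there.  */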
35354
35355 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35356 permute control vector. But here it's not a constant, so we must
35357 generate a vector NAND or NOR to do the adjustment. */
35358
35359 void
35360 altivec_expand_vec_perm_le (rtx operands[4])
35361 {
35362 rtx notx, iorx, unspec;
35363 rtx target = operands[0];
35364 rtx op0 = operands[1];
35365 rtx op1 = operands[2];
35366 rtx sel = operands[3];
35367 rtx tmp = target;
35368 rtx norreg = gen_reg_rtx (V16QImode);
35369 machine_mode mode = GET_MODE (target);
35370
35371 /* Get everything in regs so the pattern matches. */
35372 if (!REG_P (op0))
35373 op0 = force_reg (mode, op0);
35374 if (!REG_P (op1))
35375 op1 = force_reg (mode, op1);
35376 if (!REG_P (sel))
35377 sel = force_reg (V16QImode, sel);
35378 if (!REG_P (target))
35379 tmp = gen_reg_rtx (mode);
35380
35381 if (TARGET_P9_VECTOR)
35382 {
35383 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35384 UNSPEC_VPERMR);
35385 }
35386 else
35387 {
35388 /* Invert the selector with a VNAND if available, else a VNOR.
35389 The VNAND is preferred for future fusion opportunities. */
35390 notx = gen_rtx_NOT (V16QImode, sel);
35391 iorx = (TARGET_P8_VECTOR
35392 ? gen_rtx_IOR (V16QImode, notx, notx)
35393 : gen_rtx_AND (V16QImode, notx, notx));
35394 emit_insn (gen_rtx_SET (norreg, iorx));
35395
35396 /* Permute with operands reversed and adjusted selector. */
35397 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35398 UNSPEC_VPERM);
35399 }
35400
35401 /* Copy into target, possibly by way of a register. */
35402 if (!REG_P (target))
35403 {
35404 emit_move_insn (tmp, unspec);
35405 unspec = tmp;
35406 }
35407
35408 emit_move_insn (target, unspec);
35409 }
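
/* The inversion above works because vperm only uses each selector
   byte modulo 32, and for 0 <= i < 32 the low five bits of ~i equal
   31 - i; a VNAND (or VNOR) of the selector with itself is therefore
   exactly the 31 - index adjustment described for the constant case,
   computed at run time in one instruction.  */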
35410
35411 /* Expand an Altivec constant permutation. Return true if we match
35412 an efficient implementation; false to fall back to VPERM.
35413
35414 OP0 and OP1 are the input vectors and TARGET is the output vector.
35415 SEL specifies the constant permutation vector. */
35416
35417 static bool
35418 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35419 const vec_perm_indices &sel)
35420 {
35421 struct altivec_perm_insn {
35422 HOST_WIDE_INT mask;
35423 enum insn_code impl;
35424 unsigned char perm[16];
35425 };
35426 static const struct altivec_perm_insn patterns[] = {
35427 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35428 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35429 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35430 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35431 { OPTION_MASK_ALTIVEC,
35432 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35433 : CODE_FOR_altivec_vmrglb_direct),
35434 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35435 { OPTION_MASK_ALTIVEC,
35436 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35437 : CODE_FOR_altivec_vmrglh_direct),
35438 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35439 { OPTION_MASK_ALTIVEC,
35440 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35441 : CODE_FOR_altivec_vmrglw_direct),
35442 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35443 { OPTION_MASK_ALTIVEC,
35444 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35445 : CODE_FOR_altivec_vmrghb_direct),
35446 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35447 { OPTION_MASK_ALTIVEC,
35448 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35449 : CODE_FOR_altivec_vmrghh_direct),
35450 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35451 { OPTION_MASK_ALTIVEC,
35452 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35453 : CODE_FOR_altivec_vmrghw_direct),
35454 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35455 { OPTION_MASK_P8_VECTOR,
35456 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35457 : CODE_FOR_p8_vmrgow_v4sf_direct),
35458 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35459 { OPTION_MASK_P8_VECTOR,
35460 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35461 : CODE_FOR_p8_vmrgew_v4sf_direct),
35462 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35463 };
35464
35465 unsigned int i, j, elt, which;
35466 unsigned char perm[16];
35467 rtx x;
35468 bool one_vec;
35469
35470 /* Unpack the constant selector. */
35471 for (i = which = 0; i < 16; ++i)
35472 {
35473 elt = sel[i] & 31;
35474 which |= (elt < 16 ? 1 : 2);
35475 perm[i] = elt;
35476 }
35477
35478 /* Simplify the constant selector based on operands. */
35479 switch (which)
35480 {
35481 default:
35482 gcc_unreachable ();
35483
35484 case 3:
35485 one_vec = false;
35486 if (!rtx_equal_p (op0, op1))
35487 break;
35488 /* FALLTHRU */
35489
35490 case 2:
35491 for (i = 0; i < 16; ++i)
35492 perm[i] &= 15;
35493 op0 = op1;
35494 one_vec = true;
35495 break;
35496
35497 case 1:
35498 op1 = op0;
35499 one_vec = true;
35500 break;
35501 }
35502
35503 /* Look for splat patterns. */
35504 if (one_vec)
35505 {
35506 elt = perm[0];
35507
35508 for (i = 0; i < 16; ++i)
35509 if (perm[i] != elt)
35510 break;
35511 if (i == 16)
35512 {
35513 if (!BYTES_BIG_ENDIAN)
35514 elt = 15 - elt;
35515 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35516 return true;
35517 }
35518
35519 if (elt % 2 == 0)
35520 {
35521 for (i = 0; i < 16; i += 2)
35522 if (perm[i] != elt || perm[i + 1] != elt + 1)
35523 break;
35524 if (i == 16)
35525 {
35526 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35527 x = gen_reg_rtx (V8HImode);
35528 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35529 GEN_INT (field)));
35530 emit_move_insn (target, gen_lowpart (V16QImode, x));
35531 return true;
35532 }
35533 }
35534
35535 if (elt % 4 == 0)
35536 {
35537 for (i = 0; i < 16; i += 4)
35538 if (perm[i] != elt
35539 || perm[i + 1] != elt + 1
35540 || perm[i + 2] != elt + 2
35541 || perm[i + 3] != elt + 3)
35542 break;
35543 if (i == 16)
35544 {
35545 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35546 x = gen_reg_rtx (V4SImode);
35547 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35548 GEN_INT (field)));
35549 emit_move_insn (target, gen_lowpart (V16QImode, x));
35550 return true;
35551 }
35552 }
35553 }
35554
35555 /* Look for merge and pack patterns. */
35556 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35557 {
35558 bool swapped;
35559
35560 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35561 continue;
35562
35563 elt = patterns[j].perm[0];
35564 if (perm[0] == elt)
35565 swapped = false;
35566 else if (perm[0] == elt + 16)
35567 swapped = true;
35568 else
35569 continue;
35570 for (i = 1; i < 16; ++i)
35571 {
35572 elt = patterns[j].perm[i];
35573 if (swapped)
35574 elt = (elt >= 16 ? elt - 16 : elt + 16);
35575 else if (one_vec && elt >= 16)
35576 elt -= 16;
35577 if (perm[i] != elt)
35578 break;
35579 }
35580 if (i == 16)
35581 {
35582 enum insn_code icode = patterns[j].impl;
35583 machine_mode omode = insn_data[icode].operand[0].mode;
35584 machine_mode imode = insn_data[icode].operand[1].mode;
35585
35586 /* For little-endian, don't use vpkuwum and vpkuhum if the
35587 underlying vector type is not V4SI and V8HI, respectively.
35588 For example, using vpkuwum with a V8HI picks up the even
35589 halfwords (BE numbering) when the even halfwords (LE
35590 numbering) are what we need. */
35591 if (!BYTES_BIG_ENDIAN
35592 && icode == CODE_FOR_altivec_vpkuwum_direct
35593 && ((GET_CODE (op0) == REG
35594 && GET_MODE (op0) != V4SImode)
35595 || (GET_CODE (op0) == SUBREG
35596 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35597 continue;
35598 if (!BYTES_BIG_ENDIAN
35599 && icode == CODE_FOR_altivec_vpkuhum_direct
35600 && ((GET_CODE (op0) == REG
35601 && GET_MODE (op0) != V8HImode)
35602 || (GET_CODE (op0) == SUBREG
35603 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35604 continue;
35605
35606 /* For little-endian, the two input operands must be swapped
35607 (or swapped back) to ensure proper right-to-left numbering
35608 from 0 to 2N-1. */
35609 if (swapped ^ !BYTES_BIG_ENDIAN)
35610 std::swap (op0, op1);
35611 if (imode != V16QImode)
35612 {
35613 op0 = gen_lowpart (imode, op0);
35614 op1 = gen_lowpart (imode, op1);
35615 }
35616 if (omode == V16QImode)
35617 x = target;
35618 else
35619 x = gen_reg_rtx (omode);
35620 emit_insn (GEN_FCN (icode) (x, op0, op1));
35621 if (omode != V16QImode)
35622 emit_move_insn (target, gen_lowpart (V16QImode, x));
35623 return true;
35624 }
35625 }
35626
35627 if (!BYTES_BIG_ENDIAN)
35628 {
35629 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35630 return true;
35631 }
35632
35633 return false;
35634 }
35635
35636 /* Expand a VSX Permute Doubleword constant permutation.
35637 Return true if we match an efficient implementation. */
35638
35639 static bool
35640 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35641 unsigned char perm0, unsigned char perm1)
35642 {
35643 rtx x;
35644
35645 /* If both selectors come from the same operand, fold to single op. */
35646 if ((perm0 & 2) == (perm1 & 2))
35647 {
35648 if (perm0 & 2)
35649 op0 = op1;
35650 else
35651 op1 = op0;
35652 }
35653 /* If both operands are equal, fold to simpler permutation. */
35654 if (rtx_equal_p (op0, op1))
35655 {
35656 perm0 = perm0 & 1;
35657 perm1 = (perm1 & 1) + 2;
35658 }
35659 /* If the first selector comes from the second operand, swap. */
35660 else if (perm0 & 2)
35661 {
35662 if (perm1 & 2)
35663 return false;
35664 perm0 -= 2;
35665 perm1 += 2;
35666 std::swap (op0, op1);
35667 }
35668 /* If the second selector does not come from the second operand, fail. */
35669 else if ((perm1 & 2) == 0)
35670 return false;
35671
35672 /* Success! */
35673 if (target != NULL)
35674 {
35675 machine_mode vmode, dmode;
35676 rtvec v;
35677
35678 vmode = GET_MODE (target);
35679 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35680 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35681 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35682 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35683 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35684 emit_insn (gen_rtx_SET (target, x));
35685 }
35686 return true;
35687 }
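
/* A worked example, illustrative only: selectors perm0 = 1, perm1 = 2
   pick the second doubleword of op0 and the first doubleword of op1;
   none of the folds above apply, so the emitted RTL is a VEC_SELECT of
   (VEC_CONCAT op0 op1) with the parallel (1 2), which matches xxpermdi
   directly.  */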
35688
35689 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35690
35691 static bool
35692 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35693 rtx op1, const vec_perm_indices &sel)
35694 {
35695 bool testing_p = !target;
35696
35697 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35698 if (TARGET_ALTIVEC && testing_p)
35699 return true;
35700
35701 /* Check for ps_merge* or xxpermdi insns. */
35702 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35703 {
35704 if (testing_p)
35705 {
35706 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35707 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35708 }
35709 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35710 return true;
35711 }
35712
35713 if (TARGET_ALTIVEC)
35714 {
35715 /* Force the target-independent code to lower to V16QImode. */
35716 if (vmode != V16QImode)
35717 return false;
35718 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35719 return true;
35720 }
35721
35722 return false;
35723 }
35724
35725 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35726 OP0 and OP1 are the input vectors and TARGET is the output vector.
35727 PERM specifies the constant permutation vector. */
35728
35729 static void
35730 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35731 machine_mode vmode, const vec_perm_builder &perm)
35732 {
35733 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35734 if (x != target)
35735 emit_move_insn (target, x);
35736 }
35737
35738 /* Expand an extract even operation. */
35739
35740 void
35741 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35742 {
35743 machine_mode vmode = GET_MODE (target);
35744 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35745 vec_perm_builder perm (nelt, nelt, 1);
35746
35747 for (i = 0; i < nelt; i++)
35748 perm.quick_push (i * 2);
35749
35750 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35751 }
35752
35753 /* Expand a vector interleave operation. */
35754
35755 void
35756 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35757 {
35758 machine_mode vmode = GET_MODE (target);
35759 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35760 vec_perm_builder perm (nelt, nelt, 1);
35761
35762 high = (highp ? 0 : nelt / 2);
35763 for (i = 0; i < nelt / 2; i++)
35764 {
35765 perm.quick_push (i + high);
35766 perm.quick_push (i + nelt + high);
35767 }
35768
35769 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35770 }
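
/* A worked example, illustrative only: for V4SI (nelt = 4) with
   highp, "high" is 0 and the selector built above is {0, 4, 1, 5};
   with !highp it is {2, 6, 3, 7}, interleaving the other halves of
   op0 and op1.  */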
35771
35772 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT.  */
35773 void
35774 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35775 {
35776 HOST_WIDE_INT hwi_scale (scale);
35777 REAL_VALUE_TYPE r_pow;
35778 rtvec v = rtvec_alloc (2);
35779 rtx elt;
35780 rtx scale_vec = gen_reg_rtx (V2DFmode);
35781 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35782 elt = const_double_from_real_value (r_pow, DFmode);
35783 RTVEC_ELT (v, 0) = elt;
35784 RTVEC_ELT (v, 1) = elt;
35785 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35786 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35787 }
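
/* A usage sketch, illustrative only: rs6000_scale_v2df (tgt, src, 4)
   computes 2**4 via real_powi, splats { 16.0, 16.0 } into scale_vec,
   and emits one V2DF multiply, so tgt = src * 16.0 element-wise.  */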
35788
35789 /* Return an RTX representing where to find the function value of a
35790 function returning MODE. */
35791 static rtx
35792 rs6000_complex_function_value (machine_mode mode)
35793 {
35794 unsigned int regno;
35795 rtx r1, r2;
35796 machine_mode inner = GET_MODE_INNER (mode);
35797 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35798
35799 if (TARGET_FLOAT128_TYPE
35800 && (mode == KCmode
35801 || (mode == TCmode && TARGET_IEEEQUAD)))
35802 regno = ALTIVEC_ARG_RETURN;
35803
35804 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35805 regno = FP_ARG_RETURN;
35806
35807 else
35808 {
35809 regno = GP_ARG_RETURN;
35810
35811 /* 32-bit is OK since it'll go in r3/r4. */
35812 if (TARGET_32BIT && inner_bytes >= 4)
35813 return gen_rtx_REG (mode, regno);
35814 }
35815
35816 if (inner_bytes >= 8)
35817 return gen_rtx_REG (mode, regno);
35818
35819 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35820 const0_rtx);
35821 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35822 GEN_INT (inner_bytes));
35823 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35824 }
35825
35826 /* Return an rtx describing a return value of MODE as a PARALLEL
35827 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35828 stride REG_STRIDE. */
35829
35830 static rtx
35831 rs6000_parallel_return (machine_mode mode,
35832 int n_elts, machine_mode elt_mode,
35833 unsigned int regno, unsigned int reg_stride)
35834 {
35835 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35836
35837 int i;
35838 for (i = 0; i < n_elts; i++)
35839 {
35840 rtx r = gen_rtx_REG (elt_mode, regno);
35841 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35842 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35843 regno += reg_stride;
35844 }
35845
35846 return par;
35847 }
35848
35849 /* Target hook for TARGET_FUNCTION_VALUE.
35850
35851 An integer value is in r3 and a floating-point value is in fp1,
35852 unless -msoft-float. */
35853
35854 static rtx
35855 rs6000_function_value (const_tree valtype,
35856 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35857 bool outgoing ATTRIBUTE_UNUSED)
35858 {
35859 machine_mode mode;
35860 unsigned int regno;
35861 machine_mode elt_mode;
35862 int n_elts;
35863
35864 /* Special handling for structs in darwin64. */
35865 if (TARGET_MACHO
35866 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35867 {
35868 CUMULATIVE_ARGS valcum;
35869 rtx valret;
35870
35871 valcum.words = 0;
35872 valcum.fregno = FP_ARG_MIN_REG;
35873 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35874 /* Do a trial code generation as if this were going to be passed as
35875 an argument; if any part goes in memory, we return NULL. */
35876 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35877 if (valret)
35878 return valret;
35879 /* Otherwise fall through to standard ABI rules. */
35880 }
35881
35882 mode = TYPE_MODE (valtype);
35883
35884 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35885 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35886 {
35887 int first_reg, n_regs;
35888
35889 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35890 {
35891 /* _Decimal128 must use even/odd register pairs. */
35892 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35893 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35894 }
35895 else
35896 {
35897 first_reg = ALTIVEC_ARG_RETURN;
35898 n_regs = 1;
35899 }
35900
35901 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35902 }
35903
35904   /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI.  */
35905 if (TARGET_32BIT && TARGET_POWERPC64)
35906 switch (mode)
35907 {
35908 default:
35909 break;
35910 case E_DImode:
35911 case E_SCmode:
35912 case E_DCmode:
35913 case E_TCmode:
35914 int count = GET_MODE_SIZE (mode) / 4;
35915 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35916 }
35917
35918 if ((INTEGRAL_TYPE_P (valtype)
35919 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35920 || POINTER_TYPE_P (valtype))
35921 mode = TARGET_32BIT ? SImode : DImode;
35922
35923 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35924 /* _Decimal128 must use an even/odd register pair. */
35925 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35926 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35927 && !FLOAT128_VECTOR_P (mode))
35928 regno = FP_ARG_RETURN;
35929 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35930 && targetm.calls.split_complex_arg)
35931 return rs6000_complex_function_value (mode);
35932 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35933 return register is used in both cases, and we won't see V2DImode/V2DFmode
35934 for pure altivec, combine the two cases. */
35935 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35936 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35937 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35938 regno = ALTIVEC_ARG_RETURN;
35939 else
35940 regno = GP_ARG_RETURN;
35941
35942 return gen_rtx_REG (mode, regno);
35943 }
35944
35945 /* Define how to find the value returned by a library function
35946 assuming the value has mode MODE. */
35947 rtx
35948 rs6000_libcall_value (machine_mode mode)
35949 {
35950 unsigned int regno;
35951
35952   /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI.  */
35953 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35954 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35955
35956 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35957 /* _Decimal128 must use an even/odd register pair. */
35958 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35959 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
35960 regno = FP_ARG_RETURN;
35961 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35962 return register is used in both cases, and we won't see V2DImode/V2DFmode
35963 for pure altivec, combine the two cases. */
35964 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35965 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35966 regno = ALTIVEC_ARG_RETURN;
35967 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35968 return rs6000_complex_function_value (mode);
35969 else
35970 regno = GP_ARG_RETURN;
35971
35972 return gen_rtx_REG (mode, regno);
35973 }
35974
35975 /* Compute register pressure classes. We implement the target hook to avoid
35976 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
35977    lead to incorrect estimates of the number of available registers and
35978    therefore increased register pressure/spill.  */
35979 static int
35980 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35981 {
35982 int n;
35983
35984 n = 0;
35985 pressure_classes[n++] = GENERAL_REGS;
35986 if (TARGET_VSX)
35987 pressure_classes[n++] = VSX_REGS;
35988 else
35989 {
35990 if (TARGET_ALTIVEC)
35991 pressure_classes[n++] = ALTIVEC_REGS;
35992 if (TARGET_HARD_FLOAT)
35993 pressure_classes[n++] = FLOAT_REGS;
35994 }
35995 pressure_classes[n++] = CR_REGS;
35996 pressure_classes[n++] = SPECIAL_REGS;
35997
35998 return n;
35999 }
36000
36001 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36002 Frame pointer elimination is automatically handled.
36003
36004 For the RS/6000, if frame pointer elimination is being done, we would like
36005 to convert ap into fp, not sp.
36006
36007 We need r30 if -mminimal-toc was specified, and there are constant pool
36008 references. */
36009
36010 static bool
36011 rs6000_can_eliminate (const int from, const int to)
36012 {
36013 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36014 ? ! frame_pointer_needed
36015 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36016 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36017 || constant_pool_empty_p ()
36018 : true);
36019 }
36020
36021 /* Define the offset between two registers, FROM to be eliminated and its
36022 replacement TO, at the start of a routine. */
36023 HOST_WIDE_INT
36024 rs6000_initial_elimination_offset (int from, int to)
36025 {
36026 rs6000_stack_t *info = rs6000_stack_info ();
36027 HOST_WIDE_INT offset;
36028
36029 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36030 offset = info->push_p ? 0 : -info->total_size;
36031 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36032 {
36033 offset = info->push_p ? 0 : -info->total_size;
36034 if (FRAME_GROWS_DOWNWARD)
36035 offset += info->fixed_size + info->vars_size + info->parm_size;
36036 }
36037 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36038 offset = FRAME_GROWS_DOWNWARD
36039 ? info->fixed_size + info->vars_size + info->parm_size
36040 : 0;
36041 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36042 offset = info->total_size;
36043 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36044 offset = info->push_p ? info->total_size : 0;
36045 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36046 offset = 0;
36047 else
36048 gcc_unreachable ();
36049
36050 return offset;
36051 }
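
/* A worked example, illustrative only: in a function that pushes a
   frame (info->push_p) with total_size 128, eliminating
   ARG_POINTER_REGNUM to STACK_POINTER_REGNUM yields 128, since the
   incoming argument area sits above the newly pushed frame, while
   HARD_FRAME_POINTER_REGNUM to STACK_POINTER_REGNUM yields 0 because
   the hard frame pointer tracks the new stack pointer.  */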
36052
36053 /* Fill in the sizes of the registers used by the unwinder.  */
36054
36055 static void
36056 rs6000_init_dwarf_reg_sizes_extra (tree address)
36057 {
36058 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36059 {
36060 int i;
36061 machine_mode mode = TYPE_MODE (char_type_node);
36062 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36063 rtx mem = gen_rtx_MEM (BLKmode, addr);
36064 rtx value = gen_int_mode (16, mode);
36065
36066 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36067 The unwinder still needs to know the size of Altivec registers. */
36068
36069 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36070 {
36071 int column = DWARF_REG_TO_UNWIND_COLUMN
36072 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36073 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36074
36075 emit_move_insn (adjust_address (mem, mode, offset), value);
36076 }
36077 }
36078 }
36079
36080 /* Map internal gcc register numbers to debug format register numbers.
36081 FORMAT specifies the type of debug register number to use:
36082 0 -- debug information, except for frame-related sections
36083 1 -- DWARF .debug_frame section
36084 2 -- DWARF .eh_frame section */
36085
36086 unsigned int
36087 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36088 {
36089 /* Except for the above, we use the internal number for non-DWARF
36090 debug information, and also for .eh_frame. */
36091 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36092 return regno;
36093
36094 /* On some platforms, we use the standard DWARF register
36095 numbering for .debug_info and .debug_frame. */
36096 #ifdef RS6000_USE_DWARF_NUMBERING
36097 if (regno <= 63)
36098 return regno;
36099 if (regno == LR_REGNO)
36100 return 108;
36101 if (regno == CTR_REGNO)
36102 return 109;
36103 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36104 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36105 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36106 to the DWARF reg for CR. */
36107 if (format == 1 && regno == CR2_REGNO)
36108 return 64;
36109 if (CR_REGNO_P (regno))
36110 return regno - CR0_REGNO + 86;
36111 if (regno == CA_REGNO)
36112 return 101; /* XER */
36113 if (ALTIVEC_REGNO_P (regno))
36114 return regno - FIRST_ALTIVEC_REGNO + 1124;
36115 if (regno == VRSAVE_REGNO)
36116 return 356;
36117 if (regno == VSCR_REGNO)
36118 return 67;
36119 #endif
36120 return regno;
36121 }
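
/* A worked example, illustrative only, for targets defining
   RS6000_USE_DWARF_NUMBERING: in .debug_frame (format == 1) GPR3 maps
   to 3, LR to 108, CR2 to 64 (the whole-CR column, per the comment
   above), and the first Altivec register to 1124.  */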
36122
36123 /* Target hook for eh_return_filter_mode.  */
36124 static scalar_int_mode
36125 rs6000_eh_return_filter_mode (void)
36126 {
36127 return TARGET_32BIT ? SImode : word_mode;
36128 }
36129
36130 /* Target hook for translate_mode_attribute. */
36131 static machine_mode
36132 rs6000_translate_mode_attribute (machine_mode mode)
36133 {
36134 if ((FLOAT128_IEEE_P (mode)
36135 && ieee128_float_type_node == long_double_type_node)
36136 || (FLOAT128_IBM_P (mode)
36137 && ibm128_float_type_node == long_double_type_node))
36138 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36139 return mode;
36140 }
36141
36142 /* Target hook for scalar_mode_supported_p. */
36143 static bool
36144 rs6000_scalar_mode_supported_p (scalar_mode mode)
36145 {
36146 /* -m32 does not support TImode. This is the default, from
36147 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36148 same ABI as for -m32. But default_scalar_mode_supported_p allows
36149 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36150 for -mpowerpc64. */
36151 if (TARGET_32BIT && mode == TImode)
36152 return false;
36153
36154 if (DECIMAL_FLOAT_MODE_P (mode))
36155 return default_decimal_float_supported_p ();
36156 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36157 return true;
36158 else
36159 return default_scalar_mode_supported_p (mode);
36160 }
36161
36162 /* Target hook for vector_mode_supported_p. */
36163 static bool
36164 rs6000_vector_mode_supported_p (machine_mode mode)
36165 {
36166 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36167 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36168 double-double. */
36169 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36170 return true;
36171
36172 else
36173 return false;
36174 }
36175
36176 /* Target hook for floatn_mode. */
36177 static opt_scalar_float_mode
36178 rs6000_floatn_mode (int n, bool extended)
36179 {
36180 if (extended)
36181 {
36182 switch (n)
36183 {
36184 case 32:
36185 return DFmode;
36186
36187 case 64:
36188 if (TARGET_FLOAT128_TYPE)
36189 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36190 else
36191 return opt_scalar_float_mode ();
36192
36193 case 128:
36194 return opt_scalar_float_mode ();
36195
36196 default:
36197 /* Those are the only valid _FloatNx types. */
36198 gcc_unreachable ();
36199 }
36200 }
36201 else
36202 {
36203 switch (n)
36204 {
36205 case 32:
36206 return SFmode;
36207
36208 case 64:
36209 return DFmode;
36210
36211 case 128:
36212 if (TARGET_FLOAT128_TYPE)
36213 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36214 else
36215 return opt_scalar_float_mode ();
36216
36217 default:
36218 return opt_scalar_float_mode ();
36219 }
36220 }
36221
36222 }
36223
36224 /* Target hook for c_mode_for_suffix. */
36225 static machine_mode
36226 rs6000_c_mode_for_suffix (char suffix)
36227 {
36228 if (TARGET_FLOAT128_TYPE)
36229 {
36230 if (suffix == 'q' || suffix == 'Q')
36231 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36232
36233 /* At the moment, we are not defining a suffix for IBM extended double.
36234 If/when the default for -mabi=ieeelongdouble is changed, and we want
36235 to support __ibm128 constants in legacy library code, we may need to
36236 	 re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
36237 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36238 __float80 constants. */
36239 }
36240
36241 return VOIDmode;
36242 }
36243
36244 /* Target hook for invalid_arg_for_unprototyped_fn. */
36245 static const char *
36246 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36247 {
36248 return (!rs6000_darwin64_abi
36249 && typelist == 0
36250 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36251 && (funcdecl == NULL_TREE
36252 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36253 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36254 ? N_("AltiVec argument passed to unprototyped function")
36255 : NULL;
36256 }
36257
36258 /* For TARGET_SECURE_PLT 32-bit PIC code we can save the PIC register
36259    setup by using the __stack_chk_fail_local hidden function instead of
36260 calling __stack_chk_fail directly. Otherwise it is better to call
36261 __stack_chk_fail directly. */
36262
36263 static tree ATTRIBUTE_UNUSED
36264 rs6000_stack_protect_fail (void)
36265 {
36266 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36267 ? default_hidden_stack_protect_fail ()
36268 : default_external_stack_protect_fail ();
36269 }
36270
36271 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36272
36273 #if TARGET_ELF
36274 static unsigned HOST_WIDE_INT
36275 rs6000_asan_shadow_offset (void)
36276 {
36277 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36278 }
36279 #endif
36280 \f
36281 /* Mask options that we want to support inside attribute((target)) and
36282 #pragma GCC target operations. Note, we do not include things like
36283 64/32-bit, endianness, hard/soft floating point, etc. that would have
36284 different calling sequences. */
36285
36286 struct rs6000_opt_mask {
36287 const char *name; /* option name */
36288 HOST_WIDE_INT mask; /* mask to set */
36289 bool invert; /* invert sense of mask */
36290 bool valid_target; /* option is a target option */
36291 };
36292
36293 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36294 {
36295 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36296 { "cmpb", OPTION_MASK_CMPB, false, true },
36297 { "crypto", OPTION_MASK_CRYPTO, false, true },
36298 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36299 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36300 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36301 false, true },
36302 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36303 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36304 { "fprnd", OPTION_MASK_FPRND, false, true },
36305 { "hard-dfp", OPTION_MASK_DFP, false, true },
36306 { "htm", OPTION_MASK_HTM, false, true },
36307 { "isel", OPTION_MASK_ISEL, false, true },
36308 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36309 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36310 { "modulo", OPTION_MASK_MODULO, false, true },
36311 { "mulhw", OPTION_MASK_MULHW, false, true },
36312 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36313 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36314 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36315 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36316 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36317 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36318 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36319 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36320 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36321 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36322 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36323 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36324 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36325 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36326 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36327 { "string", 0, false, true },
36328 { "update", OPTION_MASK_NO_UPDATE, true, true },
36329 { "vsx", OPTION_MASK_VSX, false, true },
36330 #ifdef OPTION_MASK_64BIT
36331 #if TARGET_AIX_OS
36332 { "aix64", OPTION_MASK_64BIT, false, false },
36333 { "aix32", OPTION_MASK_64BIT, true, false },
36334 #else
36335 { "64", OPTION_MASK_64BIT, false, false },
36336 { "32", OPTION_MASK_64BIT, true, false },
36337 #endif
36338 #endif
36339 #ifdef OPTION_MASK_EABI
36340 { "eabi", OPTION_MASK_EABI, false, false },
36341 #endif
36342 #ifdef OPTION_MASK_LITTLE_ENDIAN
36343 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36344 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36345 #endif
36346 #ifdef OPTION_MASK_RELOCATABLE
36347 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36348 #endif
36349 #ifdef OPTION_MASK_STRICT_ALIGN
36350 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36351 #endif
36352 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36353 { "string", 0, false, false },
36354 };
36355
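/* Editor's illustrative usage of the mask names above (a sketch, not from
   the original source): each name can be given to the target attribute or
   pragma, optionally negated with a "no-" prefix, e.g.

     #pragma GCC target ("vsx,no-crypto")

     __attribute__((target ("power9-vector")))
     vector int vadd (vector int a, vector int b) { return a + b; }  */
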
36356 /* Builtin mask mapping for printing the flags. */
36357 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36358 {
36359 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36360 { "vsx", RS6000_BTM_VSX, false, false },
36361 { "fre", RS6000_BTM_FRE, false, false },
36362 { "fres", RS6000_BTM_FRES, false, false },
36363 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36364 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36365 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36366 { "cell", RS6000_BTM_CELL, false, false },
36367 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36368 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36369 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36370 { "crypto", RS6000_BTM_CRYPTO, false, false },
36371 { "htm", RS6000_BTM_HTM, false, false },
36372 { "hard-dfp", RS6000_BTM_DFP, false, false },
36373 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36374 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36375 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36376 { "float128", RS6000_BTM_FLOAT128, false, false },
36377 { "float128-hw", RS6000_BTM_FLOAT128_HW, false, false },
36378 };
36379
36380 /* Option variables that we want to support inside attribute((target)) and
36381 #pragma GCC target operations. */
36382
36383 struct rs6000_opt_var {
36384 const char *name; /* option name */
36385 size_t global_offset; /* offset of the option in global_options. */
36386 size_t target_offset; /* offset of the option in target options. */
36387 };
36388
36389 static struct rs6000_opt_var const rs6000_opt_vars[] =
36390 {
36391 { "friz",
36392 offsetof (struct gcc_options, x_TARGET_FRIZ),
36393 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36394 { "avoid-indexed-addresses",
36395 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36396 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36397 { "longcall",
36398 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36399 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36400 { "optimize-swaps",
36401 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36402 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36403 { "allow-movmisalign",
36404 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36405 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36406 { "sched-groups",
36407 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36408 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36409 { "always-hint",
36410 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36411 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36412 { "align-branch-targets",
36413 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36414 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36415 { "tls-markers",
36416 offsetof (struct gcc_options, x_tls_markers),
36417 offsetof (struct cl_target_option, x_tls_markers), },
36418 { "sched-prolog",
36419 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36420 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36421 { "sched-epilog",
36422 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36423 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36424 { "speculate-indirect-jumps",
36425 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36426 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36427 };
36428
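/* Editor's sketch of the option variables above in use (illustrative):
   unlike the ISA masks, these toggle boolean option variables directly,
   again accepting a "no-" prefix, e.g.

     #pragma GCC target ("no-avoid-indexed-addresses")

     __attribute__((target ("longcall")))
     void far_away (void);  */
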
36429 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36430 parsing. Return true if there were no errors. */
36431
36432 static bool
36433 rs6000_inner_target_options (tree args, bool attr_p)
36434 {
36435 bool ret = true;
36436
36437 if (args == NULL_TREE)
36438 ;
36439
36440 else if (TREE_CODE (args) == STRING_CST)
36441 {
36442 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36443 char *q;
36444
36445 while ((q = strtok (p, ",")) != NULL)
36446 {
36447 bool error_p = false;
36448 bool not_valid_p = false;
36449 const char *cpu_opt = NULL;
36450
36451 p = NULL;
36452 if (strncmp (q, "cpu=", 4) == 0)
36453 {
36454 int cpu_index = rs6000_cpu_name_lookup (q+4);
36455 if (cpu_index >= 0)
36456 rs6000_cpu_index = cpu_index;
36457 else
36458 {
36459 error_p = true;
36460 cpu_opt = q+4;
36461 }
36462 }
36463 else if (strncmp (q, "tune=", 5) == 0)
36464 {
36465 int tune_index = rs6000_cpu_name_lookup (q+5);
36466 if (tune_index >= 0)
36467 rs6000_tune_index = tune_index;
36468 else
36469 {
36470 error_p = true;
36471 cpu_opt = q+5;
36472 }
36473 }
36474 else
36475 {
36476 size_t i;
36477 bool invert = false;
36478 char *r = q;
36479
36480 error_p = true;
36481 if (strncmp (r, "no-", 3) == 0)
36482 {
36483 invert = true;
36484 r += 3;
36485 }
36486
36487 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36488 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36489 {
36490 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36491
36492 if (!rs6000_opt_masks[i].valid_target)
36493 not_valid_p = true;
36494 else
36495 {
36496 error_p = false;
36497 rs6000_isa_flags_explicit |= mask;
36498
36499 /* VSX needs altivec, so -mvsx automagically sets
36500 altivec and disables -mavoid-indexed-addresses. */
36501 if (!invert)
36502 {
36503 if (mask == OPTION_MASK_VSX)
36504 {
36505 mask |= OPTION_MASK_ALTIVEC;
36506 TARGET_AVOID_XFORM = 0;
36507 }
36508 }
36509
36510 if (rs6000_opt_masks[i].invert)
36511 invert = !invert;
36512
36513 if (invert)
36514 rs6000_isa_flags &= ~mask;
36515 else
36516 rs6000_isa_flags |= mask;
36517 }
36518 break;
36519 }
36520
36521 if (error_p && !not_valid_p)
36522 {
36523 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36524 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36525 {
36526 size_t j = rs6000_opt_vars[i].global_offset;
36527 *((int *) ((char *)&global_options + j)) = !invert;
36528 error_p = false;
36529 not_valid_p = false;
36530 break;
36531 }
36532 }
36533 }
36534
36535 if (error_p)
36536 {
36537 const char *eprefix, *esuffix;
36538
36539 ret = false;
36540 if (attr_p)
36541 {
36542 eprefix = "__attribute__((__target__(";
36543 esuffix = ")))";
36544 }
36545 else
36546 {
36547 eprefix = "#pragma GCC target ";
36548 esuffix = "";
36549 }
36550
36551 if (cpu_opt)
36552 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36553 q, esuffix);
36554 else if (not_valid_p)
36555 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36556 else
36557 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36558 }
36559 }
36560 }
36561
36562 else if (TREE_CODE (args) == TREE_LIST)
36563 {
36564 do
36565 {
36566 tree value = TREE_VALUE (args);
36567 if (value)
36568 {
36569 bool ret2 = rs6000_inner_target_options (value, attr_p);
36570 if (!ret2)
36571 ret = false;
36572 }
36573 args = TREE_CHAIN (args);
36574 }
36575 while (args != NULL_TREE);
36576 }
36577
36578 else
36579 {
36580 error ("attribute %<target%> argument not a string");
36581 return false;
36582 }
36583
36584 return ret;
36585 }
36586
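/* Editor's examples of strings accepted by rs6000_inner_target_options
   (illustrative): "cpu=power9" sets rs6000_cpu_index, "tune=power8" sets
   rs6000_tune_index, "vsx,no-crypto" sets and clears OPTION_MASK_ bits
   from rs6000_opt_masks, and "sched-groups" flips the matching entry in
   rs6000_opt_vars.  Anything else produces one of the errors above.  */
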
36587 /* Print out the target options as a list for -mdebug=target. */
36588
36589 static void
36590 rs6000_debug_target_options (tree args, const char *prefix)
36591 {
36592 if (args == NULL_TREE)
36593 fprintf (stderr, "%s<NULL>", prefix);
36594
36595 else if (TREE_CODE (args) == STRING_CST)
36596 {
36597 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36598 char *q;
36599
36600 while ((q = strtok (p, ",")) != NULL)
36601 {
36602 p = NULL;
36603 fprintf (stderr, "%s\"%s\"", prefix, q);
36604 prefix = ", ";
36605 }
36606 }
36607
36608 else if (TREE_CODE (args) == TREE_LIST)
36609 {
36610 do
36611 {
36612 tree value = TREE_VALUE (args);
36613 if (value)
36614 {
36615 rs6000_debug_target_options (value, prefix);
36616 prefix = ", ";
36617 }
36618 args = TREE_CHAIN (args);
36619 }
36620 while (args != NULL_TREE);
36621 }
36622
36623 else
36624 gcc_unreachable ();
36625
36626 return;
36627 }
36628
36629 \f
36630 /* Hook to validate attribute((target("..."))). */
36631
36632 static bool
36633 rs6000_valid_attribute_p (tree fndecl,
36634 tree ARG_UNUSED (name),
36635 tree args,
36636 int flags)
36637 {
36638 struct cl_target_option cur_target;
36639 bool ret;
36640 tree old_optimize;
36641 tree new_target, new_optimize;
36642 tree func_optimize;
36643
36644 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36645
36646 if (TARGET_DEBUG_TARGET)
36647 {
36648 tree tname = DECL_NAME (fndecl);
36649 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36650 if (tname)
36651 fprintf (stderr, "function: %.*s\n",
36652 (int) IDENTIFIER_LENGTH (tname),
36653 IDENTIFIER_POINTER (tname));
36654 else
36655 fprintf (stderr, "function: unknown\n");
36656
36657 fprintf (stderr, "args:");
36658 rs6000_debug_target_options (args, " ");
36659 fprintf (stderr, "\n");
36660
36661 if (flags)
36662 fprintf (stderr, "flags: 0x%x\n", flags);
36663
36664 fprintf (stderr, "--------------------\n");
36665 }
36666
36667 /* attribute((target("default"))) does nothing, beyond
36668 affecting multi-versioning. */
36669 if (TREE_VALUE (args)
36670 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36671 && TREE_CHAIN (args) == NULL_TREE
36672 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36673 return true;
36674
36675 old_optimize = build_optimization_node (&global_options);
36676 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36677
36678 /* If the function changed the optimization levels as well as setting target
36679 options, start with the optimizations specified. */
36680 if (func_optimize && func_optimize != old_optimize)
36681 cl_optimization_restore (&global_options,
36682 TREE_OPTIMIZATION (func_optimize));
36683
36684 /* The target attributes may also change some optimization flags, so update
36685 the optimization options if necessary. */
36686 cl_target_option_save (&cur_target, &global_options);
36687 rs6000_cpu_index = rs6000_tune_index = -1;
36688 ret = rs6000_inner_target_options (args, true);
36689
36690 /* Set up any additional state. */
36691 if (ret)
36692 {
36693 ret = rs6000_option_override_internal (false);
36694 new_target = build_target_option_node (&global_options);
36695 }
36696 else
36697 new_target = NULL;
36698
36699 new_optimize = build_optimization_node (&global_options);
36700
36701 if (!new_target)
36702 ret = false;
36703
36704 else if (fndecl)
36705 {
36706 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36707
36708 if (old_optimize != new_optimize)
36709 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36710 }
36711
36712 cl_target_option_restore (&global_options, &cur_target);
36713
36714 if (old_optimize != new_optimize)
36715 cl_optimization_restore (&global_options,
36716 TREE_OPTIMIZATION (old_optimize));
36717
36718 return ret;
36719 }
36720
36721 \f
36722 /* Hook to validate the current #pragma GCC target and set the state, and
36723 update the macros based on what was changed. If ARGS is NULL, then
36724 POP_TARGET is used to reset the options. */
36725
36726 bool
36727 rs6000_pragma_target_parse (tree args, tree pop_target)
36728 {
36729 tree prev_tree = build_target_option_node (&global_options);
36730 tree cur_tree;
36731 struct cl_target_option *prev_opt, *cur_opt;
36732 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36733 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36734
36735 if (TARGET_DEBUG_TARGET)
36736 {
36737 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36738 fprintf (stderr, "args:");
36739 rs6000_debug_target_options (args, " ");
36740 fprintf (stderr, "\n");
36741
36742 if (pop_target)
36743 {
36744 fprintf (stderr, "pop_target:\n");
36745 debug_tree (pop_target);
36746 }
36747 else
36748 fprintf (stderr, "pop_target: <NULL>\n");
36749
36750 fprintf (stderr, "--------------------\n");
36751 }
36752
36753 if (! args)
36754 {
36755 cur_tree = ((pop_target)
36756 ? pop_target
36757 : target_option_default_node);
36758 cl_target_option_restore (&global_options,
36759 TREE_TARGET_OPTION (cur_tree));
36760 }
36761 else
36762 {
36763 rs6000_cpu_index = rs6000_tune_index = -1;
36764 if (!rs6000_inner_target_options (args, false)
36765 || !rs6000_option_override_internal (false)
36766 || (cur_tree = build_target_option_node (&global_options))
36767 == NULL_TREE)
36768 {
36769 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36770 fprintf (stderr, "invalid pragma\n");
36771
36772 return false;
36773 }
36774 }
36775
36776 target_option_current_node = cur_tree;
36777 rs6000_activate_target_options (target_option_current_node);
36778
36779 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36780 change the macros that are defined. */
36781 if (rs6000_target_modify_macros_ptr)
36782 {
36783 prev_opt = TREE_TARGET_OPTION (prev_tree);
36784 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36785 prev_flags = prev_opt->x_rs6000_isa_flags;
36786
36787 cur_opt = TREE_TARGET_OPTION (cur_tree);
36788 cur_flags = cur_opt->x_rs6000_isa_flags;
36789 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36790
36791 diff_bumask = (prev_bumask ^ cur_bumask);
36792 diff_flags = (prev_flags ^ cur_flags);
36793
36794 if ((diff_flags != 0) || (diff_bumask != 0))
36795 {
36796 /* Delete old macros. */
36797 rs6000_target_modify_macros_ptr (false,
36798 prev_flags & diff_flags,
36799 prev_bumask & diff_bumask);
36800
36801 /* Define new macros. */
36802 rs6000_target_modify_macros_ptr (true,
36803 cur_flags & diff_flags,
36804 cur_bumask & diff_bumask);
36805 }
36806 }
36807
36808 return true;
36809 }
36810
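/* Editor's sketch of the macro updating done above (illustrative; assumes
   the C-family rs6000_target_modify_macros_ptr callback is installed):

     #pragma GCC push_options
     #pragma GCC target ("power9-vector")
     // __POWER9_VECTOR__ is now defined
     #pragma GCC pop_options
     // ... and is undefined again here
*/
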
36811 \f
36812 /* Remember the last target of rs6000_set_current_function. */
36813 static GTY(()) tree rs6000_previous_fndecl;
36814
36815 /* Restore target's globals from NEW_TREE and invalidate the
36816 rs6000_previous_fndecl cache. */
36817
36818 void
36819 rs6000_activate_target_options (tree new_tree)
36820 {
36821 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36822 if (TREE_TARGET_GLOBALS (new_tree))
36823 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36824 else if (new_tree == target_option_default_node)
36825 restore_target_globals (&default_target_globals);
36826 else
36827 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36828 rs6000_previous_fndecl = NULL_TREE;
36829 }
36830
36831 /* Establish appropriate back-end context for processing the function
36832 FNDECL. The argument might be NULL to indicate processing at top
36833 level, outside of any function scope. */
36834 static void
36835 rs6000_set_current_function (tree fndecl)
36836 {
36837 if (TARGET_DEBUG_TARGET)
36838 {
36839 fprintf (stderr, "\n==================== rs6000_set_current_function");
36840
36841 if (fndecl)
36842 fprintf (stderr, ", fndecl %s (%p)",
36843 (DECL_NAME (fndecl)
36844 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36845 : "<unknown>"), (void *)fndecl);
36846
36847 if (rs6000_previous_fndecl)
36848 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36849
36850 fprintf (stderr, "\n");
36851 }
36852
36853 /* Only change the context if the function changes. This hook is called
36854 several times in the course of compiling a function, and we don't want to
36855 slow things down too much or call target_reinit when it isn't safe. */
36856 if (fndecl == rs6000_previous_fndecl)
36857 return;
36858
36859 tree old_tree;
36860 if (rs6000_previous_fndecl == NULL_TREE)
36861 old_tree = target_option_current_node;
36862 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36863 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36864 else
36865 old_tree = target_option_default_node;
36866
36867 tree new_tree;
36868 if (fndecl == NULL_TREE)
36869 {
36870 if (old_tree != target_option_current_node)
36871 new_tree = target_option_current_node;
36872 else
36873 new_tree = NULL_TREE;
36874 }
36875 else
36876 {
36877 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36878 if (new_tree == NULL_TREE)
36879 new_tree = target_option_default_node;
36880 }
36881
36882 if (TARGET_DEBUG_TARGET)
36883 {
36884 if (new_tree)
36885 {
36886 fprintf (stderr, "\nnew fndecl target specific options:\n");
36887 debug_tree (new_tree);
36888 }
36889
36890 if (old_tree)
36891 {
36892 fprintf (stderr, "\nold fndecl target specific options:\n");
36893 debug_tree (old_tree);
36894 }
36895
36896 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36897 fprintf (stderr, "--------------------\n");
36898 }
36899
36900 if (new_tree && old_tree != new_tree)
36901 rs6000_activate_target_options (new_tree);
36902
36903 if (fndecl)
36904 rs6000_previous_fndecl = fndecl;
36905 }
36906
36907 \f
36908 /* Save the current options */
36909
36910 static void
36911 rs6000_function_specific_save (struct cl_target_option *ptr,
36912 struct gcc_options *opts)
36913 {
36914 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36915 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36916 }
36917
36918 /* Restore the current options */
36919
36920 static void
36921 rs6000_function_specific_restore (struct gcc_options *opts,
36922 struct cl_target_option *ptr)
36923
36924 {
36925 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36926 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36927 (void) rs6000_option_override_internal (false);
36928 }
36929
36930 /* Print the current options */
36931
36932 static void
36933 rs6000_function_specific_print (FILE *file, int indent,
36934 struct cl_target_option *ptr)
36935 {
36936 rs6000_print_isa_options (file, indent, "Isa options set",
36937 ptr->x_rs6000_isa_flags);
36938
36939 rs6000_print_isa_options (file, indent, "Isa options explicit",
36940 ptr->x_rs6000_isa_flags_explicit);
36941 }
36942
36943 /* Helper function to print the current isa or misc options on a line. */
36944
36945 static void
36946 rs6000_print_options_internal (FILE *file,
36947 int indent,
36948 const char *string,
36949 HOST_WIDE_INT flags,
36950 const char *prefix,
36951 const struct rs6000_opt_mask *opts,
36952 size_t num_elements)
36953 {
36954 size_t i;
36955 size_t start_column = 0;
36956 size_t cur_column;
36957 size_t max_column = 120;
36958 size_t prefix_len = strlen (prefix);
36959 size_t comma_len = 0;
36960 const char *comma = "";
36961
36962 if (indent)
36963 start_column += fprintf (file, "%*s", indent, "");
36964
36965 if (!flags)
36966 {
36967 fprintf (file, DEBUG_FMT_S, string, "<none>");
36968 return;
36969 }
36970
36971 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36972
36973 /* Print the various mask options. */
36974 cur_column = start_column;
36975 for (i = 0; i < num_elements; i++)
36976 {
36977 bool invert = opts[i].invert;
36978 const char *name = opts[i].name;
36979 const char *no_str = "";
36980 HOST_WIDE_INT mask = opts[i].mask;
36981 size_t len = comma_len + prefix_len + strlen (name);
36982
36983 if (!invert)
36984 {
36985 if ((flags & mask) == 0)
36986 {
36987 no_str = "no-";
36988 len += sizeof ("no-") - 1;
36989 }
36990
36991 flags &= ~mask;
36992 }
36993
36994 else
36995 {
36996 if ((flags & mask) != 0)
36997 {
36998 no_str = "no-";
36999 len += sizeof ("no-") - 1;
37000 }
37001
37002 flags |= mask;
37003 }
37004
37005 cur_column += len;
37006 if (cur_column > max_column)
37007 {
37008 fprintf (file, ", \\\n%*s", (int)start_column, "");
37009 cur_column = start_column + len;
37010 comma = "";
37011 }
37012
37013 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37014 comma = ", ";
37015 comma_len = sizeof (", ") - 1;
37016 }
37017
37018 fputs ("\n", file);
37019 }
37020
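/* Editor's note: a line produced by the printer above under -mdebug=target
   might look like (illustrative only, flag value invented):

     Isa options set = 0x... : -maltivec, -mvsx, -mno-crypto, ...

   with long lines continued via the ", \" wrapping at max_column.  */
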
37021 /* Helper function to print the current isa options on a line. */
37022
37023 static void
37024 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37025 HOST_WIDE_INT flags)
37026 {
37027 rs6000_print_options_internal (file, indent, string, flags, "-m",
37028 &rs6000_opt_masks[0],
37029 ARRAY_SIZE (rs6000_opt_masks));
37030 }
37031
37032 static void
37033 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37034 HOST_WIDE_INT flags)
37035 {
37036 rs6000_print_options_internal (file, indent, string, flags, "",
37037 &rs6000_builtin_mask_names[0],
37038 ARRAY_SIZE (rs6000_builtin_mask_names));
37039 }
37040
37041 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37042 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37043 -mupper-regs-df, etc.).
37044
37045 If the user used -mno-power8-vector, we need to turn off all of the implicit
37046 ISA 2.07 and 3.0 options that relate to the vector unit.
37047
37048 If the user used -mno-power9-vector, we need to turn off all of the implicit
37049 ISA 3.0 options that relate to the vector unit.
37050
37051 This function does not handle explicit options such as the user specifying
37052 -mdirect-move. These are handled in rs6000_option_override_internal, and
37053 the appropriate error is given if needed.
37054
37055 We return a mask of all of the implicit options that should not be enabled
37056 by default. */
37057
37058 static HOST_WIDE_INT
37059 rs6000_disable_incompatible_switches (void)
37060 {
37061 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37062 size_t i, j;
37063
37064 static const struct {
37065 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37066 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37067 const char *const name; /* name of the switch. */
37068 } flags[] = {
37069 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37070 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37071 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37072 };
37073
37074 for (i = 0; i < ARRAY_SIZE (flags); i++)
37075 {
37076 HOST_WIDE_INT no_flag = flags[i].no_flag;
37077
37078 if ((rs6000_isa_flags & no_flag) == 0
37079 && (rs6000_isa_flags_explicit & no_flag) != 0)
37080 {
37081 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37082 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37083 & rs6000_isa_flags
37084 & dep_flags);
37085
37086 if (set_flags)
37087 {
37088 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37089 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37090 {
37091 set_flags &= ~rs6000_opt_masks[j].mask;
37092 error ("%<-mno-%s%> turns off %<-m%s%>",
37093 flags[i].name,
37094 rs6000_opt_masks[j].name);
37095 }
37096
37097 gcc_assert (!set_flags);
37098 }
37099
37100 rs6000_isa_flags &= ~dep_flags;
37101 ignore_masks |= no_flag | dep_flags;
37102 }
37103 }
37104
37105 return ignore_masks;
37106 }
37107
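/* Editor's example of the diagnostics above (illustrative): compiling with

     gcc -mcpu=power8 -mno-vsx -mpower9-vector ...

   reports "-mno-vsx turns off -mpower9-vector", and the returned
   ignore_masks prevents the dependent flags from being turned back on
   by the defaults.  */
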
37108 \f
37109 /* Helper function for printing the function name when debugging. */
37110
37111 static const char *
37112 get_decl_name (tree fn)
37113 {
37114 tree name;
37115
37116 if (!fn)
37117 return "<null>";
37118
37119 name = DECL_NAME (fn);
37120 if (!name)
37121 return "<no-name>";
37122
37123 return IDENTIFIER_POINTER (name);
37124 }
37125
37126 /* Return the clone id of the target we are compiling code for in a target
37127 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37128 the priority list for the target clones (ordered from lowest to
37129 highest). */
37130
37131 static int
37132 rs6000_clone_priority (tree fndecl)
37133 {
37134 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37135 HOST_WIDE_INT isa_masks;
37136 int ret = CLONE_DEFAULT;
37137 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37138 const char *attrs_str = NULL;
37139
37140 attrs = TREE_VALUE (TREE_VALUE (attrs));
37141 attrs_str = TREE_STRING_POINTER (attrs);
37142
37143 /* Return priority zero for the default function. Return the priority of
37144 the ISA needed for the function if it is not the default. */
37145 if (strcmp (attrs_str, "default") != 0)
37146 {
37147 if (fn_opts == NULL_TREE)
37148 fn_opts = target_option_default_node;
37149
37150 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37151 isa_masks = rs6000_isa_flags;
37152 else
37153 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37154
37155 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37156 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37157 break;
37158 }
37159
37160 if (TARGET_DEBUG_TARGET)
37161 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37162 get_decl_name (fndecl), ret);
37163
37164 return ret;
37165 }
37166
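/* Editor's illustrative use of the priorities above (a sketch):

     __attribute__((target_clones ("cpu=power9,cpu=power8,default")))
     long mul (long a, long b) { return a * b; }

   Each clone's ISA flags map to an entry in rs6000_clone_map, and the
   dispatcher prefers the highest-priority clone the machine supports.  */
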
37167 /* This compares the priority of target features in function DECL1 and DECL2.
37168 It returns a positive value if DECL1 has higher priority, a negative value
37169 if DECL2 has higher priority, and 0 if they are the same. Note, priorities
37170 are ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37171
37172 static int
37173 rs6000_compare_version_priority (tree decl1, tree decl2)
37174 {
37175 int priority1 = rs6000_clone_priority (decl1);
37176 int priority2 = rs6000_clone_priority (decl2);
37177 int ret = priority1 - priority2;
37178
37179 if (TARGET_DEBUG_TARGET)
37180 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37181 get_decl_name (decl1), get_decl_name (decl2), ret);
37182
37183 return ret;
37184 }
37185
37186 /* Make a dispatcher declaration for the multi-versioned function DECL.
37187 Calls to DECL function will be replaced with calls to the dispatcher
37188 by the front-end. Returns the decl of the dispatcher function. */
37189
37190 static tree
37191 rs6000_get_function_versions_dispatcher (void *decl)
37192 {
37193 tree fn = (tree) decl;
37194 struct cgraph_node *node = NULL;
37195 struct cgraph_node *default_node = NULL;
37196 struct cgraph_function_version_info *node_v = NULL;
37197 struct cgraph_function_version_info *first_v = NULL;
37198
37199 tree dispatch_decl = NULL;
37200
37201 struct cgraph_function_version_info *default_version_info = NULL;
37202 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37203
37204 if (TARGET_DEBUG_TARGET)
37205 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37206 get_decl_name (fn));
37207
37208 node = cgraph_node::get (fn);
37209 gcc_assert (node != NULL);
37210
37211 node_v = node->function_version ();
37212 gcc_assert (node_v != NULL);
37213
37214 if (node_v->dispatcher_resolver != NULL)
37215 return node_v->dispatcher_resolver;
37216
37217 /* Find the default version and make it the first node. */
37218 first_v = node_v;
37219 /* Go to the beginning of the chain. */
37220 while (first_v->prev != NULL)
37221 first_v = first_v->prev;
37222
37223 default_version_info = first_v;
37224 while (default_version_info != NULL)
37225 {
37226 const tree decl2 = default_version_info->this_node->decl;
37227 if (is_function_default_version (decl2))
37228 break;
37229 default_version_info = default_version_info->next;
37230 }
37231
37232 /* If there is no default node, just return NULL. */
37233 if (default_version_info == NULL)
37234 return NULL;
37235
37236 /* Make default info the first node. */
37237 if (first_v != default_version_info)
37238 {
37239 default_version_info->prev->next = default_version_info->next;
37240 if (default_version_info->next)
37241 default_version_info->next->prev = default_version_info->prev;
37242 first_v->prev = default_version_info;
37243 default_version_info->next = first_v;
37244 default_version_info->prev = NULL;
37245 }
37246
37247 default_node = default_version_info->this_node;
37248
37249 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37250 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37251 "target_clones attribute needs GLIBC (2.23 and newer) that "
37252 "exports hardware capability bits");
37253 #else
37254
37255 if (targetm.has_ifunc_p ())
37256 {
37257 struct cgraph_function_version_info *it_v = NULL;
37258 struct cgraph_node *dispatcher_node = NULL;
37259 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37260
37261 /* Right now, the dispatching is done via ifunc. */
37262 dispatch_decl = make_dispatcher_decl (default_node->decl);
37263
37264 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37265 gcc_assert (dispatcher_node != NULL);
37266 dispatcher_node->dispatcher_function = 1;
37267 dispatcher_version_info
37268 = dispatcher_node->insert_new_function_version ();
37269 dispatcher_version_info->next = default_version_info;
37270 dispatcher_node->definition = 1;
37271
37272 /* Set the dispatcher for all the versions. */
37273 it_v = default_version_info;
37274 while (it_v != NULL)
37275 {
37276 it_v->dispatcher_resolver = dispatch_decl;
37277 it_v = it_v->next;
37278 }
37279 }
37280 else
37281 {
37282 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37283 "multiversioning needs ifunc which is not supported "
37284 "on this target");
37285 }
37286 #endif
37287
37288 return dispatch_decl;
37289 }
37290
37291 /* Make the resolver function decl to dispatch the versions of a multi-
37292 versioned function, DEFAULT_DECL. Create an empty basic block in the
37293 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37294 function. */
37295
37296 static tree
37297 make_resolver_func (const tree default_decl,
37298 const tree dispatch_decl,
37299 basic_block *empty_bb)
37300 {
37301 /* Make the resolver function static. The resolver function returns
37302 void *. */
37303 tree decl_name = clone_function_name (default_decl, "resolver");
37304 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37305 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37306 tree decl = build_fn_decl (resolver_name, type);
37307 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37308
37309 DECL_NAME (decl) = decl_name;
37310 TREE_USED (decl) = 1;
37311 DECL_ARTIFICIAL (decl) = 1;
37312 DECL_IGNORED_P (decl) = 0;
37313 TREE_PUBLIC (decl) = 0;
37314 DECL_UNINLINABLE (decl) = 1;
37315
37316 /* Resolver is not external, body is generated. */
37317 DECL_EXTERNAL (decl) = 0;
37318 DECL_EXTERNAL (dispatch_decl) = 0;
37319
37320 DECL_CONTEXT (decl) = NULL_TREE;
37321 DECL_INITIAL (decl) = make_node (BLOCK);
37322 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37323
37324 /* Build result decl and add to function_decl. */
37325 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37326 DECL_ARTIFICIAL (t) = 1;
37327 DECL_IGNORED_P (t) = 1;
37328 DECL_RESULT (decl) = t;
37329
37330 gimplify_function_tree (decl);
37331 push_cfun (DECL_STRUCT_FUNCTION (decl));
37332 *empty_bb = init_lowered_empty_function (decl, false,
37333 profile_count::uninitialized ());
37334
37335 cgraph_node::add_new_function (decl, true);
37336 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37337
37338 pop_cfun ();
37339
37340 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37341 DECL_ATTRIBUTES (dispatch_decl)
37342 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37343
37344 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37345
37346 return decl;
37347 }
37348
37349 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37350 return a pointer to VERSION_DECL if we are running on a machine that
37351 supports the hardware architecture bits indexed by CLONE_ISA. This function will
37352 be called during version dispatch to decide which function version to
37353 execute. It returns the basic block at the end, to which more conditions
37354 can be added. */
37355
37356 static basic_block
37357 add_condition_to_bb (tree function_decl, tree version_decl,
37358 int clone_isa, basic_block new_bb)
37359 {
37360 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37361
37362 gcc_assert (new_bb != NULL);
37363 gimple_seq gseq = bb_seq (new_bb);
37364
37365
37366 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37367 build_fold_addr_expr (version_decl));
37368 tree result_var = create_tmp_var (ptr_type_node);
37369 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37370 gimple *return_stmt = gimple_build_return (result_var);
37371
37372 if (clone_isa == CLONE_DEFAULT)
37373 {
37374 gimple_seq_add_stmt (&gseq, convert_stmt);
37375 gimple_seq_add_stmt (&gseq, return_stmt);
37376 set_bb_seq (new_bb, gseq);
37377 gimple_set_bb (convert_stmt, new_bb);
37378 gimple_set_bb (return_stmt, new_bb);
37379 pop_cfun ();
37380 return new_bb;
37381 }
37382
37383 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37384 tree cond_var = create_tmp_var (bool_int_type_node);
37385 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37386 const char *arg_str = rs6000_clone_map[clone_isa].name;
37387 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37388 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37389 gimple_call_set_lhs (call_cond_stmt, cond_var);
37390
37391 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37392 gimple_set_bb (call_cond_stmt, new_bb);
37393 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37394
37395 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37396 NULL_TREE, NULL_TREE);
37397 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37398 gimple_set_bb (if_else_stmt, new_bb);
37399 gimple_seq_add_stmt (&gseq, if_else_stmt);
37400
37401 gimple_seq_add_stmt (&gseq, convert_stmt);
37402 gimple_seq_add_stmt (&gseq, return_stmt);
37403 set_bb_seq (new_bb, gseq);
37404
37405 basic_block bb1 = new_bb;
37406 edge e12 = split_block (bb1, if_else_stmt);
37407 basic_block bb2 = e12->dest;
37408 e12->flags &= ~EDGE_FALLTHRU;
37409 e12->flags |= EDGE_TRUE_VALUE;
37410
37411 edge e23 = split_block (bb2, return_stmt);
37412 gimple_set_bb (convert_stmt, bb2);
37413 gimple_set_bb (return_stmt, bb2);
37414
37415 basic_block bb3 = e23->dest;
37416 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37417
37418 remove_edge (e23);
37419 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37420
37421 pop_cfun ();
37422 return bb3;
37423 }
37424
37425 /* This function generates the dispatch function for multi-versioned functions.
37426 DISPATCH_DECL is the function which will contain the dispatch logic.
37427 FNDECLS is the set of function choices for dispatch, passed as a vector.
37428 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37429 code is generated. */
37430
37431 static int
37432 dispatch_function_versions (tree dispatch_decl,
37433 void *fndecls_p,
37434 basic_block *empty_bb)
37435 {
37436 int ix;
37437 tree ele;
37438 vec<tree> *fndecls;
37439 tree clones[CLONE_MAX];
37440
37441 if (TARGET_DEBUG_TARGET)
37442 fputs ("dispatch_function_versions, top\n", stderr);
37443
37444 gcc_assert (dispatch_decl != NULL
37445 && fndecls_p != NULL
37446 && empty_bb != NULL);
37447
37448 /* fndecls_p is actually a vector. */
37449 fndecls = static_cast<vec<tree> *> (fndecls_p);
37450
37451 /* At least one more version other than the default. */
37452 gcc_assert (fndecls->length () >= 2);
37453
37454 /* The first version in the vector is the default decl. */
37455 memset ((void *) clones, '\0', sizeof (clones));
37456 clones[CLONE_DEFAULT] = (*fndecls)[0];
37457
37458 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37459 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37460 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37461 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37462 to insert the code here to do the call. */
37463
37464 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37465 {
37466 int priority = rs6000_clone_priority (ele);
37467 if (!clones[priority])
37468 clones[priority] = ele;
37469 }
37470
37471 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37472 if (clones[ix])
37473 {
37474 if (TARGET_DEBUG_TARGET)
37475 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37476 ix, get_decl_name (clones[ix]));
37477
37478 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37479 *empty_bb);
37480 }
37481
37482 return 0;
37483 }
37484
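/* Editor's sketch of the resolver that the code above builds, written as
   pseudo-C (the actual tests use RS6000_BUILTIN_CPU_SUPPORTS; the feature
   strings are illustrative):

     static void *foo_resolver (void)
     {
       if (__builtin_cpu_supports ("arch_3_00")) return foo_power9;
       if (__builtin_cpu_supports ("arch_2_07")) return foo_power8;
       return foo_default;
     }
*/
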
37485 /* Generate the dispatching code body to dispatch multi-versioned function
37486 DECL. The target hook is called to process the "target" attributes and
37487 provide the code to dispatch the right function at run-time. NODE points
37488 to the dispatcher decl whose body will be created. */
37489
37490 static tree
37491 rs6000_generate_version_dispatcher_body (void *node_p)
37492 {
37493 tree resolver;
37494 basic_block empty_bb;
37495 struct cgraph_node *node = (cgraph_node *) node_p;
37496 struct cgraph_function_version_info *ninfo = node->function_version ();
37497
37498 if (ninfo->dispatcher_resolver)
37499 return ninfo->dispatcher_resolver;
37500
37501 /* node is going to be an alias, so remove the finalized bit. */
37502 node->definition = false;
37503
37504 /* The first version in the chain corresponds to the default version. */
37505 ninfo->dispatcher_resolver = resolver
37506 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37507
37508 if (TARGET_DEBUG_TARGET)
37509 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37510 get_decl_name (resolver));
37511
37512 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37513 auto_vec<tree, 2> fn_ver_vec;
37514
37515 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37516 vinfo;
37517 vinfo = vinfo->next)
37518 {
37519 struct cgraph_node *version = vinfo->this_node;
37520 /* Check for virtual functions here again, as by this time it should
37521 have been determined if this function needs a vtable index or
37522 not. This happens for methods in derived classes that override
37523 virtual methods in base classes but are not explicitly marked as
37524 virtual. */
37525 if (DECL_VINDEX (version->decl))
37526 sorry ("virtual function multiversioning not supported");
37527
37528 fn_ver_vec.safe_push (version->decl);
37529 }
37530
37531 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37532 cgraph_edge::rebuild_edges ();
37533 pop_cfun ();
37534 return resolver;
37535 }
37536
37537 \f
37538 /* Hook to determine if one function can safely inline another. */
37539
37540 static bool
37541 rs6000_can_inline_p (tree caller, tree callee)
37542 {
37543 bool ret = false;
37544 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37545 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37546
37547 /* If callee has no option attributes, then it is ok to inline. */
37548 if (!callee_tree)
37549 ret = true;
37550
37551 /* If caller has no option attributes, but callee does then it is not ok to
37552 inline. */
37553 else if (!caller_tree)
37554 ret = false;
37555
37556 else
37557 {
37558 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37559 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37560
37561 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37562 can inline an altivec function but a non-vsx function can't inline a
37563 vsx function. */
37564 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37565 == callee_opts->x_rs6000_isa_flags)
37566 ret = true;
37567 }
37568
37569 if (TARGET_DEBUG_TARGET)
37570 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37571 get_decl_name (caller), get_decl_name (callee),
37572 (ret ? "can" : "cannot"));
37573
37574 return ret;
37575 }
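
/* Editor's example of the subset rule above (illustrative):

     __attribute__((target ("vsx")))           int callee (void);
     __attribute__((target ("power9-vector"))) int caller (void);

   caller may inline callee, since -mpower9-vector implies -mvsx, but a
   plain -mno-vsx caller could not inline a vsx callee.  */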
37576 \f
37577 /* Allocate a stack temp and fix up the address so it meets the particular
37578 memory requirements (either offsettable or REG+REG addressing). */
37579
37580 rtx
37581 rs6000_allocate_stack_temp (machine_mode mode,
37582 bool offsettable_p,
37583 bool reg_reg_p)
37584 {
37585 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37586 rtx addr = XEXP (stack, 0);
37587 int strict_p = reload_completed;
37588
37589 if (!legitimate_indirect_address_p (addr, strict_p))
37590 {
37591 if (offsettable_p
37592 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37593 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37594
37595 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37596 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37597 }
37598
37599 return stack;
37600 }
37601
37602 /* Given a memory reference, if it does not use reg or reg+reg addressing,
37603 convert it to such a form, to deal with memory reference instructions
37604 like STFIWX and LDBRX that only take reg+reg addressing. */
37605
37606 rtx
37607 rs6000_force_indexed_or_indirect_mem (rtx x)
37608 {
37609 machine_mode mode = GET_MODE (x);
37610
37611 gcc_assert (MEM_P (x));
37612 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37613 {
37614 rtx addr = XEXP (x, 0);
37615 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37616 {
37617 rtx reg = XEXP (addr, 0);
37618 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37619 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37620 gcc_assert (REG_P (reg));
37621 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37622 addr = reg;
37623 }
37624 else if (GET_CODE (addr) == PRE_MODIFY)
37625 {
37626 rtx reg = XEXP (addr, 0);
37627 rtx expr = XEXP (addr, 1);
37628 gcc_assert (REG_P (reg));
37629 gcc_assert (GET_CODE (expr) == PLUS);
37630 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37631 addr = reg;
37632 }
37633
37634 x = replace_equiv_address (x, force_reg (Pmode, addr));
37635 }
37636
37637 return x;
37638 }
37639
37640 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37641
37642 On the RS/6000, all integer constants are acceptable, most won't be valid
37643 for particular insns, though. Only easy FP constants are acceptable. */
37644
37645 static bool
37646 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37647 {
37648 if (TARGET_ELF && tls_referenced_p (x))
37649 return false;
37650
37651 if (CONST_DOUBLE_P (x))
37652 return easy_fp_constant (x, mode);
37653
37654 if (GET_CODE (x) == CONST_VECTOR)
37655 return easy_vector_constant (x, mode);
37656
37657 return true;
37658 }
37659
37660 \f
37661 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37662
37663 static bool
37664 chain_already_loaded (rtx_insn *last)
37665 {
37666 for (; last != NULL; last = PREV_INSN (last))
37667 {
37668 if (NONJUMP_INSN_P (last))
37669 {
37670 rtx patt = PATTERN (last);
37671
37672 if (GET_CODE (patt) == SET)
37673 {
37674 rtx lhs = XEXP (patt, 0);
37675
37676 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37677 return true;
37678 }
37679 }
37680 }
37681 return false;
37682 }
37683
37684 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37685
37686 void
37687 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37688 {
37689 rtx func = func_desc;
37690 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37691 rtx toc_load = NULL_RTX;
37692 rtx toc_restore = NULL_RTX;
37693 rtx func_addr;
37694 rtx abi_reg = NULL_RTX;
37695 rtx call[4];
37696 int n_call;
37697 rtx insn;
37698
37699 if (global_tlsarg)
37700 tlsarg = global_tlsarg;
37701
37702 /* Handle longcall attributes. */
37703 if ((INTVAL (cookie) & CALL_LONG) != 0
37704 && GET_CODE (func_desc) == SYMBOL_REF)
37705 func = rs6000_longcall_ref (func_desc, tlsarg);
37706
37707 /* Handle indirect calls. */
37708 if (GET_CODE (func) != SYMBOL_REF
37709 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37710 {
37711 /* Save the TOC into its reserved slot before the call,
37712 and prepare to restore it after the call. */
37713 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37714 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37715 gen_rtvec (1, stack_toc_offset),
37716 UNSPEC_TOCSLOT);
37717 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37718
37719 /* Can we optimize saving the TOC in the prologue or
37720 do we need to do it at every call? */
37721 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37722 cfun->machine->save_toc_in_prologue = true;
37723 else
37724 {
37725 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37726 rtx stack_toc_mem = gen_frame_mem (Pmode,
37727 gen_rtx_PLUS (Pmode, stack_ptr,
37728 stack_toc_offset));
37729 MEM_VOLATILE_P (stack_toc_mem) = 1;
37730 if (HAVE_AS_PLTSEQ
37731 && TARGET_TLS_MARKERS
37732 && DEFAULT_ABI == ABI_ELFv2
37733 && GET_CODE (func_desc) == SYMBOL_REF)
37734 {
37735 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37736 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37737 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37738 }
37739 else
37740 emit_move_insn (stack_toc_mem, toc_reg);
37741 }
37742
37743 if (DEFAULT_ABI == ABI_ELFv2)
37744 {
37745 /* A function pointer in the ELFv2 ABI is just a plain address, but
37746 the ABI requires it to be loaded into r12 before the call. */
37747 func_addr = gen_rtx_REG (Pmode, 12);
37748 if (!rtx_equal_p (func_addr, func))
37749 emit_move_insn (func_addr, func);
37750 abi_reg = func_addr;
37751 /* Indirect calls via CTR are strongly preferred over indirect
37752 calls via LR, so move the address there. Needed to mark
37753 this insn for linker plt sequence editing too. */
37754 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37755 if (HAVE_AS_PLTSEQ
37756 && TARGET_TLS_MARKERS
37757 && GET_CODE (func_desc) == SYMBOL_REF)
37758 {
37759 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
37760 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37761 emit_insn (gen_rtx_SET (func_addr, mark_func));
37762 v = gen_rtvec (2, func_addr, func_desc);
37763 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37764 }
37765 else
37766 emit_move_insn (func_addr, abi_reg);
37767 }
37768 else
37769 {
37770 /* A function pointer under AIX is a pointer to a data area whose
37771 first word contains the actual address of the function, whose
37772 second word contains a pointer to its TOC, and whose third word
37773 contains a value to place in the static chain register (r11).
37774 Note that if we load the static chain, our "trampoline" need
37775 not have any executable code. */
37776
37777 /* Load up address of the actual function. */
37778 func = force_reg (Pmode, func);
37779 func_addr = gen_reg_rtx (Pmode);
37780 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
37781
37782 /* Indirect calls via CTR are strongly preferred over indirect
37783 calls via LR, so move the address there. */
37784 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
37785 emit_move_insn (ctr_reg, func_addr);
37786 func_addr = ctr_reg;
37787
37788 /* Prepare to load the TOC of the called function. Note that the
37789 TOC load must happen immediately before the actual call so
37790 that unwinding the TOC registers works correctly. See the
37791 comment in frob_update_context. */
37792 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37793 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37794 gen_rtx_PLUS (Pmode, func,
37795 func_toc_offset));
37796 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37797
37798 /* If we have a static chain, load it up. But, if the call was
37799 originally direct, the 3rd word has not been written since no
37800 trampoline has been built, so we ought not to load it, lest we
37801 override a static chain value. */
37802 if (!(GET_CODE (func_desc) == SYMBOL_REF
37803 && SYMBOL_REF_FUNCTION_P (func_desc))
37804 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37805 && !chain_already_loaded (get_current_sequence ()->next->last))
37806 {
37807 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37808 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37809 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37810 gen_rtx_PLUS (Pmode, func,
37811 func_sc_offset));
37812 emit_move_insn (sc_reg, func_sc_mem);
37813 abi_reg = sc_reg;
37814 }
37815 }
37816 }
37817 else
37818 {
37819 /* Direct calls use the TOC: for local calls, the callee will
37820 assume the TOC register is set; for non-local calls, the
37821 PLT stub needs the TOC register. */
37822 abi_reg = toc_reg;
37823 func_addr = func;
37824 }
37825
37826 /* Create the call. */
37827 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37828 if (value != NULL_RTX)
37829 call[0] = gen_rtx_SET (value, call[0]);
37830 n_call = 1;
37831
37832 if (toc_load)
37833 call[n_call++] = toc_load;
37834 if (toc_restore)
37835 call[n_call++] = toc_restore;
37836
37837 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37838
37839 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37840 insn = emit_call_insn (insn);
37841
37842 /* Mention all registers defined by the ABI to hold information
37843 as uses in CALL_INSN_FUNCTION_USAGE. */
37844 if (abi_reg)
37845 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37846 }
37847
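/* Editor's sketch of the AIX/ELFv1 function descriptor dereferenced in the
   indirect-call path above (illustrative layout only):

     struct func_desc
     {
       void *code;          /* word 0: address of the actual code */
       void *toc;           /* word 1: the function's TOC pointer */
       void *static_chain;  /* word 2: value for r11, if any */
     };
*/
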
37848 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37849
37850 void
37851 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37852 {
37853 rtx call[2];
37854 rtx insn;
37855
37856 gcc_assert (INTVAL (cookie) == 0);
37857
37858 if (global_tlsarg)
37859 tlsarg = global_tlsarg;
37860
37861 /* Create the call. */
37862 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
37863 if (value != NULL_RTX)
37864 call[0] = gen_rtx_SET (value, call[0]);
37865
37866 call[1] = simple_return_rtx;
37867
37868 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37869 insn = emit_call_insn (insn);
37870
37871 /* Note use of the TOC register. */
37872 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37873 }
37874
37875 /* Expand code to perform a call under the SYSV4 ABI. */
37876
37877 void
37878 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37879 {
37880 rtx func = func_desc;
37881 rtx func_addr;
37882 rtx call[3];
37883 rtx insn;
37884 rtx abi_reg = NULL_RTX;
37885
37886 if (global_tlsarg)
37887 tlsarg = global_tlsarg;
37888
37889 /* Handle longcall attributes. */
37890 if ((INTVAL (cookie) & CALL_LONG) != 0
37891 && GET_CODE (func_desc) == SYMBOL_REF)
37892 {
37893 func = rs6000_longcall_ref (func_desc, tlsarg);
37894 /* If the longcall was implemented using PLT16 relocs, then r11
37895 needs to be valid at the call for lazy linking. */
37896 if (HAVE_AS_PLTSEQ
37897 && TARGET_TLS_MARKERS)
37898 abi_reg = func;
37899 }
37900
37901 /* Handle indirect calls. */
37902 if (GET_CODE (func) != SYMBOL_REF)
37903 {
37904 func = force_reg (Pmode, func);
37905
37906 /* Indirect calls via CTR are strongly preferred over indirect
37907 calls via LR, so move the address there. Needed to mark
37908 this insn for linker plt sequence editing too. */
37909 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37910 if (HAVE_AS_PLTSEQ
37911 && TARGET_TLS_MARKERS
37912 && GET_CODE (func_desc) == SYMBOL_REF)
37913 {
37914 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37915 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37916 emit_insn (gen_rtx_SET (func_addr, mark_func));
37917 v = gen_rtvec (2, func_addr, func_desc);
37918 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37919 }
37920 else
37921 emit_move_insn (func_addr, func);
37922 }
37923 else
37924 func_addr = func;
37925
37926 /* Create the call. */
37927 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37928 if (value != NULL_RTX)
37929 call[0] = gen_rtx_SET (value, call[0]);
37930
37931 unsigned int mask = CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS;
37932 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (INTVAL (cookie) & mask));
37933
37934 call[2] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37935
37936 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
37937 insn = emit_call_insn (insn);
37938 if (abi_reg)
37939 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37940 }
37941
37942 /* Expand code to perform a sibling call under the SysV4 ABI. */
37943
37944 void
37945 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37946 {
37947 rtx func = func_desc;
37948 rtx func_addr;
37949 rtx call[3];
37950 rtx insn;
37951 rtx abi_reg = NULL_RTX;
37952
37953 if (global_tlsarg)
37954 tlsarg = global_tlsarg;
37955
37956 /* Handle longcall attributes. */
37957 if ((INTVAL (cookie) & CALL_LONG) != 0
37958 && GET_CODE (func_desc) == SYMBOL_REF)
37959 {
37960 func = rs6000_longcall_ref (func_desc, tlsarg);
37961 /* If the longcall was implemented using PLT16 relocs, then r11
37962 needs to be valid at the call for lazy linking. */
37963 if (HAVE_AS_PLTSEQ
37964 && TARGET_TLS_MARKERS)
37965 abi_reg = func;
37966 }
37967
37968 /* Handle indirect calls. */
37969 if (GET_CODE (func) != SYMBOL_REF)
37970 {
37971 func = force_reg (Pmode, func);
37972
37973 /* Indirect sibcalls must go via CTR. Needed to mark
37974 this insn for linker plt sequence editing too. */
37975 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37976 if (HAVE_AS_PLTSEQ
37977 && TARGET_TLS_MARKERS
37978 && GET_CODE (func_desc) == SYMBOL_REF)
37979 {
37980 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37981 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37982 emit_insn (gen_rtx_SET (func_addr, mark_func));
37983 v = gen_rtvec (2, func_addr, func_desc);
37984 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37985 }
37986 else
37987 emit_move_insn (func_addr, func);
37988 }
37989 else
37990 func_addr = func;
37991
37992 /* Create the call. */
37993 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37994 if (value != NULL_RTX)
37995 call[0] = gen_rtx_SET (value, call[0]);
37996
37997 unsigned int mask = CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS;
37998 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (INTVAL (cookie) & mask));
37999 call[2] = simple_return_rtx;
38000
38001 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38002 insn = emit_call_insn (insn);
38003 if (abi_reg)
38004 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38005 }
38006
38007 /* Return whether we need to always update the saved TOC pointer when we update
38008 the stack pointer. */
38009
38010 static bool
38011 rs6000_save_toc_in_prologue_p (void)
38012 {
38013 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38014 }
38015
38016 #ifdef HAVE_GAS_HIDDEN
38017 # define USE_HIDDEN_LINKONCE 1
38018 #else
38019 # define USE_HIDDEN_LINKONCE 0
38020 #endif
38021
38022 /* Fills in the label name that should be used for a 476 link stack thunk. */
38023
38024 void
38025 get_ppc476_thunk_name (char name[32])
38026 {
38027 gcc_assert (TARGET_LINK_STACK);
38028
38029 if (USE_HIDDEN_LINKONCE)
38030 sprintf (name, "__ppc476.get_thunk");
38031 else
38032 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38033 }
38034
38035 /* This function emits the simple thunk routine that is used to preserve
38036 the link stack on the 476 CPU. */
38037
38038 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38039 static void
38040 rs6000_code_end (void)
38041 {
38042 char name[32];
38043 tree decl;
38044
38045 if (!TARGET_LINK_STACK)
38046 return;
38047
38048 get_ppc476_thunk_name (name);
38049
38050 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38051 build_function_type_list (void_type_node, NULL_TREE));
38052 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38053 NULL_TREE, void_type_node);
38054 TREE_PUBLIC (decl) = 1;
38055 TREE_STATIC (decl) = 1;
38056
38057 #if RS6000_WEAK
38058 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38059 {
38060 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38061 targetm.asm_out.unique_section (decl, 0);
38062 switch_to_section (get_named_section (decl, NULL, 0));
38063 DECL_WEAK (decl) = 1;
38064 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38065 targetm.asm_out.globalize_label (asm_out_file, name);
38066 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38067 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38068 }
38069 else
38070 #endif
38071 {
38072 switch_to_section (text_section);
38073 ASM_OUTPUT_LABEL (asm_out_file, name);
38074 }
38075
38076 DECL_INITIAL (decl) = make_node (BLOCK);
38077 current_function_decl = decl;
38078 allocate_struct_function (decl, false);
38079 init_function_start (decl);
38080 first_function_block_is_cold = false;
38081 /* Make sure unwind info is emitted for the thunk if needed. */
38082 final_start_function (emit_barrier (), asm_out_file, 1);
38083
38084 fputs ("\tblr\n", asm_out_file);
38085
38086 final_end_function ();
38087 init_insn_lengths ();
38088 free_after_compilation (cfun);
38089 set_cfun (NULL);
38090 current_function_decl = NULL;
38091 }
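/* Caller-side sketch (illustrative, not literal compiler output): a
   classic SysV PIC-register sequence such as

	bcl 20,31,$+4
	mflr 30

   pushes an entry on the 476 link stack that is never popped.  With
   TARGET_LINK_STACK the compiler instead emits

	bl __ppc476.get_thunk
	mflr 30

   and the thunk's blr pops what the bl pushed, keeping the link stack
   balanced.  */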
38092
38093 /* Add r30 to hard reg set if the prologue sets it up and it is not
38094 pic_offset_table_rtx. */
38095
38096 static void
38097 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38098 {
38099 if (!TARGET_SINGLE_PIC_BASE
38100 && TARGET_TOC
38101 && TARGET_MINIMAL_TOC
38102 && !constant_pool_empty_p ())
38103 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38104 if (cfun->machine->split_stack_argp_used)
38105 add_to_hard_reg_set (&set->set, Pmode, 12);
38106
38107 /* Make sure the hard reg set doesn't include r2, which was possibly added
38108 via PIC_OFFSET_TABLE_REGNUM. */
38109 if (TARGET_TOC)
38110 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38111 }
38112
38113 \f
38114 /* Helper function for rs6000_split_logical to emit a logical instruction after
38115 splitting the operation into single GPR registers.
38116
38117 DEST is the destination register.
38118 OP1 and OP2 are the input source registers.
38119 CODE is the base operation (AND, IOR, XOR, NOT).
38120 MODE is the machine mode.
38121 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38122 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38123 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38124
38125 static void
38126 rs6000_split_logical_inner (rtx dest,
38127 rtx op1,
38128 rtx op2,
38129 enum rtx_code code,
38130 machine_mode mode,
38131 bool complement_final_p,
38132 bool complement_op1_p,
38133 bool complement_op2_p)
38134 {
38135 rtx bool_rtx;
38136
38137 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38138 if (op2 && GET_CODE (op2) == CONST_INT
38139 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38140 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38141 {
38142 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38143 HOST_WIDE_INT value = INTVAL (op2) & mask;
38144
38145 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38146 if (code == AND)
38147 {
38148 if (value == 0)
38149 {
38150 emit_insn (gen_rtx_SET (dest, const0_rtx));
38151 return;
38152 }
38153
38154 else if (value == mask)
38155 {
38156 if (!rtx_equal_p (dest, op1))
38157 emit_insn (gen_rtx_SET (dest, op1));
38158 return;
38159 }
38160 }
38161
38162 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38163 into separate ORI/ORIS or XORI/XORIS instructions.
38164 else if (code == IOR || code == XOR)
38165 {
38166 if (value == 0)
38167 {
38168 if (!rtx_equal_p (dest, op1))
38169 emit_insn (gen_rtx_SET (dest, op1));
38170 return;
38171 }
38172 }
38173 }
38174
38175 if (code == AND && mode == SImode
38176 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38177 {
38178 emit_insn (gen_andsi3 (dest, op1, op2));
38179 return;
38180 }
38181
38182 if (complement_op1_p)
38183 op1 = gen_rtx_NOT (mode, op1);
38184
38185 if (complement_op2_p)
38186 op2 = gen_rtx_NOT (mode, op2);
38187
38188 /* For canonical RTL, if only one arm is inverted it is the first. */
38189 if (!complement_op1_p && complement_op2_p)
38190 std::swap (op1, op2);
38191
38192 bool_rtx = ((code == NOT)
38193 ? gen_rtx_NOT (mode, op1)
38194 : gen_rtx_fmt_ee (code, mode, op1, op2));
38195
38196 if (complement_final_p)
38197 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38198
38199 emit_insn (gen_rtx_SET (dest, bool_rtx));
38200 }
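/* For example (an illustrative sketch of the RTL emitted by
   rs6000_split_logical_inner above): CODE == AND with COMPLEMENT_OP2_P
   swaps the operands and emits (set dest (and (not op2) op1)), which
   matches the "andc" patterns, while COMPLEMENT_FINAL_P wraps the whole
   result as (set dest (not (and op1 op2))) to match "nand".  */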
38201
38202 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38203 operations are split immediately during RTL generation to allow for more
38204 optimizations of the AND/IOR/XOR.
38205
38206 OPERANDS is an array containing the destination and two input operands.
38207 CODE is the base operation (AND, IOR, XOR, NOT).
38208 MODE is the machine mode.
38209 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38210 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38211 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38214
38215 static void
38216 rs6000_split_logical_di (rtx operands[3],
38217 enum rtx_code code,
38218 bool complement_final_p,
38219 bool complement_op1_p,
38220 bool complement_op2_p)
38221 {
38222 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38223 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38224 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38225 enum hi_lo { hi = 0, lo = 1 };
38226 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38227 size_t i;
38228
38229 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38230 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38231 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38232 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38233
38234 if (code == NOT)
38235 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38236 else
38237 {
38238 if (GET_CODE (operands[2]) != CONST_INT)
38239 {
38240 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38241 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38242 }
38243 else
38244 {
38245 HOST_WIDE_INT value = INTVAL (operands[2]);
38246 HOST_WIDE_INT value_hi_lo[2];
38247
38248 gcc_assert (!complement_final_p);
38249 gcc_assert (!complement_op1_p);
38250 gcc_assert (!complement_op2_p);
38251
38252 value_hi_lo[hi] = value >> 32;
38253 value_hi_lo[lo] = value & lower_32bits;
38254
38255 for (i = 0; i < 2; i++)
38256 {
38257 HOST_WIDE_INT sub_value = value_hi_lo[i];
38258
38259 if (sub_value & sign_bit)
38260 sub_value |= upper_32bits;
38261
38262 op2_hi_lo[i] = GEN_INT (sub_value);
38263
38264 /* If this is an AND instruction, check to see if we need to load
38265 the value in a register. */
38266 if (code == AND && sub_value != -1 && sub_value != 0
38267 && !and_operand (op2_hi_lo[i], SImode))
38268 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38269 }
38270 }
38271 }
38272
38273 for (i = 0; i < 2; i++)
38274 {
38275 /* Split large IOR/XOR operations. */
38276 if ((code == IOR || code == XOR)
38277 && GET_CODE (op2_hi_lo[i]) == CONST_INT
38278 && !complement_final_p
38279 && !complement_op1_p
38280 && !complement_op2_p
38281 && !logical_const_operand (op2_hi_lo[i], SImode))
38282 {
38283 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38284 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38285 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38286 rtx tmp = gen_reg_rtx (SImode);
38287
38288 /* Make sure the constant is sign extended. */
38289 if ((hi_16bits & sign_bit) != 0)
38290 hi_16bits |= upper_32bits;
38291
38292 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38293 code, SImode, false, false, false);
38294
38295 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38296 code, SImode, false, false, false);
38297 }
38298 else
38299 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38300 code, SImode, complement_final_p,
38301 complement_op1_p, complement_op2_p);
38302 }
38303
38304 return;
38305 }
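/* For example (a sketch): on a 32-bit target, a DImode "x |= 0x12345678"
   has a high half of zero, which degenerates into a simple move, while
   the low half constant does not fit a single 16-bit logical operation
   and so is split by rs6000_split_logical_di into the equivalent of

	oris lo,lo,0x1234
	ori lo,lo,0x5678  */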
38306
38307 /* Split the insns that make up boolean operations operating on multiple GPR
38308 registers. The boolean MD patterns ensure that the inputs either are
38309 exactly the same as the output registers, or there is no overlap.
38310
38311 OPERANDS is an array containing the destination and two input operands.
38312 CODE is the base operation (AND, IOR, XOR, NOT).
38313 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38314 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38315 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38316
38317 void
38318 rs6000_split_logical (rtx operands[3],
38319 enum rtx_code code,
38320 bool complement_final_p,
38321 bool complement_op1_p,
38322 bool complement_op2_p)
38323 {
38324 machine_mode mode = GET_MODE (operands[0]);
38325 machine_mode sub_mode;
38326 rtx op0, op1, op2;
38327 int sub_size, regno0, regno1, nregs, i;
38328
38329 /* If this is DImode, use the specialized version that can run before
38330 register allocation. */
38331 if (mode == DImode && !TARGET_POWERPC64)
38332 {
38333 rs6000_split_logical_di (operands, code, complement_final_p,
38334 complement_op1_p, complement_op2_p);
38335 return;
38336 }
38337
38338 op0 = operands[0];
38339 op1 = operands[1];
38340 op2 = (code == NOT) ? NULL_RTX : operands[2];
38341 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38342 sub_size = GET_MODE_SIZE (sub_mode);
38343 regno0 = REGNO (op0);
38344 regno1 = REGNO (op1);
38345
38346 gcc_assert (reload_completed);
38347 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38348 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38349
38350 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38351 gcc_assert (nregs > 1);
38352
38353 if (op2 && REG_P (op2))
38354 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38355
38356 for (i = 0; i < nregs; i++)
38357 {
38358 int offset = i * sub_size;
38359 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38360 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38361 rtx sub_op2 = ((code == NOT)
38362 ? NULL_RTX
38363 : simplify_subreg (sub_mode, op2, mode, offset));
38364
38365 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38366 complement_final_p, complement_op1_p,
38367 complement_op2_p);
38368 }
38369
38370 return;
38371 }
38372
38373 \f
38374 /* Return true if the peephole2 pass can combine an addis instruction with a
38375 load that uses an offset, so that the two insns can be fused together on
38376 a power8. */
38377
38378 bool
38379 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38380 rtx addis_value, /* addis value. */
38381 rtx target, /* target register that is loaded. */
38382 rtx mem) /* bottom part of the memory addr. */
38383 {
38384 rtx addr;
38385 rtx base_reg;
38386
38387 /* Validate arguments. */
38388 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38389 return false;
38390
38391 if (!base_reg_operand (target, GET_MODE (target)))
38392 return false;
38393
38394 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38395 return false;
38396
38397 /* Allow sign/zero extension. */
38398 if (GET_CODE (mem) == ZERO_EXTEND
38399 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38400 mem = XEXP (mem, 0);
38401
38402 if (!MEM_P (mem))
38403 return false;
38404
38405 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38406 return false;
38407
38408 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38409 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38410 return false;
38411
38412 /* Validate that the register used to load the high value is either the
38413 register being loaded, or we can safely replace its use.
38414
38415 This function is only called from the peephole2 pass and we assume that
38416 there are 2 instructions in the peephole (addis and load), so we want to
38417 check that the target register is not used in the memory address and that
38418 the register holding the addis result is dead after the peephole.
38419 if (REGNO (addis_reg) != REGNO (target))
38420 {
38421 if (reg_mentioned_p (target, mem))
38422 return false;
38423
38424 if (!peep2_reg_dead_p (2, addis_reg))
38425 return false;
38426
38427 /* If the target register being loaded is the stack pointer, we must
38428 avoid loading any other value into it, even temporarily. */
38429 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38430 return false;
38431 }
38432
38433 base_reg = XEXP (addr, 0);
38434 return REGNO (addis_reg) == REGNO (base_reg);
38435 }
38436
38437 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38438 sequence. We adjust the addis register to use the target register. If the
38439 load sign extends, we change the code to use a zero-extending load followed
38440 by an explicit sign extension, since the fusion only covers zero-extending
38441 loads.
38442
38443 The operands are:
38444 operands[0] register set with addis (to be replaced with target)
38445 operands[1] value set via addis
38446 operands[2] target register being loaded
38447 operands[3] D-form memory reference using operands[0]. */
38448
38449 void
38450 expand_fusion_gpr_load (rtx *operands)
38451 {
38452 rtx addis_value = operands[1];
38453 rtx target = operands[2];
38454 rtx orig_mem = operands[3];
38455 rtx new_addr, new_mem, orig_addr, offset;
38456 enum rtx_code plus_or_lo_sum;
38457 machine_mode target_mode = GET_MODE (target);
38458 machine_mode extend_mode = target_mode;
38459 machine_mode ptr_mode = Pmode;
38460 enum rtx_code extend = UNKNOWN;
38461
38462 if (GET_CODE (orig_mem) == ZERO_EXTEND
38463 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38464 {
38465 extend = GET_CODE (orig_mem);
38466 orig_mem = XEXP (orig_mem, 0);
38467 target_mode = GET_MODE (orig_mem);
38468 }
38469
38470 gcc_assert (MEM_P (orig_mem));
38471
38472 orig_addr = XEXP (orig_mem, 0);
38473 plus_or_lo_sum = GET_CODE (orig_addr);
38474 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38475
38476 offset = XEXP (orig_addr, 1);
38477 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38478 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38479
38480 if (extend != UNKNOWN)
38481 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38482
38483 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38484 UNSPEC_FUSION_GPR);
38485 emit_insn (gen_rtx_SET (target, new_mem));
38486
38487 if (extend == SIGN_EXTEND)
38488 {
38489 int sub_off = ((BYTES_BIG_ENDIAN)
38490 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38491 : 0);
38492 rtx sign_reg
38493 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38494
38495 emit_insn (gen_rtx_SET (target,
38496 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38497 }
38498
38499 return;
38500 }
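/* For example (a sketch): when expand_fusion_gpr_load above sees a
   sign-extending HImode load, it emits the zero-extending "lhz" form
   wrapped in UNSPEC_FUSION_GPR and then a separate sign extension of
   the loaded value (an "extsh"), since only zero-extending loads are
   covered by the fusion.  */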
38501
38502 /* Emit the addis instruction that will be part of a fused instruction
38503 sequence. */
38504
38505 void
38506 emit_fusion_addis (rtx target, rtx addis_value)
38507 {
38508 rtx fuse_ops[10];
38509 const char *addis_str = NULL;
38510
38511 /* Emit the addis instruction. */
38512 fuse_ops[0] = target;
38513 if (satisfies_constraint_L (addis_value))
38514 {
38515 fuse_ops[1] = addis_value;
38516 addis_str = "lis %0,%v1";
38517 }
38518
38519 else if (GET_CODE (addis_value) == PLUS)
38520 {
38521 rtx op0 = XEXP (addis_value, 0);
38522 rtx op1 = XEXP (addis_value, 1);
38523
38524 if (REG_P (op0) && CONST_INT_P (op1)
38525 && satisfies_constraint_L (op1))
38526 {
38527 fuse_ops[1] = op0;
38528 fuse_ops[2] = op1;
38529 addis_str = "addis %0,%1,%v2";
38530 }
38531 }
38532
38533 else if (GET_CODE (addis_value) == HIGH)
38534 {
38535 rtx value = XEXP (addis_value, 0);
38536 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38537 {
38538 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38539 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38540 if (TARGET_ELF)
38541 addis_str = "addis %0,%2,%1@toc@ha";
38542
38543 else if (TARGET_XCOFF)
38544 addis_str = "addis %0,%1@u(%2)";
38545
38546 else
38547 gcc_unreachable ();
38548 }
38549
38550 else if (GET_CODE (value) == PLUS)
38551 {
38552 rtx op0 = XEXP (value, 0);
38553 rtx op1 = XEXP (value, 1);
38554
38555 if (GET_CODE (op0) == UNSPEC
38556 && XINT (op0, 1) == UNSPEC_TOCREL
38557 && CONST_INT_P (op1))
38558 {
38559 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38560 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38561 fuse_ops[3] = op1;
38562 if (TARGET_ELF)
38563 addis_str = "addis %0,%2,%1+%3@toc@ha";
38564
38565 else if (TARGET_XCOFF)
38566 addis_str = "addis %0,%1+%3@u(%2)";
38567
38568 else
38569 gcc_unreachable ();
38570 }
38571 }
38572
38573 else if (satisfies_constraint_L (value))
38574 {
38575 fuse_ops[1] = value;
38576 addis_str = "lis %0,%v1";
38577 }
38578
38579 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38580 {
38581 fuse_ops[1] = value;
38582 addis_str = "lis %0,%1@ha";
38583 }
38584 }
38585
38586 if (!addis_str)
38587 fatal_insn ("Could not generate addis value for fusion", addis_value);
38588
38589 output_asm_insn (addis_str, fuse_ops);
38590 }
38591
38592 /* Emit a D-form load or store instruction that is the second instruction
38593 of a fusion sequence. */
38594
38595 static void
38596 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38597 {
38598 rtx fuse_ops[10];
38599 char insn_template[80];
38600
38601 fuse_ops[0] = load_reg;
38602 fuse_ops[1] = addis_reg;
38603
38604 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38605 {
38606 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38607 fuse_ops[2] = offset;
38608 output_asm_insn (insn_template, fuse_ops);
38609 }
38610
38611 else if (GET_CODE (offset) == UNSPEC
38612 && XINT (offset, 1) == UNSPEC_TOCREL)
38613 {
38614 if (TARGET_ELF)
38615 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38616
38617 else if (TARGET_XCOFF)
38618 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38619
38620 else
38621 gcc_unreachable ();
38622
38623 fuse_ops[2] = XVECEXP (offset, 0, 0);
38624 output_asm_insn (insn_template, fuse_ops);
38625 }
38626
38627 else if (GET_CODE (offset) == PLUS
38628 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38629 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38630 && CONST_INT_P (XEXP (offset, 1)))
38631 {
38632 rtx tocrel_unspec = XEXP (offset, 0);
38633 if (TARGET_ELF)
38634 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38635
38636 else if (TARGET_XCOFF)
38637 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38638
38639 else
38640 gcc_unreachable ();
38641
38642 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38643 fuse_ops[3] = XEXP (offset, 1);
38644 output_asm_insn (insn_template, fuse_ops);
38645 }
38646
38647 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38648 {
38649 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38650
38651 fuse_ops[2] = offset;
38652 output_asm_insn (insn_template, fuse_ops);
38653 }
38654
38655 else
38656 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38657
38658 return;
38659 }
38660
38661 /* Given an address, convert it into the addis and load offset parts. Addresses
38662 created during the peephole2 process look like:
38663 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38664 (unspec [(...)] UNSPEC_TOCREL)) */
38665
38666 static void
38667 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38668 {
38669 rtx hi, lo;
38670
38671 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38672 {
38673 hi = XEXP (addr, 0);
38674 lo = XEXP (addr, 1);
38675 }
38676 else
38677 gcc_unreachable ();
38678
38679 *p_hi = hi;
38680 *p_lo = lo;
38681 }
38682
38683 /* Return a string to fuse an addis instruction with a GPR load into the same
38684 register that the addis instruction set up.  The address that is used
38685 is the logical address that was formed during peephole2:
38686 (lo_sum (high) (low-part))
38687
38688 The code is complicated, so we call output_asm_insn directly, and just
38689 return "". */
38690
38691 const char *
38692 emit_fusion_gpr_load (rtx target, rtx mem)
38693 {
38694 rtx addis_value;
38695 rtx addr;
38696 rtx load_offset;
38697 const char *load_str = NULL;
38698 machine_mode mode;
38699
38700 if (GET_CODE (mem) == ZERO_EXTEND)
38701 mem = XEXP (mem, 0);
38702
38703 gcc_assert (REG_P (target) && MEM_P (mem));
38704
38705 addr = XEXP (mem, 0);
38706 fusion_split_address (addr, &addis_value, &load_offset);
38707
38708 /* Now emit the load instruction to the same register. */
38709 mode = GET_MODE (mem);
38710 switch (mode)
38711 {
38712 case E_QImode:
38713 load_str = "lbz";
38714 break;
38715
38716 case E_HImode:
38717 load_str = "lhz";
38718 break;
38719
38720 case E_SImode:
38721 case E_SFmode:
38722 load_str = "lwz";
38723 break;
38724
38725 case E_DImode:
38726 case E_DFmode:
38727 gcc_assert (TARGET_POWERPC64);
38728 load_str = "ld";
38729 break;
38730
38731 default:
38732 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38733 }
38734
38735 /* Emit the addis instruction. */
38736 emit_fusion_addis (target, addis_value);
38737
38738 /* Emit the D-form load instruction. */
38739 emit_fusion_load (target, target, load_offset, load_str);
38740
38741 return "";
38742 }
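/* For example (illustrative, assuming an ELF target and a TOC-relative
   address; the register number is arbitrary), emit_fusion_addis and
   emit_fusion_load together produce a fusable pair such as

	addis 9,2,sym@toc@ha
	lwz 9,sym@toc@l(9)

   with the load targeting the same register the addis set up.  */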
38743 \f
38744
38745 #ifdef RS6000_GLIBC_ATOMIC_FENV
38746 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38747 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38748 #endif
38749
38750 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38751
38752 static void
38753 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38754 {
38755 if (!TARGET_HARD_FLOAT)
38756 {
38757 #ifdef RS6000_GLIBC_ATOMIC_FENV
38758 if (atomic_hold_decl == NULL_TREE)
38759 {
38760 atomic_hold_decl
38761 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38762 get_identifier ("__atomic_feholdexcept"),
38763 build_function_type_list (void_type_node,
38764 double_ptr_type_node,
38765 NULL_TREE));
38766 TREE_PUBLIC (atomic_hold_decl) = 1;
38767 DECL_EXTERNAL (atomic_hold_decl) = 1;
38768 }
38769
38770 if (atomic_clear_decl == NULL_TREE)
38771 {
38772 atomic_clear_decl
38773 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38774 get_identifier ("__atomic_feclearexcept"),
38775 build_function_type_list (void_type_node,
38776 NULL_TREE));
38777 TREE_PUBLIC (atomic_clear_decl) = 1;
38778 DECL_EXTERNAL (atomic_clear_decl) = 1;
38779 }
38780
38781 tree const_double = build_qualified_type (double_type_node,
38782 TYPE_QUAL_CONST);
38783 tree const_double_ptr = build_pointer_type (const_double);
38784 if (atomic_update_decl == NULL_TREE)
38785 {
38786 atomic_update_decl
38787 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38788 get_identifier ("__atomic_feupdateenv"),
38789 build_function_type_list (void_type_node,
38790 const_double_ptr,
38791 NULL_TREE));
38792 TREE_PUBLIC (atomic_update_decl) = 1;
38793 DECL_EXTERNAL (atomic_update_decl) = 1;
38794 }
38795
38796 tree fenv_var = create_tmp_var_raw (double_type_node);
38797 TREE_ADDRESSABLE (fenv_var) = 1;
38798 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38799
38800 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38801 *clear = build_call_expr (atomic_clear_decl, 0);
38802 *update = build_call_expr (atomic_update_decl, 1,
38803 fold_convert (const_double_ptr, fenv_addr));
38804 #endif
38805 return;
38806 }
38807
38808 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38809 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38810 tree call_mffs = build_call_expr (mffs, 0);
38811
38812 /* Generates the equivalent of feholdexcept (&fenv_var)
38813
38814 *fenv_var = __builtin_mffs ();
38815 double fenv_hold;
38816 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
38817 __builtin_mtfsf (0xff, fenv_hold); */
38818
38819 /* Mask to clear everything except for the rounding modes and non-IEEE
38820 arithmetic flag. */
38821 const unsigned HOST_WIDE_INT hold_exception_mask =
38822 HOST_WIDE_INT_C (0xffffffff00000007);
38823
38824 tree fenv_var = create_tmp_var_raw (double_type_node);
38825
38826 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38827
38828 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38829 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38830 build_int_cst (uint64_type_node,
38831 hold_exception_mask));
38832
38833 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38834 fenv_llu_and);
38835
38836 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38837 build_int_cst (unsigned_type_node, 0xff),
38838 fenv_hold_mtfsf);
38839
38840 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38841
38842 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38843
38844 double fenv_clear = __builtin_mffs ();
38845 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38846 __builtin_mtfsf (0xff, fenv_clear); */
38847
38848 /* Mask that keeps only the upper 32 bits, zeroing the entire FPSCR
38849 image (all of the exception, status, and mode bits). */
38850 const unsigned HOST_WIDE_INT clear_exception_mask =
38851 HOST_WIDE_INT_C (0xffffffff00000000);
38852
38853 tree fenv_clear = create_tmp_var_raw (double_type_node);
38854
38855 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
38856
38857 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
38858 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
38859 fenv_clean_llu,
38860 build_int_cst (uint64_type_node,
38861 clear_exception_mask));
38862
38863 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38864 fenv_clear_llu_and);
38865
38866 tree clear_mtfsf = build_call_expr (mtfsf, 2,
38867 build_int_cst (unsigned_type_node, 0xff),
38868 fenv_clear_mtfsf);
38869
38870 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
38871
38872 /* Generates the equivalent of feupdateenv (&fenv_var)
38873
38874 double old_fenv = __builtin_mffs ();
38875 double fenv_update;
38876 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
38877 (*(uint64_t*)fenv_var & 0x1ff80fff);
38878 __builtin_mtfsf (0xff, fenv_update); */
38879
38880 const unsigned HOST_WIDE_INT update_exception_mask =
38881 HOST_WIDE_INT_C (0xffffffff1fffff00);
38882 const unsigned HOST_WIDE_INT new_exception_mask =
38883 HOST_WIDE_INT_C (0x1ff80fff);
38884
38885 tree old_fenv = create_tmp_var_raw (double_type_node);
38886 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
38887
38888 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
38889 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
38890 build_int_cst (uint64_type_node,
38891 update_exception_mask));
38892
38893 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38894 build_int_cst (uint64_type_node,
38895 new_exception_mask));
38896
38897 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
38898 old_llu_and, new_llu_and);
38899
38900 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38901 new_llu_mask);
38902
38903 tree update_mtfsf = build_call_expr (mtfsf, 2,
38904 build_int_cst (unsigned_type_node, 0xff),
38905 fenv_update_mtfsf);
38906
38907 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
38908 }
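/* Sketch of how the middle end uses the sequences built above (per the
   documented TARGET_ATOMIC_ASSIGN_EXPAND_FENV contract, not code in
   this file): for a C11 atomic compound assignment such as

	_Atomic double d;
	...
	d += x;

   *HOLD runs before the compare-and-exchange loop, *CLEAR runs when an
   iteration fails and must be retried, and *UPDATE re-raises the
   deferred exceptions once the store succeeds.  */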
38909
38910 void
38911 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
38912 {
38913 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38914
38915 rtx_tmp0 = gen_reg_rtx (V2DFmode);
38916 rtx_tmp1 = gen_reg_rtx (V2DFmode);
38917
38918 /* The layout of the destination of the vmrgew instruction is:
38919 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
38920 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
38921 vmrgew instruction will be correct. */
38922 if (BYTES_BIG_ENDIAN)
38923 {
38924 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
38925 GEN_INT (0)));
38926 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
38927 GEN_INT (3)));
38928 }
38929 else
38930 {
38931 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
38932 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
38933 }
38934
38935 rtx_tmp2 = gen_reg_rtx (V4SFmode);
38936 rtx_tmp3 = gen_reg_rtx (V4SFmode);
38937
38938 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
38939 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
38940
38941 if (BYTES_BIG_ENDIAN)
38942 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
38943 else
38944 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
38945 }
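/* Data-flow sketch for the routine above (illustrative): with
   src1 = {a, b} and src2 = {c, d}, the xxpermdi pair produces {a, c}
   and {b, d}, xvcvdpsp converts each pair to single precision, and
   vmrgew interleaves the converted words so that dst = {a, b, c, d}.  */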
38946
38947 void
38948 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
38949 {
38950 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38951
38952 rtx_tmp0 = gen_reg_rtx (V2DImode);
38953 rtx_tmp1 = gen_reg_rtx (V2DImode);
38954
38955 /* The layout of the destination of the vmrgew instruction is:
38956 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
38957 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
38958 vmrgew instruction will be correct. */
38959 if (BYTES_BIG_ENDIAN)
38960 {
38961 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
38962 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
38963 }
38964 else
38965 {
38966 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
38967 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
38968 }
38969
38970 rtx_tmp2 = gen_reg_rtx (V4SFmode);
38971 rtx_tmp3 = gen_reg_rtx (V4SFmode);
38972
38973 if (signed_convert)
38974 {
38975 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
38976 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
38977 }
38978 else
38979 {
38980 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
38981 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
38982 }
38983
38984 if (BYTES_BIG_ENDIAN)
38985 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
38986 else
38987 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
38988 }
38989
38990 void
38991 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
38992 rtx src2)
38993 {
38994 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38995
38996 rtx_tmp0 = gen_reg_rtx (V2DFmode);
38997 rtx_tmp1 = gen_reg_rtx (V2DFmode);
38998
38999 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39000 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39001
39002 rtx_tmp2 = gen_reg_rtx (V4SImode);
39003 rtx_tmp3 = gen_reg_rtx (V4SImode);
39004
39005 if (signed_convert)
39006 {
39007 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39008 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39009 }
39010 else
39011 {
39012 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39013 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39014 }
39015
39016 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39017 }
39018
39019 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39020
39021 static bool
39022 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39023 optimization_type opt_type)
39024 {
39025 switch (op)
39026 {
39027 case rsqrt_optab:
39028 return (opt_type == OPTIMIZE_FOR_SPEED
39029 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39030
39031 default:
39032 return true;
39033 }
39034 }
39035
39036 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39037
39038 static HOST_WIDE_INT
39039 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39040 {
39041 if (TREE_CODE (exp) == STRING_CST
39042 && (STRICT_ALIGNMENT || !optimize_size))
39043 return MAX (align, BITS_PER_WORD);
39044 return align;
39045 }
39046
39047 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39048
39049 static HOST_WIDE_INT
39050 rs6000_starting_frame_offset (void)
39051 {
39052 if (FRAME_GROWS_DOWNWARD)
39053 return 0;
39054 return RS6000_STARTING_FRAME_OFFSET;
39055 }
39056 \f
39057
39058 /* Create an alias for a mangled name where we have changed the mangling (in
39059 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39060 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
39061
39062 #if TARGET_ELF && RS6000_WEAK
39063 static void
39064 rs6000_globalize_decl_name (FILE * stream, tree decl)
39065 {
39066 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39067
39068 targetm.asm_out.globalize_label (stream, name);
39069
39070 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39071 {
39072 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39073 const char *old_name;
39074
39075 ieee128_mangling_gcc_8_1 = true;
39076 lang_hooks.set_decl_assembler_name (decl);
39077 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39078 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39079 ieee128_mangling_gcc_8_1 = false;
39080
39081 if (strcmp (name, old_name) != 0)
39082 {
39083 fprintf (stream, "\t.weak %s\n", old_name);
39084 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39085 }
39086 }
39087 }
39088 #endif
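/* For example (an illustrative sketch of the aliases emitted above):
   for "void f (__ieee128)", mangled as _Z1fu9__ieee128 under the
   current scheme, the hook also emits

	.weak _Z1fU10__float128
	.set _Z1fU10__float128,_Z1fu9__ieee128

   so that objects compiled with the GCC 8.1 mangling still link.  */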
39089
39090 \f
39091 /* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
39092 function names from <foo>l to <foo>f128 if the default long double type is
39093 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39094 include file switches the names on systems that support long double as IEEE
39095 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39096 In the future, glibc will export names like __ieee128_sinf128 and we can
39097 switch to using those instead of using sinf128, which pollutes the user's
39098 namespace.
39099
39100 This will switch the names for Fortran math functions as well (which doesn't
39101 use math.h). However, Fortran needs other changes to the compiler and
39102 library before you can switch the real*16 type at compile time.
39103
39104 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39105 only do this if the default is that long double is IBM extended double, and
39106 the user asked for IEEE 128-bit. */
39107
39108 static tree
39109 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39110 {
39111 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
39112 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39113 {
39114 size_t len = IDENTIFIER_LENGTH (id);
39115 const char *name = IDENTIFIER_POINTER (id);
39116
39117 if (name[len - 1] == 'l')
39118 {
39119 bool uses_ieee128_p = false;
39120 tree type = TREE_TYPE (decl);
39121 machine_mode ret_mode = TYPE_MODE (type);
39122
39123 /* See if the function returns an IEEE 128-bit floating point type or
39124 complex type. */
39125 if (ret_mode == TFmode || ret_mode == TCmode)
39126 uses_ieee128_p = true;
39127 else
39128 {
39129 function_args_iterator args_iter;
39130 tree arg;
39131
39132 /* See if the function passes an IEEE 128-bit floating point type
39133 or complex type. */
39134 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39135 {
39136 machine_mode arg_mode = TYPE_MODE (arg);
39137 if (arg_mode == TFmode || arg_mode == TCmode)
39138 {
39139 uses_ieee128_p = true;
39140 break;
39141 }
39142 }
39143 }
39144
39145 /* If we passed or returned an IEEE 128-bit floating point type,
39146 change the name. */
39147 if (uses_ieee128_p)
39148 {
39149 char *name2 = (char *) alloca (len + 4);
39150 memcpy (name2, name, len - 1);
39151 strcpy (name2 + len - 1, "f128");
39152 id = get_identifier (name2);
39153 }
39154 }
39155 }
39156
39157 return id;
39158 }
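/* For example (illustrative): with -mabi=ieeelongdouble on a target
   whose default long double is IBM extended double, a call to
   __builtin_sinl is renamed by the hook above so that it resolves to
   "sinf128" instead of "sinl".  */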
39159
39160 \f
39161 struct gcc_target targetm = TARGET_INITIALIZER;
39162
39163 #include "gt-rs6000.h"